Server configuration
I have already installed the cluster software, and my architecture looks like this:
[architecture diagram]
I also have two shared disks: one for the quorum device and one for the Oracle database.
The partition map of the system disk:
root@sc01-n1:~ # prtvtoc /dev/rdsk/c2t0d0s2
* /dev/rdsk/c2t0d0s2 partition map
*
* Dimensions:
* 512 bytes/sector
* 848 sectors/track
* 24 tracks/cylinder
* 20352 sectors/cylinder
* 14089 cylinders
* 14087 accessible cylinders
*
* Flags:
* 1: unmountable
* 10: read-only
*
* First Sector Last
* Partition Tag Flags Sector Count Sector Mount Directory
0 2 00 41945472 33967488 75912959 /
1 3 01 0 41945472 41945471
2 5 00 0 286698624 286698623
3 7 00 75912960 83890944 159803903 /var
5 0 00 159803904 83890944 243694847 /opt
6 0 00 243694848 1058304 244753151
7 0 00 244753152 41945472 286698623
Create Metaset
First, create the metadevice state database (metadb) on both cluster nodes; for this purpose I have slice 6, 512 MB in size:
root@sc01-n1:~ # metadb -fac 3 c2t0d0s6
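To confirm that the replicas were created, list the state database (run the same metadb command on sc01-n2 as well):
root@sc01-n1:~ # metadb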
The next step is to create a metaset with two host members, sc01-n1 and sc01-n2:
root@sc01-n1:~ # metaset -s testdb-disk -a -h sc01-n1 sc01-n2
Then add the shared disk to the set:
root@sc01-n1:~ # metaset -s testdb-disk -a /dev/did/rdsk/d12
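At any point you can check the set's hosts and drives:
root@sc01-n1:~ # metaset -s testdb-disk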
Then create the metadevices in this set, a concat/stripe and a mirror on top of it:
root@sc01-n1:~ # metainit -s testdb-disk d11 1 1 /dev/did/rdsk/d12s0
testdb-disk/d11: Concat/Stripe is setup
root@sc01-n1:~ # metainit -s testdb-disk d10 -m d11
testdb-disk/d10: Mirror is setup
For recovery purposes, save the configuration to md.tab:
root@sc01-n1:~ # metastat -s testdb-disk -p >> /etc/lvm/md.tab
root@sc01-n1:~ # cat /etc/lvm/md.tab | egrep -v "^$|^#"
testdb-disk/d10 -m testdb-disk/d11 1
testdb-disk/d11 1 1 /dev/did/rdsk/d12s0
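If the metadevice configuration is ever lost, these md.tab entries let you recreate it; a sketch, assuming the set and its disk still exist:
root@sc01-n1:~ # metainit -s testdb-disk -a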
Create a new file system. If your LUN is larger than 2 TB, use the -T flag with the newfs command (see the example after the output below):
root@sc01-n1:~ # newfs /dev/md/testdb-disk/rdsk/d10
newfs: /dev/md/testdb-disk/rdsk/d10 last mounted as /u01
newfs: construct a new file system /dev/md/testdb-disk/rdsk/d10: (y/n)? y
Warning: 4096 sector(s) in last cylinder unallocated
/dev/md/testdb-disk/rdsk/d10: 262123520 sectors in 42664 cylinders of 48 tracks, 128 sectors
127990.0MB in 2667 cyl groups (16 c/g, 48.00MB/g, 5824 i/g)
super-block backups (for fsck -F ufs -o b=#) at:
32, 98464, 196896, 295328, 393760, 492192, 590624, 689056, 787488, 885920,
Initializing cylinder groups:
.....................................................
super-block backups for last 10 cylinder groups at:
261197984, 261296416, 261394848, 261493280, 261591712, 261690144, 261788576,
261887008, 261985440, 262083872
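For a LUN larger than 2 TB the same step would need the multi-terabyte UFS flag mentioned above; for our mirror it would look like this:
root@sc01-n1:~ # newfs -T /dev/md/testdb-disk/rdsk/d10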
On both nodes, add an entry to the vfstab file (mount-at-boot stays "no" because the cluster will mount the file system):
# ORACLE HA CLUSTER
/dev/md/testdb-disk/dsk/d10 /dev/md/testdb-disk/rdsk/d10 /u01 ufs - no forcedirectio,logging
On both nodes, create the /u01 mount point:
root@sc01-n1:~ # mkdir /u01
root@sc01-n2:~ # mkdir /u01
Try mounting the file system manually on each node:
root@sc01-n1:~ # mount /u01
root@sc01-n1:~ # umount /u01
root@sc01-n1:~ # cldevicegroup switch -n sc01-n2 testdb-disk
root@sc01-n2:~ # mount /u01
root@sc01-n2:~ # umount /u01
root@sc01-n2:~ # cldevicegroup switch -n sc01-n1 testdb-disk
Mount /u01 on sc01-n1 again so the ownership change applies to the file system root rather than the empty mount point, then hand it over to the oracle user:
root@sc01-n1:~ # mount /u01
root@sc01-n1:~ # chown -R oracle:dba /u01
Create Oracle Database
Switch to the oracle user and create a parameter file like this:
oracle@sc01-n1:~/app/product/11.2.0/dbs > cat inittestdb.ora
db_name='testdb'
memory_target=8G
processes = 250
audit_trail ='db'
db_block_size=8192
db_recovery_file_dest='/u01/flash_recovery_area'
db_recovery_file_dest_size=8G
dispatchers='(PROTOCOL=TCP) (SERVICE=ORCLXDB)'
open_cursors=300
remote_login_passwordfile='EXCLUSIVE'
undo_tablespace='UNDOTBS1'
control_files = ('/u01/testdb/testdb_control1', '/u01/testdb/testdb_control2')
compatible ='11.2.0'
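This setup keeps a plain pfile. If you prefer an spfile, you can generate one from it once the instance is up; note that $ORACLE_HOME/dbs is local to each node, so it must be copied over like the other files later:
SQL> create spfile from pfile='/opt/oracle/app/product/11.2.0/dbs/inittestdb.ora';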
Create the password file:
oracle@sc01-n1:~/app/product/11.2.0/dbs > orapwd file=orapwtestdb entries=10 password=123123ora
Create the directories for the database. They live on the shared /u01, so this only needs to be done on the node where /u01 is currently mounted:
oracle@sc01-n1:~/app/product/11.2.0/dbs > mkdir -p /u01/flash_recovery_area
oracle@sc01-n1:~/app/product/11.2.0/dbs > mkdir -p /u01/testdb/
Create the new database from a script:
root@sc01-n1:~ # cat /tmp/create_database.sql
CREATE DATABASE testdb
USER SYS IDENTIFIED BY "sysorapassword"
USER SYSTEM IDENTIFIED BY "systemorapassword"
DATAFILE '/u01/testdb/SYSTEM01.DBF' SIZE 325M REUSE AUTOEXTEND ON NEXT 10240K MAXSIZE UNLIMITED
SYSAUX DATAFILE '/u01/testdb/SYSAUX01.DAT' SIZE 120M REUSE AUTOEXTEND ON NEXT 5M MAXSIZE 2048M
DEFAULT TABLESPACE USERS DATAFILE '/u01/testdb/USERS01.DBF' SIZE 50M REUSE AUTOEXTEND ON MAXSIZE UNLIMITED
DEFAULT TEMPORARY TABLESPACE TEMP TEMPFILE '/u01/testdb/TEMP01.DBF' SIZE 40M REUSE AUTOEXTEND ON NEXT 640K MAXSIZE UNLIMITED
UNDO TABLESPACE "UNDOTBS1" DATAFILE '/u01/testdb/UNDOTBS01.DBF'
SIZE 200M REUSE AUTOEXTEND ON NEXT 5120K MAXSIZE UNLIMITED
CHARACTER SET AL32UTF8
NATIONAL CHARACTER SET AL16UTF16
LOGFILE '/u01/testdb/REDO01.LOG' SIZE 100M REUSE,
'/u01/testdb/REDO02.LOG' SIZE 100M REUSE,
'/u01/testdb/REDO03.LOG' SIZE 100M REUSE
EXTENT MANAGEMENT LOCAL;
oracle@sc01-n1:~ > sqlplus / as sysdba
SQL*Plus: Release 11.2.0.3.0 Production on Wed Aug 28 11:52:53 2013
Copyright (c) 1982, 2011, Oracle. All rights reserved.
Connected to an idle instance.
SQL> startup nomount;
ORACLE instance started.
Total System Global Area 8554127360 bytes
Fixed Size 2176432 bytes
Variable Size 4362078800 bytes
Database Buffers 4143972352 bytes
Redo Buffers 45899776 bytes
SQL>
SQL> @/tmp/create_database.sql
Database created.
Run the following scripts to complete the database creation:
@?/rdbms/admin/catalog.sql;
@?/rdbms/admin/catproc.sql;
Create Listener
oracle@sc01-n1:~/app/product/11.2.0/network/admin > cat listener.ora
SID_LIST_LISTENER_TESTDB =
  (SID_LIST =
    (SID_DESC =
      (ORACLE_HOME = /opt/oracle/app/product/11.2.0)
      (SID_NAME = testdb)
    )
  )

LISTENER_TESTDB =
  (DESCRIPTION_LIST =
    (DESCRIPTION =
      (ADDRESS_LIST =
        (ADDRESS = (PROTOCOL = TCP)(HOST = 172.28.128.215)(PORT = 1521))
      )
    )
  )
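The cluster agent will start and stop this listener once the resource is created, but you can test the definition manually while the testdb-host address (172.28.128.215) is plumbed on the node:
oracle@sc01-n1:~ > lsnrctl start LISTENER_TESTDB
oracle@sc01-n1:~ > lsnrctl status LISTENER_TESTDB
oracle@sc01-n1:~ > lsnrctl stop LISTENER_TESTDB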
Copy the configuration files to the second node:
oracle@sc01-n1:~/app/product/11.2.0/network/admin > scp $ORACLE_HOME/dbs/* oracle@sc01-n2:$ORACLE_HOME/dbs
Password:
hc_testdb.dat 100% |**************************************| 1544 00:00
init.ora 100% |*****************************************| 2851 00:00
inittestdb.ora 100% |************************************| 396 00:00
lkTESTDB 100% |***********************************| 24 00:00
orapwtestdb 100% |**********************************| 2560 00:00
oracle@sc01-n1:~/app/product/11.2.0/network/admin > scp /opt/oracle/app/product/11.2.0/network/admin/* oracle@sc01-n2:$ORACLE_HOME/network/admin
Password:
listener.ora 100% |*************************************| 315 00:00
/opt/oracle/app/product/11.2.0/network/admin/samples: not a regular file
shrept.lst 100% |**********************************************| 205 00:00
Remove the files that are unnecessary on the second node: hc_testdb.dat and lkTESTDB.
Stop the database and try to run it on the other node:
oracle@sc01-n1:~/app/product/11.2.0/network/admin > sqlplus / as sysdba
SQL*Plus: Release 11.2.0.3.0 Production on Wed Aug 28 12:29:43 2013
Copyright (c) 1982, 2011, Oracle. All rights reserved.
Connected to:
Oracle Database 11g Enterprise Edition Release 11.2.0.3.0 - 64bit Production
With the Partitioning, OLAP, Data Mining and Real Application Testing options
SQL> shutdown immediate;
Database closed.
Database dismounted.
ORACLE instance shut down.
SQL> Disconnected from Oracle Database 11g Enterprise Edition Release 11.2.0.3.0 - 64bit Production
With the Partitioning, OLAP, Data Mining and Real Application Testing options
oracle@sc01-n1:~/app/product/11.2.0/network/admin > logout
root@sc01-n1:~ # umount /u01
root@sc01-n1:~ # cldevicegroup switch -n sc01-n2 testdb-disk
root@sc01-n2:~ # mount /u01
root@sc01-n2:~ # su - oracle
oracle@sc01-n2:~ > sqlplus / as sysdba
SQL*Plus: Release 11.2.0.3.0 Production on Wed Aug 28 12:33:36 2013
Copyright (c) 1982, 2011, Oracle. All rights reserved.
Connected to an idle instance.
SQL> startup;
ORACLE instance started.
Total System Global Area 8554127360 bytes
Fixed Size 2176432 bytes
Variable Size 4362078800 bytes
Database Buffers 4143972352 bytes
Redo Buffers 45899776 bytes
Database mounted.
Database opened.
SQL>
Switch the testdb-disk group back to node sc01-n1:
SQL> shutdown immediate;
Database closed.
Database dismounted.
ORACLE instance shut down.
SQL> Disconnected from Oracle Database 11g Enterprise Edition Release 11.2.0.3.0 - 64bit Production
With the Partitioning, OLAP, Data Mining and Real Application Testing options
oracle@sc01-n2:~ > logout
root@sc01-n2:~ # umount /u01
root@sc01-n2:~ # cldevicegroup switch -n sc01-n1 testdb-disk
Add the host entry to /etc/hosts on both nodes:
172.28.128.215 testdb-host # cluster ip address for database testdb
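Verify that the name resolves the same way on both nodes:
root@sc01-n1:~ # getent hosts testdb-host
root@sc01-n2:~ # getent hosts testdb-host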
Start up the database and add a user for fault monitoring:
SQL> create user scmon identified by "scmon";
User created.
SQL> alter user scmon default tablespace system quota 1m on system;
User altered.
SQL> grant select on v_$sysstat to scmon;
Grant succeeded.
SQL> grant select on v_$archive_dest to scmon;
Grant succeeded.
SQL> grant select on v_$database to scmon;
Grant succeeded.
SQL> grant create session to scmon;
Grant succeeded.
SQL> grant create table to scmon;
Grant succeeded.
SQL> Disconnected from Oracle Database 11g Enterprise Edition Release 11.2.0.3.0 - 64bit Production
With the Partitioning, OLAP, Data Mining and Real Application Testing options
oracle@sc01-n1:~ >
For the further steps we need to know the following:
1) ORACLE_HOME (here read from the environment of a running oracle process; 29559 is its PID)
root@sc01-n1:~ # pargs -e 29559 | grep ORACLE_HOME
envp[19]: ORACLE_HOME=/opt/oracle/app/product/11.2.0
2) ORACLE_SID
testdb
3) Alert_log_file
/opt/oracle/app/product/diag/rdbms/testdb/testdb/trace/alert_testdb.log
SQL> select value from v$parameter where name = 'background_dump_dest';
VALUE
--------------------------------------------------------------------------------
/opt/oracle/app/product/diag/rdbms/testdb/testdb/trace
4) Connect_string
scmon/scmon
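Before handing this account to the cluster fault monitor, it is worth a quick sanity check that it can log in and read one of the granted views:
oracle@sc01-n1:~ > sqlplus scmon/scmon
SQL> select count(*) from v$sysstat;
SQL> exit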
Register and Configure Sun Cluster HA
root@sc01-n1:~ # scsetup
>>> Initial Cluster Setup <<<
This program has detected that the cluster "installmode" attribute is
still enabled. As such, certain initial cluster setup steps will be
performed at this time. This includes adding any necessary quorum
devices, then resetting both the quorum vote counts and the
"installmode" property.
Please do not proceed if any additional nodes have yet to join the
cluster.
Is it okay to continue (yes/no) [yes]?
Do you want to add any quorum devices (yes/no) [yes]?
Following are supported Quorum Devices types in Oracle Solaris
Cluster. Please refer to Oracle Solaris Cluster documentation for
detailed information on these supported quorum device topologies.
What is the type of device you want to use?
1) Directly attached shared disk
2) Network Attached Storage (NAS) from Network Appliance
3) Quorum Server
q) Return to the quorum menu
Option: 1
>>> Add a SCSI Quorum Disk <<<
A SCSI quorum device is considered to be any Oracle Solaris Cluster
supported attached storage which connected to two or more nodes of the
cluster. Dual-ported SCSI-2 disks may be used as quorum devices in
two-node clusters. However, clusters with more than two nodes require
that SCSI-3 PGR disks be used for all disks with more than two
node-to-disk paths.
You can use a disk containing user data or one that is a member of a
device group as a quorum device.
For more information on supported quorum device topologies, see the
Oracle Solaris Cluster documentation.
Is it okay to continue (yes/no) [yes]?
Which global device do you want to use (d<N>)? d3
Is it okay to proceed with the update (yes/no) [yes]?
scconf -a -q globaldev=d3
Command completed successfully.
Press Enter to continue:
Do you want to add another quorum device (yes/no) [yes]? no
Once the "installmode" property has been reset, this program will skip
"Initial Cluster Setup" each time it is run again in the future.
However, quorum devices can always be added to the cluster using the
regular menu options. Resetting this property fully activates quorum
settings and is necessary for the normal and safe operation of the
cluster.
Is it okay to reset "installmode" (yes/no) [yes]?
scconf -c -q reset
scconf -a -T node=.
Cluster initialization is complete.
Type ENTER to proceed to the main menu:
*** Main Menu ***
Please select from one of the following options:
1) Quorum
2) Resource groups
3) Data Services
4) Cluster interconnect
5) Device groups and volumes
6) Private hostnames
7) New nodes
8) Other cluster tasks
?) Help with menu options
q) Quit
Option: q
Register the necessary resource types:
root@sc01-n1:~ # clresourcetype register SUNW.oracle_listener
root@sc01-n1:~ # clresourcetype register SUNW.oracle_server
root@sc01-n1:~ # clresourcetype register SUNW.HAStoragePlus
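Confirm the types are registered:
root@sc01-n1:~ # clresourcetype list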
Create a new resource group:
root@sc01-n1:~ # clrg create testdb
root@sc01-n1:~ # clrg status
=== Cluster Resource Groups ===
Group Name    Node Name    Suspended    Status
----------    ---------    ---------    ------
testdb        sc01-n1      No           Unmanaged
              sc01-n2      No           Unmanaged
Register the logical hostname:
root@sc01-n1:~ # clreslogicalhostname create -g testdb testdb-host
Register the storage resource:
root@sc01-n1:~ # clrs create -g testdb -t SUNW.HAStoragePlus -p FilesystemMountPoints='/u01' -p AffinityOn=TRUE testdb-disk
Bring the resource group online (the -M flag also puts the group under cluster management):
root@sc01-n1:~ # clrg online -M testdb
Create the Oracle application resources:
root@sc01-n1:~ # clresource create -g testdb \
root@sc01-n1:~ > -t SUNW.oracle_server \
root@sc01-n1:~ > -p Connect_string=scmon/scmon \
root@sc01-n1:~ > -p ORACLE_SID=testdb \
root@sc01-n1:~ > -p ORACLE_HOME=/opt/oracle/app/product/11.2.0 \
root@sc01-n1:~ > -p Alert_log_file=/opt/oracle/app/product/diag/rdbms/testdb/testdb/trace/alert_testdb.log \
root@sc01-n1:~ > -p Resource_dependencies=testdb-disk,testdb-host testdb-oracle
root@sc01-n1:~ # clresource create -g testdb \
root@sc01-n1:~ > -t SUNW.oracle_listener \
root@sc01-n1:~ > -p LISTENER_NAME=LISTENER_TESTDB \
root@sc01-n1:~ > -p ORACLE_HOME=/opt/oracle/app/product/11.2.0 \
root@sc01-n1:~ > -p resource_dependencies=testdb-disk,testdb-host testdb-listener
root@sc01-n1:~ # clrg status
=== Cluster Resource Groups ===
Group Name    Node Name    Suspended    Status
----------    ---------    ---------    ------
testdb        sc01-n1      No           Online
              sc01-n2      No           Offline
root@sc01-n1:~ # clrs show
=== Resources ===
Resource: testdb-host
Type: SUNW.LogicalHostname:4
Type_version: 4
Group: testdb
R_description:
Resource_project_name: default
Enabled{sc01-n1}: True
Enabled{sc01-n2}: True
Monitored{sc01-n1}: True
Monitored{sc01-n2}: True
Resource: testdb-disk
Type: SUNW.HAStoragePlus:9
Type_version: 9
Group: testdb
R_description:
Resource_project_name: default
Enabled{sc01-n1}: True
Enabled{sc01-n2}: True
Monitored{sc01-n1}: True
Monitored{sc01-n2}: True
Resource: testdb-oracle
Type: SUNW.oracle_server:7
Type_version: 7
Group: testdb
R_description:
Resource_project_name: default
Enabled{sc01-n1}: True
Enabled{sc01-n2}: True
Monitored{sc01-n1}: True
Monitored{sc01-n2}: True
Resource: testdb-listener
Type: SUNW.oracle_listener:5
Type_version: 5
Group: testdb
R_description:
Resource_project_name: default
Enabled{sc01-n1}: True
Enabled{sc01-n2}: True
Monitored{sc01-n1}: True
Monitored{sc01-n2}: True
Try to switch the resource group between the nodes:
root@sc01-n1:~ # scswitch -z -g testdb -h sc01-n2
root@sc01-n1:~ # scswitch -z -g testdb -h sc01-n1
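scswitch is the older interface; the same failover test can be done with the newer command set:
root@sc01-n1:~ # clrg switch -n sc01-n2 testdb
root@sc01-n1:~ # clrg switch -n sc01-n1 testdb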