1) add zone in fabric
2) add disk mapping in your storage
2.1) rescan scsi on server
# cfgadm -al
3) disk must be visible via 4 paths, 2 paths in each fabric (best practice)
# mpathadm list lu
/dev/rdsk/c8t60060E80056213000000621300000030d0s2
Total Path Count: 4
Operational Path Count: 4
2) add disk mapping in your storage
2.1) rescan scsi on server
# cfgadm -al
3) disk must be visible via 4 paths, 2 paths in each fabric (best practice)
# mpathadm list lu
/dev/rdsk/c8t60060E80056213000000621300000030d0s2
Total Path Count: 4
Operational Path Count: 4
# mpathadm show lu /dev/rdsk/c8t60060E80056213000000621300000030d0s2
Logical Unit: /dev/rdsk/c8t60060E80056213000000621300000030d0s2
mpath-support: libmpscsi_vhci.so
Vendor: HITACHI
Product: OPEN-V -SUN
Revision: 6008
Name Type: unknown type
Name: 60060e80056213000000621300000030
Asymmetric: no
Current Load Balance: round-robin
Logical Unit Group ID: NA
Auto Failback: on
Auto Probing: NA
Paths:
Initiator Port Name: 10000000c980f2fc
Target Port Name: 50060e8005621371
Override Path: NA
Path State: OK
Disabled: no
Initiator Port Name: 10000000c980f2fc
Target Port Name: 50060e8005621320
Override Path: NA
Path State: OK
Disabled: no
Initiator Port Name: 10000000c97af548
Target Port Name: 50060e8005621331
Override Path: NA
Path State: OK
Disabled: no
Initiator Port Name: 10000000c97af548
Target Port Name: 50060e8005621301
Override Path: NA
Path State: OK
Disabled: no
4) update did devices
# scdidadm -r
# scdidadm -u
# scdidadm -i
check did device on both nodes
# scdidadm -L | grep 21300000130d0
24 node-n2:/dev/rdsk/c8t60060E80056213000000621300000130d0 /dev/did/rdsk/d24
24 node-n1:/dev/rdsk/c8t60060E80056213000000621300000130d0 /dev/did/rdsk/d24
5) label disk using format command
6) add disk to the necessary diskset
# metaset -s oracle-disk -a /dev/did/rdsk/d74
7) create metadevice
# metainit -s oracle-disk d81 1 1 /dev/did/rdsk/d74s0
oracle-disk/d81: Concat/Stripe is setup
# metainit -s oracle-disk d80 -m d81
oracle-disk/d80: Mirror is setup
8) create fs
newfs /dev/md/oracle-disk/rdsk/d80
9) add entry to /etc/vfstab, then try manually mounting the disk on its directory (on both nodes)
10) get current cluster configuration for disk resource
[root@KGBI-SRV003-n1:/export/apenner]# scha_resource_get -O extension -R oracle-disk -G oracle FileSystemMountPoints
STRINGARRAY
/oracle/u01
/oracle/u02
/oracle/u03
/oracle/u04
/oracle/u05
/oracle/u06
/oracle/u07
11) modify the resource
scrgadm -c -j oracle-disk -x FileSystemMountPoints="/oracle/u01,/oracle/u02,/oracle/u03,/oracle/u04,/oracle/u05,/oracle/u06,/oracle/u07,/oracle/u08"
No comments:
Post a Comment