Shared Storage Configuration for RAC on RHEL 6.4 (iSCSI + multipath + udev)

Storage server:
IP configuration:
192.168.12.30
192.168.12.40
 
Add one 100 GB disk, /dev/sdb.
Add two network adapters (two NICs) and assign an IP to each.
 
Because the goal is to build an Oracle RAC cluster, the shared disks are carved out on the storage server first.
[root@server ~]# pvcreate /dev/sdb
Physical volume "/dev/sdb" successfully created
[root@server ~]# pvs
PV VG Fmt Attr PSize PFree
/dev/sdb lvm2 a-- 100.00g 100.00g
[root@server ~]# vgcreate iscsi_store /dev/sdb
Volume group "iscsi_store" successfully created
[root@server ~]# vgs
VG #PV #LV #SN Attr VSize VFree
iscsi_store 1 0 0 wz--n- 100.00g 100.00g
[root@server ~]# lvcreate -L 10G -n ocr_vd1 iscsi_store
Logical volume "ocr_vd1" created
[root@server ~]# lvcreate -L 10G -n ocr_vd2 iscsi_store
Logical volume "ocr_vd2" created
[root@server ~]# lvcreate -L 10G -n ocr_vd3 iscsi_store
Logical volume "ocr_vd3" created
[root@server ~]# lvcreate -L 50G -n data iscsi_store
Logical volume "data" created
[root@server ~]# lvcreate -l 5119 -n arch iscsi_store
Logical volume "arch" created
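
The last LV is created with -l (a count of physical extents) rather than -L (a size) so that it takes up whatever free space remains in the volume group; with the default 4 MiB extent size, 5119 extents come to roughly 20 GB. The free extent count can be checked beforehand, for example:

# show the free extents left in the VG
vgdisplay iscsi_store | grep -i free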
 
 
[root@server ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 100G 0 disk
├─sda1 8:1 0 500M 0 part /boot
├─sda2 8:2 0 2G 0 part [SWAP]
└─sda3 8:3 0 97.5G 0 part /
sdb 8:16 0 100G 0 disk
├─iscsi_store-ocr_vd1 (dm-0) 253:0 0 10G 0 lvm
├─iscsi_store-ocr_vd2 (dm-1) 253:1 0 10G 0 lvm
├─iscsi_store-ocr_vd3 (dm-2) 253:2 0 10G 0 lvm
├─iscsi_store-data (dm-3) 253:3 0 50G 0 lvm
└─iscsi_store-arch (dm-4) 253:4 0 20G 0 lvm
sr0 11:0 1 3.5G 0 rom /mnt/sr0
 
Install the iSCSI target software on the server, then start tgtd and enable it at boot:

yum install scsi-target-utils -y
 
service tgtd start
 
chkconfig tgtd on
 
chkconfig --list | grep tgtd
 
Define the target in /etc/tgt/targets.conf and export the five LVs as backing stores:

vi /etc/tgt/targets.conf
<target iqn.2019-11.com.example.server:testtarget>
    backing-store /dev/iscsi_store/ocr_vd1
    backing-store /dev/iscsi_store/ocr_vd2
    backing-store /dev/iscsi_store/ocr_vd3
    backing-store /dev/iscsi_store/data
    backing-store /dev/iscsi_store/arch
</target>
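
A note on the tgt-admin --show output that follows: tgtd numbers the LUNs itself and always reserves LUN 0 for the controller, so the LUN order need not match the order of the backing-store lines (arch, listed last in the config, shows up as LUN 1). The same details are available from the lower-level tgtadm tool:

tgtadm --lld iscsi --mode target --op show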
 
[root@server ~]# service tgtd restart
Stopping SCSI target daemon: [ OK ]
Starting SCSI target daemon: [ OK ]
 
[root@server ~]# tgt-admin --show
Target 1: iqn.2019-11.com.example.server:testtarget
System information:
Driver: iscsi
State: ready
I_T nexus information:
LUN information:
LUN: 0
Type: controller
SCSI ID: IET 00010000
SCSI SN: beaf10
Size: 0 MB, Block size: 1
Online: Yes
Removable media: No
Prevent removal: No
Readonly: No
Backing store type: null
Backing store path: None
Backing store flags:
LUN: 1
Type: disk
SCSI ID: IET 00010001
SCSI SN: beaf11
Size: 21471 MB, Block size: 512
Online: Yes
Removable media: No
Prevent removal: No
Readonly: No
Backing store type: rdwr
Backing store path: /dev/iscsi_store/arch
Backing store flags:
LUN: 2
Type: disk
SCSI ID: IET 00010002
SCSI SN: beaf12
Size: 53687 MB, Block size: 512
Online: Yes
Removable media: No
Prevent removal: No
Readonly: No
Backing store type: rdwr
Backing store path: /dev/iscsi_store/data
Backing store flags:
LUN: 3
Type: disk
SCSI ID: IET 00010003
SCSI SN: beaf13
Size: 10737 MB, Block size: 512
Online: Yes
Removable media: No
Prevent removal: No
Readonly: No
Backing store type: rdwr
Backing store path: /dev/iscsi_store/ocr_vd1
Backing store flags:
LUN: 4
Type: disk
SCSI ID: IET 00010004
SCSI SN: beaf14
Size: 10737 MB, Block size: 512
Online: Yes
Removable media: No
Prevent removal: No
Readonly: No
Backing store type: rdwr
Backing store path: /dev/iscsi_store/ocr_vd2
Backing store flags:
LUN: 5
Type: disk
SCSI ID: IET 00010005
SCSI SN: beaf15
Size: 10737 MB, Block size: 512
Online: Yes
Removable media: No
Prevent removal: No
Readonly: No
Backing store type: rdwr
Backing store path: /dev/iscsi_store/ocr_vd3
Backing store flags:
Account information:
ACL information:
192.168.12.91
192.168.12.92
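
Before moving on to the initiators, it is worth confirming that tgtd is listening on the default iSCSI port on both portal addresses; a quick check (netstat is part of net-tools, installed by default on RHEL 6):

netstat -tln | grep 3260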
 
 
 
Clients (192.168.12.20, 192.168.12.21):
RACDB1:
[root@racdb1 ~]# yum install iscsi-initiator-utils -y
[root@racdb1 ~]# chkconfig --list|grep scsi
iscsi 0:off 1:off 2:off 3:on 4:on 5:on 6:off
iscsid 0:off 1:off 2:off 3:on 4:on 5:on 6:off
[root@racdb1 ~]# iscsiadm -m discovery -t sendtargets -p 192.168.12.30
192.168.12.30:3260,1 iqn.2019-11.com.example.server:testtarget
[root@racdb1 ~]# iscsiadm -m discovery -t sendtargets -p 192.168.12.40
192.168.12.40:3260,1 iqn.2019-11.com.example.server:testtarget
[root@racdb1 ~]# iscsiadm -m node -T iqn.2019-11.com.example.server:testtarget --login
Logging in to [iface: default, target: iqn.2019-11.com.example.server:testtarget, portal: 192.168.12.30,3260] (multiple)
Logging in to [iface: default, target: iqn.2019-11.com.example.server:testtarget, portal: 192.168.12.40,3260] (multiple)
Login to [iface: default, target: iqn.2019-11.com.example.server:testtarget, portal: 192.168.12.30,3260] successful.
Login to [iface: default, target: iqn.2019-11.com.example.server:testtarget, portal: 192.168.12.40,3260] successful.
[root@racdb1 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 100G 0 disk
├─sda1 8:1 0 500M 0 part /boot
├─sda2 8:2 0 2G 0 part [SWAP]
└─sda3 8:3 0 97.5G 0 part /
sr0 11:0 1 3.5G 0 rom /mnt/sr0
sdb 8:16 0 20G 0 disk
sdc 8:32 0 50G 0 disk
sdd 8:48 0 20G 0 disk
sdf 8:80 0 50G 0 disk
sdg 8:96 0 10G 0 disk
sde 8:64 0 10G 0 disk
sdh 8:112 0 10G 0 disk
sdi 8:128 0 10G 0 disk
sdj 8:144 0 10G 0 disk
sdk 8:160 0 10G 0 disk
[root@racdb1 ~]# iscsiadm -m node
192.168.12.30:3260,1 iqn.2019-11.com.example.server:testtarget
192.168.12.40:3260,1 iqn.2019-11.com.example.server:testtarget
[root@racdb1 ~]# ls -lrt /var/lib/iscsi/nodes/
total 4
drw------- 4 root root 4096 Nov 29 06:09 iqn.2019-11.com.example.server:testtarget
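
A note on reboots: on RHEL 6, discovered node records default to node.startup = automatic, so the iscsi service re-establishes these sessions at boot. The setting can be verified or forced per target, for example:

iscsiadm -m node -T iqn.2019-11.com.example.server:testtarget -o update -n node.startup -v automatic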
 
RACDB2:
[root@racdb2 ~]# yum install iscsi-initiator-utils -y
[root@racdb2 ~]# chkconfig --list | grep scsi
iscsi 0:off 1:off 2:off 3:on 4:on 5:on 6:off
iscsid 0:off 1:off 2:off 3:on 4:on 5:on 6:off
[root@racdb2 ~]# iscsiadm -m discovery -t sendtargets -p 192.168.12.30
192.168.12.30:3260,1 iqn.2019-11.com.example.server:testtarget
[root@racdb2 ~]# iscsiadm -m discovery -t sendtargets -p 192.168.12.40
192.168.12.40:3260,1 iqn.2019-11.com.example.server:testtarget
[root@racdb2 ~]# iscsiadm -m node -T iqn.2019-11.com.example.server:testtarget --login
Logging in to [iface: default, target: iqn.2019-11.com.example.server:testtarget, portal: 192.168.12.30,3260] (multiple)
Logging in to [iface: default, target: iqn.2019-11.com.example.server:testtarget, portal: 192.168.12.40,3260] (multiple)
Login to [iface: default, target: iqn.2019-11.com.example.server:testtarget, portal: 192.168.12.30,3260] successful.
Login to [iface: default, target: iqn.2019-11.com.example.server:testtarget, portal: 192.168.12.40,3260] successful.
[root@racdb2 ~]# lsblk
NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
sda 8:0 0 100G 0 disk
├─sda1 8:1 0 500M 0 part /boot
├─sda2 8:2 0 2G 0 part [SWAP]
└─sda3 8:3 0 97.5G 0 part /
sr0 11:0 1 3.5G 0 rom /mnt/sr0
sdb 8:16 0 20G 0 disk
sdc 8:32 0 50G 0 disk
sdd 8:48 0 20G 0 disk
sde 8:64 0 10G 0 disk
sdg 8:96 0 10G 0 disk
sdi 8:128 0 10G 0 disk
sdh 8:112 0 10G 0 disk
sdf 8:80 0 50G 0 disk
sdj 8:144 0 10G 0 disk
sdk 8:160 0 10G 0 disk
[root@racdb2 ~]# iscsiadm -m node
192.168.12.30:3260,1 iqn.2019-11.com.example.server:testtarget
192.168.12.40:3260,1 iqn.2019-11.com.example.server:testtarget
[root@racdb2 ~]# ls -lrt /var/lib/iscsi/nodes/
total 4
drw------- 4 root root 4096 Nov 29 14:25 iqn.2019-11.com.example.server:testtarget
 
 
Configure multipathing and enable the service at boot:
[root@racdb1 ~]# rpm -qa | grep device-mapper-multipath
device-mapper-multipath-libs-0.4.9-64.el6.x86_64
device-mapper-multipath-0.4.9-64.el6.x86_64
[root@racdb1 ~]# chkconfig multipathd on
[root@racdb1 ~]# chkconfig --list|grep multipathd
multipathd 0:off 1:off 2:on 3:on 4:on 5:on 6:off
 
[root@racdb2 ~]# rpm -qa | grep device-mapper-multipath
device-mapper-multipath-libs-0.4.9-64.el6.x86_64
device-mapper-multipath-0.4.9-64.el6.x86_64
[root@racdb2 ~]# chkconfig multipathd on
[root@racdb2 ~]# chkconfig --list|grep multipathd
multipathd 0:off 1:off 2:on 3:on 4:on 5:on 6:off
 
Generate the multipath configuration file:
[root@racdb1 ~]# /sbin/mpathconf --enable
[root@racdb2 ~]# /sbin/mpathconf --enable
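
mpathconf --enable writes a stock /etc/multipath.conf whose defaults include user_friendly_names yes, which is where the mpathN names below come from. Running mpathconf with no arguments prints the current state:

/sbin/mpathconf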
 
Start the service:
[root@racdb1 ~]# service multipathd status
multipathd is stopped
[root@racdb1 ~]# service multipathd start
Starting multipathd daemon: [ OK ]
 
[root@racdb2 ~]# service multipathd status
multipathd is stopped
[root@racdb2 ~]# service multipathd start
Starting multipathd daemon: [ OK ]
 
[root@racdb1 ~]# multipath -ll
mpathe (1IET 00010005) dm-4 IET,VIRTUAL-DISK
size=10G features='0' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=1 status=active
| `- 34:0:0:5 sdi 8:128 active ready running
`-+- policy='round-robin 0' prio=1 status=enabled
`- 33:0:0:5 sdk 8:160 active ready running
mpathd (1IET 00010004) dm-3 IET,VIRTUAL-DISK
size=10G features='0' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=1 status=active
| `- 34:0:0:4 sdg 8:96 active ready running
`-+- policy='round-robin 0' prio=1 status=enabled
`- 33:0:0:4 sdj 8:144 active ready running
mpathc (1IET 00010003) dm-2 IET,VIRTUAL-DISK
size=10G features='0' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=1 status=active
| `- 34:0:0:3 sde 8:64 active ready running
`-+- policy='round-robin 0' prio=1 status=enabled
`- 33:0:0:3 sdh 8:112 active ready running
mpathb (1IET 00010002) dm-1 IET,VIRTUAL-DISK
size=50G features='0' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=1 status=active
| `- 34:0:0:2 sdc 8:32 active ready running
`-+- policy='round-robin 0' prio=1 status=enabled
`- 33:0:0:2 sdf 8:80 active ready running
mpatha (1IET 00010001) dm-0 IET,VIRTUAL-DISK
size=20G features='0' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=1 status=active
| `- 34:0:0:1 sdb 8:16 active ready running
`-+- policy='round-robin 0' prio=1 status=enabled
`- 33:0:0:1 sdd 8:48 active ready running
 
[root@racdb2 ~]# multipath -ll
mpathe (1IET 00010005) dm-4 IET,VIRTUAL-DISK
size=10G features='0' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=1 status=active
| `- 33:0:0:5 sdk 8:160 active ready running
`-+- policy='round-robin 0' prio=1 status=enabled
`- 34:0:0:5 sdj 8:144 active ready running
mpathd (1IET 00010003) dm-3 IET,VIRTUAL-DISK
size=10G features='0' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=1 status=active
| `- 33:0:0:3 sdg 8:96 active ready running
`-+- policy='round-robin 0' prio=1 status=enabled
`- 34:0:0:3 sdf 8:80 active ready running
mpathc (1IET 00010004) dm-2 IET,VIRTUAL-DISK
size=10G features='0' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=1 status=active
| `- 34:0:0:4 sdh 8:112 active ready running
`-+- policy='round-robin 0' prio=1 status=enabled
`- 33:0:0:4 sdi 8:128 active ready running
mpathb (1IET 00010002) dm-1 IET,VIRTUAL-DISK
size=50G features='0' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=1 status=active
| `- 34:0:0:2 sdc 8:32 active ready running
`-+- policy='round-robin 0' prio=1 status=enabled
`- 33:0:0:2 sde 8:64 active ready running
mpatha (1IET 00010001) dm-0 IET,VIRTUAL-DISK
size=20G features='0' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=1 status=active
| `- 34:0:0:1 sdb 8:16 active ready running
`-+- policy='round-robin 0' prio=1 status=enabled
`- 33:0:0:1 sdd 8:48 active ready running
 
 
 
Edit the multipath configuration file. Note in the multipath -ll output above that the user-friendly mpathN names are not stable across nodes: on racdb1, mpathc is 1IET 00010003 and mpathd is 1IET 00010004, while on racdb2 they are swapped. That is why the devices should be pinned by WWID, here with aliases and below with udev. Add a multipaths section to /etc/multipath.conf on both nodes:
multipaths {
    multipath {
        wwid 1IET_00010001
        alias arch
        path_grouping_policy failover
    }
    multipath {
        wwid 1IET_00010002
        alias data
        path_grouping_policy failover
    }
    multipath {
        wwid 1IET_00010003
        alias ocr_vd1
        path_grouping_policy failover
    }
    multipath {
        wwid 1IET_00010004
        alias ocr_vd2
        path_grouping_policy failover
    }
    multipath {
        wwid 1IET_00010005
        alias ocr_vd3
        path_grouping_policy failover
    }
}
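
The same file must be placed on both nodes and the running maps reloaded before the aliases take effect; for example (flushing and renaming is safe at this point because nothing is using the maps yet):

# after copying /etc/multipath.conf to the other node, run on each node:
service multipathd reload
multipath -r    # reload the device maps so the aliases are applied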
 
 
 
 
Bind the disk ownership and permissions with udev:
[root@racdb1 /]# vi /etc/udev/rules.d/99-oracle-asmdevices.rules
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-1IET\x20\x20\x20\x20\x2000010005",OWNER="grid",GROUP="asmadmin",MODE="0660"
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-1IET\x20\x20\x20\x20\x2000010001",OWNER="grid",GROUP="asmadmin",MODE="0660"
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-1IET\x20\x20\x20\x20\x2000010003",OWNER="grid",GROUP="asmadmin",MODE="0660"
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-1IET\x20\x20\x20\x20\x2000010004",OWNER="grid",GROUP="asmadmin",MODE="0660"
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-1IET\x20\x20\x20\x20\x2000010002",OWNER="grid",GROUP="asmadmin",MODE="0660"
 
Configure the same udev rules file on node 2.
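
The \x20 sequences are escaped spaces: the SCSI vendor field is padded to eight characters ("IET" plus five spaces), so each WWID is literally "1IET     00010001" and the DM_UUID pattern must match it byte for byte. To apply the rules without a reboot and to see exactly what a rule has to match, something like the following can be run on each node (a sketch; start_udev is the traditional RHEL 6 alternative):

# re-read the rules and replay events for the block devices
udevadm control --reload-rules
udevadm trigger --subsystem-match=block
# show the DM_UUID a rule must match
udevadm info --query=property --name=/dev/dm-0 | grep DM_UUID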
 
[root@racdb1 ~]# ls -lrt /dev/dm-*
brw-rw---- 1 grid asmadmin 253, 3 Dec 2 17:55 /dev/dm-3
brw-rw---- 1 grid asmadmin 253, 0 Dec 2 17:55 /dev/dm-0
brw-rw---- 1 grid asmadmin 253, 4 Dec 2 17:55 /dev/dm-4
brw-rw---- 1 grid asmadmin 253, 1 Dec 2 17:55 /dev/dm-1
brw-rw---- 1 grid asmadmin 253, 2 Dec 2 17:55 /dev/dm-2
 
[root@racdb2 ~]# ls -lrt /dev/dm-*
brw-rw---- 1 grid asmadmin 253, 2 Dec 3 10:53 /dev/dm-2
brw-rw---- 1 grid asmadmin 253, 4 Dec 3 10:53 /dev/dm-4
brw-rw---- 1 grid asmadmin 253, 1 Dec 3 10:53 /dev/dm-1
brw-rw---- 1 grid asmadmin 253, 0 Dec 3 10:53 /dev/dm-0
brw-rw---- 1 grid asmadmin 253, 3 Dec 3 10:53 /dev/dm-3
 
[root@racdb1 ~]# ls -lrt /dev/mapper/*
lrwxrwxrwx 1 root root 7 Dec 1 23:51 /dev/mapper/mpathd -> ../dm-4
lrwxrwxrwx 1 root root 7 Dec 1 23:51 /dev/mapper/mpathc -> ../dm-1
lrwxrwxrwx 1 root root 7 Dec 1 23:51 /dev/mapper/mpathb -> ../dm-0
lrwxrwxrwx 1 root root 7 Dec 1 23:51 /dev/mapper/mpatha -> ../dm-3
lrwxrwxrwx 1 root root 7 Dec 1 23:51 /dev/mapper/mpathe -> ../dm-2
 
[root@racdb2 ~]# ls -lrt /dev/mapper/*
lrwxrwxrwx 1 root root 7 Dec 2 19:07 /dev/mapper/mpathe -> ../dm-2
lrwxrwxrwx 1 root root 7 Dec 2 19:07 /dev/mapper/mpathb -> ../dm-3
lrwxrwxrwx 1 root root 7 Dec 2 19:07 /dev/mapper/mpathd -> ../dm-1
lrwxrwxrwx 1 root root 7 Dec 2 19:07 /dev/mapper/mpatha -> ../dm-0
lrwxrwxrwx 1 root root 7 Dec 2 19:07 /dev/mapper/mpathc -> ../dm-4
 
The multipath configuration above assigned aliases to the disks, yet the /dev/mapper/ names did not change; at the time I guessed it was a problem with the virtual machines' disk UUIDs. A more likely cause is the WWID format: the multipaths entries use underscores (1IET_00010001), while the actual WWIDs contain spaces (1IET     00010001, exactly as the DM_UUID values in the udev rules show), and multipath applies an alias only when the wwid string matches exactly.
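
Under that assumption, a sketch of corrected entries (the embedded spaces require quoting; the exact WWID for a path device can be printed with scsi_id first):

# print the WWID multipath sees for one of the path devices
/lib/udev/scsi_id --whitelisted --device=/dev/sdb

multipaths {
    multipath {
        wwid "1IET     00010001"
        alias arch
        path_grouping_policy failover
    }
    # ...and likewise for the other four WWIDs
}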
 
