RHEL6.8 Oracle RAC 11g R2 ASM磁盘在线扩容
1.使两个节点从OS级别识别到新添加的LUN,dm-11、dm-12为新识别的LUN
多路径配置
cat /etc/multipath.conf
defaults {
polling_interval 10
user_friendly_names no
find_multipaths yes
}
devices {
device {
vendor "IBM"
product "2145"
path_grouping_policy "group_by_prio"
path_selector "round-robin 0"
# path_selector "service-time 0" # Used by Red Hat 7.x
prio "alua"
path_checker "tur"
failback "immediate"
no_path_retry 5
rr_weight uniform
rr_min_io_rq "1"
dev_loss_tmo 120
}
}
multipaths {
multipath {
wwid 360050768018107c75800000000000095
alias mpatha
}
multipath {
wwid 360050768018107c75800000000000094
alias mpathb
}
multipath {
wwid 360050768018107c7580000000000009e
alias mpathc
}
multipath {
wwid 360050768018107c758000000000000ae
alias mpathd
}
multipath {
wwid 360050768018107c758000000000000ef
alias mpathe
}
multipath {
wwid 360050768018107c758000000000000f0
alias mpathf
}
}
# multipath -ll
mpathd (360050768018107c758000000000000ae) dm-5 IBM,2145
size=300G features='1 queue_if_no_path' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=50 status=active
| |- 1:0:1:3 sdg 8:96 active ready running
| `- 4:0:1:3 sdi 8:128 active ready running
`-+- policy='round-robin 0' prio=10 status=enabled
|- 1:0:3:3 sdo 8:224 active ready running
`- 4:0:3:3 sdq 65:0 active ready running
mpathc (360050768018107c7580000000000009e) dm-4 IBM,2145
size=300G features='1 queue_if_no_path' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=50 status=active
| |- 1:0:3:2 sdm 8:192 active ready running
| `- 4:0:3:2 sdp 8:240 active ready running
`-+- policy='round-robin 0' prio=10 status=enabled
|- 4:0:1:2 sdh 8:112 active ready running
`- 1:0:1:2 sde 8:64 active ready running
mpathf (360050768018107c758000000000000f0) dm-12 IBM,2145
size=300G features='1 queue_if_no_path' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=50 status=active
| |- 1:0:3:5 sdu 65:64 active ready running
| `- 4:0:3:5 sdy 65:128 active ready running
`-+- policy='round-robin 0' prio=10 status=enabled
|- 1:0:1:5 sds 65:32 active ready running
`- 4:0:1:5 sdw 65:96 active ready running
mpathb (360050768018107c75800000000000094) dm-2 IBM,2145
size=1.0G features='1 queue_if_no_path' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=50 status=active
| |- 1:0:3:0 sdj 8:144 active ready running
| `- 4:0:3:0 sdk 8:160 active ready running
`-+- policy='round-robin 0' prio=10 status=enabled
|- 1:0:1:0 sdb 8:16 active ready running
`- 4:0:1:0 sdc 8:32 active ready running
mpathe (360050768018107c758000000000000ef) dm-11 IBM,2145
size=300G features='1 queue_if_no_path' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=50 status=active
| |- 1:0:1:4 sdr 65:16 active ready running
| `- 4:0:1:4 sdv 65:80 active ready running
`-+- policy='round-robin 0' prio=10 status=enabled
|- 1:0:3:4 sdt 65:48 active ready running
`- 4:0:3:4 sdx 65:112 active ready running
mpatha (360050768018107c75800000000000095) dm-3 IBM,2145
size=300G features='1 queue_if_no_path' hwhandler='0' wp=rw
|-+- policy='round-robin 0' prio=50 status=active
| |- 1:0:1:1 sdd 8:48 active ready running
| `- 4:0:1:1 sdf 8:80 active ready running
`-+- policy='round-robin 0' prio=10 status=enabled
|- 1:0:3:1 sdl 8:176 active ready running
`- 4:0:3:1 sdn 8:208 active ready running
存储lun
ll /dev/dm-*
brw-rw---- 1 root disk 253, 0 Feb 3 20:06 /dev/dm-0
brw-rw---- 1 root disk 253, 1 Feb 3 20:06 /dev/dm-1
brw-rw---- 1 root disk 253, 10 Feb 3 20:06 /dev/dm-10
brw-rw---- 1 root disk 253, 11 Feb 3 22:06 /dev/dm-11
brw-rw---- 1 root disk 253, 12 Feb 3 22:06 /dev/dm-12
brw-rw---- 1 grid asmadmin 253, 2 Feb 3 22:06 /dev/dm-2
brw-rw---- 1 grid asmadmin 253, 3 Feb 3 22:06 /dev/dm-3
brw-rw---- 1 grid asmadmin 253, 4 Feb 3 22:06 /dev/dm-4
brw-rw---- 1 grid asmadmin 253, 5 Feb 3 22:06 /dev/dm-5
ll /dev/mapper/*
lrwxrwxrwx 1 root root 8 Feb 3 22:03 /dev/mapper/mpathe -> ../dm-11
lrwxrwxrwx 1 root root 8 Feb 3 22:03 /dev/mapper/mpathf -> ../dm-12
lrwxrwxrwx 1 root root 7 Feb 3 22:03 /dev/mapper/mpatha -> ../dm-3
lrwxrwxrwx 1 root root 7 Feb 3 22:01 /dev/mapper/mpathb -> ../dm-2
lrwxrwxrwx 1 root root 7 Feb 3 22:01 /dev/mapper/mpathc -> ../dm-4
lrwxrwxrwx 1 root root 7 Feb 3 22:03 /dev/mapper/mpathd -> ../dm-5
2. 将新识别的dm-11、dm-12的属主设置为grid,两个节点都要设置
chown grid:asmadmin /dev/dm-11
chown grid:asmadmin /dev/dm-12
3.绑定设备,修改udev的rules文件/etc/udev/rules.d/12-dm-permissions.rules
(不同系统文件名字会有不同),为新增LUN添加mpathe、mpathf两行,两个节点都要操作
[grid@tkhoadb2 ~]$ cat /etc/udev/rules.d/12-dm-permissions.rules
ENV{DM_NAME}=="mpatha", OWNER:="grid", GROUP:="asmadmin", MODE:="660", SYMLINK+="asm-$env{DM_NAME}"
ENV{DM_NAME}=="mpathb", OWNER:="grid", GROUP:="asmadmin", MODE:="660", SYMLINK+="asm-$env{DM_NAME}"
ENV{DM_NAME}=="mpathc", OWNER:="grid", GROUP:="asmadmin", MODE:="660", SYMLINK+="asm-$env{DM_NAME}"
ENV{DM_NAME}=="mpathe", OWNER:="grid", GROUP:="asmadmin", MODE:="660", SYMLINK+="asm-$env{DM_NAME}"
ENV{DM_NAME}=="mpathf", OWNER:="grid", GROUP:="asmadmin", MODE:="660", SYMLINK+="asm-$env{DM_NAME}"
4.分别在RAC的两个节点,载入配置文件,使其规则生效
/sbin/udevadm control --reload-rules
/sbin/udevadm trigger --type=devices --action=change
执行后检查磁盘属主、属组是否正确(ls -l /dev/dm-*)
*注意:业务运行期间,禁止通过执行/sbin/start_udev使规则生效!
5.为磁盘组在线新增磁盘
若新建磁盘组,可运行ASMCA命令,创建DG,即可识别新添加的disk,选择CREATE DG.
或者create diskgroup DATA2 external redundancy disk '/dev/mapper/mpathe';
grid用户登录ASM实例
[grid@tkhoadb2 ~]$ sqlplus / as sysasm
5.1确认两个节点都能够识别
select name, path, mode_status, state, disk_number, failgroup from v$asm_disk;
NAME PATH MODE_STATUS STATE DISK_NUMBER FAILGROUP
-------------------- -------------------------------------------------- -------------- ---------------- -----------
/dev/mapper/mpathf ONLINE NORMAL 0
/dev/mapper/mpathe ONLINE NORMAL 1
DATA_0001 /dev/mapper/mpathd ONLINE NORMAL 1 DATA_0001
OCR_0000 /dev/mapper/mpathb ONLINE NORMAL 0 OCR_0000
ARCH_0000 /dev/mapper/mpathc ONLINE NORMAL 0 ARCH_0000
DATA_0000 /dev/mapper/mpatha ONLINE NORMAL 0 DATA_0000
6 rows selected.
磁盘组大小
SQL> select GROUP_NUMBER,NAME,TOTAL_MB,FREE_MB from v$asm_diskgroup;
GROUP_NUMBER NAME TOTAL_MB FREE_MB
------------ -------------------- ---------- ----------
1 OCR 1024 628
2 DATA 921600 425113
3 ARCH 307200 294439
5.2 磁盘组扩容
alter diskgroup DATA add disk '/dev/mapper/mpathe';
alter diskgroup DATA add disk '/dev/mapper/mpathf';
(提示:建议在同一条语句中添加多块磁盘,如 alter diskgroup DATA add disk '/dev/mapper/mpathe','/dev/mapper/mpathf'; 这样只触发一次Rebalance)
SQL> select GROUP_NUMBER,NAME,TOTAL_MB,FREE_MB from v$asm_diskgroup;
GROUP_NUMBER NAME TOTAL_MB FREE_MB
------------ -------------------- ---------- ----------
1 OCR 1024 628
2 DATA 1228800 732310
3 ARCH 307200 294439
ASM磁盘进行Rebalance
SQL> select * from v$asm_operation;
GROUP_NUMBER OPERATION STATE POWER ACTUAL SOFAR EST_WORK EST_RATE EST_MINUTES ERROR_CODE
------------ ---------- -------- ---------- ---------- ---------- ---------- ---------- ----------- ----------------------------------------------------------------------------------------
2 REBAL RUN 1 1 192 246571 1953 126