centos 7.4 + udev + 12.2.0.1 + gi + asm+ rac 双节点安装
vm: oracle vm 5.2.8 r121009 (Qt5.6.2)
os: centos7.4
db: oracle 12.2.0.1 + gi + asm
ip 规划
nodea nodeb
pub 192.168.56.100 192.168.56.200
priv 192.168.165.100 192.168.165.200
vip 192.168.56.101 192.168.56.201
scan 192.168.56.110
step 1:准备工作
创建两台虚拟机
centos74_12.2.0.1_rac1
centos74_12.2.0.1_rac2
oracle越来越大,越来越耗内存,本次的配置为 2cpu+6G
创建共享磁盘,分配给两台虚拟机
oracle virtualbox创建共享磁盘:
进入 E:\oraclevm\centos74_rac1_rac2_sharedisk文件夹
1、在目录下创建共享磁盘,大小为10g、10g、40g
D:\Oracle\VirtualBox\VBoxManage createmedium disk --filename share01.vdi --size 10000 --format VDI --variant Fixed
Medium created. UUID: 204e4375-b6b2-4b93-9fb1-f6eb823829fd
D:\Oracle\VirtualBox\VBoxManage createmedium disk --filename share02.vdi --size 10000 --format VDI --variant Fixed
Medium created. UUID: a364d619-a7ca-4c18-abb7-4c2a43aa5f91
D:\Oracle\VirtualBox\VBoxManage createmedium disk --filename share03.vdi --size 40000 --format VDI --variant Fixed
Medium created. UUID: 8134c529-5168-4c47-97d7-48ba1078d940
2、给虚拟机(区别大小写)添加共享磁盘
D:\Oracle\VirtualBox\VBoxManage storageattach centos74_12.2.0.1_rac1 --storagectl SATA --port 1 --device 0 --type hdd --medium 204e4375-b6b2-4b93-9fb1-f6eb823829fd --mtype shareable
D:\Oracle\VirtualBox\VBoxManage storageattach centos74_12.2.0.1_rac2 --storagectl SATA --port 1 --device 0 --type hdd --medium 204e4375-b6b2-4b93-9fb1-f6eb823829fd --mtype shareable
D:\Oracle\VirtualBox\VBoxManage storageattach centos74_12.2.0.1_rac1 --storagectl SATA --port 2 --device 0 --type hdd --medium a364d619-a7ca-4c18-abb7-4c2a43aa5f91 --mtype shareable
D:\Oracle\VirtualBox\VBoxManage storageattach centos74_12.2.0.1_rac2 --storagectl SATA --port 2 --device 0 --type hdd --medium a364d619-a7ca-4c18-abb7-4c2a43aa5f91 --mtype shareable
D:\Oracle\VirtualBox\VBoxManage storageattach centos74_12.2.0.1_rac1 --storagectl SATA --port 3 --device 0 --type hdd --medium 8134c529-5168-4c47-97d7-48ba1078d940 --mtype shareable
D:\Oracle\VirtualBox\VBoxManage storageattach centos74_12.2.0.1_rac2 --storagectl SATA --port 3 --device 0 --type hdd --medium 8134c529-5168-4c47-97d7-48ba1078d940 --mtype shareable
--storagectl "SATA" :存储控制器在虚拟机中显示的名称,如果是英文版的,可能名称不一样
--port 1 :SATA控制器的第1号端口(端口号从0开始)
--device 0 :端口上的设备号,SATA每个端口只有一个设备,固定为0(IDE下0为主盘、1为辅盘)
3、启用共享磁盘:
D:\Oracle\VirtualBox\VBoxManage modifymedium disk 204e4375-b6b2-4b93-9fb1-f6eb823829fd --type shareable
D:\Oracle\VirtualBox\VBoxManage modifymedium disk a364d619-a7ca-4c18-abb7-4c2a43aa5f91 --type shareable
D:\Oracle\VirtualBox\VBoxManage modifymedium disk 8134c529-5168-4c47-97d7-48ba1078d940 --type shareable
nodea、nodeb配置网络
[root@nodea ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: enp0s3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 08:00:27:4d:bc:9c brd ff:ff:ff:ff:ff:ff
inet 10.0.2.15/24 brd 10.0.2.255 scope global dynamic enp0s3
valid_lft 86049sec preferred_lft 86049sec
inet6 fe80::b61e:73ca:34d6:20ad/64 scope link
valid_lft forever preferred_lft forever
3: enp0s8: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 08:00:27:9b:3a:a5 brd ff:ff:ff:ff:ff:ff
inet 192.168.56.100/24 brd 192.168.56.255 scope global enp0s8
valid_lft forever preferred_lft forever
inet6 fe80::2668:a979:15c:d0aa/64 scope link
valid_lft forever preferred_lft forever
4: enp0s9: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 08:00:27:d6:e8:2e brd ff:ff:ff:ff:ff:ff
inet 192.168.165.100/24 brd 192.168.165.255 scope global enp0s9
valid_lft forever preferred_lft forever
inet6 fe80::d3e9:3afe:3525:7573/64 scope link
valid_lft forever preferred_lft forever
[root@nodeb ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN qlen 1
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: enp0s3: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 08:00:27:91:f3:46 brd ff:ff:ff:ff:ff:ff
inet 10.0.2.15/24 brd 10.0.2.255 scope global dynamic enp0s3
valid_lft 85943sec preferred_lft 85943sec
inet6 fe80::7db3:6c82:36a3:f295/64 scope link
valid_lft forever preferred_lft forever
3: enp0s8: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 08:00:27:ea:5e:58 brd ff:ff:ff:ff:ff:ff
inet 192.168.56.200/24 brd 192.168.56.255 scope global enp0s8
valid_lft forever preferred_lft forever
inet6 fe80::b62d:2715:66a3:d819/64 scope link
valid_lft forever preferred_lft forever
4: enp0s9: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000
link/ether 08:00:27:eb:82:db brd ff:ff:ff:ff:ff:ff
inet 192.168.165.200/24 brd 192.168.165.255 scope global enp0s9
valid_lft forever preferred_lft forever
inet6 fe80::e89d:cdaa:ec18:29e9/64 scope link
valid_lft forever preferred_lft forever
删除多个默认网关
# route del default dev enp0s8
# route del default dev enp0s9
nodea、nodeb修改/etc/hosts
# vi /etc/hosts
192.168.56.100 nodea
192.168.56.200 nodeb
192.168.165.100 nodea-priv
192.168.165.200 nodeb-priv
192.168.56.101 nodea-vip
192.168.56.201 nodeb-vip
192.168.56.110 node-scan
nodea、nodeb修改主机名
# vi /etc/hostname   (nodea节点上)
nodea
# vi /etc/hostname   (nodeb节点上)
nodeb
nodea、nodeb检查 shm 和 swap
shm 是oracle 的amm需要的
# df -h /dev/shm
# free -m
# vi /etc/fstab
tmpfs /dev/shm tmpfs defaults,size=3072m 0 0
关闭selinux:
临时修改:
setenforce 0
永久修改:
vi /etc/selinux/config
SELINUX=disabled
vi /etc/sysconfig/selinux
SELINUX=disabled
注意:取值必须为小写的 disabled,写成大写可能导致系统无法启动
查看两个文件是ln -s 的关系:
ls -l /etc/sysconfig/selinux
/etc/sysconfig/selinux -> ../selinux/config
nodea、nodeb修改内核参数
vi /etc/sysctl.conf
# sysctl settings are defined through files in
# /usr/lib/sysctl.d/, /run/sysctl.d/, and /etc/sysctl.d/.
#
# Vendors settings live in /usr/lib/sysctl.d/.
# To override a whole file, create a new file with the same name in
# /etc/sysctl.d/ and put new settings there. To override
# only specific settings, add a file with a lexically later
# name in /etc/sysctl.d/ and put new settings there.
#
# For more information, see sysctl.conf(5) and sysctl.d(5).
#ORACLE SETTING
#kernel.shmall = 2097152
#kernel.shmmax = 536870912 # Bytes
kernel.shmmni = 4096
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
立即生效
sysctl -p
# lsipc
RESOURCE DESCRIPTION LIMIT USED USE%
MSGMNI Number of message queues 6013 0 0.00%
MSGMAX Max size of message (bytes) 8192 - -
MSGMNB Default max size of queue (bytes) 16384 - -
SHMMNI Shared memory segments 4096 0 0.00%
SHMALL Shared memory pages 18446744073692774399 0 0.00%
SHMMAX Max size of shared memory segment (bytes) 18446744073692774399 - -
SHMMIN Min size of shared memory segment (bytes) 1 - -
SEMMNI Number of semaphore identifiers 128 0 0.00%
SEMMNS Total number of semaphores 32000 0 0.00%
SEMMSL Max semaphores per semaphore set. 250 - -
SEMOPM Max number of operations per semop(2) 100 - -
SEMVMX Semaphore max value 32767 - -
# sysctl -a |grep -i shm
kernel.shm_next_id = -1
kernel.shm_rmid_forced = 0
kernel.shmall = 18446744073692774399
kernel.shmmax = 18446744073692774399
kernel.shmmni = 4096
vm.hugetlb_shm_group = 0
# sysctl -a |grep -i sem
kernel.sem = 250 32000 100 128
kernel.sem_next_id = -1
vi /etc/pam.d/login
#ORACLE SETTING
session required /lib64/security/pam_limits.so
nodea、nodeb禁用linux 的透明大页
# vi /etc/rc.local
echo 'never' > /sys/kernel/mm/transparent_hugepage/enabled
echo 'never' > /sys/kernel/mm/transparent_hugepage/defrag
或者
# vi /etc/default/grub
transparent_hugepage=never
nodea、nodeb禁用numa
vi /etc/default/grub
numa=off
nodea、nodeb修改io scheduler
vi /etc/default/grub
elevator=deadline
nodea、nodeb执行下面命令,生成新的grub
grub2-mkconfig -o /boot/grub2/grub.cfg
nodea、nodeb 使用udev绑定硬盘,参考另外一篇博客
获取RESULT,在 Linux 7下,可以使用如下命令:
/usr/lib/udev/scsi_id -g -u /dev/sdb
1ATA_VBOX_HARDDISK_VB204e4375-fd293882
/usr/lib/udev/scsi_id -g -u /dev/sdc
1ATA_VBOX_HARDDISK_VBa364d619-915faa43
/usr/lib/udev/scsi_id -g -u /dev/sdd
1ATA_VBOX_HARDDISK_VB8134c529-40d97810
vi /etc/udev/rules.d/99-oracle-asmdevices.rules
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="1ATA_VBOX_HARDDISK_VB204e4375-fd293882", SYMLINK+="asm-diskb", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="1ATA_VBOX_HARDDISK_VBa364d619-915faa43", SYMLINK+="asm-diskc", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", SUBSYSTEM=="block", PROGRAM=="/usr/lib/udev/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="1ATA_VBOX_HARDDISK_VB8134c529-40d97810", SYMLINK+="asm-diskd", OWNER="grid", GROUP="asmadmin", MODE="0660"
重新加载分区
/sbin/partprobe /dev/sdb
/sbin/partprobe /dev/sdc
/sbin/partprobe /dev/sdd
用udevadm进行测试
udevadm test /sys/block/sdb
udevadm info --query=all --path=/sys/block/sdb
udevadm info --query=all --name=asm-diskb
重新加载udev规则,并确认udev服务状态
/usr/sbin/udevadm control --reload-rules
systemctl status systemd-udevd.service
systemctl enable systemd-udevd.service
检查设备是否正确绑定
# ls -l /dev/asm* /dev/sdb /dev/sdc /dev/sdd
lrwxrwxrwx 1 root root 3 Mar 20 21:58 /dev/asm-diskb -> sdb
lrwxrwxrwx 1 root root 3 Mar 20 22:05 /dev/asm-diskc -> sdc
lrwxrwxrwx 1 root root 3 Mar 20 22:05 /dev/asm-diskd -> sdd
brw-rw---- 1 grid asmadmin 8, 16 Mar 20 21:58 /dev/sdb
brw-rw---- 1 grid asmadmin 8, 32 Mar 20 21:58 /dev/sdc
brw-rw---- 1 grid asmadmin 8, 48 Mar 20 22:05 /dev/sdd
修改 resolv.conf
# mv /etc/resolv.conf /etc/resolv.conf.20180320
禁用 zeroconf
# vi /etc/sysconfig/network
NOZEROCONF=yes
创建用户组
groupadd -g 10000 oinstall
groupadd -g 10001 dba
groupadd -g 10002 oper
groupadd -g 10003 asmadmin
groupadd -g 10004 asmdba
groupadd -g 10005 asmoper
groupadd -g 10006 backupdba
groupadd -g 10007 dgdba
groupadd -g 10008 kmdba
groupadd -g 10009 racdba
创建用户
useradd -u 10000 -g oinstall -G dba,oper,backupdba,dgdba,kmdba,asmdba,racdba oracle
useradd -u 10001 -g oinstall -G dba,asmadmin,asmdba,asmoper,racdba grid
id oracle
uid=10000(oracle)
gid=10000(oinstall)
groups=10000(oinstall),
10001(dba),
10002(oper),
10004(asmdba),
10006(backupdba),
10007(dgdba),
10008(kmdba),
10009(racdba)
id grid
uid=10001(grid)
gid=10000(oinstall)
groups=10000(oinstall),
10001(dba),
10003(asmadmin),
10004(asmdba),
10005(asmoper),
10009(racdba)
passwd oracle
passwd grid
创建目录
mkdir -p /u01/app/gridbase/12.2.0/grid_1
mkdir -p /u01/app/grid/product/12.2.0/grid_1
mkdir -p /u01/app/oracle/product/12.2.0/db_1
chown -R grid:oinstall /u01
chown -R grid:oinstall /u01/app/gridbase
chown -R grid:oinstall /u01/app/grid
chown -R oracle:oinstall /u01/app/oracle
chmod -R 775 /u01/
grid 用户环境变量
vi /home/grid/.bash_profile
PS1="[`whoami`@`hostname`:"'$PWD]$'
umask 022
export ORACLE_BASE=/u01/app/gridbase/12.2.0/grid_1
export ORACLE_HOME=/u01/app/grid/product/12.2.0/grid_1
export ORACLE_SID=+ASM1
#export ORACLE_SID=+ASM2
export NLS_DATE_FORMAT='yyyy-mm-dd hh24:mi:ss'
export TNS_ADMIN=$ORACLE_HOME/network/admin
export LD_LIBRARY_PATH=$ORACLE_HOME/lib
export PATH=$ORACLE_HOME/bin:$PATH
export TEMP=/tmp
export TMP=/tmp
export TMPDIR=/tmp
oracle 用户环境变量
vi /home/oracle/.bash_profile
PS1="[`whoami`@`hostname`:"'$PWD]$'
umask 022
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/12.2.0/db_1
export ORACLE_SID=rac01
#export ORACLE_SID=rac02
export TNS_ADMIN=/u01/app/grid/product/12.2.0/grid_1/network/admin
#export ORA_NLS10=$ORACLE_HOME/nls/data
export NLS_LANG=AMERICAN_AMERICA.AL32UTF8
export NLS_DATE_FORMAT='yyyy-mm-dd hh24:mi:ss'
export PATH=.:${PATH}:$HOME/bin:$ORACLE_HOME/bin
export PATH=${PATH}:/usr/bin:/bin:/usr/bin/X11:/usr/local/bin
export PATH=${PATH}:$ORACLE_BASE/common/oracle/bin
export LD_LIBRARY_PATH=$ORACLE_HOME/lib
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:$ORACLE_HOME/oracm/lib
export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/lib:/usr/lib:/usr/local/lib
export CLASSPATH=$ORACLE_HOME/JRE
export CLASSPATH=${CLASSPATH}:$ORACLE_HOME/jlib
export CLASSPATH=${CLASSPATH}:$ORACLE_HOME/rdbms/jlib
export CLASSPATH=${CLASSPATH}:$ORACLE_HOME/network/jlib
export TEMP=/tmp
export TMP=/tmp
export TMPDIR=/tmp
修改资源限制
vi /etc/security/limits.conf
#ORACLE SETTING
grid soft nproc 2047
grid hard nproc 65536
grid soft nofile 1024
grid hard nofile 65536
grid soft stack 10240
grid hard stack 32768
#at least 90 percent of the current RAM when HugePages memory is enabled and
#at least 3145728 KB (3 GB) when HugePages memory is disabled
grid soft memlock unlimited
grid hard memlock unlimited
oracle soft nproc 2047
oracle hard nproc 65536
oracle soft nofile 1024
oracle hard nofile 65536
oracle soft stack 10240
oracle hard stack 32768
#at least 90 percent of the current RAM when HugePages memory is enabled and
#at least 3145728 KB (3 GB) when HugePages memory is disabled
oracle soft memlock unlimited
oracle hard memlock unlimited
yum 安装12.2.0.1 的依赖包
The following packages (or later versions) must be installed:
binutils-2.23.52.0.1-12.el7 (x86_64)
compat-libcap1-1.10-3.el7 (x86_64)
compat-libstdc++-33-3.2.3-71.el7 (i686)
compat-libstdc++-33-3.2.3-71.el7 (x86_64)
glibc-2.17-36.el7 (i686)
glibc-2.17-36.el7 (x86_64)
glibc-devel-2.17-36.el7 (i686)
glibc-devel-2.17-36.el7 (x86_64)
ksh
libaio-0.3.109-9.el7 (i686)
libaio-0.3.109-9.el7 (x86_64)
libaio-devel-0.3.109-9.el7 (i686)
libaio-devel-0.3.109-9.el7 (x86_64)
libgcc-4.8.2-3.el7 (i686)
libgcc-4.8.2-3.el7 (x86_64)
libstdc++-4.8.2-3.el7 (i686)
libstdc++-4.8.2-3.el7 (x86_64)
libstdc++-devel-4.8.2-3.el7 (i686)
libstdc++-devel-4.8.2-3.el7 (x86_64)
libxcb-1.9-5.el7 (i686)
libxcb-1.9-5.el7 (x86_64)
libX11-1.6.0-2.1.el7 (i686)
libX11-1.6.0-2.1.el7 (x86_64)
libXau-1.0.8-2.1.el7 (i686)
libXau-1.0.8-2.1.el7 (x86_64)
libXi-1.7.2-1.el7 (i686)
libXi-1.7.2-1.el7 (x86_64)
libXtst-1.2.2-1.el7 (i686)
libXtst-1.2.2-1.el7 (x86_64)
make-3.82-19.el7 (x86_64)
net-tools-2.0-0.17.20131004git.el7 (x86_64) (for Oracle RAC and Oracle Clusterware)
nfs-utils-1.3.0-0.21.el7.x86_64 (for Oracle ACFS)
smartmontools-6.2-4.el7 (x86_64)
sysstat-10.1.5-1.el7 (x86_64)
unixODBC-2.3.1 or later
yum install binutils compat-libcap1 compat-libstdc++-33 compat-libstdc++-33.i686 elfutils-libelf-devel gcc gcc-c++ glibc glibc*.i686 glibc-devel glibc-devel*.i686 ksh libgcc libgcc*.i686 libstdc++ libstdc++*.i686 libstdc++-devel libstdc++-devel*.i686 libaio libaio*.i686 libaio-devel libaio-devel*.i686 libXext libXext.i686 libXtst libXtst.i686 libX11 libX11.i686 libXau libXau.i686 libxcb libxcb.i686 libXi libXi.i686 make sysstat unixODBC unixODBC*.i686 unixODBC-devel unixODBC-devel*.i686 readline libtermcap-devel pdksh net-tools nfs-utils smartmontools libXp
禁止ntp服务
systemctl stop chronyd
systemctl disable chronyd
systemctl stop ntpd.service
systemctl disable ntpd.service
systemctl stop ntpdate.service
systemctl disable ntpdate.service
mv /etc/chrony.conf /etc/chrony.conf.bak
mv /etc/ntp.conf /etc/ntp.conf.bak
禁止avahi-daemon服务
avahi-daemon 服务会影响 oracle的多播通信,进而导致节点重启
因此,oracle环境下不能启用 avahi-daemon 服务
systemctl stop avahi-daemon.service
systemctl disable avahi-daemon.service
禁用其它一些服务
systemctl disable iptables.service
systemctl disable firewalld.service
systemctl disable cups.service
systemctl disable ip6tables.service
systemctl disable firstboot-graphical.service
systemctl disable postfix.service
systemctl disable NetworkManager.service
最后确保以下服务正常
systemctl enable crond.service
systemctl enable sshd.service
systemctl enable sysstat.service
systemctl enable network.service
重启os
reboot
step 2:安装 gi
下载linuxx64_12201_grid_home.zip 上传到 nodea 的 /tmp 下
切换到grid用户,解压到grid用户下的$ORACLE_HOME
安装 cvuqdisk
# cd /u01/app/grid/product/12.2.0/grid_1/cv/rpm
# rpm -ivh cvuqdisk-1.0.10-1.rpm
运行预检查
$ cd /u01/app/grid/product/12.2.0/grid_1/
$ ./runcluvfy.sh stage -pre crsinst -n nodea,nodeb -verbose
Verifying Single Client Access Name (SCAN) ...FAILED
Verifying DNS/NIS name service 'node-scan' ...FAILED
PRVG-1101 : SCAN name "node-scan" failed to resolve
SCAN 如果使用hosts方式的话,可以忽略
修正后,运行 gridSetup.sh
Configure Oracle Grid Infrastructure for a New Cluster
Configure an Oracle Standalone Cluster
Cluster Name : node-cluster
SCAN Name : node-scan
SCAN Port : 1521
role:HUB LEAF
GIMR data
at least 39,152MB
DNS/NIS
最后以root运行两个脚本
/u01/app/gridbase/12.2.0/oraInventory/orainstRoot.sh
/u01/app/grid/product/12.2.0/grid_1/root.sh
运行后再次检查
$ cd /u01/app/grid/product/12.2.0/grid_1/
$ ./runcluvfy.sh stage -post crsinst -n nodea,nodeb -verbose
step 3:安装 db
下载 linuxx64_12201_database.zip 解压后安装
Install database software only
Oracle Real Application Cluster database installation
最后以root运行脚本
/u01/app/oracle/product/12.2.0/db_1/root.sh
接下来用 dbca 创建数据库
Global database name : rac0
SID Prefix: rac0
em port:5500
多节点安装前后都检测一下
./runcluvfy.sh stage -pre crsinst -n nodea,nodeb -verbose
./runcluvfy.sh stage -pre dbinst -n nodea,nodeb -verbose
./runcluvfy.sh stage -post crsinst -n nodea,nodeb -verbose
./runcluvfy.sh stage -post dbinst -n nodea,nodeb -verbose
./cluvfy stage -pre dbinst -n nodea,nodeb -verbose
./cluvfy stage -post dbinst -n nodea,nodeb -verbose
查看一些信息
# /u01/app/grid/product/12.2.0/grid_1/bin/crsctl get css
Usage:
crsctl get css <parameter>
Displays the value of a Cluster Synchronization Services parameter
clusterguid
diagwait
disktimeout
misscount
reboottime
noautorestart
priority
crsctl get css ipmiaddr
Displays the IP address of the local IPMI device as set in the Oracle registry
# /u01/app/grid/product/12.2.0/grid_1/bin/crsctl get css diagwait
CRS-4678: Successful get diagwait 0 for Cluster Synchronization Services.
# /u01/app/grid/product/12.2.0/grid_1/bin/crsctl get css disktimeout
CRS-4678: Successful get disktimeout 200 for Cluster Synchronization Services.
# /u01/app/grid/product/12.2.0/grid_1/bin/crsctl get css misscount
CRS-4678: Successful get misscount 30 for Cluster Synchronization Services.
# /u01/app/grid/product/12.2.0/grid_1/bin/crsctl get css reboottime
CRS-4678: Successful get reboottime 3 for Cluster Synchronization Services.
# /u01/app/grid/product/12.2.0/grid_1/bin/crsctl lsmodules crs
List CRSD Debug Module: AGENT
List CRSD Debug Module: AGFW
List CRSD Debug Module: CLSCAL
List CRSD Debug Module: CLSCEVT
List CRSD Debug Module: CLSFRAME
List CRSD Debug Module: CLSINET
List CRSD Debug Module: CLSO
List CRSD Debug Module: CLSVER
List CRSD Debug Module: CLUCLS
List CRSD Debug Module: COMMCRS
List CRSD Debug Module: COMMNS
List CRSD Debug Module: CRSAPP
List CRSD Debug Module: CRSCCL
List CRSD Debug Module: CRSCEVT
List CRSD Debug Module: CRSCOMM
List CRSD Debug Module: CRSD
List CRSD Debug Module: CRSEVT
List CRSD Debug Module: CRSMAIN
List CRSD Debug Module: CRSOCR
List CRSD Debug Module: CRSPE
List CRSD Debug Module: CRSPLACE
List CRSD Debug Module: CRSRES
List CRSD Debug Module: CRSRPT
List CRSD Debug Module: CRSRTI
List CRSD Debug Module: CRSSE
List CRSD Debug Module: CRSSEC
List CRSD Debug Module: CRSTIMER
List CRSD Debug Module: CRSUI
List CRSD Debug Module: CSSCLNT
List CRSD Debug Module: OCRAPI
List CRSD Debug Module: OCRASM
List CRSD Debug Module: OCRCAC
List CRSD Debug Module: OCRCLI
List CRSD Debug Module: OCRMAS
List CRSD Debug Module: OCRMSG
List CRSD Debug Module: OCROSD
List CRSD Debug Module: OCRRAW
List CRSD Debug Module: OCRSRV
List CRSD Debug Module: OCRUTL
List CRSD Debug Module: SuiteTes
List CRSD Debug Module: UiServer
# /u01/app/grid/product/12.2.0/grid_1/bin/crsctl check has
CRS-4638: Oracle High Availability Services is online
# /u01/app/grid/product/12.2.0/grid_1/bin/crsctl check crs
CRS-4638: Oracle High Availability Services is online
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
# /u01/app/grid/product/12.2.0/grid_1/bin/crsctl stat res -t
--------------------------------------------------------------------------------
Name Target State Server State details
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.ASMNET1LSNR_ASM.lsnr
ONLINE ONLINE nodea STABLE
ONLINE ONLINE nodeb STABLE
ora.DG_DATA01.dg
ONLINE ONLINE nodea STABLE
ONLINE ONLINE nodeb STABLE
ora.LISTENER.lsnr
ONLINE ONLINE nodea STABLE
ONLINE ONLINE nodeb STABLE
ora.MGMT.dg
ONLINE ONLINE nodea STABLE
ONLINE ONLINE nodeb STABLE
ora.net1.network
ONLINE ONLINE nodea STABLE
ONLINE ONLINE nodeb STABLE
ora.ons
ONLINE ONLINE nodea STABLE
ONLINE ONLINE nodeb STABLE
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.LISTENER_SCAN1.lsnr
1 ONLINE ONLINE nodeb STABLE
ora.MGMTLSNR
1 OFFLINE OFFLINE STABLE
ora.asm
1 ONLINE ONLINE nodea Started,STABLE
2 ONLINE ONLINE nodeb Started,STABLE
3 OFFLINE OFFLINE STABLE
ora.cvu
1 ONLINE ONLINE nodeb STABLE
ora.nodea.vip
1 ONLINE ONLINE nodea STABLE
ora.nodeb.vip
1 ONLINE ONLINE nodeb STABLE
ora.qosmserver
1 ONLINE ONLINE nodeb STABLE
ora.rac0.db
1 ONLINE ONLINE nodea Open,HOME=/u01/app/o
racle/product/12.2.0
/db_1,STABLE
2 ONLINE ONLINE nodeb Open,HOME=/u01/app/o
racle/product/12.2.0
/db_1,STABLE
ora.scan1.vip
1 ONLINE ONLINE nodeb STABLE
--------------------------------------------------------------------------------
# ./crsctl stat res -t -init
--------------------------------------------------------------------------------
Name Target State Server State details
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.asm
1 ONLINE ONLINE nodea STABLE
ora.cluster_interconnect.haip
1 ONLINE ONLINE nodea STABLE
ora.crf
1 ONLINE ONLINE nodea STABLE
ora.crsd
1 ONLINE ONLINE nodea STABLE
ora.cssd
1 ONLINE ONLINE nodea STABLE
ora.cssdmonitor
1 ONLINE ONLINE nodea STABLE
ora.ctssd
1 ONLINE ONLINE nodea ACTIVE:0,STABLE
ora.diskmon
1 OFFLINE OFFLINE STABLE
ora.evmd
1 ONLINE ONLINE nodea STABLE
ora.gipcd
1 ONLINE ONLINE nodea STABLE
ora.gpnpd
1 ONLINE ONLINE nodea STABLE
ora.mdnsd
1 ONLINE ONLINE nodea STABLE
ora.storage
1 ONLINE ONLINE nodea STABLE
--------------------------------------------------------------------------------
查看一些进程
# ps -fu root|grep -i init
root 714 1 0 09:06 ? 00:00:00 /usr/sbin/alsactl -s -n 19 -c -E ALSA_CONFIG_PATH=/etc/alsa/alsactl.conf --initfile=/lib/alsa/ini/00main rdaemon
root 15922 2408 0 10:19 pts/0 00:00:00 grep --color=auto -i init
root 17917 1 0 09:31 ? 00:00:00 /bin/sh /etc/init.d/init.tfa run >/dev/null 2>&1 </dev/null
root 19671 1 0 09:33 ? 00:00:00 /bin/sh /etc/init.d/init.ohasd run >/dev/null 2>&1 </dev/null
# ps -fu root|grep -i grid_1
root 15517 2408 0 10:17 pts/0 00:00:00 grep --color=auto -i grid_1
root 17975 1 0 09:31 ? 00:00:25 /u01/app/grid/product/12.2.0/grid_1/jdk/jre/bin/java -Xms128m -Xmx512m oracle.rat.tfa.TFAMain /u01/app/grid/product/12.2.0/grid_1/tfa/nodea/tfa_home
root 20790 1 0 09:33 ? 00:00:08 /u01/app/grid/product/12.2.0/grid_1/bin/ohasd.bin reboot _ORA_BLOCKING_STACK_LOCALE=AMERICAN_AMERICA.AL32UTF8
root 20889 1 0 09:33 ? 00:00:04 /u01/app/grid/product/12.2.0/grid_1/bin/orarootagent.bin
root 21110 1 0 09:33 ? 00:00:02 /u01/app/grid/product/12.2.0/grid_1/bin/cssdmonitor
root 21128 1 0 09:33 ? 00:00:02 /u01/app/grid/product/12.2.0/grid_1/bin/cssdagent
root 21261 1 0 09:34 ? 00:00:07 /u01/app/grid/product/12.2.0/grid_1/bin/octssd.bin reboot
root 21493 1 0 09:34 ? 00:00:15 /u01/app/grid/product/12.2.0/grid_1/bin/osysmond.bin
root 21521 1 0 09:34 ? 00:00:16 /u01/app/grid/product/12.2.0/grid_1/bin/crsd.bin reboot
root 21681 1 0 09:34 ? 00:00:12 /u01/app/grid/product/12.2.0/grid_1/bin/orarootagent.bin
root 21722 1 0 09:34 ? 00:00:13 /u01/app/grid/product/12.2.0/grid_1/bin/ologgerd -M
root 22562 21493 0 09:35 ? 00:00:03 /u01/app/grid/product/12.2.0/grid_1/perl/bin/perl /u01/app/grid/product/12.2.0/grid_1/bin/diagsnap.pl start
# ps -fu grid
UID PID PPID C STIME TTY TIME CMD
grid 23914 1 0 09:37 ? 00:00:00 /u01/app/grid/product/12.2.0/grid_1/bin/tnslsnr LISTENER_SCAN1 -no_crs_notify -inherit
grid 24064 1 0 09:37 ? 00:00:02 /u01/app/grid/product/12.2.0/grid_1/bin/scriptagent.bin
grid 24100 1 0 09:37 ? 00:00:07 /u01/app/grid/product/12.2.0/grid_1/jdk/bin/java -server -Xms128M -Xmx384M -Djava.awt.headless=tru
grid 24451 1 0 09:37 ? 00:00:03 /u01/app/grid/product/12.2.0/grid_1/jdk/bin/java -classpath /u01/app/grid/product/12.2.0/grid_1/jd
grid 21783 1 0 09:34 ? 00:00:14 /u01/app/grid/product/12.2.0/grid_1/bin/oraagent.bin
grid 21807 1 0 09:34 ? 00:00:00 /u01/app/grid/product/12.2.0/grid_1/opmn/bin/ons -d
grid 21808 21807 0 09:34 ? 00:00:00 /u01/app/grid/product/12.2.0/grid_1/opmn/bin/ons -d
grid 23439 1 0 09:36 ? 00:00:00 /u01/app/grid/product/12.2.0/grid_1/bin/tnslsnr ASMNET1LSNR_ASM -no_crs_notify -inherit
grid 881 1 0 09:50 ? 00:00:00 /u01/app/grid/product/12.2.0/grid_1/bin/tnslsnr LISTENER -no_crs_notify -inherit
grid 20960 1 0 09:33 ? 00:00:07 /u01/app/grid/product/12.2.0/grid_1/bin/oraagent.bin
grid 20979 1 0 09:33 ? 00:00:01 /u01/app/grid/product/12.2.0/grid_1/bin/mdnsd.bin
grid 20982 1 0 09:33 ? 00:00:07 /u01/app/grid/product/12.2.0/grid_1/bin/evmd.bin
grid 21011 1 0 09:33 ? 00:00:02 /u01/app/grid/product/12.2.0/grid_1/bin/gpnpd.bin
grid 21058 20982 0 09:33 ? 00:00:02 /u01/app/grid/product/12.2.0/grid_1/bin/evmlogger.bin -o /u01/app/grid/product/12.2.0/grid_1/log/[
grid 21070 1 0 09:33 ? 00:00:05 /u01/app/grid/product/12.2.0/grid_1/bin/gipcd.bin
grid 21141 1 0 09:33 ? 00:00:08 /u01/app/grid/product/12.2.0/grid_1/bin/ocssd.bin
grid 21386 1 0 09:34 ? 00:00:00 asm_pmon_+ASM1
grid 21388 1 0 09:34 ? 00:00:00 asm_clmn_+ASM1
grid 21390 1 0 09:34 ? 00:00:00 asm_psp0_+ASM1
grid 21392 1 1 09:34 ? 00:00:27 asm_vktm_+ASM1
grid 21402 1 0 09:34 ? 00:00:00 asm_gen0_+ASM1
grid 21405 1 0 09:34 ? 00:00:00 asm_mman_+ASM1
grid 21409 1 0 09:34 ? 00:00:00 asm_gen1_+ASM1
grid 21413 1 0 09:34 ? 00:00:01 asm_diag_+ASM1
grid 21415 1 0 09:34 ? 00:00:00 asm_ping_+ASM1
grid 21417 1 0 09:34 ? 00:00:00 asm_pman_+ASM1
grid 21419 1 0 09:34 ? 00:00:04 asm_dia0_+ASM1
grid 21421 1 0 09:34 ? 00:00:02 asm_lmon_+ASM1
grid 21423 1 0 09:34 ? 00:00:02 asm_lmd0_+ASM1
grid 21425 1 0 09:34 ? 00:00:05 asm_lms0_+ASM1
grid 21428 1 0 09:34 ? 00:00:02 asm_lmhb_+ASM1
grid 21431 1 0 09:34 ? 00:00:00 asm_lck1_+ASM1
grid 21433 1 0 09:34 ? 00:00:00 asm_dbw0_+ASM1
grid 21435 1 0 09:34 ? 00:00:00 asm_lgwr_+ASM1
grid 21437 1 0 09:34 ? 00:00:00 asm_ckpt_+ASM1
grid 21439 1 0 09:34 ? 00:00:00 asm_smon_+ASM1
grid 21441 1 0 09:34 ? 00:00:00 asm_lreg_+ASM1
grid 21443 1 0 09:34 ? 00:00:00 asm_pxmn_+ASM1
grid 21445 1 0 09:34 ? 00:00:00 asm_rbal_+ASM1
grid 21447 1 0 09:34 ? 00:00:00 asm_gmon_+ASM1
grid 21449 1 0 09:34 ? 00:00:00 asm_mmon_+ASM1
grid 21451 1 0 09:34 ? 00:00:00 asm_mmnl_+ASM1
grid 21453 1 0 09:34 ? 00:00:03 asm_imr0_+ASM1
grid 21455 1 0 09:34 ? 00:00:00 asm_lck0_+ASM1
grid 21459 1 0 09:34 ? 00:00:02 asm_gcr0_+ASM1
grid 12134 1 0 10:02 ? 00:00:00 asm_ppa7_+ASM1
grid 21602 1 0 09:34 ? 00:00:00 asm_asmb_+ASM1
grid 21558 1 0 09:34 ? 00:00:01 oracle+ASM1_crf (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))
grid 21604 1 0 09:34 ? 00:00:00 oracle+ASM1_asmb_+asm1 (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))
grid 21628 1 0 09:34 ? 00:00:00 oracle+ASM1_ocr (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))
grid 23530 1 0 09:36 ? 00:00:00 oracle+ASM1 (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))
grid 23564 1 0 09:36 ? 00:00:00 oracle+ASM1 (DESCRIPTION=(LOCAL=YES)(ADDRESS=(PROTOCOL=beq)))
# ps -fu oracle
UID PID PPID C STIME TTY TIME CMD
oracle 2029 1 0 19:19 ? 00:00:00 ora_w005_rac02
oracle 2340 1 0 19:20 ? 00:00:00 ora_w004_rac02
oracle 4239 1 0 18:05 ? 00:00:10 /u01/app/grid/product/12.2.0/grid_1
oracle 4427 1 0 18:06 ? 00:00:00 ora_pmon_rac02
oracle 4429 1 0 18:06 ? 00:00:00 ora_clmn_rac02
oracle 4431 1 0 18:06 ? 00:00:01 ora_psp0_rac02
oracle 4434 1 0 18:06 ? 00:00:01 ora_ipc0_rac02
oracle 4436 1 0 18:06 ? 00:01:02 ora_vktm_rac02
oracle 4442 1 0 18:06 ? 00:00:00 ora_gen0_rac02
oracle 4444 1 0 18:06 ? 00:00:00 ora_mman_rac02
oracle 4448 1 0 18:06 ? 00:00:01 ora_gen1_rac02
oracle 4452 1 0 18:06 ? 00:00:04 ora_diag_rac02
oracle 4454 1 0 18:06 ? 00:00:00 ora_ofsd_rac02
oracle 4458 1 0 18:06 ? 00:00:02 ora_dbrm_rac02
oracle 4460 1 0 18:06 ? 00:00:00 ora_vkrm_rac02
oracle 4462 1 0 18:06 ? 00:00:00 ora_ping_rac02
oracle 4464 1 0 18:06 ? 00:00:00 ora_svcb_rac02
oracle 4466 1 0 18:06 ? 00:00:00 ora_acms_rac02
oracle 4468 1 0 18:06 ? 00:00:00 ora_pman_rac02
oracle 4470 1 0 18:06 ? 00:00:12 ora_dia0_rac02
oracle 4472 1 0 18:06 ? 00:00:08 ora_lmon_rac02
oracle 4474 1 0 18:06 ? 00:00:22 ora_lms0_rac02
oracle 4477 1 0 18:06 ? 00:00:09 ora_lmd0_rac02
oracle 4480 1 0 18:06 ? 00:00:01 ora_rmv0_rac02
oracle 4482 1 0 18:06 ? 00:00:00 ora_rms0_rac02
oracle 4485 1 0 18:06 ? 00:00:06 ora_lmhb_rac02
oracle 4487 1 0 18:06 ? 00:00:00 ora_lck1_rac02
oracle 4491 1 0 18:06 ? 00:00:00 ora_dbw0_rac02
oracle 4493 1 0 18:06 ? 00:00:00 ora_lgwr_rac02
oracle 4495 1 0 18:06 ? 00:00:02 ora_ckpt_rac02
oracle 4497 1 0 18:06 ? 00:00:00 ora_smon_rac02
oracle 4499 1 0 18:06 ? 00:00:00 ora_smco_rac02
oracle 4501 1 0 18:06 ? 00:00:00 ora_reco_rac02
oracle 4505 1 0 18:06 ? 00:00:00 ora_lreg_rac02
oracle 4509 1 0 18:06 ? 00:00:00 ora_pxmn_rac02
oracle 4511 1 0 18:06 ? 00:00:00 ora_rbal_rac02
oracle 4513 1 0 18:06 ? 00:00:00 ora_asmb_rac02
oracle 4515 1 0 18:06 ? 00:00:05 ora_fenc_rac02
oracle 4517 1 0 18:06 ? 00:00:14 ora_mmon_rac02
oracle 4519 1 0 18:06 ? 00:00:01 ora_mmnl_rac02
oracle 4521 1 0 18:06 ? 00:00:00 ora_d000_rac02
oracle 4523 1 0 18:06 ? 00:00:00 ora_s000_rac02
oracle 4525 1 0 18:06 ? 00:00:08 ora_imr0_rac02
oracle 4528 1 0 18:06 ? 00:00:03 ora_scm0_rac02
oracle 4532 1 0 18:06 ? 00:00:01 ora_lck0_rac02
oracle 4536 1 0 18:06 ? 00:00:00 ora_mark_rac02
oracle 4538 1 0 18:06 ? 00:00:00 ora_rsmn_rac02
oracle 4540 1 0 18:06 ? 00:00:00 ora_tmon_rac02
oracle 4548 1 0 18:06 ? 00:00:09 ora_gcr0_rac02
oracle 4620 1 0 18:06 ? 00:00:00 ora_tt00_rac02
oracle 4622 1 0 18:06 ? 00:00:00 ora_tt01_rac02
oracle 4624 1 0 18:06 ? 00:00:00 ora_tt02_rac02
oracle 4657 1 0 18:06 ? 00:00:00 ora_gtx0_rac02
oracle 4660 1 0 18:06 ? 00:00:00 ora_rcbg_rac02
oracle 4663 1 0 18:06 ? 00:00:00 ora_aqpc_rac02
oracle 4685 1 0 18:06 ? 00:00:03 ora_p000_rac02
oracle 4687 1 0 18:06 ? 00:00:03 ora_p001_rac02
oracle 4689 1 0 18:06 ? 00:00:00 ora_p002_rac02
oracle 4692 1 0 18:06 ? 00:00:00 ora_p003_rac02
oracle 4735 1 0 18:06 ? 00:00:00 ora_qm02_rac02
oracle 4737 1 0 18:06 ? 00:00:03 ora_qm05_rac02
oracle 4749 1 0 18:06 ? 00:00:00 ora_q004_rac02
oracle 5002 1 0 18:06 ? 00:00:07 ora_cjq0_rac02
oracle 13707 1 0 20:00 ? 00:00:00 ora_w003_rac02
oracle 14389 1 0 20:01 ? 00:00:00 ora_w000_rac02
oracle 14893 1 0 20:03 ? 00:00:00 ora_w001_rac02
oracle 15155 1 0 20:04 ? 00:00:00 ora_w006_rac02
oracle 16891 1 0 20:10 ? 00:00:00 ora_ppa7_rac02
oracle 17337 1 0 18:20 ? 00:00:05 ora_o000_rac02
oracle 18513 1 0 20:16 ? 00:00:00 ora_w002_rac02
oracle 18520 1 0 20:16 ? 00:00:00 ora_w007_rac02
oracle 19695 1 0 20:21 ? 00:00:00 ora_gcr1_rac02
oracle 21031 1 0 18:33 ? 00:00:00 ora_q005_rac02
oracle 21042 1 0 18:33 ? 00:00:05 ora_o001_rac02
oracle 24722 1 0 20:26 ? 00:00:00 ora_p004_rac02
oracle 24891 1 0 20:26 ? 00:00:00 ora_p005_rac02
oracle 5019 1 0 18:06 ? 00:00:01 oraclerac02 (DESCRIPTION=(LOCAL=YES
oracle 5021 1 0 18:06 ? 00:00:00 oraclerac02 (DESCRIPTION=(LOCAL=YES
oracle 5029 1 0 18:06 ? 00:00:00 oraclerac02 (DESCRIPTION=(LOCAL=YES