This article explains how to set up an Oracle database RAC environment on Linux inside VMware. Many people run into trouble with this in practice, so read carefully and work through the steps below.
/opt //directory for the installation packages; unzip the grid archive here
1. Install the OS, configure the dual-NIC network, and configure the shared disks (rac02 as the example)
1.1 IP address plan
192.168.47.151 rac01
192.168.47.152 rac02
10.11.12.51 rac01priv
10.11.12.52 rac02priv
192.168.47.153 rac01vip
192.168.47.154 rac02vip
192.168.47.155 racscanip
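The dual-NIC setup itself is not shown above. A minimal sketch of the interface files on rac01, assuming eth0 carries the public network and eth1 the private interconnect (device names and netmasks may differ in your environment):
# /etc/sysconfig/network-scripts/ifcfg-eth0  (public network, rac01)
DEVICE=eth0
ONBOOT=yes
BOOTPROTO=static
IPADDR=192.168.47.151
NETMASK=255.255.255.0
# /etc/sysconfig/network-scripts/ifcfg-eth1  (private interconnect, rac01)
DEVICE=eth1
ONBOOT=yes
BOOTPROTO=static
IPADDR=10.11.12.51
NETMASK=255.255.255.0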
1.2 Configure the shared disks (the settings must be changed on both virtual machines)
1.2.1 Add the disks as independent, persistent disks -- shared disk path: H:\sharedisk\diskrac2\sharedisk*
1.2.2 Change each virtual machine's configuration (add/modify the following entries in the VM's .vmx file)
Example 1:
scsi1.present = "TRUE"scsi1.virtualDev = "lsilogic"scsi1.sharedBus = "virtual"
scsi1:1.present = "TRUE"scsi1:1.mode = "independent-persistent"
scsi1:1.filename = "H:\sharedisk\diskrac2\sharediskOCR.vmdk"
scsi1:1.deviceType = "plainDisk"scsi1:2.present = "TRUE"
scsi1:2.mode = "independent-persistent"
scsi1:2.filename = "H:\sharedisk\diskrac2\sharediskrDate01.vmdk"
scsi1:2.deviceType = "plainDisk"
scsi1:3.present = "TRUE"
scsi1:3.mode = "independent-persistent"
scsi1:3.filename = "H:\sharedisk\diskrac2\sharediskrDate02.vmdk"
scsi1:3.deviceType = "plainDisk"
scsi1:4.present = "TRUE"
scsi1:4.mode = "independent-persistent"
scsi1:4.filename = "H:\sharedisk\diskrac2\sharediskrFlash.vmdk"
scsi1:4.deviceType = "plainDisk"
disk.locking = "false"
diskLib.dataCacheMaxSize = "0"
diskLib.dataCacheMaxReadAheadSize = "0"
diskLib.DataCacheMinReadAheadSize = "0"
diskLib.dataCachePageSize = "4096"
diskLib.maxUnsyncedWrites = "0"
disk.EnableUUID="TRUE"
Example 2:
scsi1.present = "TRUE"
scsi1.virtualDev = "lsilogic"
scsi1.sharedBus = "virtual"
scsi1:0.present = "TRUE"
scsi1:0.fileName = "C:\sharedisk\RAC01\ocr1.vmdk"
scsi1:0.mode = "independent-persistent"
scsi1:0.deviceType = "plainDisk"
scsi1:1.present = "TRUE"
scsi1:1.fileName = "C:\sharedisk\RAC01\ocr2.vmdk"
scsi1:1.mode = "independent-persistent"
scsi1:1.deviceType = "plainDisk"
scsi1:2.present = "TRUE"
scsi1:2.fileName = "C:\sharedisk\RAC01\ocr3.vmdk"
scsi1:2.mode = "independent-persistent"
scsi1:2.deviceType = "plainDisk"
scsi1:3.present = "TRUE"
scsi1:3.fileName = "C:\sharedisk\RAC01\data.vmdk"
scsi1:3.mode = "independent-persistent"
scsi1:3.deviceType = "plainDisk"
scsi1:4.present = "TRUE"
scsi1:4.fileName = "C:\sharedisk\RAC01\fra.vmdk"
scsi1:4.mode = "independent-persistent"
scsi1:4.deviceType = "plainDisk"
floppy0.present = "FALSE"
disk.locking = "false"
diskLib.dataCacheMaxSize = "0"
diskLib.dataCacheMaxReadAheadSize = "0"
diskLib.DataCacheMinReadAheadSize = "0"
diskLib.dataCachePageSize = "4096"
diskLib.maxUnsyncedWrites = "0"
disk.EnableUUID="TRUE"
Example 3 (shared-bus and caching parameters only):
scsi1.sharedBus = "virtual"
scsi1:0.deviceType = "plainDisk"
scsi1:1.deviceType = "plainDisk"
scsi1:2.deviceType = "plainDisk"
scsi1:3.deviceType = "plainDisk"
scsi1:4.deviceType = "plainDisk"
disk.locking = "false"
diskLib.dataCacheMaxSize = "0"
diskLib.dataCacheMaxReadAheadSize = "0"
diskLib.DataCacheMinReadAheadSize = "0"
diskLib.dataCachePageSize = "4096"
diskLib.maxUnsyncedWrites = "0"
disk.EnableUUID="TRUE"
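The shared .vmdk files referenced above must exist before the virtual machines are powered on. If they have not been created yet, they can be pre-allocated with VMware Workstation's vmware-vdiskmanager; a sketch (the sizes are examples only, adjust them and the paths to your own layout):
vmware-vdiskmanager.exe -c -s 2GB -a lsilogic -t 2 "H:\sharedisk\diskrac2\sharediskOCR.vmdk"
vmware-vdiskmanager.exe -c -s 20GB -a lsilogic -t 2 "H:\sharedisk\diskrac2\sharediskrDate01.vmdk"
vmware-vdiskmanager.exe -c -s 20GB -a lsilogic -t 2 "H:\sharedisk\diskrac2\sharediskrDate02.vmdk"
vmware-vdiskmanager.exe -c -s 10GB -a lsilogic -t 2 "H:\sharedisk\diskrac2\sharediskrFlash.vmdk"
The -t 2 option creates a preallocated single-file disk, which is the usual choice for shared RAC disks.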
2. Configure the OS
2.1 Disable the firewall and SELinux
[root@rac02 ~]# chkconfig iptables off
[root@rac02 ~]# service iptables status
[root@rac02 ~]# service iptables stop
[root@rac02 ~]# vi /etc/selinux/config
SELINUX=disabled
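Note that editing /etc/selinux/config only takes effect after a reboot; setenforce 0 disables SELinux for the current session. Also, the yum repository configured below points at CentOS 7, where the firewall service is firewalld rather than iptables, so on that release the equivalent would be (a sketch):
[root@rac02 ~]# setenforce 0
[root@rac02 ~]# systemctl stop firewalld
[root@rac02 ~]# systemctl disable firewalld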
2.2 Modify the hosts file
[root@rac02 ~]# vi /etc/hosts
192.168.47.151 rac01
192.168.47.152 rac02
10.11.12.51 rac01priv
10.11.12.52 rac02priv
192.168.47.153 rac01vip
192.168.47.154 rac02vip
192.168.47.155 racscanip
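A quick sanity check that the names resolve on both nodes (a sketch; the VIP and SCAN addresses are not pingable until Grid Infrastructure is running, so only the host and private names are tested here):
[root@rac02 ~]# for h in rac01 rac02 rac01priv rac02priv; do ping -c 1 $h; done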
2.3 Configure yum repositories
2.3.1 Configure a local yum repository
[root@rac02 ~]#yum clean all
[root@rac02 ~]#mkdir /mnt/cdrom
[root@rac02 ~]#mount -t iso9660 /dev/cdrom /mnt/cdrom/
[root@rac02 ~]#vi /etc/yum.repos.d/local.repo
[local]
name=local
baseurl=file:///mnt/cdrom
gpgcheck=0
enabled=1
2.3.2 Configure the 163 mirror yum repository
[root@rac02 ~]#vi /etc/yum.repos.d/163.repo
[163]
name=163
baseurl=http://mirrors.163.com/centos/7/os/x86_64/
gpgcheck=0
enabled=1
#yum repolist
2.4 Install the required RPM packages
[root@rac02 ~]#vi yum.sh
yum install -y compat-lib*
yum install -y binutils-2*
yum install -y gcc*
yum install -y glibc-2*
yum install -y glibc-devel-2*
yum install -y ksh
yum install -y libgcc-4*
yum install -y libstdc++-4*
yum install -y libstdc++-d*
yum install -y libaio-0*
yum install -y libaio-d*
yum install -y sysstat-9*
yum install -y make-3*
yum install -y libc*
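After running the script (for example with sh yum.sh), a quick check that nothing was missed (a sketch; exact package names vary slightly between releases):
[root@rac02 ~]# rpm -q binutils gcc gcc-c++ glibc glibc-devel ksh libgcc libstdc++ libstdc++-devel libaio libaio-devel sysstat make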
2.5 Modify the kernel parameters
[root@rac02 ~]#vi /etc/sysctl.conf
fs.aio-max-nr = 1048576
fs.file-max = 6815744
kernel.shmall = 2097152
kernel.shmmax = 1054472192
kernel.shmmni = 4096
kernel.sem = 250 32000 100 128
net.ipv4.ip_local_port_range = 9000 65500
net.core.rmem_default = 262144
net.core.rmem_max = 4194304
net.core.wmem_default = 262144
net.core.wmem_max = 1048576
net.ipv4.tcp_wmem = 262144 262144 262144
net.ipv4.tcp_rmem = 4194304 4194304 4194304
[root@rac02 ~]# sysctl -p //apply the settings immediately
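For reference, kernel.shmmax is in bytes (1054472192 ≈ 1 GB, sized for a small VM; the usual guideline is at least half of physical RAM) and kernel.shmall is in 4 KB pages (2097152 pages = 8 GB). The active values can be spot-checked with, for example:
[root@rac02 ~]# sysctl kernel.shmmax kernel.shmall fs.aio-max-nr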
2.6 Modify the user resource limits
[root@rac02 ~] #vi /etc/security/limits.conf
grid soft nproc 2047
grid hard nproc 16384
grid soft nofile 1024
grid hard nofile 65536
oracle soft nproc 2047
oracle hard nproc 16384
oracle soft nofile 1024
oracle hard nofile 65536
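Oracle's installation guide also recommends making sure these limits are actually applied at login via pam_limits; a sketch (on many distributions the module is already loaded through system-auth, so check before adding it):
[root@rac02 ~]# vi /etc/pam.d/login
session    required     pam_limits.so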
2.7 Configure the Oracle installation groups, users, environment variables, and directories
2.7.1 Create the groups
groupadd -g 1300 dba
groupadd -g 1301 oper
groupadd -g 1000 oinstall
groupadd -g 1200 asmadmin
groupadd -g 1201 asmdba
groupadd -g 1202 asmoper
#Create the users
useradd -m -u 1100 -g oinstall -G asmadmin,asmdba,asmoper -s /bin/bash grid
useradd -m -u 1101 -g oinstall -G dba,oper,asmdba -s /bin/bash oracle
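The new accounts still need passwords, which are required later when setting up SSH equivalence (a sketch):
passwd grid
passwd oracle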
2.7.2 Create the directories
mkdir -p /u01/app/11.2.0/grid
mkdir -p /u01/app/grid
mkdir -p /u01/app/oracle
chown -R grid:oinstall /u01
chown grid:oinstall /u01/app/11.2.0/grid
chown grid:oinstall /u01/app/grid
chown oracle:oinstall /u01/app/oracle
chmod -R 775 /u01
2.7.3 Set each user's environment variables in ~/.bash_profile (ORACLE_SID must be changed to match the node)
oracle user:
export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_SID=orc1 # rac01
export ORACLE_SID=orc2 # rac02
export ORACLE_UNQNAME=orc
export ORACLE_BASE=/u01/app/oracle
export ORACLE_HOME=$ORACLE_BASE/product/11.2.0/db_1
export TNS_ADMIN=$ORACLE_HOME/network/admin
export PATH=/usr/sbin:$PATH
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
$ source .bash_profile //reload the profile so the settings take effect
grid user:
export TMP=/tmp
export TMPDIR=$TMP
export ORACLE_SID=+ASM1 # rac01
export ORACLE_SID=+ASM2 # rac02
export ORACLE_BASE=/u01/app/grid
export ORACLE_HOME=/u01/app/11.2.0/grid
export PATH=/usr/sbin:$PATH
export PATH=$ORACLE_HOME/bin:$PATH
export LD_LIBRARY_PATH=$ORACLE_HOME/lib:/lib:/usr/lib
export CLASSPATH=$ORACLE_HOME/JRE:$ORACLE_HOME/jlib:$ORACLE_HOME/rdbms/jlib
umask 022
$ source .bash_profile //reload the profile so the settings take effect
2.8 Configure SSH user equivalence for the grid and oracle users
Method 1:
#cd /software/grid/sshsetup
#./sshUserSetup.sh -user grid -hosts "rac01 rac02" -advanced -noPromptPassphrase
#./sshUserSetup.sh -user oracle -hosts "rac01 rac02" -advanced -noPromptPassphrase
Method 2:
1) On node 1:
[root@rac01 ~]$ su - grid
[grid@rac01 ~]$ cd /home/grid/
[grid@rac01 ~]$ mkdir ~/.ssh
[grid@rac01 ~]$ chmod 700 ~/.ssh
[grid@rac01 ~]$ ssh-keygen -t rsa
[grid@rac01 ~]$ ssh-keygen -t dsa
2) On node 2:
[root@rac02 ~]$ su - grid
[grid@rac02 ~]$ cd /home/grid/
[grid@rac02 ~]$ mkdir ~/.ssh
[grid@rac02 ~]$ chmod 700 ~/.ssh
[grid@rac02 ~]$ ssh-keygen -t rsa
[grid@rac02 ~]$ ssh-keygen -t dsa
3) Run the following on one node only (rac01 in this example):
[grid@rac01 ~]$ touch ~/.ssh/authorized_keys
[grid@rac01 ~]$ cd ~/.ssh
[grid@rac01 .ssh]$ ssh rac01 cat ~/.ssh/id_rsa.pub >> authorized_keys
[grid@rac01 .ssh]$ ssh rac02 cat ~/.ssh/id_rsa.pub >> authorized_keys
[grid@rac01 .ssh]$ ssh rac01 cat ~/.ssh/id_dsa.pub >> authorized_keys
[grid@rac01 .ssh]$ ssh rac02 cat ~/.ssh/id_dsa.pub >> authorized_keys
[grid@rac01 .ssh]$ pwd
[grid@rac01 .ssh]$ scp authorized_keys rac02:`pwd`
4) Run on every node:
[grid@rac01 ~]$ chmod 600 ~/.ssh/authorized_keys
[grid@rac02 ~]$ chmod 600 ~/.ssh/authorized_keys
5) On the node where the OUI will be run, execute the following as the grid user (rac01 in this example):
[grid@rac01 .ssh]$ exec /usr/bin/ssh-agent $SHELL
[grid@rac01 .ssh]$ ssh-add
6) Verify the SSH equivalence:
$ ssh rac01 date
$ ssh rac02 date
$ ssh rac01priv date
$ ssh rac02priv date
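The same checks have to pass for the oracle user as well, and from both nodes. A compact way to run them all as root once both users are set up (a sketch; the first connection to each host may still prompt to accept the host key):
for u in grid oracle; do
  for h in rac01 rac02 rac01priv rac02priv; do
    su - $u -c "ssh $h date"
  done
done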
2.9 Configure raw devices
[root@rac01 ~]# fdisk /dev/sdb
(fdisk keystrokes: n, p, 1, +2G to create the first partition; n, p, 2 for the second; w to write the partition table)
Repeat the partitioning on the remaining shared disks so that the partitions referenced by the udev rules below exist.
[root@rac01 ~]# vi /etc/udev/rules.d/60-raw.rules
ACTION=="add", KERNEL=="/dev/sdb1", RUN+="/bin/raw /dev/raw/raw1 %N"
ACTION=="add", ENV{MAJOR}=="8", ENV{MINOR}=="17", RUN+="/bin/raw /dev/raw/raw1 %M %m"
ACTION=="add", KERNEL=="/dev/sdc1", RUN+="/bin/raw /dev/raw/raw2 %N"
ACTION=="add", ENV{MAJOR}=="8", ENV{MINOR}=="33", RUN+="/bin/raw /dev/raw/raw2 %M %m"
ACTION=="add", KERNEL=="/dev/sdd1", RUN+="/bin/raw /dev/raw/raw3 %N"
ACTION=="add", ENV{MAJOR}=="8", ENV{MINOR}=="49", RUN+="/bin/raw /dev/raw/raw3 %M %m"
ACTION=="add", KERNEL=="/dev/sde1", RUN+="/bin/raw /dev/raw/raw4 %N"
ACTION=="add", ENV{MAJOR}=="8", ENV{MINOR}=="65", RUN+="/bin/raw /dev/raw/raw4 %M %m"
ACTION=="add", KERNEL=="/dev/sdf1", RUN+="/bin/raw /dev/raw/raw5 %N"
ACTION=="add", ENV{MAJOR}=="8", ENV{MINOR}=="81", RUN+="/bin/raw /dev/raw/raw5 %M %m"
ACTION=="add",KERNEL=="raw*",OWNER="grid",GROUP="asmadmin",MODE="660"
[root@rac01 ~]#start_udev //if the raw devices are not created correctly, bind them manually with the two commands below
// #raw /dev/raw/raw1 /dev/sdb1
// #raw /dev/raw/raw2 /dev/sdb2
[root@rac01 ~]#raw -qa //equivalent to: ls -lrt /dev/raw
[root@rac01 ~]#ll /dev/raw
[root@rac01 ~]#partprobe
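Before starting the installer it is worth confirming, as the grid user, that the raw devices are owned correctly and readable (a sketch; raw1 is used here as an example):
[grid@rac01 ~]$ ls -l /dev/raw/
[grid@rac01 ~]$ dd if=/dev/raw/raw1 of=/dev/null bs=8192 count=100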
3. Install Grid Infrastructure
[root@rac01 /]#xhost +
[root@rac01 /]# su - grid
[grid@rac01 ~]$ export
[grid@rac01 ~]$ cd /software/grid/
[grid@rac01 grid]$ ./runInstaller
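If the installer flags failed prerequisite checks, they can be examined in more detail with the cluster verification utility shipped in the same directory as runInstaller; a sketch:
[grid@rac01 grid]$ ./runcluvfy.sh stage -pre crsinst -n rac01,rac02 -verbose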
If running root.sh fails, the failed configuration can be rolled back with the following command before retrying:
[root@rac02 ~]# /u01/app/11.2.0/grid/crs/install/rootcrs.pl -deconfig -force
View the installation log:
[root@rac01 /]# cat /u01/app/oraInventory/logs/installActions2019-03-27_04-25-47PM.log
Check the CRS status:
[grid@rac01 ~]$ crsctl check crs
Check the Clusterware resources:
[grid@rac01 ~]$ crs_stat -t -v
[grid@rac01 ~]$ crsctl stat res -t
Check the cluster nodes:
[grid@rac01 ~]$ olsnodes -n
Check the Oracle TNS listener processes on both nodes:
[grid@rac01 ~]$ ps -ef|grep lsnr|grep -v 'grep'|grep -v 'ocfs'|awk '{print$9}'
Confirm that the installed Oracle ASM instances are running:
[grid@rac01 ~]$ srvctl status asm -a
[grid@rac01 grid]$ ./runcluvfy.sh stage -post hwos -n rac01,rac02 -verbose
Create the ASM disk groups with ASMCA:
[grid@rac01 ~]$ asmca
4. Install the Oracle database software
[root@rac01 ~]#xhost +
[root@rac01 ~]# su - oracle
[oracle@rac01 ~]$ cd /software/database
[oracle@rac01 database]$ ./runInstaller
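If the database software was installed as a software-only installation, the RAC database itself (orc in this walkthrough) is created afterwards with the Database Configuration Assistant; a sketch:
[oracle@rac01 ~]$ dbca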
5. Check the configuration after installation
Check the database status across the cluster:
[grid@rac01 ~]$srvctl status database -d orc
Check the CRS status on the local node:
[grid@rac01 ~]$ crsctl check crs
Check the CRS status cluster-wide:
[grid@rac01 ~]$ crsctl check cluster
Check the cluster nodes:
[grid@rac01 ~]$ olsnodes -n -i -s -t
Check the voting disk information:
[grid@rac01 ~]$ crsctl query css votedisk
Check the SCAN IP configuration:
[grid@rac01 ~]$ srvctl config scan
Check the SCAN listener:
[grid@rac01 ~]$ srvctl config scan_listener
6. Start and stop the cluster database (as the grid user)
Stop and start the database across the entire cluster:
[grid@rac01 ~]$ srvctl stop database -d orc
[grid@rac01 ~]$ srvctl start database -d orc
As the root user, stop the clusterware on the current node only:
[root@rac01 bin]# pwd
/u01/app/11.2.0/grid/bin
[root@rac01 bin]# ./crsctl stop crs
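To bring the clusterware on this node back up afterwards, from the same directory:
[root@rac01 bin]# ./crsctl start crs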
7. Enterprise Manager (EM)
[oracle@rac1 ~]$ emctl status dbconsole
[oracle@rac1 ~]$ emctl start dbconsole
[oracle@rac1 ~]$ emctl stop dbconsole
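The Database Control console is then typically reachable at https://rac01:1158/em; the exact port assigned during installation is recorded in $ORACLE_HOME/install/portlist.ini.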
That wraps up the walkthrough of installing an Oracle database RAC environment on a Linux system inside VMware. Thanks for reading.