Oracle Extended Cluster Test

1. Confirm the shared-storage WWIDs. The script and its usage are as follows:

# print a udev rule line (including the scsi_id WWID) for each shared sd device
for i in c d e f g h i j k l m n o p;
do
echo "KERNEL==\"sd*\", BUS==\"scsi\", PROGRAM==\"/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/\$name\", RESULT==\"`/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/sd$i`\", NAME=\"asm-disk$i\", OWNER=\"grid\", GROUP=\"asmadmin\", MODE=\"0660\""      
done




[root@node1 ~]# for i in  c d e f g h i j k l m n o p;
> do
> echo "KERNEL==\"sd*\", BUS==\"scsi\", PROGRAM==\"/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/\$name\", RESULT==\"`/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/sd$i`\", NAME=\"asm-disk$i\", OWNER=\"grid\", GROUP=\"asmadmin\", MODE=\"0660\""      
> done
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c45004932325a59732d757266762d4f486c59", NAME="asm-diskc", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c45004932325a59732d757266762d4f486c59", NAME="asm-diskd", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c45007469355367682d513879662d6d344836", NAME="asm-diskg", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c45007469355367682d513879662d6d344836", NAME="asm-diskh", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c4500764e577843332d547243472d37417072", NAME="asm-diskk", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c4500764e577843332d547243472d37417072", NAME="asm-diskl", OWNER="grid", GROUP="asmadmin", MODE="0660"



The remaining path pairs (sde/sdf, sdi/sdj, sdm/sdn, sdo/sdp):

KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c4500786945716b462d47746c4b2d73366f32", NAME="asm-diske", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c4500786945716b462d47746c4b2d73366f32", NAME="asm-diskf", OWNER="grid", GROUP="asmadmin", MODE="0660"

KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c450077706b396c302d6c74366d2d35316869", NAME="asm-diski", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c450077706b396c302d6c74366d2d35316869", NAME="asm-diskj", OWNER="grid", GROUP="asmadmin", MODE="0660"

KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c4500334d416d49352d4a6c41342d54764131", NAME="asm-diskm", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c4500334d416d49352d4a6c41342d54764131", NAME="asm-diskn", OWNER="grid", GROUP="asmadmin", MODE="0660"


KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c45003750566b57432d465255562d48716d6b", NAME="asm-disko", OWNER="grid", GROUP="asmadmin", MODE="0660"
KERNEL=="sd*", BUS=="scsi", PROGRAM=="/sbin/scsi_id --whitelisted --replace-whitespace --device=/dev/$name", RESULT=="14f504e46494c45003750566b57432d465255562d48716d6b", NAME="asm-diskp", OWNER="grid", GROUP="asmadmin", MODE="0660"
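Paths that share a RESULT value above are two routes to the same LUN. To list the device-to-WWID pairs directly, a minimal sketch reusing the scsi_id flags from the script above:

# print device -> WWID pairs; identical WWIDs mark two paths to one LUN
for d in /dev/sd[c-p]; do
  printf '%s %s\n' "$d" "$(/sbin/scsi_id --whitelisted --replace-whitespace --device=$d)"
done | sort -k2   # sorting by WWID groups the path pairs together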

[root@node1 mapper]# ls -l
total 0
lrwxrwxrwx 1 root root      7 Apr 19 08:19 arch -> ../dm-9
lrwxrwxrwx 1 root root      7 Apr 19 08:19 arch_new -> ../dm-6
crw-rw---- 1 root root 10, 58 Apr 19 08:01 control
lrwxrwxrwx 1 root root      7 Apr 19 08:19 data -> ../dm-8
lrwxrwxrwx 1 root root      7 Apr 19 08:19 data_new -> ../dm-4
lrwxrwxrwx 1 root root      7 Apr 19 08:01 vg01-Log_ora -> ../dm-1
lrwxrwxrwx 1 root root      7 Apr 19 08:01 vg01-LogVol00 -> ../dm-0
lrwxrwxrwx 1 root root      7 Apr 19 08:01 vg01-LogVol02 -> ../dm-2
lrwxrwxrwx 1 root root      7 Apr 19 08:19 vote_ocr -> ../dm-7
lrwxrwxrwx 1 root root      7 Apr 19 08:19 vote_ocr_new1 -> ../dm-3
lrwxrwxrwx 1 root root      7 Apr 19 08:19 vote_ocr_new2 -> ../dm-5



2. Carve out the shared-storage LUNs on Openfiler. The plan is as follows:


Voting disks (redundancy: normal)
         /voting_disk/nfsvote  mounted on both node1 and node2
         nas1vote
         nas2vote  5312M


DATA disk group (redundancy: normal)

data fg1 nas1datafg11 11296M
         nas1datafg12

     fg2 nas2datafg11
         nas2datafg12
         

ARCH disk group (redundancy: normal)
         
arch fg1 nas1archfg11 7456M
         nas1archfg12
         
     fg2 nas2archfg11
         nas2archfg12




3. Edit the multipath configuration file multipath.conf. In practice it is advisable to attach and label the LUNs from one storage array first, and only then attach and label the LUNs from the other array.


defaults {
        user_friendly_names yes
}

devices {
        device {
                vendor "COMPELNT"
                product "Compellent Vol"
                path_grouping_policy multibus
                getuid_callout "/sbin/scsi_id -g -u -s /block/%n"
                path_selector "round-robin 0"
                path_checker tur
                features "0"
                hardware_handler "0"
                failback immediate
                rr_weight uniform
                no_path_retry fail
                rr_min_io 1000
        }
}

multipaths {
        multipath {
                wwid 14f504e46494c45004932325a59732d757266762d4f486c59
                alias nas1vote
        }
        multipath {
                wwid 14f504e46494c45007469355367682d513879662d6d344836
                alias nas1datafg11
        }
        multipath {
                wwid 14f504e46494c4500764e577843332d547243472d37417072
                alias nas1archfg11
        }
        multipath {
                wwid 14f504e46494c450077706b396c302d6c74366d2d35316869
                alias nas2datafg11
        }
        multipath {
                wwid 14f504e46494c4500786945716b462d47746c4b2d73366f32
                alias nas2vote
        }
        multipath {
                wwid 14f504e46494c45003750566b57432d465255562d48716d6b
                alias nas2archfg11
        }
        multipath {
                wwid 14f504e46494c45007374663530432d62436f732d346a744e
                alias nas1archfg12
        }
        multipath {
                wwid 14f504e46494c450041456f6d71672d323366532d4e524e6d
                alias nas1datafg12
        }
        multipath {
                wwid 14f504e46494c45003547326658682d6c746d4f2d4f566957
                alias nas2datafg12
        }
        multipath {
                wwid 14f504e46494c45007663496732452d334d44612d3556774d
                alias nas2archfg12
        }
}
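After editing multipath.conf, the maps have to be rebuilt before the aliases appear; a minimal sketch, assuming the RHEL 6 SysV multipathd service used elsewhere in this document:

# flush maps that are not in use, then rebuild them from the edited config
/sbin/multipath -F
/sbin/multipath -v2
service multipathd reload   # have the daemon pick up the new configuration
multipath -ll | grep nas    # verify the nas* aliases are present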

[root@node2 ~]# multipath -ll | grep nas
nas1vote (14f504e46494c45004932325a59732d757266762d4f486c59) dm-4 OPNFILER,VIRTUAL-DISK
nas1archfg12 (14f504e46494c45007374663530432d62436f732d346a744e) dm-8 OPNFILER,VIRTUAL-DISK
nas1archfg11 (14f504e46494c4500764e577843332d547243472d37417072) dm-11 OPNFILER,VIRTUAL-DISK
nas2vote (14f504e46494c4500786945716b462d47746c4b2d73366f32) dm-3 OPNFILER,VIRTUAL-DISK
nas2datafg12 (14f504e46494c45003547326658682d6c746d4f2d4f566957) dm-12 OPNFILER,VIRTUAL-DISK
nas2datafg11 (14f504e46494c450077706b396c302d6c74366d2d35316869) dm-6 OPNFILER,VIRTUAL-DISK
nas2archfg12 (14f504e46494c45007663496732452d334d44612d3556774d) dm-13 OPNFILER,VIRTUAL-DISK
nas2archfg11 (14f504e46494c45003750566b57432d465255562d48716d6b) dm-10 OPNFILER,VIRTUAL-DISK
nas1datafg12 (14f504e46494c450041456f6d71672d323366532d4e524e6d) dm-5 OPNFILER,VIRTUAL-DISK
nas1datafg11 (14f504e46494c45007469355367682d513879662d6d344836) dm-9 OPNFILER,VIRTUAL-DISK


4. For the initial installation, the shared-disk permissions were adjusted via /etc/rc.local. During the installation this turned out to be problematic (see screenshots for error 1 and error 3); for real projects, use the approach described under error 3 below instead.


[root@node2 ~]# cat /etc/rc.local
#!/bin/sh
#
# This script will be executed *after* all the other init scripts.
# You can put your own initialization stuff in here if you don't
# want to do the full Sys V style init stuff.
touch /var/lock/subsys/local
chown oragrid:oinstall /dev/mapper/nas1*
chown oragrid:oinstall /dev/mapper/nas2*
chmod 0660 /dev/mapper/nas1*
chmod 0660 /dev/mapper/nas2*

[root@node2 ~]# /etc/rc.local
[root@node2 ~]# cd /dev
[root@node2 dev]# ls -l dm*
brw-rw----  1 root    disk     253, 0 Apr 19 18:30 dm-0
brw-rw----  1 root    disk     253, 1 Apr 19 18:30 dm-1
brw-rw----  1 root    disk     253, 2 Apr 19 18:30 dm-2
brw-rw----  1 oragrid oinstall 253, 3 Apr 19 21:08 dm-3
brw-rw----  1 oragrid oinstall 253, 4 Apr 19 21:08 dm-4
brw-rw----  1 oragrid oinstall 253, 5 Apr 19 21:08 dm-5
brw-rw----  1 root    disk     253, 6 Apr 19 21:08 dm-6
brw-rw----  1 oragrid oinstall 253, 7 Apr 19 21:08 dm-7
brw-rw----  1 oragrid oinstall 253, 8 Apr 19 21:08 dm-8
brw-rw----  1 oragrid oinstall 253, 9 Apr 19 21:08 dm-9
crw-rw----+ 1 root    audio     14, 9 Apr 19 18:30 dmmidi





5. On the NFS server node3, create the NFS voting file with dd




Create the raw voting file with dd on node3:


[root@node3 ~]# su - oragrid
[oragrid@node3 ~]$ cd /votedisk/
[oragrid@node3 votedisk]$ ls
[oragrid@node3 votedisk]$ pwd
/votedisk
[oragrid@node3 votedisk]$ su
Password:

[root@node3 votedisk]#  dd if=/dev/zero of=nfsvote  bs=1024k count=5120
5120+0 records in
5120+0 records out
5368709120 bytes (5.4 GB) copied, 126.308 s, 42.5 MB/s
[root@node3 votedisk]# ls -l
total 5242884
-rw-r--r--. 1 root root 5368709120 Apr 19 08:50 nfsvote

[root@node3 /]# cd /
[root@node3 /]# ls -l | grep votedisk
drwxr-xr-x.   2 oragrid oinstall  4096 Apr 19 08:48 votedisk

[root@node3 /]# cd /votedisk/
[root@node3 votedisk]# ls -l
total 5242884
-rw-r--r--. 1 root root 5368709120 Apr 19 08:50 nfsvote
[root@node3 votedisk]# chmod 660 nfsvote
[root@node3 votedisk]# chown oragrid:oinstall nfsvote
[root@node3 votedisk]# ls -l
total 5242884
-rw-rw----. 1 oragrid oinstall 5368709120 Apr 19 08:50 nfsvote
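For node1 and node2 to mount this directory, node3 must export it; a minimal /etc/exports sketch (only the /votedisk path comes from the transcript above; the export options are an assumption based on Oracle's usual NFS recommendations):

# /etc/exports on node3 -- export the voting directory to both RAC nodes
/votedisk  node1(rw,sync,no_wdelay,insecure_locks,no_root_squash)
/votedisk  node2(rw,sync,no_wdelay,insecure_locks,no_root_squash)

Apply with exportfs -a (or service nfs reload).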



6. Adjust the RAC node parameter files and create the users and related directories; for the steps, refer to the document 《北京电力指挥系统 Oracle 11g RAC For RHEL6.7 x86_64 安装(ASM) 实践操作》.



7. Upload the software; install grid, and for db install the software only. Begin the installation; a brief walkthrough is in the screenshots folder.

Upload the software to node1:

Upload the db software to /home/oracle

Upload the grid software to /home/grid


Unzip each package as its corresponding user into its directory:
                        unzip softwarename.zip -d /home/oracle/
                        unzip softwarename.zip -d /home/grid/




         


Start Xmanager Passive on the console workstation.


Begin the installation on node1:

[root@node1 voting_disk]# export DISPLAY=192.168.1.1:0.0
[root@node1 voting_disk]# xhost +
access control disabled, clients can connect from any host
[root@node1 voting_disk]# su - oragrid
[oragrid@node1 ~]$ xhost +
access control disabled, clients can connect from any host
[oragrid@node1 ~]$ cd
[oragrid@node1 ~]$ cd grid
[oragrid@node1 grid]$ ls
install  readme.html  response  rpm  runcluvfy.sh  runInstaller  sshsetup  stage  welcome.html
[oragrid@node1 grid]$
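Before launching runInstaller, the prerequisites are typically verified with the bundled runcluvfy.sh (visible in the directory listing above); a sketch with this document's node names, output omitted:

[oragrid@node1 grid]$ ./runcluvfy.sh stage -pre crsinst -n node1,node2 -verbose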









8. Handling errors during the grid installation


Error 1:

Run /etc/rc.local on the installing node.



Error 2:

Adjust the NFS mount options to the line below, per the official note Mount Options for Oracle files for RAC databases and Clusterware when used with NFS on NAS devices (Doc ID 359515.1):


rw,bg,hard,nointr,rsize=32768,wsize=32768,tcp,noac,vers=3,timeo=600,actimeo=0
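A hypothetical /etc/fstab entry on the RAC nodes combining these options (the export path /votedisk and mount point /voting_disk are taken from this document's transcripts; everything else is standard fstab syntax):

# NFS voting-file mount per Doc ID 359515.1
node3:/votedisk  /voting_disk  nfs  rw,bg,hard,nointr,rsize=32768,wsize=32768,tcp,noac,vers=3,timeo=600,actimeo=0  0  0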


Error 3:

Same root cause as error 1: the shared-disk permissions on the installing node changed again during execution, so re-running /etc/rc.local from another terminal clearly does not fix this.

Two options are available:
1. install the ASMLib packages
2. udev

udev is used here.

[root@node1 ~]# cd /dev/mapper
[root@node1 mapper]# ls -l | grep nas
lrwxrwxrwx 1 root root      7 4月  20 06:49 nas1archfg11 -> ../dm-9
lrwxrwxrwx 1 root root      7 4月  20 06:49 nas1archfg12 -> ../dm-6
lrwxrwxrwx 1 root root      7 4月  20 06:49 nas1datafg11 -> ../dm-8
lrwxrwxrwx 1 root root      7 4月  20 06:49 nas1datafg12 -> ../dm-5
lrwxrwxrwx 1 root root      7 4月  20 06:49 nas1vote -> ../dm-3
lrwxrwxrwx 1 root root      8 4月  20 06:49 nas2archfg11 -> ../dm-11
lrwxrwxrwx 1 root root      7 4月  20 06:49 nas2archfg12 -> ../dm-7
lrwxrwxrwx 1 root root      8 4月  20 06:49 nas2datafg11 -> ../dm-12
lrwxrwxrwx 1 root root      7 4月  20 06:49 nas2datafg12 -> ../dm-4
lrwxrwxrwx 1 root root      8 4月  20 06:49 nas2vote -> ../dm-10

nas1archfg11 nas1archfg12 nas1datafg11 nas1datafg12 nas1vote nas2archfg11 nas2archfg12 nas2datafg11 nas2datafg12 nas2vote


[root@node1 mapper]# for i in nas1archfg11 nas1archfg12 nas1datafg11 nas1datafg12 nas1vote nas2archfg11 nas2archfg12 nas2datafg11 nas2datafg12 nas2vote; do printf "%s %s\n" "$i" "$(udevadm info --query=all --name=/dev/mapper/$i | grep -i dm_uuid)"; done
nas1archfg11 E: DM_UUID=mpath-14f504e46494c4500764e577843332d547243472d37417072
nas1archfg12 E: DM_UUID=mpath-14f504e46494c45007374663530432d62436f732d346a744e
nas1datafg11 E: DM_UUID=mpath-14f504e46494c45007469355367682d513879662d6d344836
nas1datafg12 E: DM_UUID=mpath-14f504e46494c450041456f6d71672d323366532d4e524e6d
nas1vote E: DM_UUID=mpath-14f504e46494c45004932325a59732d757266762d4f486c59
nas2archfg11 E: DM_UUID=mpath-14f504e46494c45003750566b57432d465255562d48716d6b
nas2archfg12 E: DM_UUID=mpath-14f504e46494c45007663496732452d334d44612d3556774d
nas2datafg11 E: DM_UUID=mpath-14f504e46494c450077706b396c302d6c74366d2d35316869
nas2datafg12 E: DM_UUID=mpath-14f504e46494c45003547326658682d6c746d4f2d4f566957
nas2vote E: DM_UUID=mpath-14f504e46494c4500786945716b462d47746c4b2d73366f32

Create a file named 99-oracle-asmdevices.rules in /etc/udev/rules.d/. Within it, add a rule for each device, similar to the following:
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-14f504e46494c4500764e577843332d547243472d37417072",OWNER="oragrid",GROUP="oinstall",MODE="0660"
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-14f504e46494c45007374663530432d62436f732d346a744e",OWNER="oragrid",GROUP="oinstall",MODE="0660"
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-14f504e46494c45007469355367682d513879662d6d344836",OWNER="oragrid",GROUP="oinstall",MODE="0660"
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-14f504e46494c450041456f6d71672d323366532d4e524e6d",OWNER="oragrid",GROUP="oinstall",MODE="0660"
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-14f504e46494c45004932325a59732d757266762d4f486c59",OWNER="oragrid",GROUP="oinstall",MODE="0660"
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-14f504e46494c45003750566b57432d465255562d48716d6b",OWNER="oragrid",GROUP="oinstall",MODE="0660"
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-14f504e46494c45007663496732452d334d44612d3556774d",OWNER="oragrid",GROUP="oinstall",MODE="0660"
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-14f504e46494c450077706b396c302d6c74366d2d35316869",OWNER="oragrid",GROUP="oinstall",MODE="0660"
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-14f504e46494c45003547326658682d6c746d4f2d4f566957",OWNER="oragrid",GROUP="oinstall",MODE="0660"
KERNEL=="dm-*",ENV{DM_UUID}=="mpath-14f504e46494c4500786945716b462d47746c4b2d73366f32",OWNER="oragrid",GROUP="oinstall",MODE="0660"




Run start_udev.
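After start_udev, the ownership can be spot-checked, and a single device can be dry-run through the rules engine; a minimal sketch (dm-9 is one of the mapped devices listed above; the expected ownership follows from the rules):

# confirm the dm nodes picked up oragrid:oinstall with mode 0660
ls -l /dev/dm-*

# simulate rule processing for one device when debugging a rule that does not match
udevadm test /sys/block/dm-9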


Comment out the permission-adjustment lines in /etc/rc.local.


Once done, re-run grid/grid_home/root.sh; this time it completes successfully.

Tip: [root@node1 dev]# watch -n 1 'ls -l dm*'  — a handy command for monitoring whether permissions change during the run.



After the GRID software is installed, install the database software (software only; do not create a database yet).




9. Status checks after grid and db are installed (software only):

[oragrid@node1 ~]$ crsctl check crs
CRS-4638: Oracle High Availability Services is online
CRS-4537: Cluster Ready Services is online
CRS-4529: Cluster Synchronization Services is online
CRS-4533: Event Manager is online
[oragrid@node1 ~]$ crsctl status res -t
--------------------------------------------------------------------------------
NAME           TARGET  STATE        SERVER                   STATE_DETAILS      
--------------------------------------------------------------------------------
Local Resources
--------------------------------------------------------------------------------
ora.LISTENER.lsnr
               ONLINE  ONLINE       node1                                       
               ONLINE  ONLINE       node2                                       
ora.OCRVOTE.dg
               ONLINE  ONLINE       node1                                       
               ONLINE  ONLINE       node2                                       
ora.asm
               ONLINE  ONLINE       node1                    Started            
               ONLINE  ONLINE       node2                    Started            
ora.gsd
               OFFLINE OFFLINE      node1                                       
               OFFLINE OFFLINE      node2                                       
ora.net1.network
               ONLINE  ONLINE       node1                                       
               ONLINE  ONLINE       node2                                       
ora.ons
               ONLINE  ONLINE       node1                                       
               ONLINE  ONLINE       node2                                       
ora.registry.acfs
               ONLINE  ONLINE       node1                                       
               ONLINE  ONLINE       node2                                       
--------------------------------------------------------------------------------
Cluster Resources
--------------------------------------------------------------------------------
ora.LISTENER_SCAN1.lsnr
      1        ONLINE  ONLINE       node2                                       
ora.cvu
      1        ONLINE  ONLINE       node1                                       
ora.node1.vip
      1        ONLINE  ONLINE       node1                                       
ora.node2.vip
      1        ONLINE  ONLINE       node2                                       
ora.oc4j
      1        ONLINE  ONLINE       node1                                       
ora.scan1.vip
      1        ONLINE  ONLINE       node2                                       
[oragrid@node1 ~]$ asmcmd
ASMCMD> lsdsk
Path
/dev/mapper/nas1vote
/dev/mapper/nas2vote
/voting_disk/nfsvote
ASMCMD> lsdg
State    Type    Rebal  Sector  Block       AU  Total_MB  Free_MB  Req_mir_free_MB  Usable_file_MB  Offline_disks  Voting_files  Name
MOUNTED  NORMAL  N         512   4096  1048576     16192    15266             5728            4769              0             Y  OCRVOTE/
ASMCMD>



10. Manually create the ASM disk groups (run on node1)

DATA disk group (redundancy: normal)

data fg1 nas1datafg11 11296M
         nas1datafg12

     fg2 nas2datafg11
         nas2datafg12
         

ARCH disk group (redundancy: normal)
         
arch fg1 nas1archfg11 7456M
         nas1archfg12
         
     fg2 nas2archfg11
         nas2archfg12
         



         
create diskgroup data normal redundancy
failgroup fg1 disk '/dev/mapper/nas1datafg11','/dev/mapper/nas1datafg12'
failgroup fg2 disk '/dev/mapper/nas2datafg11','/dev/mapper/nas2datafg12';


create diskgroup arch normal redundancy
failgroup fg1 disk '/dev/mapper/nas1archfg11','/dev/mapper/nas1archfg12'
failgroup fg2 disk '/dev/mapper/nas2archfg11','/dev/mapper/nas2archfg12';



Procedure:

node1:
[oragrid@node1 ~]$ whoami
oragrid
[oragrid@node1 ~]$ echo $ORACLE_SID
+ASM1
[oragrid@node1 ~]$ sqlplus /nolog

SQL*Plus: Release 11.2.0.4.0 Production on Thu Apr 20 08:43:21 2017

Copyright (c) 1982, 2013, Oracle.  All rights reserved.

SQL> connect /as sysasm
Connected.
SQL> create diskgroup data normal redundancy
failgroup fg1 disk '/dev/mapper/nas1datafg11','/dev/mapper/nas1datafg12'
failgroup fg2 disk '/dev/mapper/nas2datafg11','/dev/mapper/nas2datafg12';  2    3  

Diskgroup created.

SQL>
SQL>
SQL>
SQL>
SQL>
SQL> create diskgroup arch normal redundancy
failgroup fg1 disk '/dev/mapper/nas1archfg11','/dev/mapper/nas1archfg12'
failgroup fg2 disk '/dev/mapper/nas2archfg11','/dev/mapper/nas2archfg12';  2    3  

Diskgroup created.

SQL>


SQL> exit
Disconnected from Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production
With the Real Application Clusters and Automatic Storage Management options
[oragrid@node1 ~]$ asmcmd
ASMCMD> lsdg
State    Type    Rebal  Sector  Block       AU  Total_MB  Free_MB  Req_mir_free_MB  Usable_file_MB  Offline_disks  Voting_files  Name
MOUNTED  NORMAL  N         512   4096  1048576     30912    30806             8544           11131              0             N  ARCH/
MOUNTED  NORMAL  N         512   4096  1048576     46464    46358            12576           16891              0             N  DATA/
MOUNTED  NORMAL  N         512   4096  1048576     16192    15266             5728            4769              0             Y  OCRVOTE/
ASMCMD> lsdsk
Path
/dev/mapper/nas1archfg11
/dev/mapper/nas1archfg12
/dev/mapper/nas1datafg11
/dev/mapper/nas1datafg12
/dev/mapper/nas1vote
/dev/mapper/nas2archfg11
/dev/mapper/nas2archfg12
/dev/mapper/nas2datafg11
/dev/mapper/nas2datafg12
/dev/mapper/nas2vote
/voting_disk/nfsvote
ASMCMD>

node2:

[oragrid@node2 ~]$ sqlplus /nolog

SQL*Plus: Release 11.2.0.4.0 Production on Thu Apr 20 08:45:04 2017

Copyright (c) 1982, 2013, Oracle.  All rights reserved.

SQL> connect /as sysasm
Connected.
SQL> alter diskgroup data mount;

Diskgroup altered.

SQL> alter diskgroup arch mount;

Diskgroup altered.

SQL> exit
Disconnected from Oracle Database 11g Enterprise Edition Release 11.2.0.4.0 - 64bit Production
With the Real Application Clusters and Automatic Storage Management options
[oragrid@node2 ~]$ asmcmd
ASMCMD> lsdg     
State    Type    Rebal  Sector  Block       AU  Total_MB  Free_MB  Req_mir_free_MB  Usable_file_MB  Offline_disks  Voting_files  Name
MOUNTED  NORMAL  N         512   4096  1048576     30912    30722             8544           11089              0             N  ARCH/
MOUNTED  NORMAL  N         512   4096  1048576     46464    46274            12576           16849              0             N  DATA/
MOUNTED  NORMAL  N         512   4096  1048576     16192    15266             5728            4769              0             Y  OCRVOTE/
ASMCMD> lsdsk
Path
/dev/mapper/nas1archfg11
/dev/mapper/nas1archfg12
/dev/mapper/nas1datafg11
/dev/mapper/nas1datafg12
/dev/mapper/nas1vote
/dev/mapper/nas2archfg11
/dev/mapper/nas2archfg12
/dev/mapper/nas2datafg11
/dev/mapper/nas2datafg12
/dev/mapper/nas2vote
/voting_disk/nfsvote
ASMCMD>
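The failure-group layout can also be confirmed from either ASM instance with a dictionary query; a minimal sketch (standard 11.2 views, not part of the original procedure):

-- confirm each disk landed in the intended failure group
select g.name diskgroup, d.failgroup, d.path
  from v$asm_diskgroup g, v$asm_disk d
 where d.group_number = g.group_number
 order by g.name, d.failgroup, d.path;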





11. Adjust the ASM instance parameters
alter system set asm_preferred_read_failure_groups='DATA.FG1','ARCH.FG1' scope=both sid='+ASM1';

alter system set asm_preferred_read_failure_groups='DATA.FG2','ARCH.FG2' scope=both sid='+ASM2';


Adjust on node1:

[oragrid@node1 ~]$ sqlplus /nolog

SQL*Plus: Release 11.2.0.4.0 Production on Thu Apr 20 08:47:00 2017

Copyright (c) 1982, 2013, Oracle.  All rights reserved.

SQL> connect /as sysasm
Connected.
SQL> show parameter asm_pre            

NAME                                     TYPE         VALUE
------------------------------------ ----------- ------------------------------
asm_preferred_read_failure_groups    string
SQL> alter system set asm_preferred_read_failure_groups='DATA.FG1','ARCH.FG1' scope=both sid='+ASM1';

System altered.

SQL> alter system set asm_preferred_read_failure_groups='DATA.FG2','ARCH.FG2' scope=both sid='+ASM2';

System altered.

Verify on node1:
SQL> show parameter asm_pre

NAME                                     TYPE         VALUE
------------------------------------ ----------- ------------------------------
asm_preferred_read_failure_groups    string         DATA.FG1, ARCH.FG1
SQL>

Verify on node2:
[oragrid@node2 ~]$ sqlplus /nolog

SQL*Plus: Release 11.2.0.4.0 Production on Thu Apr 20 08:52:00 2017

Copyright (c) 1982, 2013, Oracle.  All rights reserved.

SQL> connect /as sysasm
Connected.
SQL> show parameter asm_pre

NAME                                     TYPE         VALUE
------------------------------------ ----------- ------------------------------
asm_preferred_read_failure_groups    string         DATA.FG2, ARCH.FG2
SQL>
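gv$asm_disk exposes a PREFERRED_READ flag, so the effect can be checked for both instances from one session; a minimal sketch (standard 11.2 view and column):

-- PREFERRED_READ = 'Y' marks the failure group local to each instance
select inst_id, failgroup, path, preferred_read
  from gv$asm_disk
 where failgroup is not null
 order by inst_id, failgroup, path;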



12. Adjust the ASM disk-group compatibility attributes

First confirm the current settings:

[oragrid@node2 ~]$ sqlplus /nolog

SQL*Plus: Release 11.2.0.4.0 Production on Thu Apr 20 08:57:38 2017

Copyright (c) 1982, 2013, Oracle.  All rights reserved.

SQL> connect /as sysasm
Connected.
SQL> col name for a20;
SQL> set linesize 200;
SQL> select name,compatibility from v$asm_diskgroup;

NAME                     COMPATIBILITY
-------------------- ------------------------------------------------------------
OCRVOTE              11.2.0.0.0
DATA                     10.1.0.0.0
ARCH                     10.1.0.0.0


Make the adjustment (running it on one node is sufficient):




DATA DG:
alter diskgroup data set attribute 'compatible.asm' = '11.2.0.0.0';
alter diskgroup data set attribute 'compatible.rdbms' = '11.2.0.0.0';
ARCH DG:
alter diskgroup arch set attribute 'compatible.asm' = '11.2.0.0.0';
alter diskgroup arch set attribute 'compatible.rdbms' = '11.2.0.0.0';
OCR DG:
alter diskgroup ocrvote set attribute 'compatible.rdbms' = '11.2.0.0.0'; ###### this one does not need to be adjusted

Procedure:

SQL> connect /as sysasm
Connected.
SQL> col name for a20;
SQL> set linesize 200;
SQL> select name,compatibility from v$asm_diskgroup;

NAME                     COMPATIBILITY
-------------------- ------------------------------------------------------------
OCRVOTE              11.2.0.0.0
DATA                     10.1.0.0.0
ARCH                     10.1.0.0.0

SQL> alter diskgroup data set attribute 'compatible.asm' = '11.2.0.0.0';

Diskgroup altered.

SQL> alter diskgroup data set attribute 'compatible.rdbms' = '11.2.0.0.0';

Diskgroup altered.

SQL> alter diskgroup arch set attribute 'compatible.asm' = '11.2.0.0.0';
alter diskgroup arch set attribute 'compatible.rdbms' = '11.2.0.0.0';


Diskgroup altered.

SQL>

Diskgroup altered.

SQL> select name,compatibility from v$asm_diskgroup;

NAME                     COMPATIBILITY
-------------------- ------------------------------------------------------------
OCRVOTE              11.2.0.0.0
DATA                     11.2.0.0.0
ARCH                     11.2.0.0.0

SQL>



SQL> show parameter asm_power

NAME                                     TYPE         VALUE
------------------------------------ ----------- ------------------------------
asm_power_limit                      integer         1
SQL> show parameter instance;

NAME                                     TYPE         VALUE
------------------------------------ ----------- ------------------------------
cluster_database_instances             integer         4
instance_name                             string         +ASM2
instance_number                      integer         2
instance_type                             string         asm




13. Create the database:


Query the status after the database is created:

[oragrid@node2 ~]$ crs_stat -t
Name           Type           Target    State     Host        
------------------------------------------------------------
ora.ARCH.dg    ora....up.type ONLINE    ONLINE    node1      
ora.DATA.dg    ora....up.type ONLINE    ONLINE    node1      
ora....ER.lsnr ora....er.type ONLINE    ONLINE    node1      
ora....N1.lsnr ora....er.type ONLINE    ONLINE    node2      
ora.OCRVOTE.dg ora....up.type ONLINE    ONLINE    node1      
ora.asm        ora.asm.type   ONLINE    ONLINE    node1      
ora.cvu        ora.cvu.type   ONLINE    ONLINE    node1      
ora.gsd        ora.gsd.type   OFFLINE   OFFLINE               
ora....network ora....rk.type ONLINE    ONLINE    node1      
ora....SM1.asm application    ONLINE    ONLINE    node1      
ora....E1.lsnr application    ONLINE    ONLINE    node1      
ora.node1.gsd  application    OFFLINE   OFFLINE               
ora.node1.ons  application    ONLINE    ONLINE    node1      
ora.node1.vip  ora....t1.type ONLINE    ONLINE    node1      
ora....SM2.asm application    ONLINE    ONLINE    node2      
ora....E2.lsnr application    ONLINE    ONLINE    node2      
ora.node2.gsd  application    OFFLINE   OFFLINE               
ora.node2.ons  application    ONLINE    ONLINE    node2      
ora.node2.vip  ora....t1.type ONLINE    ONLINE    node2      
ora.oc4j       ora.oc4j.type  ONLINE    ONLINE    node1      
ora.ons        ora.ons.type   ONLINE    ONLINE    node1      
ora.orcc.db    ora....se.type ONLINE    ONLINE    node1      
ora....ry.acfs ora....fs.type ONLINE    ONLINE    node1      
ora.scan1.vip  ora....ip.type ONLINE    ONLINE    node2      
[oragrid@node2 ~]$
[oragrid@node2 ~]$
[oragrid@node2 ~]$
[oragrid@node2 ~]$ srvctl status database -d orcc
Instance orcc1 is running on node node1
Instance orcc2 is running on node node2
[oragrid@node2 ~]$
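A quick configuration check for the new database (standard 11.2 srvctl syntax; output omitted):

# show the registered instances, ORACLE_HOME and spfile location
[oragrid@node2 ~]$ srvctl config database -d orcc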

14. For testing after database creation, see the final part of the screenshots folder.







Reference syntax from the Oracle documentation for re-adding a failed failure group with FORCE:

SQL> ALTER DISKGROUP DATA ADD FAILGROUP SAN2 DISK
'/dev/rdsk/c3t19d3s4' FORCE,'/dev/rdsk/c3t19d4s4' FORCE,
'/dev/rdsk/c3t19d5s4' FORCE, '/dev/rdsk/c3t19d6s4' FORCE REBALANCE POWER <power number 1-11>;



Re-add failure group fg2 (the nas2 disks):

SQL> connect /as sysasm
Connected.
alter diskgroup data add failgroup fg2 disk '/dev/mapper/nas2datafg11' force,'/dev/mapper/nas2datafg12' force rebalance power 10;



alter diskgroup arch add failgroup fg2 disk '/dev/mapper/nas2archfg11' force,'/dev/mapper/nas2archfg12' force rebalance power 10;


[oragrid@node2 ~]$ crsctl query css votedisk
##  STATE    File Universal Id                File Name Disk group
--  -----    -----------------                --------- ---------
1. ONLINE   8de708933a684f61bf045fa63959a66f (/dev/mapper/nas2vote) [OCRVOTE]
2. ONLINE   8d2309a2e3774fbdbf0fc1b96d3678c1 (/voting_disk/nfsvote) [OCRVOTE]
Located 2 voting disk(s).

alter diskgroup ocrvote add disk '/dev/mapper/nas2vote' force;



Re-add failure group fg1 (the nas1 disks):

SQL> connect /as sysasm
Connected.
alter diskgroup data add failgroup fg1 disk '/dev/mapper/nas1datafg11' force ,'/dev/mapper/nas1datafg12' force rebalance power 10;



alter diskgroup arch add failgroup fg1 disk '/dev/mapper/nas1archfg11' force,'/dev/mapper/nas1archfg12' force rebalance power 10;


[oragrid@node2 ~]$ crsctl query css votedisk
##  STATE    File Universal Id                File Name Disk group
--  -----    -----------------                --------- ---------
1. ONLINE   8de708933a684f61bf045fa63959a66f (/dev/mapper/nas2vote) [OCRVOTE]
2. ONLINE   8d2309a2e3774fbdbf0fc1b96d3678c1 (/voting_disk/nfsvote) [OCRVOTE]
Located 2 voting disk(s).

alter diskgroup ocrvote add disk '/dev/mapper/nas1vote' force;
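After a FORCE re-add, the rebalance can be tracked in gv$asm_operation; a minimal sketch (standard 11.2 view):

-- EST_MINUTES falls to 0 and the row disappears when the rebalance completes
select inst_id, group_number, operation, state, power, sofar, est_work, est_minutes
  from gv$asm_operation;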