rpm -ivh oracleasm* to install the 3 ASMLib RPM packages
In the first node of the cluster:
/etc/init.d/oracleasm configure to configure the driver
/etc/init.d/oracleasm createdisk ORADATA_DISK01 /dev/emcpowera1 to mark the disk as ASM disk
/etc/init.d/oracleasm querydisk /dev/emcpowera1 to show the mapping between ASM disks and the device
From the remaining nodes:
/etc/init.d/oracleasm scandisks to detect the configured disks
/etc/sysconfig/oracleasm is modified to match and exclude the disk naming patterns:
# ORACLEASM_SCANORDER: Matching patterns to order disk scanning
ORACLEASM_SCANORDER="emcpower"
# ORACLEASM_SCANEXCLUDE: Matching patterns to exclude disks from scan
ORACLEASM_SCANEXCLUDE="sd"
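To confirm the configuration on each node, the following checks should work (ORADATA_DISK01 is the label created above):
/etc/init.d/oracleasm listdisks to list all disks marked for ASMLib on the node
/etc/init.d/oracleasm querydisk ORADATA_DISK01 to confirm the label maps to a valid ASM disk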
Some Notes about Configuring SAN Disks
1) To check the HBA driver version and whether it is supported:
cat /etc/modprobe.conf
alias scsi_hostadapter1 lpfc
alias scsi_hostadapter2 lpfc
modinfo lpfc | more
filename: /lib/modules/2.6.18-194.el5/kernel/drivers/scsi/lpfc/lpfc.ko
version: 0:8.2.0.63.3p
author: Emulex Corporation - tech.support@emulex.com
description: Emulex LightPulse Fibre Channel SCSI driver 8.2.0.63.3p
license: GPL
srcversion: CABE989E4BB61146A4D4D5D
2) Check LUNs: cat /proc/scsi/scsi | more
3) Partitioning Disks: Partitioning should be run on one node only. Then, on the other nodes, you can run partprobe to update the partition table, or reboot the servers to pick up the partition table changes.
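For example, assuming /dev/emcpowera is the shared device being prepared (device names here are only illustrative):
fdisk /dev/emcpowera on the first node only, to create the partition /dev/emcpowera1
partprobe /dev/emcpowera on each of the remaining nodes, to re-read the updated partition table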
If you prepare the partitions for ASM with udev, you can define rules in /etc/udev/rules.d (udev's global settings live in /etc/udev/udev.conf):
-rw-r--r-- 1 root root 255 Jun 15 2010 89-raw_permissions.rules
-rw-r--r-- 1 root root 600 Jun 15 2010 60-raw.rules
[root@lltcind01 rules.d]# more 89-raw_permissions.rules
#OCR
KERNEL=="raw1",OWNER="root", GROUP="oinstall", MODE="640"
#Votingdisk
KERNEL=="raw2",OWNER="oracle", GROUP="oinstall", MODE="640"
KERNEL=="raw3",OWNER="oracle", GROUP="oinstall", MODE="640"
KERNEL=="raw4",OWNER="oracle", GROUP="oinstall", MODE="640"
[root@lltcind01 rules.d]# more 60-raw.rules
# Enter raw device bindings here.
#
# An example would be:
# ACTION=="add", KERNEL=="sda", RUN+="/bin/raw /dev/raw/raw1 %N"
# to bind /dev/raw/raw1 to /dev/sda, or
# ACTION=="add", ENV{MAJOR}=="8", ENV{MINOR}=="1", RUN+="/bin/raw /dev/raw/raw2 %M %m"
# to bind /dev/raw/raw2 to the device with major 8, minor 1.
ACTION=="add", KERNEL=="emcpoweras1", RUN+="/bin/raw /dev/raw/raw1 %N"
ACTION=="add", KERNEL=="emcpowerat1", RUN+="/bin/raw /dev/raw/raw2 %N"
Then you can reload the udev rules and restart udev via "udevcontrol reload_rules" and "start_udev".
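To confirm that the bindings and permissions took effect (raw1/raw2 are the bindings defined above):
raw -qa to list the current raw device bindings
ls -l /dev/raw/ to verify the owner, group, and mode applied by 89-raw_permissions.rules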
ASM With Connected Clients
You cannot shut down an ASM instance while the OCR is stored in one of its disk groups; the attempt fails with error ORA-15097.
+ASM1 > srvctl stop diskgroup -g GI_VOTE
Although srvctl reports that the OCR/voting disk group is stopped, it is still accessible and in a quiesced state.
SQL> select name, state from v$asm_diskgroup;
NAME STATE
------------------------------ -----------
GI_VOTE QUIESCING
To shut down ASM completely, you need to stop Grid Infrastructure: run "crsctl stop crs" on each node, or use "crsctl stop cluster -all" to bring down the cluster stack (including ASM) on all nodes.
In Oracle 11.2, the dependency between the local ASM instance and the database instance has been redefined:
crsctl status resource ora.DATAoc.db -p
START_DEPENDENCIES=hard(ora.DATA_PD101.dg,ora.DATA_PF101.dg) pullup(ora.DATA_PD101.dg,ora.DATA_PF101.dg)
STOP_DEPENDENCIES=hard(intermediate:ora.asm,shutdown:ora.DATA_PD101.dg,shutdown:ora.DATA_PF101.dg)
You should use "srvctl start diskgroup -g diskgroup_name" since ASM is now a resource within Grid Infrastructure:
srvctl start diskgroup -g GI_VOTE -n lltcind01
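To check whether the disk group resource is running on a given node, something like the following should work (GI_VOTE and lltcind01 are the names used above):
srvctl status diskgroup -g GI_VOTE -n lltcind01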
DBMS_SQLDIAG.export_sql_testcase, DBMS_SQLDIAG.import_sql_testcase
In some situations you need to reproduce a SQL failure on a different machine, either for your own diagnosis or to provide to Oracle Support. Oracle offers the SQL Test Case Builder to reproduce a SQL error via EXPORT_SQL_TESTCASE and IMPORT_SQL_TESTCASE in the DBMS_SQLDIAG package. Oracle creates the SQL test case as a script containing SQL statements that re-create the database objects along with runtime information. The captured information includes the SQL text, table data (optional), the execution plan, optimizer statistics, bind variables, user privileges, SQL profiles, metadata, runtime information, and so on.
EXPORT:
SQL> create or replace directory mysql as '/data/oracle/bkup/test/';
Directory created.
SQL> grant read,write on directory mysql to anguyen_dba;
Grant succeeded.
SQL> grant dba to anguyen_dba;
Grant succeeded.
1 declare mycase clob;
2 begin
3 dbms_sqldiag.export_sql_testcase
4 (directory => 'MYSQL',
5 sql_text =>'select * from rodba.edba_vmstats where rownum <=100 order by 1',
6 user_name => 'ANGUYEN_DBA',
7 exportData => TRUE,
8 testcase => mycase
9 );
10* end;
/
PL/SQL procedure successfully completed.
The export process creates several files in the directory MYSQL. By default, Oracle doesn't export the table data; set the exportData parameter to TRUE to include it. The file oratcb1_00E2093D0001main.xml contains the metadata for the test case.
IMPORT:
SQL> create or replace directory mysql as '/data/oracle/bkup/test/';
Directory created.
SQL> grant read,write on directory mysql to anguyen_dba;
Grant succeeded.
SQL> grant dba to anguyen_dba;
Grant succeeded.
SQL> connect anguyen_dba
Enter password:
Connected.
SQL> begin
2 dbms_sqldiag.import_sql_testcase
3 (directory => 'MYSQL',
4 filename =>'oratcb1_00E2093D0001main.xml',
5 importDATA => TRUE);
6 end;
7 /
PL/SQL procedure successfully completed.
DBMS_SPACE.CREATE_INDEX_COST
You can use the procedure dbms_space.create_index_cost to estimate how much space an index will need before creating it. In the example below, used_bytes is the space required for the index data and alloc_bytes is the space that would be allocated for the index within the tablespace.
SQL> set serverout on
SQL> exec dbms_stats.gather_table_stats('RODBA','EDBA_VMSTATS');
PL/SQL procedure successfully completed.
SQL> variable used_bytes number
SQL> variable alloc_bytes number
SQL> exec dbms_space.create_index_cost('create index RODBA.EDBA_VMSTAT_IX2 on RODBA.EDBA_VMSTATS(runqueue, blocked,cpu_user)',:used_bytes, :alloc_bytes);
PL/SQL procedure successfully completed.
SQL> print :used_bytes
USED_BYTES
----------
276723
SQL> print :alloc_bytes
ALLOC_BYTES
-----------
786432
What's New for Grid Infrastructure in 11.2.0.2
1. Software Update before install or upgrade: a new installer screen allows you to download software updates, bug fixes, and Oracle patches from My Oracle Support.
2. Automatic NIC Bonding for Private Networks: The clusterware has the ability to automatically bond private interconnect interfaces
3. Cluster Health Monitor: It's now delivered with Grid Infrastructure
4. Grid Installation Owner Group Requirements: The grid installation owner no longer has to be a member of the group designated for ASMOPER if one has been created.
5. Out-of-Place Install for Patch Set Releases: patch sets are now full installations performed out of place, so they can be patched before being put into use, reducing downtime. The Grid home must have at least 5.5 GB of space.
6. Clusterware Configuration Wizard
7. Automatic CVU checks: CVU is now managed by CRSD and reports any problems it sees in the cluster setup. You can check the frequency via "srvctl config cvu", as shown below.
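For example (the 720-minute interval below is only an illustration):
srvctl config cvu to display the current CVU check interval
srvctl modify cvu -t 720 to change the check interval to 720 minutes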
Creating a Disk Group via ASMCMD
Confirm that your system can discover the disks that are under ASMLib control:
+ASM1 - oracle: oracleasm-discover
Using ASMLib from /opt/oracle/extapi/64/asm/orcl/1/libasm.so
[ASM Library - Generic Linux, version 2.0.4 (KABI_V2)]
Discovered disk: ORCL:AN_GRID01_DISK1 [10475208 blocks (5363306496 bytes), maxio 256]
Discovered disk: ORCL:AN_GRID01_DISK2 [10475208 blocks (5363306496 bytes), maxio 256]
Discovered disk: ORCL:AN_GRID01_DISK3 [10475208 blocks (5363306496 bytes), maxio 256]
Discovered disk: ORCL:AN_DATA101_DISK1 [1073728213 blocks (549748845056 bytes), maxio 256]
ASMCMD> lsdsk --candidate
Path
ORCL:ORADATA_DD501_DISK01
ORCL:ORADATA_DD501_DISK02
ORCL:ORADATA_DD501_DISK03
ORCL:ORADATA_FD501_DISK01
ORCL:ORADATA_FD501_DISK02
Build an XML file to present to ASMCMD for creating the disk group. The mkdg (make disk group) command in ASMCMD requires an XML file to pass the required parameters.
Here is an example:
cat /apps/work/an/DG_DBA101.xml
<dg name="DG_DBA101" redundancy="external">
<dsk string="ORCL:ORADATA_DD501_DISK01" />
<dsk string="ORCL:ORADATA_DD501_DISK02" />
<dsk string="ORCL:ORADATA_DD501_DISK03" />
<a name="compatible.asm" value="11.2"/>
<a name="compatible.rdbms" value="11.2"/>
</dg>
To create a diskgroup:
ASMCMD > mkdg /apps/work/an/DG_DBA101.xml
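After mkdg completes, the new disk group can be verified from ASMCMD (DG_DBA101 is the name from the XML above):
ASMCMD> lsdg DG_DBA101 to confirm the disk group is mounted and see its size and redundancy
ASMCMD> lsattr -l -G DG_DBA101 to list its attributes, including compatible.asm and compatible.rdbms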
As of Oracle 11.1, an au_size of 4 MB is recommended. You also need to set up partition alignment for your LUNs:
1MB partition alignment for ocr and vote disk diskgroup.
4MB partition alignment for DATA and FRA diskgroups
For example, to verify the partition alignment, execute the following:
sfdisk -uS -l /dev/emcpowera1
Each sector is 512 bytes, so the partition should start at sector 8192 for 4 MB alignment and 2048 for 1 MB alignment.
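As a rough sketch only (this rewrites the partition table, so it is destructive, and /dev/emcpowerb is just a placeholder device), a single partition aligned at sector 8192 can be created with sfdisk:
echo "8192,," | sfdisk -uS /dev/emcpowerb
Then run partprobe on the remaining nodes as described earlier.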
To create a disk group with a 4 MB allocation unit (au_size):
<dg name="DG_DBA102" redundancy="external">
<dsk string="ORCL:ORADATA_DD501_DISK05" />
<dsk string="ORCL:ORADATA_DD501_DISK06" />
<a name="compatible.asm" value="11.2"/>
<a name="compatible.rdbms" value="11.2"/>
<a name="au_size" value="4M" />
</dg>
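After the disk group is created, the allocation unit size can be confirmed from ASMCMD (DG_DBA102 is the name from the XML above); the value is reported in bytes, so a 4 MB AU shows as 4194304:
ASMCMD> lsattr -l -G DG_DBA102 au_size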
OCR vs. OLR
The OCR holds the metadata and wallets for all the resources under the CRSD stack and its agents. The OCR also holds information about local resources, but it is not required to be accessible for a node to join the cluster. It includes information about the state of resources and how to stop/start them. At least one OCR must exist, and you can have up to five copies. In 11gR2, the OCR can be stored in ASM. The OCR location can be found in /etc/oracle/ocr.loc.
+ASM1 - oracle: ocrcheck
Status of Oracle Cluster Registry is as follows :
Version : 3
Total space (kbytes) : 262120
Used space (kbytes) : 3528
Available space (kbytes) : 258592
ID : 1710014838
Device/File Name : +OV
Device/File integrity check succeeded
Device/File not configured
Device/File not configured
Device/File not configured
Device/File not configured
Cluster registry integrity check succeeded
Logical corruption check bypassed due to non-privileged user
From the output above, there are five possible OCR locations, but only one is used.
The OLR holds metadata for the local node and the GPnP profile. It is managed by OHASD, which also manages the low-level processes required to allow a node to join the cluster. None of the OLR data is shared.
- root: ocrcheck -local
Status of Oracle Local Registry is as follows :
Version : 3
Total space (kbytes) : 262120
Used space (kbytes) : 2572
Available space (kbytes) : 259548
ID : 272464046
Device/File Name : /apps/grid/11.2.0/grid/cdata/dbplno155.olr
Device/File integrity check succeeded
Local registry integrity check succeeded
Logical corruption check succeeded
You can also find the OLR location in /etc/oracle/olr.loc.
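Two related checks that are useful here:
ocrconfig -showbackup to list the automatic and manual OCR backups taken by the cluster
ocrconfig -local -manualbackup to take a manual backup of the OLR on the local node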
Detect Database Corruption
** The CHECK LOGICAL clause means that RMAN will check for logical corruption in addition to the default physical checks. If RMAN finds one or more corrupted data blocks, it generates a trace file and also lists them in v$database_block_corruption.
RMAN> run {
allocate channel d1 type disk;
allocate channel d4 type disk;
backup validate check logical database;
}
**The corrupted blocks are listed in the view v$database_block_corruption:
SQL> select * from v$database_block_corruption;
** The affected segments can be identified with the query below:
SELECT e.owner, e.segment_type, e.segment_name, e.partition_name, c.file#
, greatest(e.block_id, c.block#) corr_start_block#
, least(e.block_id+e.blocks-1, c.block#+c.blocks-1) corr_end_block#
, least(e.block_id+e.blocks-1, c.block#+c.blocks-1)
- greatest(e.block_id, c.block#) + 1 blocks_corrupted
, null description
FROM dba_extents e, v$database_block_corruption c
WHERE e.file_id = c.file#
AND e.block_id <= c.block# + c.blocks - 1
AND e.block_id + e.blocks - 1 >= c.block#
UNION
SELECT s.owner, s.segment_type, s.segment_name, s.partition_name, c.file#
, header_block corr_start_block#
, header_block corr_end_block#
, 1 blocks_corrupted
, 'Segment Header' description
FROM dba_segments s, v$database_block_corruption c
WHERE s.header_file = c.file#
AND s.header_block between c.block# and c.block# + c.blocks - 1
UNION
SELECT null owner, null segment_type, null segment_name, null partition_name, c.file#
, greatest(f.block_id, c.block#) corr_start_block#
, least(f.block_id+f.blocks-1, c.block#+c.blocks-1) corr_end_block#
, least(f.block_id+f.blocks-1, c.block#+c.blocks-1)
- greatest(f.block_id, c.block#) + 1 blocks_corrupted
, 'Free Block' description
FROM dba_free_space f, v$database_block_corruption c
WHERE f.file_id = c.file#
AND f.block_id <= c.block# + c.blocks - 1
AND f.block_id + f.blocks - 1 >= c.block#
order by file#, corr_start_block#;
* Cross-reference the corruption between a table and its index(es):
ANALYZE TABLE <OWNER.TABLE_NAME> VALIDATE STRUCTURE CASCADE;
* Validate all the datafiles in the database.
RMAN> backup validate database;
* Run expdp/exp to detect any table-level issues with all of the tables in a specific schema.
* Run DBVERIFY (dbv) to check for physical corruption.
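A sample DBVERIFY run (the datafile path and block size are placeholders for your own values):
dbv file=/data/oracle/oradata/rodba/system01.dbf blocksize=8192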