oracle_hc.sql

Posted on 2018-09-06 09:49 by datalife

select event,count(1) from gv$session group by event order by 2;
exec dbms_workload_repository.create_snapshot;
select 'alter system kill session '''||sid||','||serial#||''';' from v$session where event='latch free';

SET LINESIZE 200
SET NEWPAGE NONE
COL PARAMETER FOR A50
COL VALUE FOR A30
COL VALUE_INT FOR 999999999999999999999999999
COL NAME FOR A60
COL MEMBER FOR A60
COL TABLESPACE_NAME FOR A20
COL FILE_NAME FOR A80
COL DB_LINK FOR A20
COL FILE_NAME FOR A30
COL FIRST_CHANGE# FOR 99999999999999999999999
COL "TOTAL SGA" FOR 99999999999999999999999
COL "LOGIC READS" FOR 99999999999999999999999
COL "PHISICAL READS" FOR 99999999999999999999999
COL ACCOUNT_STATUS FOR A20
COL OPNAME FOR A20
COL USERNAME FOR A15
COL OBJECT_NAME FOR A20
COL TARGET FOR A30
COL TEMPORARY_TABLESPACE FOR A15
COL ACCOUNT_STATUS FOR A15
COL VALUE_INT FOR 99999999999999999999999

/************************************************************
--Database Configuration:
************************************************************/

--1.USER STATUS
SELECT USERNAME,DEFAULT_TABLESPACE,TEMPORARY_TABLESPACE,ACCOUNT_STATUS FROM DBA_USERS ORDER BY 1;
--2.DATABASE VERSION
SELECT * FROM V$VERSION;
--3.DATABASE PRODUCTION --from rda
--SELECT * FROM V$OPTION ORDER BY 1;
--4.CONTROL FILE --from rda
--SELECT NAME,STATUS FROM V$CONTROLFILE;
--5.LOG FILE --from rda
--SELECT GROUP#,THREAD#,SEQUENCE#,BYTES,MEMBERS,ARCHIVED,STATUS,FIRST_CHANGE# FROM V$LOG;
--SELECT GROUP#,STATUS,MEMBER FROM V$LOGFILE;
ARCHIVE LOG LIST;
--6.PARAMETER FILE
--SELECT NAME,VALUE FROM V$PARAMETER ORDER BY 1;
--SELECT NAME,VALUE,ISSPECIFIED,ORDINAL FROM V$SPPARAMETER ORDER BY ISSPECIFIED,NAME;

--7.USAGE PERCENTAGE OF TABLESPACE AND EXTENT ATTRIBUTE:
SELECT D.TABLESPACE_NAME,SPACE "SUM_SPACE(M)",BLOCKS SUM_BLOCKS,SPACE-NVL(FREE_SPACE,0) "USED_SPACE(M)",
ROUND((1-NVL(FREE_SPACE,0)/SPACE)*100,2) "USED_RATE(%)",FREE_SPACE "FREE_SPACE(M)"
FROM
(SELECT TABLESPACE_NAME,ROUND(SUM(BYTES)/(1024*1024),2) SPACE,SUM(BLOCKS) BLOCKS
FROM DBA_DATA_FILES
GROUP BY TABLESPACE_NAME) D,
(SELECT TABLESPACE_NAME,ROUND(SUM(BYTES)/(1024*1024),2) FREE_SPACE
FROM DBA_FREE_SPACE
GROUP BY TABLESPACE_NAME) F
WHERE D.TABLESPACE_NAME = F.TABLESPACE_NAME(+)
UNION ALL --IF TEMPFILES EXIST
SELECT D.TABLESPACE_NAME,SPACE "SUM_SPACE(M)",BLOCKS SUM_BLOCKS,
USED_SPACE "USED_SPACE(M)",ROUND(NVL(USED_SPACE,0)/SPACE*100,2) "USED_RATE(%)",
NVL(FREE_SPACE,0) "FREE_SPACE(M)"
FROM
(SELECT TABLESPACE_NAME,ROUND(SUM(BYTES)/(1024*1024),2) SPACE,SUM(BLOCKS) BLOCKS
FROM DBA_TEMP_FILES
GROUP BY TABLESPACE_NAME) D,
(SELECT TABLESPACE_NAME,ROUND(SUM(BYTES_USED)/(1024*1024),2) USED_SPACE,
ROUND(SUM(BYTES_FREE)/(1024*1024),2) FREE_SPACE
FROM V$TEMP_SPACE_HEADER
GROUP BY TABLESPACE_NAME) F
WHERE D.TABLESPACE_NAME = F.TABLESPACE_NAME(+);

--12C
set pages 80
set lin 120
set echo off
set feed off
column PCT format 999.99
column tbspce format A30
col container for a30
select substr(f.tablespace_name,1,30) tbspce,
round(f.tsbytes/(1024*1024),0) "ALLOCATED(MB)",
round(nvl(s.segbytes,0)/(1024*1024),0) "USED(MB)",
round((nvl(s.segbytes,0)/f.tsbytes)*100,2) PCT,
lower(vc.name) as container
from
(select con_id,tablespace_name,sum(bytes) tsbytes from cdb_data_files group by con_id,tablespace_name) f,
(select con_id,tablespace_name,sum(bytes) segbytes from cdb_segments group by con_id,tablespace_name) s,
v$containers vc
where f.con_id=s.con_id(+)
and f.tablespace_name=s.tablespace_name(+)
and f.con_id=vc.con_id
order by container, tbspce;

set verify off
column file_name format a50 word_wrapped
column smallest format 999,990 heading "Smallest|Size|Poss."
column currsize format 999,990 heading "Current|Size"
column savings format 999,990 heading "Poss.|Savings"
break on report
compute sum of savings on report

column value new_val blksize
select value from v$parameter where name = 'db_block_size'
/

select file_name,
ceil( (nvl(hwm,1)*&&blksize)/1024/1024 ) smallest,
ceil( blocks*&&blksize/1024/1024) currsize,
ceil( blocks*&&blksize/1024/1024) -
ceil( (nvl(hwm,1)*&&blksize)/1024/1024 ) savings
from dba_data_files a,
( select file_id, max(block_id+blocks-1) hwm
from dba_extents
group by file_id ) b
where a.file_id = b.file_id(+)
/

column cmd format a75 word_wrapped

select 'alter database datafile '''||file_name||''' resize ' ||
ceil( (nvl(hwm,1)*&&blksize)/1024/1024 ) || 'm;' cmd
from dba_data_files a,
( select file_id, max(block_id+blocks-1) hwm
from dba_extents
group by file_id ) b
where a.file_id = b.file_id(+)
and ceil( blocks*&&blksize/1024/1024) -
ceil( (nvl(hwm,1)*&&blksize)/1024/1024 ) > 0
/


--TABLESPACE INCREASE
select
to_char(begin_interval_time,'yy/mm/dd hh24:mi') c1,
object_name c2,
space_used_total c3
from
dba_hist_seg_stat s,
dba_hist_seg_stat_obj o,
dba_hist_snapshot sn
where
o.owner = 'OG'
and
s.obj# = o.obj#
and
sn.snap_id = s.snap_id
--and
-- object_name like 'XIF2%'
order by
begin_interval_time;

set serverout on
declare
v_unformatted_blocks number;
v_unformatted_bytes number;
v_fs1_blocks number;
v_fs1_bytes number;
v_fs2_blocks number;
v_fs2_bytes number;
v_fs3_blocks number;
v_fs3_bytes number;
v_fs4_blocks number;
v_fs4_bytes number;
v_full_blocks number;
v_full_bytes number;
begin
sys.dbms_space.space_usage ('T','SYS_C0037503', 'INDEX', v_unformatted_blocks,
v_unformatted_bytes, v_fs1_blocks, v_fs1_bytes, v_fs2_blocks, v_fs2_bytes,v_fs3_blocks, v_fs3_bytes, v_fs4_blocks, v_fs4_bytes, v_full_blocks, v_full_bytes);
dbms_output.put_line('Unformatted Blocks = '||v_unformatted_blocks);
dbms_output.put_line('FS1 Blocks = '||v_fs1_blocks);
dbms_output.put_line('FS2 Blocks = '||v_fs2_blocks);
dbms_output.put_line('FS3 Blocks = '||v_fs3_blocks);
dbms_output.put_line('FS4 Blocks = '||v_fs4_blocks);
dbms_output.put_line('Full Blocks = '||v_full_blocks);
end;
/

select b.name,
a.rtime,
a.tablespace_usedsize,
a.tablespace_size,
round(100 * a.tablespace_usedsize / a.tablespace_size) used_percent
from dba_hist_tbspc_space_usage a,
(select t2.name,
min(rtime) rtime,
min(tablespace_id) tablespace_id
from dba_hist_tbspc_space_usage t1
inner join v$tablespace t2 on t1.tablespace_id = t2.TS#
where t2.NAME = upper('&1')
group by name, substr(rtime,1,10)
) b
where a.tablespace_id = b.tablespace_id
and a.rtime = b.rtime
order by a.rtime;


select * from table(dbms_space.OBJECT_GROWTH_TREND('OG', 'GAMEDETAIL', 'TABLE PARTITION', 'P201105'));


--How to estimate the size of an index before it is created
SQL> set serveroutput on
SQL> declare
  l_index_ddl varchar2(1000);
  l_used_bytes number;
  l_allocated_bytes number;
begin
  dbms_space.create_index_cost (
    ddl => 'create index idx_t on t(id) ',   -- the DDL to be costed
    used_bytes => l_used_bytes,
    alloc_bytes => l_allocated_bytes );
  dbms_output.put_line ('used= ' || l_used_bytes || 'bytes'
    || ' allocated= ' || l_allocated_bytes || 'bytes');
end;
/
used= 345098bytes allocated= 720896bytes
PL/SQL procedure successfully completed.
Note: used_bytes is the number of bytes the index data itself will occupy.
alloc_bytes is the number of bytes actually allocated for the index in the tablespace when it is created.

set linesize 200;
col TABLESPACE_NAME for a30;
select a.TABLESPACE_NAME "TableSpace Name",
round(a.BYTES/1024/1024) "Total MB",
round((a.BYTES-nvl(b.BYTES, 0)) /1024/1024) "Used MB ",
nvl(round(b.BYTES/1024/1024), 0) "Free MB ",
round((1-((a.BYTES-nvl(b.BYTES,0))/a.BYTES))*100,2) "Pct Free",
a.AUTO
from (select TABLESPACE_NAME,
sum(BYTES) BYTES,
max(AUTOEXTENSIBLE) AUTO
from sys.dba_data_files
group by TABLESPACE_NAME) a,
(select TABLESPACE_NAME,
sum(BYTES) BYTES
from sys.dba_free_space
group by TABLESPACE_NAME) b
where a.TABLESPACE_NAME = b.TABLESPACE_NAME (+)
order by ((a.BYTES-b.BYTES)/a.BYTES) desc
/


col tsname format a16 justify c heading 'Tablespace'
col nfrags format 999,990 justify c heading 'Free|Frags'
col mxfrag format 999,999,990 justify c heading 'Largest|Frag (KB)'
col totsiz format 999,999,990 justify c heading 'Total|(KB)'
col avasiz format 999,999,990 justify c heading 'Available|(KB)'
col pctusd format 990 justify c heading 'Pct|Used'

select
total.tablespace_name tsname,
count(free.bytes) nfrags,
nvl(max(free.bytes)/1024,0) mxfrag,
total.bytes/1024 totsiz,
nvl(sum(free.bytes)/1024,0) avasiz,
(1-nvl(sum(free.bytes),0)/total.bytes)*100 pctusd
from
dba_data_files total,
dba_free_space free
where
total.tablespace_name = free.tablespace_name(+)
and total.file_id=free.file_id(+)
group by
total.tablespace_name,
total.bytes
/


set lines 200 pages 1000
col owner for a10
col table_name for a30
col segment_type for a10
SELECT OWNER, SEGMENT_NAME TABLE_NAME,SEGMENT_TYPE,
GREATEST(ROUND(100 * (NVL(HWM - AVG_USED_BLOCKS,0)/GREATEST(NVL(HWM,1),1) ), 2), 0) WASTE_PER,
ROUND(BYTES/1024, 2) TABLE_KB,
NUM_ROWS,BLOCKS, EMPTY_BLOCKS, HWM HIGHWATER_MARK,
AVG_USED_BLOCKS,CHAIN_PER, EXTENTS, MAX_EXTENTS, ALLO_EXTENT_PER,
DECODE(GREATEST(MAX_FREE_SPACE - NEXT_EXTENT, 0), 0,'N','Y') CAN_EXTEND_SPACE,
NEXT_EXTENT, MAX_FREE_SPACE,
O_TABLESPACE_NAME TABLESPACE_NAME
FROM
(SELECT A.OWNER OWNER, A.SEGMENT_NAME, A.SEGMENT_TYPE, A.BYTES,B.NUM_ROWS,
A.BLOCKS BLOCKS, B.EMPTY_BLOCKS EMPTY_BLOCKS,
A.BLOCKS - B.EMPTY_BLOCKS - 1 HWM,
DECODE(ROUND((B.AVG_ROW_LEN * NUM_ROWS * (1 + (PCT_FREE/100)))/C.BLOCKSIZE, 0),0, 1,
ROUND((B.AVG_ROW_LEN * NUM_ROWS * (1 + (PCT_FREE/100)))/C.BLOCKSIZE, 0)) + 2 AVG_USED_BLOCKS,
ROUND(100 * (NVL(B.CHAIN_CNT, 0)/GREATEST(NVL(B.NUM_ROWS, 1), 1)), 2) CHAIN_PER,
ROUND(100 * (A.EXTENTS/A.MAX_EXTENTS), 2) ALLO_EXTENT_PER,
A.EXTENTS EXTENTS,A.MAX_EXTENTS MAX_EXTENTS, B.NEXT_EXTENT NEXT_EXTENT,
B.TABLESPACE_NAME O_TABLESPACE_NAME
FROM SYS.DBA_SEGMENTS A,SYS.DBA_TABLES B,SYS.TS$ C
WHERE A.OWNER =B.OWNER and SEGMENT_NAME = TABLE_NAME and SEGMENT_TYPE = 'TABLE' AND
B.TABLESPACE_NAME = C.NAME
UNION ALL
SELECT A.OWNER OWNER,
SEGMENT_NAME || '.' || B.PARTITION_NAME,
SEGMENT_TYPE,
BYTES,
B.NUM_ROWS,
A.BLOCKS BLOCKS,
B.EMPTY_BLOCKS EMPTY_BLOCKS,
A.BLOCKS - B.EMPTY_BLOCKS - 1 HWM,
DECODE( ROUND((B.AVG_ROW_LEN * B.NUM_ROWS * (1 + (B.PCT_FREE/100)))/C.BLOCKSIZE, 0),0, 1,
ROUND((B.AVG_ROW_LEN * B.NUM_ROWS * (1 + (B.PCT_FREE/100)))/C.BLOCKSIZE, 0)) + 2 AVG_USED_BLOCKS,
ROUND(100 * (NVL(B.CHAIN_CNT,0)/GREATEST(NVL(B.NUM_ROWS, 1), 1)), 2) CHAIN_PER,
ROUND(100 * (A.EXTENTS/A.MAX_EXTENTS), 2) ALLO_EXTENT_PER,
A.EXTENTS EXTENTS,A.MAX_EXTENTS MAX_EXTENTS,B.NEXT_EXTENT,
B.TABLESPACE_NAME O_TABLESPACE_NAME
FROM SYS.DBA_SEGMENTS A,SYS.DBA_TAB_PARTITIONS B,SYS.TS$ C,SYS.DBA_TABLES D
WHERE A.OWNER = B.TABLE_OWNER
AND SEGMENT_NAME = B.TABLE_NAME
AND SEGMENT_TYPE = 'TABLE PARTITION'
AND B.TABLESPACE_NAME = C.NAME
AND D.OWNER = B.TABLE_OWNER
AND D.TABLE_NAME = B.TABLE_NAME
AND A.PARTITION_NAME = B.PARTITION_NAME),
(SELECT TABLESPACE_NAME F_TABLESPACE_NAME,
MAX(BYTES)MAX_FREE_SPACE
FROM SYS.DBA_FREE_SPACE GROUP BY TABLESPACE_NAME)
WHERE F_TABLESPACE_NAME = O_TABLESPACE_NAME
AND GREATEST(ROUND(100 * (NVL(HWM - AVG_USED_BLOCKS, 0)/GREATEST(NVL(HWM, 1), 1) ), 2), 0) > 25
--AND OWNER = 'TEST'
AND BLOCKS > 128 ORDER BY 10 DESC, 1 ASC, 2 ASC;
WASTE_PER: percentage of the allocated space below the high-water mark that is free (i.e. wasted).
TABLE_KB: total space currently allocated to the table, in KB.
NUM_ROWS: number of rows in the table.
BLOCKS: number of blocks currently allocated to the table, including those above the high-water mark.
EMPTY_BLOCKS: free blocks above the high-water mark within the allocated space.
HIGHWATER_MARK: the current high-water mark.
AVG_USED_BLOCKS: number of blocks the table data should occupy in the ideal case (no row migration).
CHAIN_PER: percentage of rows that are migrated or chained.
EXTENTS: number of extents currently allocated to the table.
MAX_EXTENTS: maximum number of extents the table may allocate.
ALLO_EXTENT_PER: ratio of allocated extents to the maximum allowed.
CAN_EXTEND_SPACE: whether the next extent can be allocated.
NEXT_EXTENT: size of the next extent.
MAX_FREE_SPACE: largest free chunk in the table's tablespace.
The statement produces output like the following:
OWNER TABLE_NAME SEGMENT_TY WASTE_PER TABLE_KB NUM_ROWS BLOCKS EMPTY_BLOCKS HIGHWATER_MARK AVG_USED_BLOCKS CHAIN_PER

PLDM ODS_LOANBACKSTATUS TABLE 83.9 12517376 9719660 1564672 0 1564671 251892 0 375
PLDM RPT_L_PLMSACCBALANCELIST TABLE 92.42 24407040 5500311 3050880 0 3050879 231174 0 565
PLDM RPT_L_STORAGEGOODS TABLE 92.85 5110784 1319244 638848 0 638847 45705 0 292
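
If a table shows a high WASTE_PER, the space below the high-water mark can usually be reclaimed with a
segment shrink. A minimal sketch, assuming 10g or later and that the segment lives in an ASSM tablespace;
the table name is just the first one from the sample output above:

ALTER TABLE PLDM.ODS_LOANBACKSTATUS ENABLE ROW MOVEMENT;
ALTER TABLE PLDM.ODS_LOANBACKSTATUS SHRINK SPACE CASCADE;  --CASCADE also shrinks dependent indexes
ALTER TABLE PLDM.ODS_LOANBACKSTATUS DISABLE ROW MOVEMENT;
--For non-ASSM tablespaces: ALTER TABLE ... MOVE, then rebuild the invalidated indexes.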

 

SQL> set serveroutput on
SQL> declare
unformatted_blocks NUMBER;
unformatted_bytes NUMBER;
fs1_blocks NUMBER;
fs1_bytes NUMBER;
fs2_blocks NUMBER;
fs2_bytes NUMBER;
fs3_blocks NUMBER;
fs3_bytes NUMBER;
fs4_blocks NUMBER;
fs4_bytes NUMBER;
full_blocks NUMBER;
full_bytes NUMBER;
begin
dbms_space.space_usage('<SEGMENT_OWNER>','TOSEND_BATCH_INSTR','TABLE PARTITION',unformatted_blocks
,unformatted_bytes,fs1_blocks,fs1_bytes,fs2_blocks,fs2_bytes,fs3_blocks,fs3_bytes,
fs4_blocks,fs4_bytes,full_blocks,full_bytes,'<PARTITION_NAME>');
dbms_output.put_line(unformatted_blocks);
dbms_output.put_line(unformatted_bytes);
dbms_output.put_line(fs1_blocks);
dbms_output.put_line(fs1_bytes);
dbms_output.put_line(fs2_blocks);
dbms_output.put_line(fs2_bytes);
dbms_output.put_line(fs3_blocks);
dbms_output.put_line(fs3_bytes);
dbms_output.put_line(fs4_blocks);
dbms_output.put_line(fs4_bytes);
dbms_output.put_line(full_blocks);
dbms_output.put_line(full_bytes);
end;
/

Data file size and header size
SELECT v1.file_name,v1.file_id,
num1 total_space,
num3 free_space,
num1-num3 Used_space,
nvl(num2,0) data_space,
num1-num3-nvl(num2,0) file_head
FROM
(SELECT file_name,file_id,SUM(bytes) num1 FROM Dba_Data_Files GROUP BY file_name,file_id) v1,
(SELECT file_id,SUM(bytes) num2 FROM dba_extents GROUP BY file_id) v2,
(SELECT file_id,SUM(BYTES) num3 FROM DBA_FREE_SPACE GROUP BY file_id) v3
WHERE v1.file_id=v2.file_id(+)
AND v1.file_id=v3.file_id(+);
Running the query above gives the following information:
Total_space: total size of the data file, in bytes
Free_space: free space remaining in the data file, in bytes
Used_space: used space in the data file, in bytes
Data_space: space occupied by segment data in the data file (i.e. the data space), in bytes
File_head: space occupied by the data file header, in bytes
To find the HWM of each data file, use the following procedure:
declare
cursor c_dbfile is
select tablespace_name,file_name,file_id,bytes
from sys.dba_data_files
where status !='INVALID'
order by tablespace_name,file_id;
cursor c_space(v_file_id in number) is
select block_id,blocks
from sys.dba_free_space
where file_id=v_file_id
order by block_id desc;
blocksize binary_integer;
filesize binary_integer;
extsize binary_integer;
begin
select value into blocksize
from v$parameter
where name = 'db_block_size';
for c_rec1 in c_dbfile loop
filesize := c_rec1.bytes;
<<outer>>
for c_rec2 in c_space(c_rec1.file_id) loop
extsize := ((c_rec2.block_id - 1)*blocksize + c_rec2.blocks*blocksize);
if extsize = filesize then
filesize := (c_rec2.block_id - 1)*blocksize;
else
exit outer;
end if;
end loop outer;
if filesize = c_rec1.bytes
then
dbms_output.put_line('Tablespace: '
||' '||c_rec1.tablespace_name||' Datafile: '||c_rec1.file_name);
dbms_output.put_line('Cannot be resized, no free space at end of file.');
dbms_output.put_line('.');
else
if filesize < 2*blocksize then
dbms_output.put_line('Tablespace: '
||' '||c_rec1.tablespace_name||' Datafile: '||c_rec1.file_name);
dbms_output.put_line('Can be resized up to: '||2*blocksize
||' Bytes, Actual size: '||c_rec1.bytes||' Bytes');
dbms_output.put_line('.');
else
dbms_output.put_line('Tablespace: '
||' '||c_rec1.tablespace_name||' Datafile: '||c_rec1.file_name);
dbms_output.put_line('Can be resized up to: '||filesize
||' Bytes, Actual size: '||c_rec1.bytes);
dbms_output.put_line('.');
end if;
end if;
end loop;
end;
/

SQL> @?/rdbms/admin/utlchain.sql
SQL> ANALYZE TABLE TEST LIST CHAINED ROWS;
SQL> ANALYZE TABLE TEST PARTITION P1 LIST CHAINED ROWS;
SQL> SELECT * FROM CHAINED_ROWS;


/************************************************************
--Contiguous Space:
************************************************************/
CREATE TABLE tl_contig_space (
tablespace_name VARCHAR2(30),
file_id NUMBER,
block_id NUMBER,
starting_file_id NUMBER,
starting_block_id NUMBER,
blocks NUMBER,
bytes NUMBER)
tablespace users;

CREATE OR REPLACE VIEW v_contig_space AS
SELECT SUBSTR(tablespace_name,1,20) TABLESPACE_NAME,
starting_file_id, starting_block_id, SUM(blocks) sum_blocks,
COUNT(blocks) count_blocks, MAX(blocks) max_blocks,
SUM(bytes)/1024/1024 SUM_MB
FROM tl_contig_space
GROUP BY tablespace_name, starting_file_id, starting_block_id;

DECLARE
CURSOR query IS
SELECT *
FROM dba_free_space
ORDER BY tablespace_name, file_id, block_id;

this_row query%ROWTYPE;
previous_row query%ROWTYPE;
old_file_id PLS_INTEGER;
old_block_id PLS_INTEGER;
BEGIN
OPEN query;
FETCH query INTO this_row;
previous_row := this_row;
old_file_id := previous_row.file_id;
old_block_id := previous_row.block_id;

WHILE query%FOUND LOOP
IF this_row.file_id = previous_row.file_id AND
this_row.block_id = previous_row.block_id+previous_row.blocks
THEN
INSERT INTO tl_contig_space
(tablespace_name, file_id, block_id, starting_file_id,
starting_block_id, blocks, bytes)
VALUES
(previous_row.tablespace_name, previous_row.file_id,
this_row.block_id, old_file_id, old_block_id, this_row.blocks,
this_row.bytes);
ELSE
INSERT INTO tl_contig_space
(tablespace_name, file_id, block_id, starting_file_id,
starting_block_id, blocks, bytes)
VALUES
(this_row.tablespace_name, this_row.file_id,
this_row.block_id, this_row.file_id, this_row.block_id,
this_row.blocks, this_row.bytes);

old_file_id := this_row.file_id;
old_block_id := this_row.block_id;
END IF;
previous_row := this_row;
FETCH query INTO this_row;
END LOOP;
COMMIT;
END;
/

col tablespace_name format a20
col sum_mb format 999.999

SELECT * FROM v_contig_space;


SELECT TABLESPACE_NAME,NEXT_EXTENT,EXTENT_MANAGEMENT,ALLOCATION_TYPE,SEGMENT_SPACE_MANAGEMENT FROM DBA_TABLESPACES;


/************************************************************
--BAD BLOCK:
************************************************************/
DB_BLOCK_CHECKING = FULL
DB_BLOCK_CHECKSUM = FULL
_DB_BLOCK_CHECK_FOR_DEBUG = TRUE
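
A minimal sketch of how these checks might be enabled; the first two are regular parameters, while the
hidden one must be quoted and should only be changed at Oracle Support's direction:

ALTER SYSTEM SET db_block_checking = FULL SCOPE=BOTH;
ALTER SYSTEM SET db_block_checksum = FULL SCOPE=BOTH;
ALTER SYSTEM SET "_db_block_check_for_debug" = TRUE SCOPE=SPFILE;  --hidden parameter, use only when directed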

sqlplus> execute dbms_backup_restore.resetcfilesection(17);
sqlplus> execute dbms_backup_restore.resetcfilesection(18);

SQL> @?\rdbms\admin\utlvalid.sql
Table created.
SQL> analyze table t_p validate structure into invalid_rows;
Table analyzed.
SQL> select * from invalid_rows;

ALTER SYSTEM SET EVENTS='10231 trace name context forever,level 10' ;
ANALYZE TABLE XXXXXX VALIDATE STRUCTURE CASCADE online;

1. Create Repair Table

declare
begin
-- Create repair table
dbms_repair.admin_tables (
-- table_name => 'REPAIR_TABLE',
table_type => dbms_repair.repair_table,
action => dbms_repair.create_action,
tablespace => 'SRBZ_NEW'); -- default TS of SYS if not specified
end;
/

2. create Orphan Key Table

declare
begin
-- Create orphan key table
dbms_repair.admin_tables (
table_type => dbms_repair.orphan_table,
action => dbms_repair.create_action,
tablespace => 'SRBZ_NEW'); -- default TS of SYS if not specified
end;
/

3. DBMS_REPAIR.CHECK_OBJECT

declare
rpr_count int;
begin
rpr_count := 0;
dbms_repair.check_object (
schema_name => 'SRBZ',
object_name => 'USER_BASE_INFO06',
repair_table_name => 'REPAIR_TABLE',
corrupt_count => rpr_count);
dbms_output.put_line('repair count: ' || to_char(rpr_count));
end;
/

4. DBMS_REPAIR.fix_corrupt_blocks

declare
fix_count int;
begin
fix_count := 0;
dbms_repair.fix_corrupt_blocks (
schema_name => 'SRBZ',
object_name => 'USER_BASE_INFO06',
object_type => dbms_repair.table_object,
repair_table_name => 'REPAIR_TABLE',
fix_count => fix_count);
dbms_output.put_line('fix count: ' || to_char(fix_count));
end;
/

5. DBMS_REPAIR.DUMP_ORPHAN_KEYS

SQL> set serveroutput on

-- find index of the table
select index_name from dba_indexes where table_name ='USER_BASE_INFO06' and owner='SRBZ';

--
USER_BASE_INFO06_IDX1
USER_BASE_INFO06_IDX2

sql>
declare
key_count int;
begin
key_count := 0;
dbms_repair.dump_orphan_keys (
schema_name => 'SRBZ',
object_name => 'USER_BASE_INFO06_IDX1',
object_type => dbms_repair.INDEX_OBJECT,
repair_table_name => 'REPAIR_TABLE',
orphan_table_name => 'ORPHAN_KEY_TABLE',
key_count => key_count);
dbms_output.put_line('orphan key count: ' || to_char(key_count));
end;
/

sql>
declare
key_count int;
begin
key_count := 0;
dbms_repair.dump_orphan_keys (
schema_name => 'SRBZ',
object_name => 'USER_BASE_INFO06_IDX2',
object_type => dbms_repair.INDEX_OBJECT,
repair_table_name => 'REPAIR_TABLE',
orphan_table_name => 'ORPHAN_KEY_TABLE',
key_count => key_count);
dbms_output.put_line('orphan key count: ' || to_char(key_count));
end;
/

6. DBMS_REPAIR.SKIP_CORRUPT_BLOCKS

declare
begin
dbms_repair.skip_corrupt_blocks (
schema_name => 'SRBZ',
object_name => 'USER_BASE_INFO06',
object_type => dbms_repair.table_object,
flags => dbms_repair.skip_flag);
end;
/

SQL> select table_name, skip_corrupt from dba_tables where table_name = 'USER_BASE_INFO06';

TABLE_NAME SKIP_CORRUPT
------------------------------ ------------
USER_BASE_INFO06 ENABLED

7. rebuild Index
select 'alter index '||owner||'.'||index_name || ' rebuild online tablespace '||tablespace_name||';'
from dba_indexes where owner='SRBZ' and table_name='USER_BASE_INFO06' ;

alter index SRBZ.USER_BASE_INFO06_IDX1 rebuild online tablespace SRBZ_YZTAB_IDX;
alter index SRBZ.USER_BASE_INFO06_IDX2 rebuild online tablespace SRBZ_YZTAB_IDX;


/************************************************************************************************************************/

SELECT FILE_NAME,AUTOEXTENSIBLE FROM DBA_DATA_FILES WHERE AUTOEXTENSIBLE='YES'
UNION ALL
SELECT FILE_NAME,AUTOEXTENSIBLE FROM DBA_TEMP_FILES WHERE AUTOEXTENSIBLE='YES';

--8.USAGE PERCENTAGE OF DATAFILES --from rda
/*SELECT D.TABLESPACE_NAME,FILE_NAME "FILE_NAME",SPACE "SUM_SPACE(M)",BLOCKS SUM_BLOCKS,SPACE-NVL(FREE_SPACE,0) "USED_SPACE(M)",
ROUND((1-NVL(FREE_SPACE,0)/SPACE)*100,2) "USED_RATE(%)",FREE_SPACE "FREE_SPACE(M)",AUTOEXTENSIBLE
FROM
(SELECT FILE_ID,FILE_NAME,TABLESPACE_NAME,ROUND(SUM(BYTES)/(1024*1024),2) SPACE,SUM(BLOCKS) BLOCKS
FROM DBA_DATA_FILES
GROUP BY TABLESPACE_NAME,FILE_ID,FILE_NAME) D,
(SELECT FILE_ID,TABLESPACE_NAME,ROUND(SUM(BYTES)/(1024*1024),2) FREE_SPACE
FROM DBA_FREE_SPACE
GROUP BY TABLESPACE_NAME,FILE_ID) E,
(SELECT FILE_ID,AUTOEXTENSIBLE FROM DBA_DATA_FILES) F
WHERE D.TABLESPACE_NAME = E.TABLESPACE_NAME(+) AND D.FILE_ID = E.FILE_ID(+) AND D.FILE_ID = F.FILE_ID(+)
UNION ALL --IF TEMPFILES EXIST
SELECT D.TABLESPACE_NAME,FILE_NAME "FILE_NAME",SPACE "SUM_SPACE(M)",BLOCKS SUM_BLOCKS,
USED_SPACE "USED_SPACE(M)",ROUND(NVL(USED_SPACE,0)/SPACE*100,2) "USED_RATE(%)",
NVL(FREE_SPACE,0) "FREE_SPACE(M)",AUTOEXTENSIBLE
FROM
(SELECT FILE_ID,FILE_NAME,TABLESPACE_NAME,ROUND(SUM(BYTES)/(1024*1024),2) SPACE,SUM(BLOCKS) BLOCKS
FROM DBA_TEMP_FILES
GROUP BY TABLESPACE_NAME,FILE_ID,FILE_NAME) D,
(SELECT FILE_ID,TABLESPACE_NAME,ROUND(SUM(BYTES_USED)/(1024*1024),2) USED_SPACE,
ROUND(SUM(BYTES_FREE)/(1024*1024),2) FREE_SPACE
FROM V$TEMP_SPACE_HEADER
GROUP BY TABLESPACE_NAME,FILE_ID) E,
(SELECT FILE_ID,AUTOEXTENSIBLE FROM DBA_TEMP_FILES) F
WHERE D.TABLESPACE_NAME = E.TABLESPACE_NAME(+) AND D.FILE_ID = E.FILE_ID(+) AND D.FILE_ID = F.FILE_ID(+)
ORDER BY TABLESPACE_NAME,FILE_NAME;*/

--9.DATABASE OBJECTS
SELECT OWNER,OBJECT_TYPE,COUNT(*) FROM DBA_OBJECTS GROUP BY OWNER,OBJECT_TYPE ORDER BY 1,2;

--INVALID OBJECTS --from rda
--SELECT OWNER,OBJECT_NAME,OBJECT_TYPE FROM DBA_OBJECTS WHERE STATUS != 'VALID' ORDER BY 1,3,2;

--10.DATABASE LINK
SELECT OWNER,DB_LINK,USERNAME FROM DBA_DB_LINKS;

--11.NUMBER AND SIZE OF DATAFILES
SELECT COUNT(1) FILE_COUNT,ROUND(SUM(BYTES)/1024/1024/1024,2) FILE_SIZE FROM DBA_DATA_FILES;

--12.NOT SYS AND SYSTEM USER`S OBJECT IN TABLESPACE SYSTEM
SELECT OWNER,COUNT(DISTINCT SEGMENT_NAME) SEGMENT_COUNT,SUM(BYTES) BYTES FROM DBA_SEGMENTS
WHERE OWNER NOT IN ('SYS','SYSTEM') AND TABLESPACE_NAME='SYSTEM'
GROUP BY OWNER;

--14.BACKUP CONTROLFILE
ALTER DATABASE BACKUP CONTROLFILE TO TRACE;

--15.V$SYSSTAT
SELECT NAME,VALUE VALUE_INT FROM V$SYSSTAT
WHERE NAME IN ('user commits','user rollbacks','user calls','sorts (rows)','sorts (disk)',
'sorts (memory)','table fetch continued row') OR NAME LIKE 'table scan%';

--16.INDEX LEVEL HEIGHT
SELECT MAX(BLEVEL) FROM DBA_INDEXES;

--17.PROFILE LIMIT
SELECT PROFILE, LIMIT
FROM DBA_PROFILES,
(SELECT LIMIT AS DEF_PWD_LIFE_TM
FROM DBA_PROFILES
WHERE PROFILE = 'DEFAULT'
AND RESOURCE_NAME = 'PASSWORD_LIFE_TIME')
WHERE RESOURCE_NAME = 'PASSWORD_LIFE_TIME'
AND ((REPLACE(LIMIT,'DEFAULT',DEF_PWD_LIFE_TM) IN ('UNLIMITED',NULL))
OR (LPAD(REPLACE(LIMIT,'DEFAULT',DEF_PWD_LIFE_TM),40,'0') >
LPAD('90',40,'0')));

--18.SYSDBA USERS
SELECT * FROM V$PWFILE_USERS;

--19.DBA USERS
SELECT GRANTEE,GRANTED_ROLE FROM DBA_ROLE_PRIVS WHERE GRANTED_ROLE='DBA' ORDER BY GRANTEE;

--20.PLSQL SPECIAL PRIVS
SELECT TABLE_NAME FROM DBA_TAB_PRIVS
WHERE GRANTEE='PUBLIC'
AND PRIVILEGE ='EXECUTE'
AND TABLE_NAME IN
('UTL_SMTP', 'UTL_TCP', 'UTL_HTTP', 'UTL_FILE',
'DBMS_RANDOM','DBMS_LOB', 'DBMS_SQL','DBMS_JOB',
'DBMS_BACKUP_RESTORE','DBMS_OBFUSCATION_TOOLKIT');

/*--HIDDEN PARAMETER
SELECT * FROM V$SYSTEM_FIX_CONTROL;
_optimizer_max_permutations
_sort_elimination_cost_ratio
ALTER SESSION SET "_SORT_ELIMINATION_COST_RATIO"=5

_spin_count

select * from x$ksllclass;

select a.ksppinm name,b.ksppstvl value,b.ksppstdf "default",decode(a.ksppity, 1,'boolean', 2,'string', 3,'number', 4,'file', a.ksppity) type
,a.ksppdesc description
from sys.x$ksppi a,sys.x$ksppcv b
where a.indx = b.indx and a.ksppinm like '\_shared\_pool\_reserved\_min\_alloc%' escape '\'
order by name;

select a.ksppinm name,b.ksppstvl value,b.ksppstdf "default",decode(a.ksppity, 1,'boolean', 2,'string', 3,'number', 4,'file', a.ksppity) type
,a.ksppdesc description
from sys.x$ksppi a,sys.x$ksppcv b
where a.indx = b.indx and a.ksppinm like '\_small\_table\_threshold%' escape '\'
order by name;

select a.ksppinm name,b.ksppstvl value,b.ksppstdf "default",decode(a.ksppity, 1,'boolean', 2,'string', 3,'number', 4,'file', a.ksppity) type
,a.ksppdesc description
from sys.x$ksppi a,sys.x$ksppcv b
where a.indx = b.indx and a.ksppinm like '\_job\_queue\_interval%' escape '\'
order by name;

select a.ksppinm name,b.ksppstvl value,b.ksppstdf "default",decode(a.ksppity, 1,'boolean', 2,'string', 3,'number', 4,'file', a.ksppity) type
,a.ksppdesc description
from sys.x$ksppi a,sys.x$ksppcv b
where a.indx = b.indx and a.ksppinm like '_sort_multiblock_read%' ;

select a.ksppinm name,b.ksppstvl value,b.ksppstdf "default",decode(a.ksppity, 1,'boolean', 2,'string', 3,'number', 4,'file', a.ksppity) type
,a.ksppdesc description
from sys.x$ksppi a,sys.x$ksppcv b
where a.indx = b.indx and a.ksppinm like '%_bump_highwater_mark_count%';

select a.ksppinm name,b.ksppstvl value,b.ksppstdf "default",decode(a.ksppity, 1,'boolean', 2,'string', 3,'number', 4,'file', a.ksppity) type
,a.ksppdesc description
from sys.x$ksppi a,sys.x$ksppcv b
where a.indx = b.indx and a.ksppinm like '%push_join_union_view%';

select a.ksppinm name,b.ksppstvl value,b.ksppstdf "default",decode(a.ksppity, 1,'boolean', 2,'string', 3,'number', 4,'file', a.ksppity) type
,a.ksppdesc description
from sys.x$ksppi a,sys.x$ksppcv b
where a.indx = b.indx and a.ksppinm like '\_kks\_use\_mutex\_pin%' escape '\'
order by name;

--cursor: mutex S
--cursor: mutex X
--cursor: pin S
--cursor: pin S wait on X
--cursor: pin X

--Note:5907779.8 Bug 5907779 Self deadlock hang on "cursor: pin S wait on X" (typically from DBMS_STATS)
--Patch 5907779

--bug 5485914 - proactively avoid "cursor: pin S wait on X", MUTEX REPORTED SELF DEADLOCK AFTER DBMS_MONITOR.SESSION_TRACE_ENABLE

CAS (Compare and Swap) mechanism (note: not supported by HP if version <= 10.2.0.3)

*/


/**********************************************************************
* File: systime.sql
* Type: SQL*Plus script
* Author: Tim Gorman (Evergreen Database Technologies, Inc)
* Date: 25-Mar-02
*
* Description:
* SQL*Plus script to display total time spent waiting info
* (from V$SYSTEM_EVENT) along with total time spent processing
* info (from V$SYSSTAT for "CPU used by this session" statistic),
* along with a calculation of the percentage of time the instance
* spent doing each thing...
*
* Note:
* Due to use of "analytic" reporting function, this query can only
* be used with v8.1.6 and above...
*
* Modifications:
*********************************************************************/
break on report on username on sid skip 1
set pagesize 100 lines 80 trimspool on trimout on verify off

undef usr

col type format a5 heading "Svc,|Idle,|Wait"
col name format a35 heading "Name" truncate
col tot_secs_spent format 999,999,999,990.00 heading "Total|Seconds|Spent"
col pct_total format 990.00 heading "%|Total"
col nonidle_total format 990.00 heading "%|NonIdle"

select type,
name,
tot_secs_spent,
(tot_secs_spent / (sum(tot_secs_spent) over ()))*100 pct_total,
(nonidle_secs_spent / (sum(nonidle_secs_spent) over ()))*100 nonidle_total
from (select decode(event,
'rdbms ipc message', 'Idle',
'rdbms ipc reply', 'Idle',
'SQL*Net message from client', 'Idle',
'SQL*Net break/reset to client', 'Idle',
'pipe get', 'Idle',
'pmon timer', 'Idle',
'smon timer', 'Idle',
'dispatcher timer', 'Idle',
'virtual circuit status', 'Idle',
'PX Idle Wait', 'Idle',
'PX Deq: Execute Reply', 'Idle',
'PX Deq: Execution Msg', 'Idle',
'PX Deq: Table Q Normal', 'Idle',
'PX Deq Credit: send blkd', 'Idle',
'PX Deq Credit: need buffer', 'Idle',
'PX Deq: Parse Reply', 'Idle',
'PX Deq: Signal ACK', 'Idle',
'PX Deq: Join ACK', 'Idle',
'PX qref latch', 'Idle',
'PX Deq: Msg Fragment', 'Idle',
'PL/SQL lock timer', 'Idle',
'inactive session', 'Idle',
'Wait') type,
event name,
time_waited/100 tot_secs_spent,
decode(event,
'rdbms ipc message', 0,
'rdbms ipc reply', 0,
'SQL*Net message from client', 0,
'SQL*Net break/reset to client', 0,
'pipe get', 0,
'pmon timer', 0,
'smon timer', 0,
'dispatcher timer', 0,
'virtual circuit status', 0,
'PX Idle Wait', 0,
'PX Deq: Execute Reply', 0,
'PX Deq: Execution Msg', 0,
'PX Deq: Table Q Normal', 0,
'PX Deq Credit: send blkd', 0,
'PX Deq Credit: need buffer', 0,
'PX Deq: Parse Reply', 0,
'PX Deq: Signal ACK', 0,
'PX Deq: Join ACK', 0,
'PX qref latch', 0,
'PX Deq: Msg Fragment', 0,
'PL/SQL lock timer', 0,
'inactive session', 0,
time_waited/100) nonidle_secs_spent
from v$system_event
where time_waited > 0
union all
select 'Svc' type,
'other cpu usage' name,
(t.value - (p.value + r.value))/100 tot_secs_spent,
(t.value - (p.value + r.value))/100 nonidle_secs_spent
from v$sysstat t,
v$sysstat p,
v$sysstat r
where t.name = 'CPU used by this session'
and p.name = 'recursive cpu usage'
and r.name = 'parse time cpu'
union all
select 'Svc' type,
name,
value/100 tot_secs_spent,
value/100 nonidle_secs_spent
from v$sysstat
where name = 'recursive cpu usage'
and value > 0
union all
select 'Svc' type,
name,
value/100 tot_secs_spent,
value/100 nonidle_secs_spent
from v$sysstat
where name = 'parse time cpu'
and value > 0)
order by 5 desc, 4 desc, 3 desc, 2

spool systime
/
spool off

--DEAD PROCESS:
select p.addr from v$process p where pid <> 1
minus
select s.paddr from v$session s;

select spid, program from v$process
where program!= 'PSEUDO'
and addr not in (select paddr from v$session)
and addr not in (select paddr from v$bgprocess);


--CHECK DEFAULT PASSWORD:
CONN SYS/CHANGE_ON_INSTALL AS SYSDBA
CONN SYSTEM/MANAGER
CONN /AS SYSDBA
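
On 11g and later, a quicker check (assuming the DBA_USERS_WITH_DEFPWD view is available in this release):

SELECT d.username, u.account_status
FROM dba_users_with_defpwd d, dba_users u
WHERE u.username = d.username
ORDER BY 1;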

--OS CHECK:
tail -200 /var/adm/syslog/syslog.log
crontab -l
sar -u 2 10
sar -b 2 10
sar -w 2 10
sar -v 2 10
vmstat 2 10
bdf
ps -ef | egrep -v "STIME|$LOGNAME" | sort +3 -r | head -n 15


/************************************************************
--OPATCH:
************************************************************/
./opatch lsinventory
./opatch lsinventory -oh $ORA_CRS_HOME
--VI TRACE FILE OF BACKUP CONTROLFILE.
export OUI_ARGS=-ignoreSysPrereqs
export OPATCH_DEBUG=TRUE

opatch prereq CheckConflictAmongPatchesWithDetail -phBaseDir <path to base dir where all the patches to be applied are kept>
opatch prereq CheckConflictAgainstOHWithDetail -phBaseDir <path to base dir where all the patches to be applied are kept>

--10204 CRS PSU patch
/home/oracle/app/product/10.2.0/crs
/home/oracle/app/product/10.2.0/db
--root
custom/scripts/prerootpatch.sh -crshome /home/oracle/app/product/10.2.0/crs -crsuser oracle
--oracle
custom/scripts/prepatch.sh -crshome /home/oracle/app/product/10.2.0/crs
--oracle
custom/server/9294403/custom/scripts/prepatch.sh -dbhome /home/oracle/app/product/10.2.0/db
--oracle
opatch napply -local -oh /home/oracle/app/product/10.2.0/crs -id 9294403
--oracle
opatch napply custom/server/ -local -oh /home/oracle/app/product/10.2.0/db -id 9294403
--oracle
custom/scripts/postpatch.sh -crshome /home/oracle/app/product/10.2.0/crs
--oracle
custom/server/9294403/custom/scripts/postpatch.sh -dbhome /home/oracle/app/product/10.2.0/db
--root
custom/scripts/postrootpatch.sh -crshome /home/oracle/app/product/10.2.0/crs


#######################
--root
custom/scripts/prerootpatch.sh -crshome /crs/crshome/10.2.0 -crsuser oracle
--oracle
custom/scripts/prepatch.sh -crshome /crs/crshome/10.2.0
--oracle
custom/server/9294403/custom/scripts/prepatch.sh -dbhome /app/oracle/10.2.0
--oracle
opatch napply -local -oh /crs/crshome/10.2.0 -id 9294403
--oracle
opatch napply custom/server/ -local -oh /app/oracle/10.2.0 -id 9294403
--oracle
custom/scripts/postpatch.sh -crshome /crs/crshome/10.2.0
--oracle
custom/server/9294403/custom/scripts/postpatch.sh -dbhome /app/oracle/10.2.0
--root
custom/scripts/postrootpatch.sh -crshome /crs/crshome/10.2.0


1. Stop CRS and the database
srvctl stop database -d xgp1
srvctl stop nodeapps -n XGP1_db1
srvctl stop nodeapps -n XGP1_db2
Run as root:
./crsctl stop crs
2. Back up the OCR and voting disks
dd if=/dev/vgdata/rlv_XGP1_ocr1 of=/backup/rlv_XGP1_ocr1.bak bs=1m count=512
dd if=/dev/vgdata/rlv_XGP1_ocr2 of=/backup/rlv_XGP1_ocr2.bak bs=1m count=512
dd if=/dev/vgdata/rlv_XGP1_vote1 of=/backup/rlv_XGP1_vote1.bak bs=1m count=512
dd if=/dev/vgdata/rlv_XGP1_vote2 of=/backup/rlv_XGP1_vote2.bak bs=1m count=512
dd if=/dev/vgdata/rlv_XGP1_vote3 of=/backup/rlv_XGP1_vote3.bak bs=1m count=512
3. Back up the software homes
tar -cvf crs.tar $ORACLE_BASE/product/10.2.0/crs
tar -cvf db.tar /u01/app/oracle/product/10.2.0/db_1
tar -cvf oraInventory.tar /u01/app/oracle/oraInventory
4. Apply the CRS PSU patch
Run as oracle:
custom/server/9294403/custom/scripts/prepatch.sh -dbhome $ORACLE_HOME
opatch napply custom/server/ -local -oh $ORACLE_HOME -id 9294403
custom/server/9294403/custom/scripts/postpatch.sh -dbhome $ORACLE_HOME
Run as root:
./crsctl start crs

/************************************************************
--CHECK AFTER UPGRADE:
************************************************************/
--select data dictionary version--
COL version FOR A10
COL comp_id FOR A8
COL schema LIKE comp_id
COL comp_name FOR A35
COL status FOR A12
SELECT comp_id, schema, status, version, comp_name
FROM dba_registry
ORDER BY 1;

--select invalid object--
COL status FOR a9
COL object_type FOR a20
COL owner.object FOR a50
SELECT status, object_id, object_type, owner||'.'||object_name "OWNER.OBJECT"
FROM dba_objects
WHERE status != 'VALID' AND object_name NOT LIKE 'BIN$%'
ORDER BY 4,2;


/************************************************************
--LOCK:
************************************************************/

select e.sid,substr(a.os_user_name,1,8) "OS User"
, substr(a.oracle_username,1,8) "DB User"
, substr(b.owner,1,8) "Schema"
, substr(b.object_name,1,20) "Object Name"
, substr(b.object_type,1,10) "Type"
, substr(c.segment_name,1,5) "RBS"
, substr(d.used_urec,1,12) "# of Records"
from v$locked_object a
, dba_objects b
, dba_rollback_segs c
, v$transaction d
, v$session e
where a.object_id = b.object_id
and a.xidusn = c.segment_id
and a.xidusn = d.xidusn
and a.xidslot = d.xidslot
and d.addr = e.taddr
;

 

SET ECHO off
REM NAME: TFSCLOCK.SQL
REM USAGE:"@path/tfsclock"
REM ------------------------------------------------------------------------
REM REQUIREMENTS:
REM SELECT on V_$LOCK, V_$SESSION, SYS.USER$, SYS.OBJ$
REM ------------------------------------------------------------------------
REM PURPOSE:
REM The following locking information script provides fully DECODED
REM information regarding the locks currently held in the database.
REM The report generated is fairly complex and difficult to read,
REM but has considerable detail.
REM
REM The TFTS series contains scripts to provide (less detailed) lock
REM information in formats that are somewhat less difficult to read:
REM TFSMLOCK.SQL and TFSLLOCK.SQL.
REM ------------------------------------------------------------------------
REM EXAMPLE:
REM Too complex to show a representative sample here
REM
REM ------------------------------------------------------------------------
REM DISCLAIMER:
REM This script is provided for educational purposes only. It is NOT
REM supported by Oracle World Wide Technical Support.
REM The script has been tested and appears to work as intended.
REM You should always run new scripts on a test instance initially.
REM ------------------------------------------------------------------------
REM

set lines 200
set pagesize 66
break on Kill on sid on username on terminal
column Kill heading 'Kill String' format a13
column res heading 'Resource Type' format 999
column id1 format 9999990
column id2 format 9999990
column locking heading 'Lock Held/Lock Requested' format a40
column lmode heading 'Lock Held' format a20
column request heading 'Lock Requested' format a20
column serial# format 99999
column username format a10 heading "Username"
column terminal heading Term format a6
column tab format a30 heading "Table Name"
column owner format a9
column LAddr heading "ID1 - ID2" format a18
column Lockt heading "Lock Type" format a40
column command format a25
column sid format 990

select
nvl(S.USERNAME,'Internal') username,
L.SID,
nvl(S.TERMINAL,'None') terminal,
decode(command,
0,'None',decode(l.id2,0,U1.NAME||'.'||substr(T1.NAME,1,20),'None')) tab,
decode(command,
0,'BACKGROUND',
1,'Create Table',
2,'INSERT',
3,'SELECT',
4,'CREATE CLUSTER',
5,'ALTER CLUSTER',
6,'UPDATE',
7,'DELETE',
8,'DROP',
9,'CREATE INDEX',
10,'DROP INDEX',
11,'ALTER INDEX',
12,'DROP TABLE',
13,'CREATE SEQUENCE',
14,'ALTER SEQUENCE',
15,'ALTER TABLE',
16,'DROP SEQUENCE',
17,'GRANT',
18,'REVOKE',
19,'CREATE SYNONYM',
20,'DROP SYNONYM',
21,'CREATE VIEW',
22,'DROP VIEW',
23,'VALIDATE INDEX',
24,'CREATE PROCEDURE',
25,'ALTER PROCEDURE',
26,'LOCK TABLE',
27,'NO OPERATION',
28,'RENAME',
29,'COMMENT',
30,'AUDIT',
31,'NOAUDIT',
32,'CREATE EXTERNAL DATABASE',
33,'DROP EXTERNAL DATABASE',
34,'CREATE DATABASE',
35,'ALTER DATABASE',
36,'CREATE ROLLBACK SEGMENT',
37,'ALTER ROLLBACK SEGMENT',
38,'DROP ROLLBACK SEGMENT',
39,'CREATE TABLESPACE',
40,'ALTER TABLESPACE',
41,'DROP TABLESPACE',
42,'ALTER SESSION',
43,'ALTER USER',
44,'COMMIT',
45,'ROLLBACK',
46,'SAVEPOINT',
47,'PL/SQL EXECUTE',
48,'SET TRANSACTION',
49,'ALTER SYSTEM SWITCH LOG',
50,'EXPLAIN',
51,'CREATE USER',
52,'CREATE ROLE',
53,'DROP USER',
54,'DROP ROLE',
55,'SET ROLE',
56,'CREATE SCHEMA',
57,'CREATE CONTROL FILE',
58,'ALTER TRACING',
59,'CREATE TRIGGER',
60,'ALTER TRIGGER',
61,'DROP TRIGGER',
62,'ANALYZE TABLE',
63,'ANALYZE INDEX',
64,'ANALYZE CLUSTER',
65,'CREATE PROFILE',
66,'DROP PROFILE',
67,'ALTER PROFILE',
68,'DROP PROCEDURE',
69,'DROP PROCEDURE',
70,'ALTER RESOURCE COST',
71,'CREATE SNAPSHOT LOG',
72,'ALTER SNAPSHOT LOG',
73,'DROP SNAPSHOT LOG',
74,'CREATE SNAPSHOT',
75,'ALTER SNAPSHOT',
76,'DROP SNAPSHOT',
79,'ALTER ROLE',
85,'TRUNCATE TABLE',
86,'TRUNCATE CLUSTER',
87,'-',
88,'ALTER VIEW',
89,'-',
90,'-',
91,'CREATE FUNCTION',
92,'ALTER FUNCTION',
93,'DROP FUNCTION',
94,'CREATE PACKAGE',
95,'ALTER PACKAGE',
96,'DROP PACKAGE',
97,'CREATE PACKAGE BODY',
98,'ALTER PACKAGE BODY',
99,'DROP PACKAGE BODY',
command||' - ???') COMMAND,
decode(L.LMODE,1,'No Lock',
2,'Row Share',
3,'Row Exclusive',
4,'Share',
5,'Share Row Exclusive',
6,'Exclusive','NONE') lmode,
decode(L.REQUEST,1,'No Lock',
2,'Row Share',
3,'Row Exclusive',
4,'Share',
5,'Share Row Exclusive',
6,'Exclusive','NONE') request,
l.id1||'-'||l.id2 Laddr,
l.type||' - '||
decode(l.type,
'BL','Buffer hash table instance lock',
'CF',' Control file schema global enqueue lock',
'CI','Cross-instance function invocation instance lock',
'CS','Control file schema global enqueue lock',
'CU','Cursor bind lock',
'DF','Data file instance lock',
'DL','Direct loader parallel index create',
'DM','Mount/startup db primary/secondary instance lock',
'DR','Distributed recovery process lock',
'DX','Distributed transaction entry lock',
'FI','SGA open-file information lock',
'FS','File set lock',
'HW','Space management operations on a specific segment lock',
'IN','Instance number lock',
'IR','Instance recovery serialization global enqueue lock',
'IS','Instance state lock',
'IV','Library cache invalidation instance lock',
'JQ','Job queue lock',
'KK','Thread kick lock',
'MB','Master buffer hash table instance lock',
'MM','Mount definition global enqueue lock',
'MR','Media recovery lock',
'PF','Password file lock',
'PI','Parallel operation lock',
'PR','Process startup lock',
'PS','Parallel operation lock',
'RE','USE_ROW_ENQUEUE enforcement lock',
'RT','Redo thread global enqueue lock',
'RW','Row wait enqueue lock',
'SC','System commit number instance lock',
'SH','System commit number high water mark enqueue lock',
'SM','SMON lock',
'SN','Sequence number instance lock',
'SQ','Sequence number enqueue lock',
'SS','Sort segment lock',
'ST','Space transaction enqueue lock',
'SV','Sequence number value lock',
'TA','Generic enqueue lock',
'TD','DDL enqueue lock',
'TE','Extend-segment enqueue lock',
'TM','DML enqueue lock',
'TO','Temporary Table Object Enqueue',
'TT','Temporary table enqueue lock',
'TX','Transaction enqueue lock',
'UL','User supplied lock',
'UN','User name lock',
'US','Undo segment DDL lock',
'WL','Being-written redo log instance lock',
'WS','Write-atomic-log-switch global enqueue lock',
'TS',decode(l.id2,0,'Temporary segment enqueue lock (ID2=0)',
'New block allocation enqueue lock (ID2=1)'),
'LA','Library cache lock instance lock (A=namespace)',
'LB','Library cache lock instance lock (B=namespace)',
'LC','Library cache lock instance lock (C=namespace)',
'LD','Library cache lock instance lock (D=namespace)',
'LE','Library cache lock instance lock (E=namespace)',
'LF','Library cache lock instance lock (F=namespace)',
'LG','Library cache lock instance lock (G=namespace)',
'LH','Library cache lock instance lock (H=namespace)',
'LI','Library cache lock instance lock (I=namespace)',
'LJ','Library cache lock instance lock (J=namespace)',
'LK','Library cache lock instance lock (K=namespace)',
'LL','Library cache lock instance lock (L=namespace)',
'LM','Library cache lock instance lock (M=namespace)',
'LN','Library cache lock instance lock (N=namespace)',
'LO','Library cache lock instance lock (O=namespace)',
'LP','Library cache lock instance lock (P=namespace)',
'LS','Log start/log switch enqueue lock',
'PA','Library cache pin instance lock (A=namespace)',
'PB','Library cache pin instance lock (B=namespace)',
'PC','Library cache pin instance lock (C=namespace)',
'PD','Library cache pin instance lock (D=namespace)',
'PE','Library cache pin instance lock (E=namespace)',
'PF','Library cache pin instance lock (F=namespace)',
'PG','Library cache pin instance lock (G=namespace)',
'PH','Library cache pin instance lock (H=namespace)',
'PI','Library cache pin instance lock (I=namespace)',
'PJ','Library cache pin instance lock (J=namespace)',
'PL','Library cache pin instance lock (K=namespace)',
'PK','Library cache pin instance lock (L=namespace)',
'PM','Library cache pin instance lock (M=namespace)',
'PN','Library cache pin instance lock (N=namespace)',
'PO','Library cache pin instance lock (O=namespace)',
'PP','Library cache pin instance lock (P=namespace)',
'PQ','Library cache pin instance lock (Q=namespace)',
'PR','Library cache pin instance lock (R=namespace)',
'PS','Library cache pin instance lock (S=namespace)',
'PT','Library cache pin instance lock (T=namespace)',
'PU','Library cache pin instance lock (U=namespace)',
'PV','Library cache pin instance lock (V=namespace)',
'PW','Library cache pin instance lock (W=namespace)',
'PX','Library cache pin instance lock (X=namespace)',
'PY','Library cache pin instance lock (Y=namespace)',
'PZ','Library cache pin instance lock (Z=namespace)',
'QA','Row cache instance lock (A=cache)',
'QB','Row cache instance lock (B=cache)',
'QC','Row cache instance lock (C=cache)',
'QD','Row cache instance lock (D=cache)',
'QE','Row cache instance lock (E=cache)',
'QF','Row cache instance lock (F=cache)',
'QG','Row cache instance lock (G=cache)',
'QH','Row cache instance lock (H=cache)',
'QI','Row cache instance lock (I=cache)',
'QJ','Row cache instance lock (J=cache)',
'QL','Row cache instance lock (K=cache)',
'QK','Row cache instance lock (L=cache)',
'QM','Row cache instance lock (M=cache)',
'QN','Row cache instance lock (N=cache)',
'QO','Row cache instance lock (O=cache)',
'QP','Row cache instance lock (P=cache)',
'QQ','Row cache instance lock (Q=cache)',
'QR','Row cache instance lock (R=cache)',
'QS','Row cache instance lock (S=cache)',
'QT','Row cache instance lock (T=cache)',
'QU','Row cache instance lock (U=cache)',
'QV','Row cache instance lock (V=cache)',
'QW','Row cache instance lock (W=cache)',
'QX','Row cache instance lock (X=cache)',
'QY','Row cache instance lock (Y=cache)',
'QZ','Row cache instance lock (Z=cache)','????') Lockt
from V$LOCK L,
V$SESSION S,
SYS.USER$ U1,
SYS.OBJ$ T1
where L.SID = S.SID
and T1.OBJ# = decode(L.ID2,0,L.ID1,1)
and U1.USER# = T1.OWNER#
and S.TYPE != 'BACKGROUND'
order by 1,2,5;

Looking a bit further, we can again match KGLLKSES with SADDR
in v$session to find further information on the blocking session:

SELECT SID,USERNAME,TERMINAL,PROGRAM FROM V$SESSION
WHERE SADDR in
(SELECT KGLLKSES FROM X$KGLLK LOCK_A
WHERE KGLLKREQ = 0
AND EXISTS (SELECT LOCK_B.KGLLKHDL FROM X$KGLLK LOCK_B
WHERE KGLLKSES = '672B43A8' /* BLOCKED SESSION */
AND LOCK_A.KGLLKHDL = LOCK_B.KGLLKHDL
AND KGLLKREQ > 0)
);

In the same way we can also find all the blocked sessions:

SELECT SID,USERNAME,TERMINAL,PROGRAM FROM V$SESSION
WHERE SADDR in
(SELECT KGLLKSES FROM X$KGLLK LOCK_A
WHERE KGLLKREQ > 0
AND EXISTS (SELECT LOCK_B.KGLLKHDL FROM X$KGLLK LOCK_B
WHERE KGLLKSES = 'saddr_from_v$session' /* BLOCKING SESSION */
AND LOCK_A.KGLLKHDL = LOCK_B.KGLLKHDL
AND KGLLKREQ = 0)
);


--CF ENQUEUE
event="10706 trace name context forever, level 10"
_ksi_trace=CF
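
A minimal sketch of switching the trace on and off for the current session; the event syntax is the same
style used elsewhere in this script, while _ksi_trace stays in the pfile/spfile as shown above:

alter session set events '10706 trace name context forever, level 10';
--reproduce the CF enqueue problem, then:
alter session set events '10706 trace name context off';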


--enqueue type: 1415053316 is the P1 value seen in v$session_wait;
select chr(bitand(1415053316,-16777216)/16777215)||
chr(bitand(1415053316,16711680)/65535) "Lock"
from dual;

SELECT sid,
CHR (BITAND (p1,-16777216) / 16777215) ||
CHR (BITAND (p1, 16711680) / 65535) enq,
DECODE (CHR (BITAND (p1,-16777216) / 16777215) ||
CHR (BITAND (p1, 16711680) / 65535),
'TX', 'Transaction (RBS)',
'TM', 'DML Transaction',
'TS', 'Tablespace and Temp Seg',
'TT', 'Temporary Table',
'ST', 'Space Mgt (e.g., uet$, fet$)',
'UL', 'User Defined',
CHR (BITAND (p1,-16777216) / 16777215) ||
CHR (BITAND (p1, 16711680) / 65535)) enqueue_name,
DECODE (BITAND (p1, 65535), 1, 'Null', 2, 'Sub-Share',
3, 'Sub-Exclusive', 4, 'Share', 5, 'Share/Sub-Exclusive',
6, 'Exclusive', 'Other') lock_mode
FROM v$session_wait
WHERE sid = 96;

select event,status,sql_hash_value,last_call_et,p1,p2,p3,ROW_WAIT_OBJ#,ROW_WAIT_FILE#,ROW_WAIT_BLOCK#,ROW_WAIT_ROW#
from gv$session where event in ('gc buffer busy','enq: TX - index contention');


----show blocking locks.
select SID,ADDR,KADDR,type,LMODE,round(CTIME/60) "Time(Min)" from V$LOCK where block>0;
select sid,username from V$SESSION where sid in(select sid from V$LOCK where block>0);
select s.username,s.sid,s.serial# from v$session s,dba_blockers dbab where s.sid=dbab.holding_session;

--block and waiter
select ROW_WAIT_OBJ#,ROW_WAIT_FILE#,ROW_WAIT_BLOCK#,ROW_WAIT_ROW# from v$session where event='enq: TX - row lock contention' order by 3,4;
select sid,sql_id,sql_hash_value,last_call_et,status,event,blocking_session from v$session where blocking_session is not null order by blocking_session,sid;
select sid,sql_id,sql_hash_value,last_call_et,status,event,blocking_session from v$session where sid in (select distinct blocking_session from v$session where blocking_session is not null);

 

set pagesize 40 linesize 150
column blockers format a45
column waiters format a45
select 'Blocker('||sb.sid||':'||sb.serial#||'-'||sb.username||')-'||qb.sql_text blockers,
'Waiter('||sw.sid||':'||sw.serial#||'-'||sw.username||')-'||qw.sql_text waiters
from v$lock lb,v$lock lw,v$session sb,v$session sw,v$sql qb,v$sql qw
where lb.sid=sb.sid
and lw.sid=sw.sid
and sb.prev_sql_addr=qb.address
and sw.sql_address=qw.address
and lb.id1=lw.id1
and sb.lockwait is null
and sw.lockwait is not null
and lb.block>0;

set echo off
set pagesize 80
set underline =
Column "waiting Sid" FORMAT 9999 heading "Waiter Sid " trunc
COLUMN "waiting SQL" FORMAT A70 heading "Waiter SQL" Trunc
COLUMN "locked object" FORMAT 99999999999999999999 heading "Locked Object"
COLUMN "Filler" Heading " " FORMAT A50
COLUMN "blocking sid" FORMAT 9999 heading "Blocking Sid"
COLUMN "SQL from blocking session" FORMAT A70 heading "Blocking SQL" trunc
select distinct
waiter.sid "waiting sid"
, w_sql.sql_text "waiting SQL"
, waiter.ROW_WAIT_OBJ# "locked object"
,' ' Filler
, waiter.BLOCKING_SESSION "blocking sid"
, b_sql.sql_text "SQL from blocking session"
from v$session waiter, v$active_session_history blocker, v$sql b_sql, v$sql w_sql
where waiter.event='enq: TX - row lock contention'
and waiter.sql_id=w_sql.sql_id
and waiter.blocking_session=blocker.session_id
and b_sql.sql_id=blocker.sql_id
and blocker.CURRENT_OBJ#=waiter.ROW_WAIT_OBJ#
and blocker.CURRENT_FILE#= waiter.ROW_WAIT_FILE#
and blocker.CURRENT_BLOCK#= waiter.ROW_WAIT_BLOCK#;

--holder and waiter:
col wait for a30
SELECT chain_id, num_waiters, in_wait_secs, osid, blocker_osid, substr(wait_event_text,1,30) wait
FROM v$wait_chains;

set pages 1000
set lines 120
set heading off
column w_proc format a50 tru
column instance format a20 tru
column inst format a28 tru
column wait_event format a50 tru
column p1 format a16 tru
column p2 format a16 tru
column p3 format a15 tru
column Seconds format a50 tru
column sincelw format a50 tru
column blocker_proc format a50 tru
column waiters format a50 tru
column chain_signature format a100 wra
column blocker_chain format a100 wra

SELECT *
FROM (SELECT 'Current Process: '||osid W_PROC, 'SID '||i.instance_name INSTANCE,
'INST #: '||instance INST,'Blocking Process: '||decode(blocker_osid,null,'<none>',blocker_osid)||
' from Instance '||blocker_instance BLOCKER_PROC,'Number of waiters: '||num_waiters waiters,
'Wait Event: ' ||wait_event_text wait_event, 'P1: '||p1 p1, 'P2: '||p2 p2, 'P3: '||p3 p3,
'Seconds in Wait: '||in_wait_secs Seconds, 'Seconds Since Last Wait: '||time_since_last_wait_secs sincelw,
'Wait Chain: '||chain_id ||': '||chain_signature chain_signature,'Blocking Wait Chain: '||decode(blocker_chain_id,null,
'<none>',blocker_chain_id) blocker_chain
FROM v$wait_chains wc,
v$instance i
WHERE wc.instance = i.instance_number (+)
AND ( num_waiters > 0
OR ( blocker_osid IS NOT NULL
AND in_wait_secs > 10 ) )
ORDER BY chain_id,
num_waiters DESC)
WHERE ROWNUM < 101;

SELECT /*+rule*/decode(request,0,'holder:','waiter:')||sid sid,id1,id2,lmode,request,type
FROM v$lock
WHERE (id1,id2,type) IN
(SELECT id1,id2,type
FROM v$lock
WHERE request>0)
ORDER BY id1,request;

--simple
select b.blocking_session blocker_sid,w.sql_id blocker_sql_id,b.sid waiter_sid,b.sql_id waiter_sql_id,b.last_call_et blocker_time
from gv$session b,gv$session w
where b.inst_id=w.inst_id and b.blocking_session=w.sid;

--locked object:
col lock_type for a20
col object_name for a30
select rpad(oracle_username,10) o_name,session_id sid,
decode(locked_mode,0,'None',1,'Null',2,'Row share',3,'Row Exclusive',4,'Share',5,'Share Row Exclusive',6,'Exclusive') lock_type,
object_name ,xidusn,xidslot,xidsqn
from v$locked_object,dba_objects
where v$locked_object.object_id=dba_objects.object_id;

select sid, event, p1, p1raw,
chr(bitand(P1,-16777216)/16777215)||chr(bitand(P1,16711680)/65535) type,
mod(P1, 16) "MODE"
from v$session_wait
where event like '%enq%';

set pagesize 40 linesize 150
column username format a9
column sid format 9999
column serial# format 99999999
column mode_locked format a12
column os_user_name format a16
column object_name format a12
column object_type format a12
select s.username,s.sid,s.serial#,
decode(lo.locked_mode,
0,'none',
1,'null',
2,'row-s(ss)',
3,'row-x(sx)',
4,'share',
5,'s/row-x(ssx)',
6,'exclusive',
to_char(lo.locked_mode)) mode_locked,
lo.os_user_name,
do.object_name,do.object_type
from v$session s,v$locked_object lo,dba_objects do
where
lo.object_id=do.object_id and s.sid=lo.session_id;

--waiter:
select c.sid waiter_sid,
a.object_name,
a.object_type
from dba_objects a,
v$session b,
v$session_wait c
where ( a.object_id = b.row_wait_obj# or a.data_object_id = b.row_wait_obj#)
and b.sid = c.sid
and chr(bitand(c.P1,-16777216)/16777215) || chr(bitand(c.P1,16711680)/65535) = 'TX'
and c.event like '%enq%';


select * from v$lock where block=1
select * from v$locked_object
select * from v$access where owner='T'
select * from v$enqueue_lock
select * from v$enqueue_stat
select * from V$GLOBAL_BLOCKED_LOCKS
select sid,type,lmode,request,ctime,block from v$transaction_enqueue;
select * from V$RESOURCE
select * from V$RESOURCE_LIMIT
SELECT * FROM DBA_BLOCKERS
SELECT * FROM DBA_WAITERS
select * from DBA_KGLLOCK
select object_name,object_id,owner from dba_objects where object_id=131107

select a.SESSION_ID,a.ORACLE_USERNAME,b.object_name,a.locked_mode
from v$locked_object a,dba_objects b
where a.object_id=b.object_id;

--LIBRARY CACHE PIN/LOCK
SELECT sql_text
FROM v$sqlarea
WHERE (v$sqlarea.address, v$sqlarea.hash_value) IN (
SELECT sql_address, sql_hash_value
FROM v$session
WHERE SID IN (
SELECT SID
FROM v$session a, x$kglpn b
WHERE a.saddr = b.kglpnuse
AND b.kglpnmod <> 0
AND b.kglpnhdl IN (SELECT p1raw
FROM v$session_wait
WHERE event LIKE 'library%')));


SELECT s.sid, kglpnmod "Mode", kglpnreq "Req", SPID "OS Process"
FROM v$session_wait w, x$kglpn p, v$session s ,v$process o
WHERE p.kglpnuse=s.saddr
AND kglpnhdl=w.p1raw
--and w.event like '%library cache%'
and s.paddr=o.addr;

col KGLNAOBJ for a30
select sw.sid,xb.kglhdadr as addr,xb.kglhdpar as parent_addr ,xb.kglnaown as owner,xb.kglnaobj
from x$kglob xb,v$session_wait sw
where xb.kglhdadr = sw.p1raw
and sw.event like 'library%';

col username for a10
col program for a15
col req for a10
col mode for a10

SELECT a.SID, a.username,
case b.kglpnmod when 2 then 'S' when 3 then 'X' end "Mode",
case b.kglpnreq when 2 then 'S' when 3 then 'X' end "Req" ,
a.program, b.addr, b.kglpnadr, b.kglpnuse,b.kglpnses, b.kglpnhdl, b.kglpnlck
FROM v$session a, x$kglpn b
WHERE a.saddr = b.kglpnuse
--AND b.kglpnmod <> 0
AND b.kglpnhdl IN (SELECT p1raw FROM v$session_wait WHERE event LIKE 'library%') ;

select /*+ ordered */ w1.sid waiting_session,
h1.sid holding_session,
w.kgllktype lock_or_pin,
w.kgllkhdl address,
decode(h.kgllkmod, 0, 'None', 1, 'Null', 2, 'Share', 3, 'Exclusive',
'Unknown') mode_held,
decode(w.kgllkreq, 0, 'None', 1, 'Null', 2, 'Share', 3, 'Exclusive',
'Unknown') mode_requested
from dba_kgllock w, dba_kgllock h, v$session w1, v$session h1
where
(((h.kgllkmod != 0) and (h.kgllkmod != 1)
and ((h.kgllkreq = 0) or (h.kgllkreq = 1)))
and
(((w.kgllkmod = 0) or (w.kgllkmod= 1))
and ((w.kgllkreq != 0) and (w.kgllkreq != 1))))
and w.kgllktype = h.kgllktype
and w.kgllkhdl = h.kgllkhdl
and w.kgllkuse = w1.saddr
and h.kgllkuse = h1.saddr
/

select * from x$kgllk where kglnaobj like '%DBMS_DEBUG%';
select * from x$kglob where kglnaobj like '%DBMS_DEBUG%';

/************************************************************
--DBA_HIST:
************************************************************/
col BEGIN_INTERVAL_TIME for a30
col END_INTERVAL_TIME for a30
select snap_id,BEGIN_INTERVAL_TIME,END_INTERVAL_TIME from DBA_HIST_SNAPSHOT where snap_id=14361;
select snap_id,sql_id,event,blocking_session from DBA_HIST_ACTIVE_SESS_HISTORY where event='enq: TX - row lock contention' and snap_id>=14361 order by snap_id,event;

 

/************************************************************
--LATCH:
************************************************************/
--PARAMETERS RELATED TO LATCH:
select a.ksppinm name,b.ksppstvl value,b.ksppstdf "default",decode(a.ksppity, 1,'boolean', 2,'string', 3,'number', 4,'file', a.ksppity) type
,a.ksppdesc description
from sys.x$ksppi a,sys.x$ksppcv b
where a.indx = b.indx and a.ksppinm like '\_db\_block\_hash\_latches%' escape '\'
order by name;

select a.ksppinm name,b.ksppstvl value,b.ksppstdf "default",decode(a.ksppity, 1,'boolean', 2,'string', 3,'number', 4,'file', a.ksppity) type
,a.ksppdesc description
from sys.x$ksppi a,sys.x$ksppcv b
where a.indx = b.indx and a.ksppinm like '\_db\_block\_hash\_buckets%' escape '\'
order by name;

_db_block_lru_latches
_db_block_hash_latches
_db_block_hash_buckets
_db_block_max_scan_pct
_db_writer_scan_depth_pct --UNDER 9I
_db_large_dirty_queue


db_block_buffers = 8388608 (or 6553600)
_db_block_hash_latches = power(2, trunc(log(2, 8388608) - 6)) = 131072
_db_block_hash_buckets = 16777216 (or 14736697)

_db_block_hash_latches = db_block_buffers / 4
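
A quick arithmetic check of the power-of-two expression above, run against DUAL (8388608 buffers is just
the example value from the note):

select power(2, trunc(log(2, 8388608) - 6)) hash_latches from dual;
--returns 131072
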
--LATCH QUERY:
SELECT * FROM V$LATCHNAME
SELECT * FROM V$LATCH ORDER BY MISSES DESC
SELECT * FROM V$LATCHHOLDER

select addr,gets,misses,sleeps from v$latch_children where name='cache buffers chains' and misses>1000000;
select file#,dbarfil,dbablk,class,state from x$bh where hladdr='C0000015E05734B8';

SELECT V$SESSION.SID,NAME LATCH_NAME FROM
V$SESSION,V$LATCH,V$SESSION_WAIT S
WHERE V$SESSION.SID=S.SID AND S.EVENT='latch free' AND S.P2=V$LATCH.LATCH#;

select p1,count(1) from v$session_wait where event='latch free' group by p1;

select sid,serial#,sql_hash_value,last_call_et,status from v$session where sid=2413;

/*
** Display System-wide latch statistics.
*/
column name format A32 truncate heading "LATCH NAME"
column pid heading "HOLDER PID"
select c.name,a.addr,a.gets,a.misses,a.sleeps,
a.immediate_gets,a.immediate_misses,b.pid
from v$latch a, v$latchholder b, v$latchname c
where a.addr = b.laddr(+)
and a.latch# = c.latch#
order by a.latch#;

/*
** Given a latch address, find out the latch name.
*/
column name format a64 heading 'Name'
select a.name from v$latchname a, v$latch b
where b.addr = '&addr'
and b.latch#=a.latch#;

/*
** Display latch statistics by latch name.
*/
column name format a32 heading 'LATCH NAME'
column pid heading 'HOLDER PID'
select c.name,a.addr,a.gets,a.misses,a.sleeps,
a.immediate_gets,a.immediate_misses,b.pid
from v$latch a, v$latchholder b, v$latchname c
where a.addr = b.laddr(+) and a.latch# = c.latch#
and c.name like '&latch_name%' order by a.latch#;

cache buffers chains latches
Individual block contention can show up as contention for one of these
latches. Each cache buffers chains latch covers a list of buffers in
the buffer cache. If one or two child latches stand out from
V$LATCH_CHILDREN then:

In Oracle8:
SELECT File# , dbablk, class, state
FROM x$bh
WHERE hladdr='&ADDR_OF_CHILD_LATCH';

The meaning of state: 0 FREE no valid block image
1 XCUR a current mode block, exclusive to this instance
2 SCUR a current mode block, shared with other instances
3 CR a consistent read (stale) block image
4 READ buffer is reserved for a block being read from disk
5 MREC a block in media recovery mode
6 IREC a block in instance (crash) recovery mode

The meaning of tch: the touch count. A high touch count indicates that the buffer is used often,
so it will probably be near the head of the MRU list. The meaning of tim: touch time.
Other columns: class represents a value designating the use of the block; set_ds maps to addr on x$kcbwds;
le_addr can be outer joined to x$le.le_addr. flag is a bit array; bits, if set, mean:
0 block is dirty
4 temporary block
9 or 10 ping
14 stale
16 direct
524288 (=0x80000) block was read in a full table scan
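--A minimal sketch, assuming the flag bit positions listed above (bit 0 = dirty, bit 4 = temporary,
--0x80000 = read by full table scan), to decode x$bh.flag for the buffers under one child latch:
select dbarfil, dbablk, tch,
decode(bitand(flag,1),1,'Y','N') dirty,
decode(bitand(flag,16),16,'Y','N') temporary,
decode(bitand(flag,524288),524288,'Y','N') read_by_fts
from x$bh
where hladdr='&ADDR_OF_CHILD_LATCH';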

SELECT "WHERE", SLEEP_COUNT, WTR_SLP_COUNT, LONGHOLD_COUNT
FROM v$latch_misses
WHERE parent_name='&NAME_OF_PROBLEM_LATCH'
ORDER BY 1;

select
count(*) child_count,
sum(gets) sum_gets,
sum(misses) sum_misses,
sum(sleeps) sum_sleeps
from
v$latch_children
where
name = 'cache buffers chains';

select
P1
from
v$session_wait
where
event = 'latch: cache buffers chains';
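--P1 of 'latch: cache buffers chains' is the child latch address; p1raw returns it in the same raw
--format as v$latch_children.addr / x$bh.hladdr used below (a convenience sketch):
select sid, p1, p1raw
from v$session_wait
where event = 'latch: cache buffers chains';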

select /*+ RULE */
e.owner ||'.'|| e.segment_name segment_name,
e.extent_id extent#,
x.dbablk - e.block_id + 1 block#,
x.tch,
l.child#
from
sys.v$latch_children l,
sys.x$bh x,
sys.dba_extents e
where
x.hladdr = 'C0000015E05734B8' and
e.file_id = x.file# and
x.hladdr = l.addr and
x.dbablk between e.block_id and e.block_id + e.blocks -1
order by x.tch desc;

--FIND THE OBJECT FROM THE SEGMENTS/EXTENTS:
SELECT SEGMENT_NAME, SEGMENT_TYPE FROM DBA_EXTENTS
WHERE FILE_ID = <file_number> and <block_number> BETWEEN BLOCK_ID
AND BLOCK_ID + BLOCKS - 1;


select indx, spin, yield, waittime from x$ksllclass;

SQL> select latch#,name from v$latchname where name in ('library cache','cache buffers chains');

LATCH# NAME
---------- --------------------------------------------------
122 cache buffers chains
214 library cache

"_latch_class_0" = "8000"
"_latch_class_1" = "800"
"_latch_classes" = "122:1 214:1"

select a.kslldnam, b.kslltnum, b.class_ksllt
from x$kslld a, x$ksllt b
where a.kslldadr = b.addr
and b.class_ksllt > 0;
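--A sketch of how the latch-class settings above could be placed in the spfile (static parameters,
--instance restart required; latch# values are instance specific, verify them with the queries above):
alter system set "_latch_class_1" = "800" scope=spfile;
alter system set "_latch_classes" = "122:1 214:1" scope=spfile;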

--library cache latch:
select count(*) from v$latch_children where name='library cache';
select sql_text,VERSION_COUNT,INVALIDATIONS,PARSE_CALLS,OPTIMIZER_MODE,PARSING_USER_ID,PARSING_SCHEMA_ID,ADDRESS,HASH_VALUE
from v$sqlarea where version_count >1000;
select min(version_count) from v$sqlarea a,v$sql b where a.sql_text=b.sql_text and b.OPTIMIZER_COST >1000;
select max(version_count) from v$sqlarea a,v$sql b where a.sql_text=b.sql_text and b.OPTIMIZER_COST >1000;
select max(version_count) from v$sqlarea a,v$sql b where a.sql_text=b.sql_text;

_kgl_latch_count --number of library cache latches
_kgl_bucket_count

_kghdsidx_count --number of shared pool latches
_sqlexec_progression_cost=0 --sql execution progression monitoring cost threshold; setting it to 0 can work around SQL not being shared and ORA-04031. Note 62143.1
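--Current values of the library cache / shared pool hidden parameters above can be checked with the
--same x$ksppi/x$ksppcv pattern (sketch; these parameters are version dependent):
select a.ksppinm name,b.ksppstvl value
from sys.x$ksppi a,sys.x$ksppcv b
where a.indx = b.indx
and a.ksppinm in ('_kgl_latch_count','_kgl_bucket_count','_kghdsidx_count','_sqlexec_progression_cost');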

--simulator lru latch BUG 5918642
ALTER SYSTEM SET DB_CACHE_ADVICE=OFF SCOPE=BOTH;

alter system set "_library_cache_advice" = false;

/************************************************************
--UNDO AND TEMP: 10203 undo bug 5387030
************************************************************/
_smu_debug_mode=33554432 --changes the tuned_undoretention calculation to max(maxquerylen secs + 300, undo_retention)
_undo_autotune=false --disables automatic undo retention tuning

Note:461480.1 FAQ – Automatic Undo Management (AUM) / System Managed Undo (SMU)
Note:240746.1 10g NEW FEATURE on AUTOMATIC UNDO RETENTION
Bug 5387030 – Automatic tuning of undo_retention causes unusual extra space allocation
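--A syntax sketch of applying the two underscore parameters above (use only under Oracle Support
--guidance; scope/restart requirements depend on the version):
alter system set "_undo_autotune" = false scope=spfile;
alter system set "_smu_debug_mode" = 33554432 scope=spfile;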


/******************************************************************************************
Contention Under Auto-Tuned Undo Retention (Doc ID 742035.1)

Bug 7291739 - Contention with auto-tuned undo retention (Doc ID 7291739.8)
Description
High 'latch: row cache objects' contention may be seen on dc_rollback_segments
together with high 'enq: US - contention' waits when using system managed undo
with an auto tuned undo retention period.

This fix allows users to specify a high threshold for undo retention via
the "_highthreshold_undoretention" parameter. The tuned value of undo retention
will then not exceed the set value. This underscore parameter is a dynamic
parameter and its value can be changed with
"alter system set "_highthreshold_undoretention"=<new_value>;"

Workaround
Disable auto tuning by setting "_undo_autotune" = false

The patch is available on both 10g and 11g. If the patch is applied on 11g, you use parameter _highthreshold_undoretention.
SQL> alter system set "_HIGHTHRESHOLD_UNDORETENTION"=900 SCOPE=spfile;
If the patch is applied on 10g, you use _first_spare_parameter.
SQL> alter system set "_first_spare_parameter"=10800 SCOPE=spfile;

And without patch, you use "_undo_autotune" = false as workaround.

Related To:
_FIRST_SPARE_PARAMETER
_SECOND_SPARE_PARAMETER
_THIRD_SPARE_PARAMETER

Note: _first_spare_parameter in 9.2.0.8 will configure a % of the shared pool to be
used for the Streams pool. A _kghdsidx_count setting of 1 avoids the use of sub pools within the
shared_pool; in 9.2 multiple sub pools have been associated with performance issues.

******************************************************************************************/


event="10511 trace name context forever, level 1" --Prevent SMON from offlining idle Undo Segments using event 10511

SQL> conn /as sysdba
SQL>ALTER SYSTEM SET max_dump_file_size=200M;
SQL>ALTER SYSTEM SET "_trace_buffer_wait_timeouts"=5 SCOPE=MEMORY;
SQL>ALTER SYSTEM SET "_in_memory_undo" = FALSE;

--ROLLBACK PROGRESS RATE:
strings rorasystem | grep _SYSSMU | cut -d $ -f 1 | sort -u > listSMU

SELECT * FROM V$FAST_START_SERVERS;
SELECT DISTINCT KTUXECFL,COUNT(*) FROM X$KTUXE GROUP BY KTUXECFL;
SELECT KTUXEUSN,KTUXESLT,KTUXESIZ FROM X$KTUXE WHERE KTUXECFL='DEAD';
SELECT KTUXEUSN,KTUXESLT,KTUXESIZ FROM X$KTUXE WHERE KTUXESTA='ACTIVE';

SELECT KTUXEUSN,KTUXESLT,KTUXESIZ FROM X$KTUXE WHERE KTUXESTA!='INACTIVE';
SET SERVEROUT ON
DECLARE
L_START NUMBER;
L_END NUMBER;
BEGIN
SELECT KTUXESIZ INTO L_START FROM X$KTUXE WHERE KTUXEUSN=1002 AND KTUXESLT=12;
DBMS_OUTPUT.PUT_LINE(L_START);
DBMS_LOCK.SLEEP(60);
SELECT KTUXESIZ INTO L_END FROM X$KTUXE WHERE KTUXEUSN=1002 AND KTUXESLT=12;
DBMS_OUTPUT.PUT_LINE(L_END);
DBMS_OUTPUT.PUT_LINE('TIME EST DAY: '||ROUND(L_END/(L_START-L_END)/60/24,2));
END;
/

select usn, state, undoblockstotal "Total", undoblocksdone "Done", undoblockstotal-undoblocksdone "ToDo",
decode(cputime,0,'unknown',sysdate+(((undoblockstotal-undoblocksdone) / (undoblocksdone / cputime)) / 86400))
"Estimated time to complete"
from v$fast_start_transactions;


--UNDO USAGE RATE:
select 'the '||name||' undo tablespace '||tablespace_name||' total space '||UNDOB/1024/1024||
'MB used space '||round((UNDO*DBS/1024/1024),0)||'MB idle '||round(100*((UNDOB-UNDO*DBS)/UNDOB),0)||
'% at '||to_char(sysdate,'yyyymmddhh24miss') as a
FROM
(select name from V$database),
(select nvl(sum(undoblks),0) UNDO from v$undostat
where
begin_time >(select sysdate - UR/(3600*24) from (select value as UR from v$parameter where name='undo_retention'))),
(select value as DBS from v$parameter where name='db_block_size'),
(select sum(bytes) as UNDOB,tablespace_name
from dba_data_files
where tablespace_name=(select UPPER(value) as UNDO from v$parameter where name='undo_tablespace')
group by tablespace_name);

SELECT STATUS,SUM(BYTES) FROM DBA_UNDO_EXTENTS GROUP BY STATUS;

select decode(sign(round(100*((UNDOB-UNDO*DBS)/UNDOB),0)-10), 1,'normal:',
decode(sign(round(100*((UNDOB-UNDO*DBS)/UNDOB),0)-20), 1,'warning:','error:'))
||' the '||instance_name||' undo tablespace '||tablespace_name||' total space '
||UNDOB/1024/1024||'MB used space '||round((UNDO*DBS/1024/1024),0)||'MB idle '||
round(100*((UNDOB-UNDO*DBS)/UNDOB),0)||'% at '||to_char(sysdate,'yyyymmddhh24miss') as a
FROM
(select instance_name from V$instance),
(select nvl(sum(undoblks),0) UNDO from v$undostat
where begin_time >(select sysdate - UR/(3600*24) from
(select value as UR from v$parameter where name='undo_retention'))),
(select value as DBS from v$parameter where name='db_block_size'),
(select sum(bytes) as UNDOB,tablespace_name from dba_data_files
where tablespace_name=(select upper(value) as UNDO
from v$parameter where name='undo_tablespace')
group by tablespace_name);


+ Use the following query to find if EXPIRED EXTENTS of other segments are used or not:
select sum(EXPSTEALCNT), sum(EXPBLKRELCNT), sum(EXPBLKREUCNT) from v$undostat;
An output of '0' for sum(EXPBLKREUCNT) confirms that expired extents of other segments are not being used.

+ Use the following query to find the HWM of all the undo segments:
select n.name,extents,rssize/(1024*1024) rssize_MB,
hwmsize/(1024*1024) hwmsize_MB
from v$rollstat s,v$rollname n
where s.usn = n.usn
/

set pagesize 2000
set linesize 110
col name for a20
col username for a10
col text format a25
select a.name,b.xacts,c.sid,c.username,c.serial#,d.sql_text text,e.used_ublk*8192/1024/1024||'M'
from v$rollname a,v$rollstat b,v$session c,v$sqltext d,v$transaction e
where a.usn=b.usn and b.usn=e.xidusn and c.taddr=e.addr
and c.sql_address=d.address and c.sql_hash_value=d.hash_value order by a.name,c.sid,d.piece;


select r.name rollback_segment_name,s.sid,s.serial#,s.username,s.status,t.cr_get,t.phy_io,t.used_ublk,t.noundo,substr(s.program,1,78) program
from v$session s,v$transaction t,v$rollname r where t.addr=s.taddr and t.xidusn=r.usn --and r.name in ('')
order by t.cr_get,t.phy_io;


--TEMP USAGE RATE:
select 'the '||name||' temp tablespaces '||tablespace_name||' idle '||round(100-(s.tot_used_blocks/s.total_blocks)*100,3)||'% at '||to_char(sysdate,'yyyymmddhh24miss')
from
(select d.tablespace_name tablespace_name,
nvl(sum(used_blocks),0) tot_used_blocks,
sum(blocks) total_blocks
from v$sort_segment v ,dba_temp_files d
where d.tablespace_name=v.tablespace_name(+)
group by d.tablespace_name) s, v$database;

col DatafileName for a30
set lin 150
--The query below would show temp usage:
Select round((f.bytes_free + f.bytes_used) / 1024 / 1024, 2) "total MB",
round(((f.bytes_free + f.bytes_used) - nvl(p.bytes_used, 0)) / 1024 / 1024, 2) "Free MB" ,
d.file_name "DatafileName",
round(nvl(p.bytes_used, 0)/ 1024 / 1024, 2) "Used MB",
round((f.bytes_free + f.bytes_used) / 1024, 2) "total KB",
round(((f.bytes_free + f.bytes_used) - nvl(p.bytes_used, 0)) / 1024, 2) "Free KB",
round(nvl(p.bytes_used, 0)/ 1024, 2) "Used KB",
0 "Fragmentation Index"
from V$TEMP_SPACE_HEADER f, DBA_TEMP_FILES d, V$TEMP_EXTENT_POOL p
where f.tablespace_name(+) = d.tablespace_name
and f.file_id(+) = d.file_id
and p.file_id(+) = d.file_id;

/*select 'the '||name||' temp tablespaces '||tablespace_name||' idle '||round(100-(s.tot_used_blocks/s.total_blocks)*100,3)||'% at '||to_char(sysdate,'yyyymmddhh24miss')
from
(select d.tablespace_name tablespace_name,
nvl(sum(used_blocks),0) tot_used_blocks,
sum(blocks) total_blocks
from v$sort_segment v ,dba_data_files d
where d.tablespace_name=v.tablespace_name(+)
and d.tablespace_name like '%TEMP%'
and v.tablespace_name like '%TEMP%'
group by d.tablespace_name) s, v$database;
*/

SELECT INST_ID, TABLESPACE_NAME, SEGMENT_FILE, TOTAL_BLOCKS,
USED_BLOCKS, FREE_BLOCKS, MAX_USED_BLOCKS, MAX_SORT_BLOCKS
FROM GV$SORT_SEGMENT;

SELECT INST_ID, TABLESPACE_NAME, BLOCKS_CACHED, BLOCKS_USED
FROM GV$TEMP_EXTENT_POOL;

SELECT INST_ID,TABLESPACE_NAME, BLOCKS_USED, BLOCKS_FREE
FROM GV$TEMP_SPACE_HEADER;

SELECT INST_ID,FREE_REQUESTS,FREED_EXTENTS
FROM GV$SORT_SEGMENT;

/*QUERY DETAIL INFO OF TEMP USAGE*/
--Take a look at:
v$temp_space_header
v$temp_extent_pool --extents that are no longer needed are cached in v$temp_extent_pool and reused.
v$sort_usage --shows the space currently being used.

--SORT USAGE RATE:
select sess.SID, sess.serial#, segtype, blocks*8/1024 "MB" ,sess.sql_id ,sql_text
from v$sort_usage sort, v$session sess ,v$sql sql
where sort.SESSION_ADDR = sess.SADDR
and sql.sql_id = sess.sql_id
order by blocks desc;

--For 8.1.7 to 9.2:
SELECT a.username, a.sid, a.serial#, a.osuser, b.tablespace, b.blocks, c.sql_text
FROM v$session a, v$sort_usage b, v$sqlarea c
WHERE a.saddr = b.session_addr
AND c.address= a.sql_address
AND c.hash_value = a.sql_hash_value
ORDER BY b.tablespace, b.blocks;

col username for a10
col osuser for a10
col TABLESPACE for a15
col sql_text for a30
--For 10.1 and above:
/*This query is slow*/
SELECT a.username, a.sid, a.serial#, a.osuser, b.tablespace, b.blocks, c.sql_text
FROM v$session a, v$tempseg_usage b, v$sqlarea c
WHERE a.saddr = b.session_addr
AND c.address= a.sql_address
AND c.hash_value = a.sql_hash_value
ORDER BY b.tablespace, b.blocks;


select ts# from v$tablespace where name = 'TEMP';
ALTER SESSION SET EVENTS 'IMMEDIATE TRACE NAME DROP_SEGMENTS LEVEL <TS NUMBER + 1>';

/************************************************************
--SQL PLAN BASELINE:
************************************************************/
select * from session_privs where privilege like '%ADMINISTER SQL MAN%';
SELECT * FROM TABLE(DBMS_XPLAN.display_sql_plan_baseline(plan_name=>'SQL_PLAN_2qy8m1m9gtrxv97bbe3d0'));
col col for a300
SELECT DBMS_SQLTUNE.REPORT_SQL_DETAIL('3axa1x91c865y',report_level=>'ALL') col FROM DUAL;

VARIABLE my_rept CLOB
BEGIN
:my_rept :=DBMS_SQLTUNE.REPORT_SQL_MONITOR();
END;
/
PRINT :my_rept


/************************************************************
--SQL AND EXECUTION PLAN:
************************************************************/
SET AUTOTRACE TRACEONLY EXPLAIN

select * from table(dbms_xplan.display_cursor(null, 0, 'basic +PEEKED_BINDS'));
select * from table(dbms_xplan.display_cursor(null, 0, 'ADVANCED'));
select * from table(dbms_xplan.display_cursor('fqsfh4cuc4rzs',null,'ADVANCED'));
select DBMS_SQLTUNE.REPORT_SQL_DETAIL('3axa1x91c865y') aaa from dual;

sql_compiler
sql test case builder

--QUERY EXECUTION PLAN:
select * from (select
rpad('|'||substr(lpad(' ',1*(depth-1))||operation||
decode(options, null,'',' '||options), 1, 32), 33, ' ')||'|'||
rpad(decode(id, 0, '----- '||to_char(hash_value)||' -----'
, substr(decode(substr(object_name, 1, 7), 'SYS_LE_', null, object_name)
||' ',1, 20)), 21, ' ')||'|'||
lpad(decode(cardinality,null,' ',
decode(sign(cardinality-1000), -1, cardinality||' ',
decode(sign(cardinality-1000000), -1, trunc(cardinality/1000)||'K',
decode(sign(cardinality-1000000000), -1, trunc(cardinality/1000000)||'M',
trunc(cardinality/1000000000)||'G')))), 7, ' ') || '|' ||
lpad(decode(bytes,null,' ',
decode(sign(bytes-1024), -1, bytes||' ',
decode(sign(bytes-1048576), -1, trunc(bytes/1024)||'K',
decode(sign(bytes-1073741824), -1, trunc(bytes/1048576)||'M',
trunc(bytes/1073741824)||'G')))), 6, ' ') || '|' ||
lpad(decode(cost,null,' ',
decode(sign(cost-10000000), -1, cost||' ',
decode(sign(cost-1000000000), -1, trunc(cost/1000000)||'M',
trunc(cost/1000000000)||'G'))), 8, ' ') || '|' as "Explain plan"
from Gv$sql_plan
where hash_value=2639526010);


set linesize 132
set pagesize 300
variable hash_value number;
prompt
prompt Specify the Hash Value
prompt ~~~~~~~~~~~~~~~~~~~~~~
prompt Hash Value specified is: &&hv
prompt
begin
:hash_value:= '&&hv';
end;
/
select st.sql_text
from v$sqltext st
where st.hash_value=:hash_value
order by st.piece;

select '--------------------------------------------------------------------------------' from dual
union all
select '| Operation | PHV/Object Name | Rows | Bytes| Cost |' as "Optimizer Plan:" from dual
union all
select '--------------------------------------------------------------------------------' from dual
union all
select *
from (select
rpad('|'||substr(lpad(' ',1*(depth-1))||operation||
decode(options, null,'',' '||options), 1, 50), 51, ' ')||'|'||
rpad(decode(id, 0, '----- '||to_char(hash_value)||' -----'
, substr(decode(substr(object_name, 1, 7), 'SYS_LE_', null, object_name)
||' ',1, 20)), 21, ' ')||'|'||
lpad(decode(cardinality,null,' ',
decode(sign(cardinality-1000), -1, cardinality||' ',
decode(sign(cardinality-1000000), -1, trunc(cardinality/1000)||'K',
decode(sign(cardinality-1000000000), -1, trunc(cardinality/1000000)||'M',
trunc(cardinality/1000000000)||'G')))), 7, ' ') || '|' ||
lpad(decode(bytes,null,' ',
decode(sign(bytes-1024), -1, bytes||' ',
decode(sign(bytes-1048576), -1, trunc(bytes/1024)||'K',
decode(sign(bytes-1073741824), -1, trunc(bytes/1048576)||'M',
trunc(bytes/1073741824)||'G')))), 6, ' ') || '|' ||
lpad(decode(cost,null,' ',
decode(sign(cost-10000000), -1, cost||' ',
decode(sign(cost-1000000000), -1, trunc(cost/1000000)||'M',
trunc(cost/1000000000)||'G'))), 8, ' ') || '|' as "Explain plan"
from v$sql_plan
where hash_value = :hash_value
order by hash_value, id
)
union all
select '--------------------------------------------------------------------------------' from dual;
undefine hv;


--FIND THE SQL DEPENDENT ON A PARTICULAR OBJECT
accept OwnerName prompt "Owner Name: "
accept TableName prompt "Table Name: "

column sql_text format a60 word_wrapped

spool &&TableName
select /*+ ordered use_hash(d) use_hash(c) */
sum(c.kglobt13) disk_reads,
sum(c.kglhdexc) executions,
c.kglnaobj sql_text
from
sys.x$kglob o,
sys.x$kgldp d,
sys.x$kglcursor c
where
o.inst_id = userenv('Instance') and
d.inst_id = userenv('Instance') and
c.inst_id = userenv('Instance') and
o.kglnaown = upper('&OwnerName') and
o.kglnaobj = upper('&TableName') and
d.kglrfhdl = o.kglhdadr and
c.kglhdadr = d.kglhdadr
group by
c.kglnaobj
order by
1 desc
/

spool off


--RETRIEVE THE CURRENT RUNNING QUERY IN THE DB.
set serverout on

declare
x number;
c binary_integer:=0;
begin
for x in
( select username||'('||sid||','||serial#||
') ospid = ' || process ||
' program = ' || program username,
to_char(LOGON_TIME,' MON-DD HH24:MI') logon_time,
to_char(sysdate,' MON-DD HH24:MI') current_time,
sql_address, LAST_CALL_ET
from v$session
where status = 'ACTIVE'
and rawtohex(sql_address) <> '00'
and username is not null
and sid != (select distinct sid from v$mystat)
order by last_call_et )
loop
for y in ( select max(decode(piece,0,sql_text,null)) ||
max(decode(piece,1,sql_text,null)) ||
max(decode(piece,2,sql_text,null)) ||
max(decode(piece,3,sql_text,null))
sql_text
from v$sqltext_with_newlines
where address = x.sql_address
and piece < 4)
loop
if ( y.sql_text not like '%listener.get_cmd%' and
y.sql_text not like '%RAWTOHEX(SQL_ADDRESS)%')

then
if c=0
then
dbms_output.put_line(
'**************************************' );
dbms_output.put_line( x.username );
dbms_output.put_line( x.logon_time || ' ' ||
x.current_time||
' last et = ' ||
x.LAST_CALL_ET);
dbms_output.put_line(substr( y.sql_text, 1, 250 ) );
dbms_output.put_line(substr( y.sql_text, 251, 250 ) );
dbms_output.put_line(substr( y.sql_text, 501, 250 ) );
dbms_output.put_line(substr( y.sql_text, 751, 250 ) );
-- dbms_output.put_line(substr( y.sql_text, 1051, 250 ) );
else
dbms_output.put_line(
'------------------------------------------------' );
dbms_output.put_line( x.username );
dbms_output.put_line( x.logon_time || ' ' ||
x.current_time||
' last et = ' ||
x.LAST_CALL_ET);
dbms_output.put_line(substr( y.sql_text, 1, 250 ) );
dbms_output.put_line(substr( y.sql_text, 251, 250 ) );
dbms_output.put_line(substr( y.sql_text, 501, 250 ) );
dbms_output.put_line(substr( y.sql_text, 751, 250 ) );
-- dbms_output.put_line(substr( y.sql_text, 1051, 250 ) );
end if;
end if;
c:=c+1;
end loop;
end loop;
end;
/


/************************************************************
--OPEN CURSORS:
************************************************************/
select
'session_cached_cursors' parameter,
lpad(value, 5) value,
decode(value, 0, ' n/a', to_char(100 * used / value, '990') || '%') usage
from
( select
max(s.value) used
from
v$statname n,
v$sesstat s
where
n.name = 'session cursor cache count' and
s.statistic# = n.statistic#
),
( select
value
from
v$parameter
where
name = 'session_cached_cursors'
)
union all
select
'open_cursors',
lpad(value, 5),
to_char(100 * used / value, '990') || '%'
from
( select
max(sum(s.value)) used
from
v$statname n,
v$sesstat s
where
n.name in ('opened cursors current', 'session cursor cache count') and
s.statistic# = n.statistic#
group by
s.sid
),
( select
value
from
v$parameter
where
name = 'open_cursors'
)
/


--When the same statement has several versions (children), the view v$sql_shared_cursor shows the reason
--why the statement cannot be shared. More detail on the reasons is in MOS note 120655.1.

select version_count,address,hash_value,parsing_schema_name,reason,sql_text from (
select
address,''
||decode(max( UNBOUND_CURSOR),'Y', ' UNBOUND_CURSOR')
||decode(max( SQL_TYPE_MISMATCH),'Y', ' SQL_TYPE_MISMATCH')
||decode(max( OPTIMIZER_MISMATCH),'Y', ' OPTIMIZER_MISMATCH')
||decode(max( OUTLINE_MISMATCH),'Y', ' OUTLINE_MISMATCH')
||decode(max( STATS_ROW_MISMATCH),'Y', ' STATS_ROW_MISMATCH')
||decode(max( LITERAL_MISMATCH),'Y', ' LITERAL_MISMATCH')
||decode(max( SEC_DEPTH_MISMATCH),'Y', ' SEC_DEPTH_MISMATCH')
||decode(max( EXPLAIN_PLAN_CURSOR),'Y', ' EXPLAIN_PLAN_CURSOR')
||decode(max( BUFFERED_DML_MISMATCH),'Y', ' BUFFERED_DML_MISMATCH')
||decode(max( PDML_ENV_MISMATCH),'Y', ' PDML_ENV_MISMATCH')
||decode(max( INST_DRTLD_MISMATCH),'Y', ' INST_DRTLD_MISMATCH')
||decode(max( SLAVE_QC_MISMATCH),'Y', ' SLAVE_QC_MISMATCH')
||decode(max( TYPECHECK_MISMATCH),'Y', ' TYPECHECK_MISMATCH')
||decode(max( AUTH_CHECK_MISMATCH),'Y', ' AUTH_CHECK_MISMATCH')
||decode(max( BIND_MISMATCH),'Y', ' BIND_MISMATCH')
||decode(max( DESCRIBE_MISMATCH),'Y', ' DESCRIBE_MISMATCH')
||decode(max( LANGUAGE_MISMATCH),'Y', ' LANGUAGE_MISMATCH')
||decode(max( TRANSLATION_MISMATCH),'Y', ' TRANSLATION_MISMATCH')
||decode(max( ROW_LEVEL_SEC_MISMATCH),'Y', ' ROW_LEVEL_SEC_MISMATCH')
||decode(max( INSUFF_PRIVS),'Y', ' INSUFF_PRIVS')
||decode(max( INSUFF_PRIVS_REM),'Y', ' INSUFF_PRIVS_REM')
||decode(max( REMOTE_TRANS_MISMATCH),'Y', ' REMOTE_TRANS_MISMATCH')
||decode(max( LOGMINER_SESSION_MISMATCH),'Y', ' LOGMINER_SESSION_MISMATCH')
||decode(max( INCOMP_LTRL_MISMATCH),'Y', ' INCOMP_LTRL_MISMATCH')
||decode(max( OVERLAP_TIME_MISMATCH),'Y', ' OVERLAP_TIME_MISMATCH')
||decode(max( SQL_REDIRECT_MISMATCH),'Y', ' SQL_REDIRECT_MISMATCH')
||decode(max( MV_QUERY_GEN_MISMATCH),'Y', ' MV_QUERY_GEN_MISMATCH')
||decode(max( USER_BIND_PEEK_MISMATCH),'Y', ' USER_BIND_PEEK_MISMATCH')
||decode(max( TYPCHK_DEP_MISMATCH),'Y', ' TYPCHK_DEP_MISMATCH')
||decode(max( NO_TRIGGER_MISMATCH),'Y', ' NO_TRIGGER_MISMATCH')
||decode(max( FLASHBACK_CURSOR),'Y', ' FLASHBACK_CURSOR')
||decode(max( ANYDATA_TRANSFORMATION),'Y', ' ANYDATA_TRANSFORMATION')
||decode(max( INCOMPLETE_CURSOR),'Y', ' INCOMPLETE_CURSOR')
||decode(max( TOP_LEVEL_RPI_CURSOR),'Y', ' TOP_LEVEL_RPI_CURSOR')
||decode(max( DIFFERENT_LONG_LENGTH),'Y', ' DIFFERENT_LONG_LENGTH')
||decode(max( LOGICAL_STANDBY_APPLY),'Y', ' LOGICAL_STANDBY_APPLY')
||decode(max( DIFF_CALL_DURN),'Y', ' DIFF_CALL_DURN')
||decode(max( BIND_UACS_DIFF),'Y', ' BIND_UACS_DIFF')
||decode(max( PLSQL_CMP_SWITCHS_DIFF),'Y', ' PLSQL_CMP_SWITCHS_DIFF')
||decode(max( CURSOR_PARTS_MISMATCH),'Y', ' CURSOR_PARTS_MISMATCH')
||decode(max( STB_OBJECT_MISMATCH),'Y', ' STB_OBJECT_MISMATCH')
||decode(max( ROW_SHIP_MISMATCH),'Y', ' ROW_SHIP_MISMATCH')
||decode(max( PQ_SLAVE_MISMATCH),'Y', ' PQ_SLAVE_MISMATCH')
||decode(max( TOP_LEVEL_DDL_MISMATCH),'Y', ' TOP_LEVEL_DDL_MISMATCH')
||decode(max( MULTI_PX_MISMATCH),'Y', ' MULTI_PX_MISMATCH')
||decode(max( BIND_PEEKED_PQ_MISMATCH),'Y', ' BIND_PEEKED_PQ_MISMATCH')
||decode(max( MV_REWRITE_MISMATCH),'Y', ' MV_REWRITE_MISMATCH')
||decode(max( ROLL_INVALID_MISMATCH),'Y', ' ROLL_INVALID_MISMATCH')
||decode(max( OPTIMIZER_MODE_MISMATCH),'Y', ' OPTIMIZER_MODE_MISMATCH')
||decode(max( PX_MISMATCH),'Y', ' PX_MISMATCH')
||decode(max( MV_STALEOBJ_MISMATCH),'Y', ' MV_STALEOBJ_MISMATCH')
||decode(max( FLASHBACK_TABLE_MISMATCH),'Y', ' FLASHBACK_TABLE_MISMATCH')
||decode(max( LITREP_COMP_MISMATCH),'Y', ' LITREP_COMP_MISMATCH')
reason
from
v$sql_shared_cursor
group by
address
) join v$sqlarea using(address) where version_count>&versions
order by version_count desc,address
;

--MULTI-VERSION SQLS:
SELECT SUBSTR(SQL_TEXT,1,80) "SQL", COUNT(*), SUM(EXECUTIONS) "TOTEXECS"
FROM V$SQLAREA
WHERE EXECUTIONS < 5
GROUP BY SUBSTR(SQL_TEXT,1,80)
HAVING COUNT(*) > 30
ORDER BY 2;

SELECT PLAN_HASH_VALUE,COUNT(*) FROM V$SQL GROUP BY PLAN_HASH_VALUE HAVING COUNT(*)>30 ORDER BY 2 DESC;

--CACHE CURSORS:
CREATE OR REPLACE PROCEDURE pincurs AS
addr_plus_hash varchar2(100);
cursor c1 is select rawtohex(address) addr,hash_value
from v$sqlarea
where executions > 500;
BEGIN
for C in C1 loop
addr_plus_hash := c.addr||','||c.hash_value;
DBMS_SHARED_POOL.KEEP(addr_plus_hash,'C');
end loop;
END pincurs;
/


----------------------------------------------------------------
--THE REASON FOR HIGH RECURSIVE CALLS:
----------------------------------------------------------------
hard parses
pl/sql function calls
side effects of modifications (triggers, function-based indexes)
space requests: DMT (extent allocation) or LMT (quota checks)
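--A quick per-session check (sketch) of who is driving recursive calls, based on v$sesstat:
select s.sid, s.username, st.value recursive_calls
from v$sesstat st, v$statname n, v$session s
where st.statistic# = n.statistic#
and n.name = 'recursive calls'
and st.sid = s.sid
order by st.value desc;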


----------------------------------------------------------------
--Top 10 most expensive SQL(Elapsed Time)...
----------------------------------------------------------------
select rownum as rank, a.*
from (
select elapsed_Time,
executions,
buffer_gets,
disk_reads,
cpu_time,
hash_value,
sql_text
from v$sqlarea
where elapsed_time > 20000
order by elapsed_time desc) a
where rownum < 11;

----------------------------------------------------------------
--Top 10 most expensive SQL (CPU Time)...
----------------------------------------------------------------
select rownum as rank, a.*
from (
select elapsed_Time,
executions,
buffer_gets,
disk_reads,
cpu_time,
hash_value,
sql_text
from v$sqlarea
where cpu_time > 20000
order by cpu_time desc) a
where rownum < 11;

----------------------------------------------------------------
--Top 10 most expensive SQL (Buffer Gets by Executions)...
----------------------------------------------------------------
select rownum as rank, a.*
from (
select buffer_gets,
executions,
buffer_gets/ decode(executions,0,1, executions) gets_per_exec,
hash_value,
sql_text
from v$sqlarea
where buffer_gets > 50000
order by buffer_gets desc) a
where rownum < 11;

----------------------------------------------------------------
--Top 10 most expensive SQL (Physical Reads by Executions)...
----------------------------------------------------------------
select rownum as rank, a.*
from (
select disk_reads,
executions,
disk_reads / decode(executions,0,1, executions) reads_per_exec,
hash_value,
sql_text
from v$sqlarea
where disk_reads > 10000
order by disk_reads desc) a
where rownum < 11;

----------------------------------------------------------------
--Top 10 most expensive SQL (Rows Processed by Executions)...
----------------------------------------------------------------
select rownum as rank, a.*
from (
select rows_processed,
executions,
rows_processed / decode(executions,0,1, executions) rows_per_exec,
hash_value,
sql_text
from v$sqlarea
where rows_processed > 10000
order by rows_processed desc) a
where rownum < 11;

----------------------------------------------------------------
--Top 10 most expensive SQL (Buffer Gets vs Rows Processed)...
----------------------------------------------------------------
select rownum as rank, a.*
from (
select buffer_gets, lpad(rows_processed ||
decode(users_opening + users_executing, 0, ' ','*'),20) "rows_processed",
executions, loads,
(decode(rows_processed,0,1,1)) *
buffer_gets/ decode(rows_processed,0,1,
rows_processed) avg_cost,
sql_text
from v$sqlarea
where decode(rows_processed,0,1,1) * buffer_gets/ decode(rows_processed,0,1,rows_processed) > 10000
order by 5 desc) a
where rownum < 11;


/************************************************************
--DYNAMIC SQL:
************************************************************/
execute immediate 'sql';
execute immediate 'sql_select' into var_1, var_2;
execute immediate 'sql' using [in|out|in out] bind_var_1, [in|out|in out] bind_var_2;
execute immediate 'sql_select' into var_1, var_2 using [in|out|in out] bind_var_1, [in|out|in out] bind_var_2;
execute immediate 'sql' returning into var_1;
execute immediate 'sql' bulk collect into indexed_var;
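--A minimal, self-contained sketch of the INTO/USING forms above (the query and bind value are
--illustrative only):
set serveroutput on
declare
v_cnt number;
begin
execute immediate 'select count(*) from user_tables where table_name like :1'
into v_cnt using 'T%';
dbms_output.put_line('tables: '||v_cnt);
end;
/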


/************************************************************
--PERFORMANCE:
************************************************************/
--BUFFER HIT RATIO:
SELECT CONSISTENT_GETS+DB_BLOCK_GETS "LOGIC READS",PHYSICAL_READS "PHISICAL READS",
1 - PHYSICAL_READS/(CONSISTENT_GETS+DB_BLOCK_GETS) "BUFFER HIT RATIO"
FROM V$BUFFER_POOL_STATISTICS;

select logical.value "LOGIC READS",physical.value - direct.value - lobs.value "PHISICAL READS",
1 - ((physical.value - direct.value - lobs.value) / logical.value) "Buffer Cache Hit Ratio"
from V$SYSSTAT physical,V$SYSSTAT direct,V$SYSSTAT lobs,V$SYSSTAT logical
where physical.name = 'physical reads'and
direct.name = 'physical reads direct'and
lobs.name = 'physical reads direct (lob)' and
logical.name = 'session logical reads';

SELECT SUM(DECODE(NAME, 'db block gets', VALUE, 0)) + SUM(DECODE(NAME, 'consistent gets', VALUE, 0)) "LOGIC READS",
SUM(DECODE(NAME, 'physical reads', VALUE, 0)) "PHISICAL READS",
1 - SUM(DECODE(NAME, 'physical reads', VALUE, 0))/
(SUM(DECODE(NAME, 'db block gets', VALUE, 0)) + SUM(DECODE(NAME, 'consistent gets', VALUE, 0))) "BUFFER HIT RATIO"
FROM V$SYSSTAT;

--SGA_RESIZE_OPS
SET LIN 200
COL PARAMETER FOR A25
COL OPER_TYPE FOR A15
COL STATUS FOR A10
ALTER SESSION SET NLS_DATE_FORMAT='YYYYMMDD HH24:MI:SS';
SELECT PARAMETER,OPER_TYPE,OPER_MODE,INITIAL_SIZE,TARGET_SIZE,FINAL_SIZE,STATUS,START_TIME,END_TIME FROM GV$SGA_RESIZE_OPS ORDER BY 1,START_TIME;

--SGASTAT:
SELECT * FROM v$SGASTAT ORDER BY 1,2;

--SGA COMPONENTS:
COLUMN POOL FORMAT A20
SELECT POOL, ROUND( POOL_BYTES / 1048576 ),
ROUND( 100 * POOL_BYTES / TOTAL_SGA, 2 ) PERCENT
FROM
( SELECT SUM( BYTES ) TOTAL_SGA FROM V$SGASTAT ),
( SELECT NVL( POOL, NAME ) POOL, SUM( BYTES ) POOL_BYTES
FROM V$SGASTAT
GROUP BY NVL( POOL, NAME ) )
ORDER BY 3 DESC;

--FREE PERCENTAGE OF SGA:
SELECT TOTAL "TOTAL SGA",FREE "FREE",ROUND(FREE/TOTAL*100,2)||'%' "PERCENT FREE" FROM
(SELECT SUM(BYTES) FREE FROM V$SGASTAT WHERE V$SGASTAT.NAME= 'free memory') A,
(SELECT SUM(VALUE) TOTAL FROM V$SGA) B;

--LIBRARY CACHE RELOAD RATIO:
SELECT SUM(PINS) TOTAL_PINS,SUM(RELOADS) TOTAL_RELOADS,
SUM(RELOADS)/SUM(PINS)*100 LIBCACHE_RELOAD_RATIO
FROM V$LIBRARYCACHE;

--ROW CACHE hitRatio:
SELECT 1 - SUM(GETMISSES) / SUM(GETS) "DATA DICTIONARY HITRATIO" FROM V$ROWCACHE;

col parameter for a15
SELECT PARAMETER,SUM("COUNT") count,SUM(USAGE) usage,SUM(GETS) gets,SUM(GETMISSES) getmisses,
SUM(SCANS) scans,SUM(SCANMISSES) scanmisses,SUM(MODIFICATIONS) modifications,
SUM(DLM_REQUESTS) dlm_requests,SUM(DLM_CONFLICTS) dlm_confilicts,SUM(DLM_RELEASES) dlm_releases
FROM V$ROWCACHE
GROUP BY PARAMETER;

--FREE PERCENTAGE OF SHARED_POOL:
SELECT TO_NUMBER(V$PARAMETER.VALUE) "TOTAL SHARED POOL", V$SGASTAT.BYTES "FREE",
ROUND((V$SGASTAT.BYTES/V$PARAMETER.VALUE)*100,2)||'%' "PERCENT FREE"
FROM V$SGASTAT, V$PARAMETER
WHERE V$SGASTAT.NAME= 'free memory'
AND V$PARAMETER.NAME = 'shared_pool_size'
AND V$SGASTAT.POOL='shared pool';

--SHARED POOL OBJECTS:
SELECT type, kept, COUNT(*), SUM(sharable_mem)
FROM V$DB_OBJECT_CACHE
GROUP BY type, kept;

--KEEP OBJECTS:
SQL> @?\RDBMS\ADMIN\DBMSPOOL.SQL
SQL> @?\RDBMS\ADMIN\PRVTPOOL.PLB;
SQL> EXEC DBMS_SHARED_POOL.KEEP(NAME=>'TEST.P_TEST');
SQL> COL NAME FOR A30
SQL> COL OWNER FOR A10
SQL> SELECT OWNER,NAME,TYPE,LOADS,PINS,EXECUTIONS,KEPT FROM V$DB_OBJECT_CACHE WHERE OWNER='TEST' AND NAME='P_TEST';

OWNER NAME TYPE LOADS PINS EXECUTIONS KEP
---------- ------------------------------ ---------------------------- ---------- ---------- ---------- ---
TEST P_TEST PROCEDURE 1 0 135 YES

 

--SHARED_POOL CHUNKS:
--Note 396940.1
--CHUNK SUMMARY:
SELECT KSMCHCLS CLASS, COUNT(KSMCHCLS) NUM, SUM(KSMCHSIZ) SIZ,
To_char( ((SUM(KSMCHSIZ)/COUNT(KSMCHCLS)/1024)),'999,999.00')||'k' "AVG SIZE"
FROM X$KSMSP GROUP BY KSMCHCLS;

--MEMORY SIZE AND INUSE:
select name, round(sum(mb),1) mb, round(sum(inuse),1) inuse
from (select case when name = 'buffer_cache'
then 'db_cache_size'
when name = 'log_buffer'
then 'log_buffer'
else pool
end name,
bytes/1024/1024 mb,
case when name <> 'free memory'
then bytes/1024/1024
end inuse
from v$sgastat
)group by name;

--FREE CHUNK:
col sga_heap format a15
col size format a10
select KSMCHIDX "SubPool", 'sga heap('||KSMCHIDX||',0)'sga_heap,ksmchcom ChunkComment,
decode(round(ksmchsiz/1000),0,'0-1K', 1,'1-2K', 2,'2-3K',3,'3-4K',
4,'4-5K',5,'5-6k',6,'6-7k',7,'7-8k',8,
'8-9k', 9,'9-10k','> 10K') "size",
count(*),ksmchcls Status, sum(ksmchsiz) Bytes
from x$ksmsp
where KSMCHCOM = 'free memory'
group by ksmchidx, ksmchcls,
'sga heap('||KSMCHIDX||',0)',ksmchcom, ksmchcls,decode(round(ksmchsiz/1000),0,'0-1K',
1,'1-2K', 2,'2-3K', 3,'3-4K',4,'4-5K',5,'5-6k',6,
'6-7k',7,'7-8k',8,'8-9k', 9,'9-10k','> 10K');

select '0 (<140)' BUCKET, KSMCHCLS, KSMCHIDX, 10*trunc(KSMCHSIZ/10) "From",
count(*) "Count" , max(KSMCHSIZ) "Biggest",
trunc(avg(KSMCHSIZ)) "AvgSize", trunc(sum(KSMCHSIZ)) "Total"
from x$ksmsp
where KSMCHSIZ<140
and KSMCHCLS='free'
group by KSMCHCLS, KSMCHIDX, 10*trunc(KSMCHSIZ/10)
UNION ALL
select '1 (140-267)' BUCKET, KSMCHCLS, KSMCHIDX,20*trunc(KSMCHSIZ/20) ,
count(*) , max(KSMCHSIZ) ,
trunc(avg(KSMCHSIZ)) "AvgSize", trunc(sum(KSMCHSIZ)) "Total"
from x$ksmsp
where KSMCHSIZ between 140 and 267
and KSMCHCLS='free'
group by KSMCHCLS, KSMCHIDX, 20*trunc(KSMCHSIZ/20)
UNION ALL
select '2 (268-523)' BUCKET, KSMCHCLS, KSMCHIDX, 50*trunc(KSMCHSIZ/50) ,
count(*) , max(KSMCHSIZ) ,
trunc(avg(KSMCHSIZ)) "AvgSize", trunc(sum(KSMCHSIZ)) "Total"
from x$ksmsp
where KSMCHSIZ between 268 and 523
and KSMCHCLS='free'
group by KSMCHCLS, KSMCHIDX, 50*trunc(KSMCHSIZ/50)
UNION ALL
select '3-5 (524-4107)' BUCKET, KSMCHCLS, KSMCHIDX, 500*trunc(KSMCHSIZ/500) ,
count(*) , max(KSMCHSIZ) ,
trunc(avg(KSMCHSIZ)) "AvgSize", trunc(sum(KSMCHSIZ)) "Total"
from x$ksmsp
where KSMCHSIZ between 524 and 4107
and KSMCHCLS='free'
group by KSMCHCLS, KSMCHIDX, 500*trunc(KSMCHSIZ/500)
UNION ALL
select '6+ (4108+)' BUCKET, KSMCHCLS, KSMCHIDX, 1000*trunc(KSMCHSIZ/1000) ,
count(*) , max(KSMCHSIZ) ,
trunc(avg(KSMCHSIZ)) "AvgSize", trunc(sum(KSMCHSIZ)) "Total"
from x$ksmsp
where KSMCHSIZ >= 4108
and KSMCHCLS='free'
group by KSMCHCLS, KSMCHIDX, 1000*trunc(KSMCHSIZ/1000);

--SHARED POOL LRU STATS:
SELECT
KGHLURCR "RECURRENT_CHUNKS", --1
KGHLUTRN "TRANSIENT_CHUNKS", --3
KGHLUFSH "FLUSHED_CHUNKS", --1
KGHLUOPS "PINS AND_RELEASES", --20
KGHLUNFU "ORA-4031_ERRORS",
KGHLUNFS "LAST ERROR_SIZE"
FROM
SYS.X$KGHLU
WHERE
INST_ID = USERENV('INSTANCE');

--HUGE USAGE OF SHARED POOL:
SELECT * FROM X$KSMLRU WHERE KSMLRSIZ > 0;

--SHARED POOL RESERVED SIZE:
SELECT FREE_SPACE,ROUND(AVG_FREE_SIZE,2),USED_SPACE,ROUND(AVG_USED_SIZE,2),
USED_COUNT,REQUEST_FAILURES,LAST_FAILURE_SIZE
FROM gV$SHARED_POOL_RESERVED;

--REDO HIT RATIO:
SELECT name, gets, misses, immediate_gets, immediate_misses,
Decode(gets,0,0,misses/gets*100) ratio1,
Decode(immediate_gets+immediate_misses,0,0,
immediate_misses/(immediate_gets+immediate_misses)*100) ratio2
FROM v$latch WHERE name IN ('redo allocation', 'redo copy');

--PGASTAT
max memory used per session = min(5%*pga_aggregate_target,50%*_pga_max_size,_smm_max_size)
_sort_multiblock_read_count
RAM=48GB _pga_max_size=6G pga_aggregate_target=32G _smm_max_size=6291456
According to internal bug# 3946308 the maximum PGA workarea is 20% of
PGA_AGGREGATE_TARGET. It used to be 5% in previous releases, but starting with
Oracle 10g R2 this limit was increased to 20% of pga_aggregate_target. If
this is true, then with the above settings we would expect the customer to be able
to use > 6GB --- 20% of 32GB --- but that is not happening.
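--The PGA limits referenced above can be inspected with the same x$ksppi/x$ksppcv pattern used in the
--LATCH section (sketch; _smm_px_max_size is assumed to exist on this version):
select a.ksppinm name,b.ksppstvl value
from sys.x$ksppi a,sys.x$ksppcv b
where a.indx = b.indx
and a.ksppinm in ('_pga_max_size','_smm_max_size','_smm_px_max_size');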

SELECT NAME,VALUE VALUE_INT FROM V$PGASTAT;

select last_execution,count(1) from v$sql_workarea where POLICY='AUTO' group by last_execution;

select sum(OPTIMAL_EXECUTIONS) OPTIMAL,sum(ONEPASS_EXECUTIONS) ONEPASS ,sum(MULTIPASSES_EXECUTIONS) MULTIPASSES
from v$sql_workarea where POLICY='AUTO';

SELECT LOW_OPTIMAL_SIZE/1024 low_kb,(HIGH_OPTIMAL_SIZE+1)/1024 high_kb,
optimal_executions optimal, onepass_executions onepass, multipasses_executions multipasses
FROM v$sql_workarea_histogram
WHERE total_executions != 0;

select n.name,sum(s.value) value
from v$sesstat s,v$statname n where s.statistic#=n.statistic#
and n.name like 'workarea executions%'
group by n.name;

select n.name,sum(s.value) value
from v$sysstat s,v$statname n where s.statistic#=n.statistic#
and n.name like 'workarea executions%'
group by n.name;

SELECT round(PGA_TARGET_FOR_ESTIMATE/1024/1024) target_mb,
ESTD_PGA_CACHE_HIT_PERCENTAGE cache_hit_perc,
ESTD_OVERALLOC_COUNT
FROM v$pga_target_advice;

SELECT server, s.username, osuser, NAME, VALUE / 1024 / 1024, s.SID, s.serial#, spid
FROM v$session s, v$sesstat st, v$statname sn, v$process p
WHERE st.SID = s.SID
AND st.statistic# = sn.statistic#
AND sn.NAME LIKE 'session pga memory'
AND p.addr = s.paddr
AND VALUE > 10 * 1024 * 1024 --only show pga > 10M
ORDER BY VALUE DESC;


set pages500 lines110 trims on
clear col
col name format a30
col username format a20
break on username nodup skip 1

select vses.username||':'||vsst.sid||','||vses.serial# username, vstt.name, max(vsst.value) value
from v$sesstat vsst, v$statname vstt, v$session vses
where vstt.statistic# = vsst.statistic# and vsst.sid = vses.sid and vstt.name in
('session pga memory','session pga memory max','session uga memory','session uga memory max',
'session cursor cache count','session cursor cache hits','session stored procedure space',
'opened cursors current','opened cursors cumulative') and vses.username is not null
group by vses.username, vsst.sid, vses.serial#, vstt.name order by vses.username, vsst.sid, vses.serial#, vstt.name;


set lin 200
col value for 9999999999999999999999
select a.STAT_NAME,a.VALUE,to_char(b.BEGIN_INTERVAL_TIME,'yyyymmdd hh24miss') from DBA_HIST_SYSSTAT a,DBA_HIST_SNAPSHOT b
where a.snap_id=b.snap_id and b.BEGIN_INTERVAL_TIME > to_date('20100309000000','yyyymmddhh24miss') and
b.BEGIN_INTERVAL_TIME < to_date('20100310230000','yyyymmddhh24miss') and
a.STAT_NAME in
('session pga memory','session pga memory max','session uga memory','session uga memory max',
'session cursor cache count','session cursor cache hits','session stored procedure space',
'opened cursors current','opened cursors cumulative')
order by 3;


select a.CATEGORY,a.NUM_PROCESSES,a.used_total,a.ALLOCATED_TOTAL,a.ALLOCATED_AVG,a.ALLOCATED_MAX,a.MAX_ALLOCATED_MAX,to_char(b.BEGIN_INTERVAL_TIME,'yyyymmdd hh24miss')
from DBA_HIST_PROCESS_MEM_SUMMARY a,DBA_HIST_SNAPSHOT b
where a.snap_id=b.snap_id and b.BEGIN_INTERVAL_TIME > to_date('20100309000000','yyyymmddhh24miss') and
b.BEGIN_INTERVAL_TIME < to_date('20100310230000','yyyymmddhh24miss')
order by to_char(b.BEGIN_INTERVAL_TIME,'yyyymmdd hh24miss'),CATEGORY;

--PGA USAGE
SQL> select
sum(value)/1024/1024 Mb
from
v$sesstat s, v$statname n
where
n.STATISTIC# = s.STATISTIC# and
name = 'session pga memory';

SQL> col name format a30
SQL> select
sid,name,value
from
v$statname n,v$sesstat s
where
n.STATISTIC# = s.STATISTIC# and
name like 'session%memory%'
order by 3 asc;

SQL> select sum(bytes)/1024/1024 Mb from
(select bytes from v$sgastat
union
select value bytes from
v$sesstat s,
v$statname n
where
n.STATISTIC# = s.STATISTIC# and
n.name = 'session pga memory'
);


--LONG OPERATIONS:
col opname for a15
col target for a20
col username for a10
col elapse for 99999
col remain for 99999

select sid,opname,target,username,start_time,last_update_time,elapsed_seconds elapse,time_remaining remain
from v$session_longops where elapsed_seconds>120;

col opname for a15
col target for a20
select inst_id,target,totalwork,inst_id,sid,sofar,opname,time_remaining
from gv$session_longops where time_remaining>120;

SELECT SID, SERIAL#, CONTEXT, SOFAR, TOTALWORK,
ROUND(SOFAR/TOTALWORK*100,2) "%_COMPLETE"
FROM V$SESSION_LONGOPS
WHERE OPNAME LIKE 'RMAN%'
AND OPNAME NOT LIKE '%aggregate%'
AND TOTALWORK != 0
AND SOFAR <> TOTALWORK;

--TRANSACTION ENQUEUE:
select sid,type,lmode,request,ctime,block from v$transaction_enqueue;
select * from v$enqueue_stat;

--ACTIVE TRANSACTION:
SELECT S.SID,S.SERIAL#,T.USED_UBLK,R.STATUS,S.USERNAME,R.SEGMENT_NAME
FROM GV$TRANSACTION T, GV$SESSION S, DBA_ROLLBACK_SEGS R
WHERE T.SES_ADDR = S.SADDR AND T.XIDUSN = R.SEGMENT_ID AND R.TABLESPACE_NAME='UNDOTBS1';

--HIGH CPU USAGE SESSIONS:
col osuser for a10
col terminal for a15
col program for a20

col prog for a20
col value for 99999999
select a.sid,spid,status,substr(a.program,1,40) prog,a.terminal,osuser,round(value/60/100,2) value
from v$session a,v$process b,v$sesstat c
where c.statistic#=12 and c.sid=a.sid and a.paddr=b.addr order by value desc;

select ss.sid,se.command,ss.value CPU ,se.username,se.program
from v$sesstat ss, v$session se
where ss.statistic# in (select statistic# from v$statname
where name = 'CPU used by this session')
and se.sid=ss.sid and ss.sid>6 order by ss.sid;

--SESSION WAIT(10g):
col state for a10
col wait_class for a12
col event for a20
select a.sid,b.state,b.wait_class,b.wait_time,b.seconds_in_wait,b.event,b.p1,b.p1raw,b.p2,b.p2raw,b.p3,b.p3raw
from Gv$session a, Gv$session_wait b,Gv$process c
where b.event not like 'SQL%'
and b.event not like '%mon%'
and b.event not like 'rdbms%'
and b.event not like 'PX Deq%'
and b.sid=a.sid
and c.addr=a.paddr
and b.wait_class!='Idle'
order by event;

select parameter1,parameter2,parameter3 from v$event_name where name = 'gc buffer busy';
select parameter1,parameter2,parameter3 from v$event_name where name like '%TX - index%';

--SESSION WAIT(9i):
select a.sid,a.serial#,a.username,b.state,b.wait_time,b.seconds_in_wait,c.spid svrproc,a.process,
b.event,b.p1,b.p2,b.p3
from v$session a, v$session_wait b,v$process c
where b.event not like 'SQL%'
and b.event not like '%mon%'
and b.event not like 'rdbms%'
and b.event not like 'PX Deq%'
and b.sid=a.sid
and c.addr=a.paddr
order by event;

select a.sid,a.serial#,a.username,b.state,b.wait_time,b.seconds_in_wait,--c.spid svrproc,a.process,
b.event,b.p1,b.p2,b.p3
from v$session a, v$session_wait b,v$process c
where b.event not like 'SQL%'
and b.event not like '%mon%'
and b.event not like 'rdbms%'
and b.event not like 'PX Deq%'
and b.sid=a.sid
and c.addr=a.paddr
order by event;

-- WAITING SESSIONS:
-- The entries that are shown at the top are the sessions that have
-- waited the longest amount of time that are waiting for non-idle wait
-- events (event column). You can research and find out what the wait
-- event indicates (along with its parameters) by checking the Oracle
-- Server Reference Manual or look for any known issues or documentation
-- by searching Metalink for the event name in the search bar. Example
-- (include single quotes): [ 'buffer busy due to global cache' ].
--
set numwidth 10
column state format a7 tru
column event format a25 tru
column last_sql format a40 tru
select sw.inst_id, sw.sid, sw.state, sw.event, sw.seconds_in_wait seconds,
sw.p1, sw.p2, sw.p3, sa.sql_text last_sql
from gv$session_wait sw, gv$session s, gv$sqlarea sa
where sw.event not in
('rdbms ipc message','smon timer','pmon timer',
'SQL*Net message from client','lock manager wait for remote message',
'ges remote message', 'gcs remote message', 'gcs for action', 'client message',
'pipe get', 'null event', 'PX Idle Wait', 'single-task message',
'PX Deq: Execution Msg', 'KXFQ: kxfqdeq - normal deqeue',
'listen endpoint status','slave wait','wakeup time manager')
and seconds_in_wait > 0
and (sw.inst_id = s.inst_id and sw.sid = s.sid)
and (s.inst_id = sa.inst_id and s.sql_address = sa.address)
order by seconds desc;


--TOP PROCESS SQL_TEXT
ps -ef | egrep -v "STIME|$LOGNAME" | sort +3 -r | head -n 15
ps -aux | sort +3 -r | head -n 10

SELECT /*+ ORDERED */ sql_text FROM v$sqltext a
WHERE (a.hash_value, a.address) IN (
SELECT DECODE (sql_hash_value,0, prev_hash_value,sql_hash_value),
DECODE (sql_hash_value,0, prev_sql_addr, sql_address)
FROM v$session b
WHERE b.paddr = (SELECT addr
FROM v$process c
WHERE c.spid = 1828142))
ORDER BY piece ASC;

--UNDO CONTENTION:
select name, waits, gets, waits/gets "Ratio"
from v$rollstat a, v$rollname b
where a.usn = b.usn;

--CHECK TIME MODEL
SELECT stat_name, value / 1000000 seconds
FROM v$sys_time_model
ORDER BY seconds DESC;


set echo off
set feedback off
set linesize 300
set pagesize 55
set verify off
set termout off
column rpt new_value rpt
select instance_name||'_wrh_sysstat_ioworkload_'||'.LST' rpt from v$instance;
set termout on
prompt
prompt
prompt ^^^^^^^^^^^^^
prompt Report Name : ../LST/&&rpt
prompt ^^^^^^^^^^^^^
spool ../LST/&&rpt
column sri head "Small|Read|IOPS"
column swi head "Small|Write|IOPS"
column tsi head "Total|Small|IOPS"
column srp head "Small|Read|I/O%"
column swp head "Small|Write|I/O%"
column lri head "Large|Read|IOPS"
column lwi head "Large|Write|IOPS"
column tli head "Total|Large|IOPS"
column lrp head "Large|Read|I/O%"
column lwp head "Large|Write|I/O%"
column tr head "Total|Read|MBPS"
column tw head "Total|Written|MBPS"
column tm head "Total|MBPS"
column begin_time for a25
column end_time for a25
SELECT end_time,
ROUND(sr/inttime,3) sri,
ROUND(sw/inttime,3) swi,
ROUND((sr+sw)/inttime,3) tsi,
ROUND(sr/DECODE((sr+sw),0,1,(sr+sw))*100,3) srp,
ROUND(sw/DECODE((sr+sw),0,1,(sr+sw))*100,3) swp,
ROUND(lr/inttime,3) lri,
ROUND(lw/inttime,3) lwi,
ROUND((lr+lw)/inttime,3) tli,
ROUND(lr/DECODE((lr+lw),0,1,(lr+lw))*100,3) lrp,
ROUND(lw/DECODE((lr+lw),0,1,(lr+lw))*100,3) lwp,
ROUND((tbr/inttime)/1048576,3) tr,
ROUND((tbw/inttime)/1048576,3) tw,
ROUND(((tbr+tbw)/inttime)/1048576,3) tm
FROM (
SELECT beg.snap_id beg_id, end.snap_id end_id,
beg.begin_interval_time, beg.end_interval_time,
end.begin_interval_time begin_time, end.end_interval_time end_time,
(extract(day from (end.end_interval_time - end.begin_interval_time))*86400)+
(extract(hour from (end.end_interval_time - end.begin_interval_time))*3600)+
(extract(minute from (end.end_interval_time - end.begin_interval_time))*60)+
(extract(second from (end.end_interval_time - end.begin_interval_time))*01) inttime,
decode(end.startup_time,end.begin_interval_time,end.sr,(end.sr-beg.sr)) sr,
decode(end.startup_time,end.begin_interval_time,end.sw,(end.sw-beg.sw)) sw,
decode(end.startup_time,end.begin_interval_time,end.lr,(end.lr-beg.lr)) lr,
decode(end.startup_time,end.begin_interval_time,end.lw,(end.lw-beg.lw)) lw,
decode(end.startup_time,end.begin_interval_time,end.tbr,(end.tbr-beg.tbr)) tbr,
decode(end.startup_time,end.begin_interval_time,end.tbw,(end.tbw-beg.tbw)) tbw
FROM
(SELECT dba_hist_snapshot.snap_id, startup_time, begin_interval_time, end_interval_time,
sum(decode(stat_name,'physical read total IO requests',value,0)-
decode(stat_name,'physical read total multi block requests',value,0)) sr,
sum(decode(stat_name,'physical write total IO requests',value,0)-
decode(stat_name,'physical write total multi block requests',value,0)) sw,
sum(decode(stat_name,'physical read total multi block requests',value,0)) lr,
sum(decode(stat_name,'physical write total multi block requests',value,0)) lw,
sum(decode(stat_name,'physical read total bytes',value,0)) tbr,
sum(decode(stat_name,'physical write total bytes',value,0)) tbw
FROM wrh$_sysstat, wrh$_stat_name, dba_hist_snapshot
WHERE wrh$_sysstat.stat_id = wrh$_stat_name.stat_id
AND wrh$_sysstat.snap_id = dba_hist_snapshot.snap_id
group by dba_hist_snapshot.snap_id, startup_time, begin_interval_time, end_interval_time) beg,
(SELECT dba_hist_snapshot.snap_id, startup_time, begin_interval_time, end_interval_time,
sum(decode(stat_name,'physical read total IO requests',value,0)-
decode(stat_name,'physical read total multi block requests',value,0)) sr,
sum(decode(stat_name,'physical write total IO requests',value,0)-
decode(stat_name,'physical write total multi block requests',value,0)) sw,
sum(decode(stat_name,'physical read total multi block requests',value,0)) lr,
sum(decode(stat_name,'physical write total multi block requests',value,0)) lw,
sum(decode(stat_name,'physical read total bytes',value,0)) tbr,
sum(decode(stat_name,'physical write total bytes',value,0)) tbw
FROM wrh$_sysstat, wrh$_stat_name, dba_hist_snapshot
WHERE wrh$_sysstat.stat_id = wrh$_stat_name.stat_id
AND wrh$_sysstat.snap_id = dba_hist_snapshot.snap_id
group by dba_hist_snapshot.snap_id, startup_time, begin_interval_time, end_interval_time) end
WHERE beg.snap_id + 1 = end.snap_id
)
order by 1
/
spool off


/************************************************************
--OSW:
************************************************************/
To use OSWatcher, refer to the steps below:

Please refer to the following note to download and install this tool:
Note:301137.1 OS Watcher Black Box User Guide

For example:
1. To start the OSW:
nohup ./startOSWbb.sh 15 120 &
==>It will collect data at 15-second intervals and retain the last 120 hours (5 days) of data in archive files.
==>Make sure you have enough space to store the OSW output files.

(If you want to compress the output files, use the command below to start the OSW:
nohup ./startOSWbb.sh 15 120 gzip &)

2. Manually configure private network statistics collection for OSWatcher by referring to the file Exampleprivate.net in the OSW folder.

1) Copy Exampleprivate.net to private.net in the same directory.
2) Replace private_nodename1 and private_nodename2 with your actual private IPs/hostnames:
traceroute -r -F private_nodename1
traceroute -r -F private_nodename2
3) Remove the lines for other OS platforms.
4) Make sure you DO NOT DELETE the following line:
rm locks/lock.file

An example of the private.net :

######################################################################
#HP Example
######################################################################
echo "zzz ***"`date`
traceroute -r -F 192.168.1.1
traceroute -r -F 192.168.1.2
######################################################################
rm locks/lock.file


/************************************************************
--INDEX - CONTENTION split:
************************************************************/
alter session set events '10224 trace name context forever,level 100';
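--Companion check (a sketch, 10g and later): sessions currently waiting on index-split related contention:
SELECT SID,EVENT,P1,P2,P3,SQL_ID
FROM V$SESSION WHERE EVENT='enq: TX - index contention';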

 

/************************************************************
--HW - CONTENTION:
************************************************************/
SELECT EVENT,STATUS,LAST_CALL_ET,SQL_HASH_VALUE,P1,P2,P3
FROM V$SESSION WHERE EVENT='enq: HW - contention';

select DBMS_UTILITY.DATA_BLOCK_ADDRESS_FILE( 4194485 ) FILE#,
DBMS_UTILITY.DATA_BLOCK_ADDRESS_BLOCK( 4194485 ) BLOCK# FROM DUAL;

SELECT DBMS_UTILITY.DATA_BLOCK_ADDRESS_FILE(ID2) FILE#,
DBMS_UTILITY.DATA_BLOCK_ADDRESS_BLOCK(ID2) BLOCK#
FROM V$LOCK
WHERE TYPE = 'HW';

SELECT OWNER, SEGMENT_TYPE, SEGMENT_NAME
FROM DBA_EXTENTS
WHERE FILE_ID = 1 AND 181 BETWEEN BLOCK_ID AND BLOCK_ID + BLOCKS - 1;

ALTER TABLE TEST ALLOCATE EXTENT (SIZE XXX);
ALTER TABLE TEST DEALLOCATE UNUSED;
ALTER TABLE TEST MODIFY PARTITION P1 ALLOCATE EXTENT(SIZE XXX);
ALTER TABLE TEST MODIFY PARTITION P1 DEALLOCATE UNUSED;

ALTER TABLE <lob_table> MODIFY LOB (<column_name>) (allocate extent (size <extent size>));

10.2.0.3: bug 6376915 HW enqueue contention for ASSM LOB segments
10.2.0.4: ALTER SYSTEM SET EVENT='44951 TRACE NAME CONTEXT FOREVER, LEVEL 1024' scope=spfile;


/************************************************************
--QUERY SESSION INFO FROM SPID:
************************************************************/
select sid,sql_hash_value,status,last_call_et,program--,event
from v$session where paddr=(select addr from v$process where spid=24654);

select spid from v$process where addr=(select paddr from v$session where sid=(select sid from v$mystat where rownum=1));


/************************************************************
--PARALLEL:Note:270837.1
************************************************************/
select owner,index_name,status from dba_indexes where status!='VALID' and status!='N/A';
select index_name,status from dba_ind_partitions where status!='USABLE';
select * from dba_tables where degree like '%DEFAULT%';
SELECT TABLE_NAME,OWNER,DEGREE FROM DBA_TABLES where degree like '%DEFAULT%';

The following statements prevent parallel DML:
SELECT FOR UPDATE statements
LOCK TABLE
EXPLAIN PLAN
If row_locking = INTENT
Parallel DML is not supported on a table that has triggers on it.
Parallel DML is not supported on object or LOB columns within a table.

SELECT * FROM V$PQ_SESSTAT;
SELECT * FROM V$PQ_SYSSTAT;
SELECT * FROM V$PX_PROCESS;
SELECT * FROM V$PX_PROCESS_SYSSTAT;
SELECT * FROM V$PX_SESSTAT;
SELECT * FROM V$PX_SESSION;
SELECT SID, QCSID, DEGREE, REQ_DEGREE FROM V$PX_SESSION ORDER BY QCSID;
SELECT * FROM V$PX_PROCESS_SYSSTAT;
SELECT /*+PARALLEL(TEST,3) */ * FROM TEST;

ALTER SESSION enable parallel DML;
insert /*+ parallel (emp_big,4) */ into emp_big select * from emp;
commit;
ALTER SESSION disable parallel DML;

/***********************************************************
alter session set tracefile_identifier='PX_traces_12850';
alter session set "_px_compilation_debug" = 1;
alter session set events '10053 trace name context forever, level 1';
alter session set events '12850 trace name errorstack level 1';
alter session set "_px_trace" = all;
<run the failing query>
alter session set "_px_compilation_debug" = 0;
alter session set events '10053 trace name context off';
alter session set events '12850 trace name errorstack off';
alter session set "_px_trace" = none;
***********************************************************/

Rem How many CPUs does the system have?
Rem Default degree of parallelism is
Rem Default = parallel_threads_per_cpu * cpu_count
Rem -------------------------------------------------;
select substr(name,1,30) Name , substr(value,1,5) Value
from v$parameter
where name in ('parallel_threads_per_cpu' , 'cpu_count' );
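Rem A sketch that computes the default DOP from the two values above
Rem (Default = parallel_threads_per_cpu * cpu_count):
select max(decode(name,'cpu_count',to_number(value))) *
       max(decode(name,'parallel_threads_per_cpu',to_number(value))) default_dop
from v$parameter
where name in ('parallel_threads_per_cpu' , 'cpu_count' );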


Rem Normally DOP := degree * Instances
Rem See the following Note for the exact formula.
Rem Note 260845.1 Old and new Syntax for setting Degree of Parallelism
Rem How many tables a user has with different DOPs
Rem -------------------------------------------------------;
select * from (
select substr(owner,1,15) Owner , ltrim(degree) Degree,
ltrim(instances) Instances,
count(*) "Num Tables" , 'Parallel'
from all_tables
where ( trim(degree) != '1' and trim(degree) != '0' ) or
( trim(instances) != '1' and trim(instances) != '0' )
group by owner, degree , instances
union
select substr(owner,1,15) owner , '1' , '1' ,
count(*) , 'Serial'
from all_tables
where ( trim(degree) = '1' or trim(degree) != '0' ) and
( trim(instances) != '1' or trim(instances) != '0' )
group by owner
)
order by owner;


Rem How many indexes a user has with different DOPs
Rem ---------------------------------------------------;
select * from (
select substr(owner,1,15) Owner ,
substr(trim(degree),1,7) Degree ,
substr(trim(instances),1,9) Instances ,
count(*) "Num Indexes",
'Parallel'
from all_indexes
where ( trim(degree) != '1' and trim(degree) != '0' ) or
( trim(instances) != '1' and trim(instances) != '0' )
group by owner, degree , instances
union
select substr(owner,1,15) owner , '1' , '1' ,
count(*) , 'Serial'
from all_indexes
where ( trim(degree) = '1' or trim(degree) != '0' ) and
( trim(instances) != '1' or trim(instances) != '0' )
group by owner
)
order by owner;


Rem Tables that have Indexes with not the same DOP
Rem !!!!! This command can take some time to execute !!!
Rem ---------------------------------------------------;
set lines 150
select substr(t.owner,1,15) Owner ,
t.table_name ,
substr(trim(t.degree),1,7) Degree ,
substr(trim(t.instances),1,9) Instances,
i.index_name ,
substr(trim(i.degree),1,7) Degree ,
substr(trim(i.instances),1,9) Instances
from all_indexes i,
all_tables t
where ( trim(i.degree) != trim(t.degree) or
trim(i.instances) != trim(t.instances) ) and
i.owner = t.owner and
i.table_name = t.table_name;

set pages 300 lines 300
col "Username" for a12
col "QC/Slave" for A8
col "Slaveset" for A8
col "SlaveINST" for A9
col "QCSID" for A6
col "QCINST" for A6
col "operation_name" for A30
col "target" for A30

select
decode(px.qcinst_id,NULL,username,'-'||lower(substr(pp.SERVER_NAME,length(pp.SERVER_NAME)-4,4))) "Username",
decode(px.qcinst_id,NULL,'QC','(Slave)') "QC/Slave",
to_char(px.server_set) "SlaveSet",
to_char(px.inst_id) "SlaveINST",
substr(opname,1,30) operation_name,
substr(target,1,30) target,
sofar,
totalwork,
units,
start_time,
timestamp,
decode(px.qcinst_id,NULL,to_char(s.sid),px.qcsid) "QCSID",
to_char(px.qcinst_id) "QCINST"
from gv$px_session px,
gv$px_process pp,
gv$session_longops s
where px.sid=s.sid
and px.serial#=s.serial#
and px.inst_id=s.inst_id
and px.sid=pp.sid(+)
and px.serial#=pp.serial#(+)
order by
decode(px.QCINST_ID,NULL,px.INST_ID,px.QCINST_ID),
px.QCSID,
decode(px.SERVER_GROUP,NULL,0,px.SERVER_GROUP),
px.SERVER_SET,
px.INST_ID ;

alter session set "_px_trace"="compilation","execution","messaging";

--CHECK PARALLEL:

create or replace
FUNCTION parallel_degree (
degree in varchar , instances in varchar )
RETURN varchar IS
deg VARCHAR2(10);
inst VARCHAR2(10);
ret_code VARCHAR2(10) := 'WRONG';
BEGIN


deg := trim(degree);
inst := trim(instances);

if deg = '0' THEN
deg := '1';
end if;

if inst = '0' THEN
inst := '1';
end if;

if deg = 'DEFAULT' THEN
return deg;
end if;

if ( inst = 'DEFAULT' and deg = '1' ) THEN
return inst;
end if;

if ( inst = 'DEFAULT' or inst = '1' ) THEN
if deg = '1' then
return 'SERIAL';
else
return deg;
end if;
end if;
ret_code := to_char ( to_number(deg)*to_number ( inst ) );
if ret_code = '1' then
return 'SERIAL';
end if;
return ret_code;
EXCEPTION
WHEN OTHERS THEN
return ret_code;
END;
/
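
-- Quick sanity check of the helper function above (sketch, first 10 rows only):
select table_name, degree, instances,
       parallel_degree(degree, instances) effective_dop
from user_tables
where rownum <= 10;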


select seg.owner owner,
( case when (bytes/1024/1024) < 200 then '< 200 MB'
when (bytes/1024/1024) < 400 then '< 400 MB'
when (bytes/1024/1024) < 800 then '< 800 MB'
when (bytes/1024/1024) < 1024 then '< 1 GB'
when (bytes/1024/1024) < 2048 then '< 2 GB'
else '> 2 GB'
end ) "Size"
, substr (parallel_degree ( degree, instances ), 1, 7 ) "Degree" ,
count(*) "Count Tables"
from all_tables tab ,
dba_segments seg
where seg.owner not in ( 'SYS', 'DBSNMP', 'OUTLN', 'CTXSYS', 'OLAPSYS',
'SYSMAN' , 'ORDSYS' , 'WKSYS','MDSYS' , 'ORDSYS' )
and SEGMENT_TYPE in ( 'TABLE' )
and tab.owner = seg.owner
and tab.table_name = seg.SEGMENT_NAME
and ( ( (bytes/1024/1024) > 200) or
parallel_degree ( degree, instances ) != 'SERIAL' )
group by
case when (bytes/1024/1024) < 200 then '< 200 MB'
when (bytes/1024/1024) < 400 then '< 400 MB'
when (bytes/1024/1024) < 800 then '< 800 MB'
when (bytes/1024/1024) < 1024 then '< 1 GB'
when (bytes/1024/1024) < 2048 then '< 2 GB'
else '> 2 GB'
end ,
seg.owner, substr (parallel_degree ( degree, instances ), 1, 7 )
/


select seg.owner owner,
( case when (bytes/1024/1024) < 200 then '< 200 MB'
when (bytes/1024/1024) < 400 then '< 400 MB'
when (bytes/1024/1024) < 800 then '< 800 MB'
when (bytes/1024/1024) < 1024 then '< 1 GB'
when (bytes/1024/1024) < 2048 then '< 2 GB'
else '> 2 GB'
end ) "Size"
, substr (parallel_degree ( degree, instances ), 1, 7 ) "Degree" ,
count(*) "Count Indexes"
from all_indexes ind ,
dba_segments seg
where seg.owner not in ( 'SYS', 'DBSNMP', 'OUTLN', 'CTXSYS', 'OLAPSYS',
'SYSMAN' , 'ORDSYS' , 'WKSYS','MDSYS' , 'ORDSYS' )
and SEGMENT_TYPE in ( 'INDEX' )
and ind.owner = seg.owner
and ind.index_name = seg.SEGMENT_NAME
and ( ( (bytes/1024/1024) > 200) or
parallel_degree ( degree, instances ) != 'SERIAL' )
group by
case when (bytes/1024/1024) < 200 then '< 200 MB'
when (bytes/1024/1024) < 400 then '< 400 MB'
when (bytes/1024/1024) < 800 then '< 800 MB'
when (bytes/1024/1024) < 1024 then '< 1 GB'
when (bytes/1024/1024) < 2048 then '< 2 GB'
else '> 2 GB'
end ,
seg.owner, substr (parallel_degree ( degree, instances ), 1, 7 )
order by seg.owner
/


The following script will return, for each PQ slave, the parent session (if applicable) and the wait event
for both the child and parent sessions.

column child_wait format a30
column parent_wait format a30
column server_name format a4 heading 'Name'
column x_status format a10 heading 'Status'
column schemaname format a10 heading 'Schema'
column x_sid format 9990 heading 'Sid'
column x_pid format 9990 heading 'Pid'
column p_sid format 9990 heading 'Parent'

break on p_sid skip 1

select x.server_name
, x.status as x_status
, x.pid as x_pid
, x.sid as x_sid
, w2.sid as p_sid
, v.osuser
, v.schemaname
, w1.event as child_wait
, w2.event as parent_wait
from v$px_process x
, v$lock l
, v$session v
, v$session_wait w1
, v$session_wait w2
where x.sid <> l.sid(+)
and to_number (substr(x.server_name,2)) = l.id2(+)
and x.sid = w1.sid(+)
and l.sid = w2.sid(+)
and x.sid = v.sid(+)
and nvl(l.type,'PS') = 'PS'
order by 1,2
/


/************************************************************
--HOT BLOCK:
************************************************************/
--1. Find the latch and buffer information for the hottest data blocks

select b.addr,a.ts#,a.dbarfil,a.dbablk,a.tch,b.gets,b.misses,b.sleeps from
(select * from (select addr,ts#,file#,dbarfil,dbablk,tch,hladdr from x$bh order by tch desc) where rownum <11) a,
(select addr,gets,misses,sleeps from v$latch_children where name= 'cache buffers chains') b
where a.hladdr=b.addr;

--2. Find the objects corresponding to the hot buffers:

col owner for a20
col segment_name for a30
col segment_type for a30

select distinct e.owner,e.segment_name,e.segment_type from dba_extents e,
(select * from (select addr,ts#,file#,dbarfil,dbablk,tch from x$bh order by tch desc) where rownum <11) b
where e.relative_fno=b.dbarfil
and e.block_id <=b.dbablk
and e.block_id+e.blocks> b.dbablk;

--3. Find the SQL statements that operate on these hot objects:

break on hash_value skip 1
select /*+rule*/ hash_value,sql_text from v$sqltext where (hash_value,address) in
(select a.hash_value,a.address from v$sqltext a,(select distinct a.owner,a.segment_name,a.segment_type
from dba_extents a,
(select dbarfil,dbablk from (select dbarfil,dbablk from x$bh order by tch desc) where rownum <11) b
where a.relative_fno=b.dbarfil
and a.block_id <=b.dbablk and a.block_id+a.blocks> b.dbablk) b
where a.sql_text like '%'||b.segment_name||'%' and b.segment_type = 'TABLE')
order by hash_value,address,piece;


select
o.object_name obj,
o.object_type otype,
ash.SQL_ID,
w.class
from v$active_session_history ash,
( select rownum class#, class from v$waitstat ) w,
all_objects o
where event='buffer busy waits'
and w.class#(+)=ash.p3
and o.object_id (+)= ash.CURRENT_OBJ#
Order by sample_time;

set linesize 120
col block_type for a20
col objn for a25
col otype for a15
col filen for 9999
col blockn for 9999999
col obj for a20
col tbs for a10
select
bbw.cnt,
bbw.obj,
bbw.otype,
bbw.sql_id,
bbw.block_type,
nvl(tbs.name,to_char(bbw.p1)) TBS,
tbs_defs.assm ASSM
from (
select
count(*) cnt,
nvl(object_name,CURRENT_OBJ#) obj,
o.object_type otype,
ash.SQL_ID sql_id,
nvl(w.class,'usn '||to_char(ceil((ash.p3-18)/2))||' '||
decode(mod(ash.p3,2),
1,'header',
0,'block')) block_type,
--nvl(w.class,to_char(ash.p3)) block_type,
ash.p1 p1
from v$active_session_history ash,
( select rownum class#, class from v$waitstat ) w,
all_objects o
where event='buffer busy waits'
and w.class#(+)=ash.p3
and o.object_id (+)= ash.CURRENT_OBJ#
and ash.session_state='WAITING'
and ash.sample_time > sysdate - &minutes/(60*24)
--and w.class# > 18
group by o.object_name, ash.current_obj#, o.object_type,
ash.sql_id, w.class, ash.p3, ash.p1
) bbw,
(select file_id,
tablespace_name name
from dba_data_files
) tbs,
(select
tablespace_name NAME,
extent_management LOCAL,
allocation_type EXTENTS,
segment_space_management ASSM,
initial_extent
from dba_tablespaces
) tbs_defs
where tbs.file_id(+) = bbw.p1
and tbs.name=tbs_defs.name
Order by bbw.cnt
/

/************************************************************
--GC-WAIT:
************************************************************/
alter system set "_immediate_commit_propagation"=false;
alter system set "_gc_affinity_time"=0 scope=spfile;
alter system set "_gc_undo_affinity"=false scope=spfile;
alter system set "_undo_autotune"=false;
alter system set "_gc_policy_time"=0 scope=spfile; --11g

--NUMA
ALTER SYSTEM SET "_enable_NUMA_optimization" = FALSE;
ALTER SYSTEM SET "_db_block_numa" = 1;

ALTER SYSTEM SET "_enable_NUMA_support"=FALSE SCOPE=SPFILE SID='*';

select *
from v$event_histogram
where event in ('log file sync','gcs log flush sync',
'log file parallel write','wait for scn ack',
'log file switch completion','gc cr grant 2-way',
'gc buffer busy','gc current block 2-way');

select min(begin_interval_time) min, max(end_interval_time) max
from dba_hist_snapshot
where snap_id between ?????? and ??????;

/*
select snap_id,min(begin_interval_time) min, max(end_interval_time) max from dba_hist_snapshot where snap_id between 29455 and 29468
group by snap_id order by snap_id;
*/

select wait_class_id, wait_class, count(*) cnt
from dba_hist_active_sess_history
where snap_id between 29458 and 29460
group by wait_class_id, wait_class
order by 3;

select event_id, event, count(*) cnt from dba_hist_active_sess_history
where snap_id between 29458 and 29460-- and wait_class_id=??????
group by event_id, event
order by 3;

select sql_id, count(*) cnt from dba_hist_active_sess_history
where snap_id between 12831 and 12838
and event_id in (??????, ??????)
group by sql_id
having count(*)>1000
order by 2;

select sql_text from dba_hist_sqltext where sql_id='??????';

select count(distinct(current_obj#)) from dba_hist_active_sess_history
where snap_id between 12831 and 12838
and event_id=?????? and sql_id='??????';

select current_obj#, count(*) cnt from dba_hist_active_sess_history
where snap_id between 12831 and 12838
and event_id=?????? and sql_id='??????'
group by current_obj#
order by 2;

select object_id, owner, object_name, subobject_name, object_type from dba_objects
where object_id in (??????, ??????, ??????); -- TOP 3

select current_file#, current_block#, count(*) cnt
from dba_hist_active_sess_history
where snap_id between ?????? and ??????
and event_id=?????? and sql_id='??????'
and current_obj# in (??????, ??????, ??????)
group by current_file#, current_block#
having count(*)>50
order by 3;

select segment_name, header_file, header_block
from dba_segments where owner='JHEIDER' and partition_name='??????'
and segment_name in ('??????','??????','??????');

col object format a60
col i format 99
select * from (
select o.owner||'.'||o.object_name||decode(o.subobject_name,NULL,'','.')||
o.subobject_name||' ['||o.object_type||']' object,
instance_number i, stat
from (
select obj#||'.'||dataobj# obj#, instance_number, sum(
GC_BUFFER_BUSY_DELTA
) stat
from dba_hist_seg_stat
where (snap_id between ?????? and ??????)
and (instance_number between 1 and 6)
group by rollup(obj#||'.'||dataobj#, instance_number)
having obj#||'.'||dataobj# is not null
) s, dba_hist_seg_stat_obj o
where o.dataobj#||'.'||o.obj#=s.obj#
order by max(stat) over (partition by s.obj#) desc,
o.owner||o.object_name||o.subobject_name, nvl(instance_number,0)
) where rownum<=40;

 

--Foreign key columns without indexes
====================================
select owner,table_name,column_name,position from dba_cons_columns a
where constraint_name in
(select constraint_name from dba_constraints b
where
a.table_name = b.table_name and b.constraint_type = 'R' and owner not in ('SYS','SYSTEM')) and
not exists
(select 1 from dba_ind_columns c
where a.table_name = c.table_name and a.column_name = c.column_name and index_owner not in ('SYS','SYSTEM') );

select owner,table_name from dba_constraints where
R_CONSTRAINT_NAME in (select constraint_name from dba_constraints where table_name = 'ACPDRTMP') ;

select distinct * from (select ltrim(sys_connect_by_path(pk_table||'('||pk_col||')','->'),'->') pk_fk from (
select
pk_table,
fk_table,
pk_col
from
(select
a.constraint_name pk_con,
a.table_name pk_table,b.column_name pk_col,
a.owner pk_owner
from user_constraints a,user_cons_columns b
where (a.constraint_type='P' or a.constraint_type='U')
and a.constraint_name=b.constraint_name
and a.owner=b.owner) pk,
(select c.constraint_name fk_con,
c.table_name fk_table,
d.column_name fk_col,
c.R_OWNER r_pk_owner,
c.R_CONSTRAINT_NAME r_pk_con,
c.owner fk_owner
from user_constraints c,user_cons_columns d
where c.constraint_type='R'
and c.constraint_name=d.constraint_name
and c.owner=d.owner) fk
where
pk.pk_owner=fk.r_pk_owner(+)
and pk.pk_con=fk.r_pk_con(+)
order by pk.pk_con
)
connect by prior fk_table=pk_table ) t where instr(pk_fk,'->')>0 order by 1;

 

session
============================================
set pagesize 999
set linesize 131
col state for a10
col event for a20
col program for a20
col sql_text for a30
col username for a20
set feed 1

alter session set nls_date_format='yyyymmdd hh24:mi:ss';

select /* basic 10g */
p.spid,s.sid,s.username,status,s.program,s.last_call_et,s.state,s.event,
'P1-3: '||P1TEXT||':'||P1||','||P2TEXT||':'||P2||','||P3TEXT||':'||P3 P1to3,
s.wait_time,'wait_sec: '||s.seconds_in_wait wait_sec
from v$session s,v$process p
where s.type='USER'
and p.addr=s.paddr
and s.status='ACTIVE'
and s.program not like 'racgimon%'
--and s.sid=5721
--and p.spid=17445
--and s.seconds_in_wait>1
order by wait_sec desc
;

select /* with sql 10g*/
p.spid,s.sid,s.username,status,s.program,s.last_call_et,s.state,s.event,
'P1-3: '||P1TEXT||':'||P1||','||P2TEXT||':'||P2||','||P3TEXT||':'||P3 P1to3,
s.wait_time,'wait_sec: '||s.seconds_in_wait wait_sec,
l.sql_text
from v$session s, v$process p,v$sqlstats l
where s.type='USER'
and p.addr=s.paddr
--and s.status!='ACTIVE'
and s.sql_id=l.sql_id(+)
and s.program not like 'racgimon%'
--and s.sid=6615
order by s.last_call_et desc;

select /* CHECK LGWR */
p.spid,s.sid,s.username,status,s.program,s.last_call_et,s.state,s.event,
'P1-3: '||P1TEXT||':'||P1||','||P2TEXT||':'||P2||','||P3TEXT||':'||P3 P1to3,
s.wait_time,'wait_sec: '||s.seconds_in_wait wait_sec
from v$session s,v$process p
where s.type='BACKGROUND'
and p.addr=s.paddr
and s.status='ACTIVE'
and s.program like '%LGWR%'
--and s.sid=5721
--and p.spid=17445
order by wait_sec desc
;

select type,event,count(*),sum(seconds_in_wait) total_wait_sec
from v$session
where status='ACTIVE' and state='WAITING'
group by type,event
order by 1,3;


/************************************************************
--HINT:
************************************************************/
push_subq
select /*+push_subq(@tmp)*/ hao1.object_name from
hao1,hao2,hao4
where hao1.object_name like '%a%'
and hao1.object_id+hao2.object_id>50
and hao4.object_type=hao1.object_type
and 11 in
(SELECT /*+QB_Name(tmp)*/ hao3.object_id FROM hao3 WHERE hao1.object_id = hao3.object_id);

/*+ PRECOMPUTE_SUBQUERY */

alter session set "_complex_view_merging"=FALSE;

no_unnest
unnest
alter session set "_unnest_subquery"=false;

alter session set "_optimizer_connect_by_cost_based" = FALSE;
alter session set "_optimizer_push_pred_cost_based" = FALSE;
alter session set "_optimizer_cost_based_transformation" = OFF;
alter session set "_optimizer_mjc_enabled"= FALSE;
alter session set "_complex_view_merging" = false;

alter session set "_push_join_union_view"=false;
or
set "_push_join_predicate"=FALSE;
or
use hint NO_PUSH_PRED
Disable join predicate pushdown via either NO_PUSH_PRED hint (on either view)
or set "_optimizer_multi_level_push_pred" to false.

use PUSH_PRED hint
or
alter system set "_optimizer_enhanced_filter_push" = false;
Bug 5505157 - Predicates are not pushed in UNION ALL views [ID 5505157.8]

CREATE OR REPLACE TRIGGER no_push_join_pred
AFTER logon ON schema
BEGIN
execute immediate
'alter session set "_push_join_predicate"=false';
END;
/

/************************************************************
--CBO:
************************************************************/
alter session set optimizer_mode = first_rows_10;
alter session set optimizer_index_cost_adj = 1;
alter session set optimizer_index_caching = 0;
alter session set "_new_initial_join_orders" = FALSE;
alter session set "_always_semi_join" = off;
alter session set "_hash_join_enabled" = false;
alter session set "_optimizer_sortmerge_join_enabled" = false;
alter session set "_optimizer_cartesian_enabled" = false;
alter session set "_optimizer_join_sel_sanity_check" = true;

_optimizer_max_permutations
_sort_elimination_cost_ratio
ALTER SESSION SET "_SORT_ELIMINATION_COST_RATIO"=5

Select KSMLRCOM, KSMLRHON, KSMLRNUM, KSMLRSIZ from x$ksmlru where KSMLRSIZ > 5000;


/************************************************************
--HNTEL:
************************************************************/
set heading on
set lin 200
alter session set nls_date_format='yyyy-mm-dd hh24:mi:ss';

--AVERAGE REDO SIZE:
select round(wrblk*blsz/1024/wrs) "avg-redo-sizek",sysdate
from
(select value wrblk from v$sysstat where name='redo blocks written'),
(select value wrs from v$sysstat where name='redo writes'),
(select max(lebsz) blsz from x$kccle);

--WAIT EVENT:
col event for a60
select count(*),event,sysdate from v$session_wait a
where not exists (select 1 from stats$idle_event where event=a.event)
group by event
having count(*)>0
order by 1;

--HOT BLOCKS:
col oname for a39
col subobject_name for a20
col otype for a10
select tch,file#,dbarfil,dbablk,owner||'.'||object_name oname,a.object_type otype,a.subobject_name
from dba_objects a,
(
select * from (select tch,file#,dbarfil,dbablk,obj from x$bh order by tch desc) where rownum<4
) b
where a.data_object_id = b.obj
order by 1 desc;

--TOP SQLS:
col sql_text for a65
SELECT b.cnt, a.hash_value,a.piece,a.sql_text
FROM v$sqltext a,
(
select cnt,sql_address,sql_hash_value from
(
SELECT count(*) cnt,sql_address,sql_hash_value
FROM v$session
WHERE TYPE = 'USER'
AND status = 'ACTIVE'
AND sql_hash_value > 0
GROUP BY sql_address,sql_hash_value
order by 1 desc
)
where rownum<2
) b
where a.address=b.sql_address
and a.hash_value=b.sql_hash_value
ORDER BY 1 desc,2,3;

exit;

select tch , file#, dbablk,
case when obj = 4294967295 then 'rbs/compat segment'
else (select max('('||object_type||')'||owner||'.'||object_name)||decode(count(*),1,'','maybe!') from dba_objects where data_object_id = X.OBJ)
end what
from (select tch , file# ,dbablk,obj from x$bh where state <>0 order by tch desc) x where rownum <=5;

select sid,sql_hash_value,last_call_et from v$session where sid in (select sid from V$latchholder);


/************************************************************
--RMAN:
************************************************************/
Select distinct(fhscn) ,count(*) from x$kcvfh group by fhscn ;
Select distinct(fhsta) ,count(*) from x$kcvfh group by fhsta ;
Select distinct(fhrba_seq) ,count(*) from x$kcvfh group by fhrba_seq ;

--catalog
RMAN> CREATE CATALOG;
RMAN> REGISTER DATABASE;
RMAN> RESYNC CATALOG;
RMAN> UPGRADE CATALOG;
RMAN> DROP CATALOG;

RMAN> RESET DATABASE;

RMAN> RESTORE VALIDATE CHECK LOGICAL ARCHIVELOG ALL;
RMAN> list archivelog all;
RMAN> RESTORE VALIDATE CHECK LOGICAL ARCHIVELOG from sequence 143;
RMAN> CROSSCHECK ARCHIVELOG FROM LOGSEQ=4079 UNTIL LOGSEQ=4079;

SQL> SELECT SEQUENCE#,STATUS FROM V$ARCHIVED_LOG;
SQL> SELECT DB_KEY,DB_NAME,SEQUENCE#,STATUS FROM RC_ARCHIVED_LOG ;

SQL> SELECT DBID FROM V$DATABASE;
SQL> SELECT DB_KEY FROM DB WHERE DB_ID=1979867571;
SQL> EXEC DBMS_RCVCAT.UNREGISTERDATABASE(1,3376366075);

RC_ARCHIVED_LOG VIEW
RC_BACKUP_CONTROLFILE VIEW
RC_BACKUP_CORRUPTION VIEW
RC_BACKUP_DATAFILE VIEW
RC_BACKUP_PIECE VIEW
RC_BACKUP_REDOLOG VIEW
RC_BACKUP_SET VIEW
RC_BACKUP_SPFILE VIEW
RC_CHECKPOINT VIEW
RC_CONTROLFILE_COPY VIEW
RC_COPY_CORRUPTION VIEW
RC_DATABASE VIEW
RC_DATABASE_BLOCK_CORRUPTION VIEW
RC_DATABASE_INCARNATION VIEW
RC_DATAFILE VIEW
RC_DATAFILE_COPY VIEW
RC_LOG_HISTORY VIEW
RC_OFFLINE_RANGE VIEW
RC_PROXY_CONTROLFILE VIEW
RC_PROXY_DATAFILE VIEW
RC_REDO_LOG VIEW
RC_REDO_THREAD VIEW
RC_RESYNC VIEW
RC_RMAN_CONFIGURATION VIEW
RC_STORED_SCRIPT VIEW
RC_STORED_SCRIPT_LINE VIEW
RC_TABLESPACE VIEW

RMAN>CONFIGURE CHANNEL DEVICE TYPE 'SBT_TAPE' PARMS 'ENV=(NB_ORA_CLASS=oraclebkup, SBT_LIBRARY=/usr/openv/netbackup/bin/libobk.so)';
RMAN>CATALOG DEVICE TYPE 'SBT_TAPE' BACKUPPIECE 'lij1qaa3_1_1';

--block change tracking
alter database enable block change tracking using file '/tmp/block_track.log';
select filename,status,bytes from v$block_change_tracking;
alter database disable block change tracking;


--check backup status:
select sequence# from v$recovery_log;
select hxfil,fhscn,fhrba_seq,fhrba_bno,fhrba_bof from x$kcvfh;
select sequence#,checkpoint_change#,last_redo_change# from v$thread;
select group#,status,sequence#,first_change# from v$log;
select * from v$instance_recovery;
select * from v$recovery_log;
select * from v$database_block_corruption;

--backup [database|tablespace|datafile|archive]
device type [disk|sbt]
format='d:\rmandb\db_%U'
tag='db20041208'
maxsetsize=100m
copies=2
include current controlfile
keep [forever|until time='sysdate+180']
[skip [readonly|offline|inaccessible]|skip readonly skip offline skip inaccessible]
noexclude;

backup database keep forever;
backup database keep 5 days;

--run backup command:
Run {
allocate channel ch1 type disk format='/u01/backup/%u.bak';
backup database plus archivelog;
release channel ch1;
}

--run script from catalog:
RMAN> RUN { EXECUTE SCRIPT backup_whole_db };

--run external script:
$ rman catalog rman/rman target / @backup_db.rman
$ rman cmdfile=backup.rman msglog=backup.log
RMAN> @backup_db.rman
RMAN> RUN { @backup_db.rman }

connect target /
connect catalog rman/
run
{allocate channel d1 type disk maxpiecesize 4g;
allocate channel d2 type disk maxpiecesize 4g;
allocate channel d3 type disk maxpiecesize 4g;
backup as compressed backupset incremental level = 0 database
format '/orabak/databackup/db_%d_%s_%p_%T' skip readonly
plus archivelog skip inaccessible delete all input
format '/orabak/logbackup/log_%d_%s_%p_%T';
sql 'alter system archive log current';
release channel d3;
release channel d2;
release channel d1;
}

run
{ crosscheck backup;
delete noprompt expired backup;
delete noprompt obsolete redundancy=1;
restore database preview summary;
}
exit;

--10g
SELECT * FROM V$BLOCK_CHANGE_TRACKING;

select start_time,end_time,status,input_type,
output_bytes_display,input_bytes_per_sec_display,output_bytes_per_sec_display,time_taken_display
from v$rman_backup_job_details where start_time >= trunc (sysdate) - 58 order by start_time;
-----------------------------------------------------------------------
--configure backup env e.g.
--------------------------------------
CONFIGURE DEVICE TYPE DISK BACKUP TYPE TO backupset;
configure device type disk backup type to compressed backupset;

-----------------------------------------------------------------------
--backup database e.g.
--------------------------------------
1. backup database;
2. backup database plus archivelog;
3. backup database plus archivelog delete input;
4. backup database plus archivelog delete all input;
5. backup database;
6. BACKUP AS COMPRESSED BACKUPSET DATABASE FORMAT 'D:\BACKUP\%U' PLUS ARCHIVELOG;
------------------------------------------------------------------------
--backup tablespace e.g.
--------------------------------------
1. backup tablespace users;
2. backup tablespace users plus archivelog;
3. backup tablespace users include current controlfile plus archivelog;
------------------------------------------------------------------------
--backup datafile e.g.
--------------------------------------
1. backup datafile 2;
2. backup datafile 'e:\oracle\oradata\users01.dbf';
3. backup datafile 'e:\oracle\oradata\users01.dbf' plus archivelog;
-----------------------------------------------------------------------
--backup archivelog e.g.
--------------------------------------
1. backup archivelog all;
2. backup archivelog from time 'sysdate-3';
3. backup archivelog from sequence 345;
4. backup archivelog all delete input;
5. backup archivelog from time 'sysdate-3' delete input;
6. backup archivelog not backed up 3 times;
7. backup archivelog all delete input until time 'sysdate-2';
----------------------------------------------------------------------
--backup controlfile and spfile e.g.
--------------------------------------
1. backup current controlfile;
2. backup spfile;
3. sql "alter database backup controlfile to "e:rmancontrolcontrolf.bak" ";
backup controlfilecopy 'e:rmancontrolcontrolf.bak';
----------------------------------------------------------------------
--backup backupset e.g.
--------------------------------------
1. backup backupset all;
2. backup backupset completed before 'sysdate-2';
3. backup backupset completed before 'sysdate-2' delete input;
4. backup backupset completed after 'sysdate-2' delete input;
5. backup backupset all
format='e:\rman\backupset_%U.bak'
tag='backup of backupset'
channel 'ORA_DISK_1';
-----------------------------------------------------------------
--other backup e.g.
--------------------------------------
1. backup validate database; --validate database.
2. backup database force; --force to backup datafiles readonly.
3. backup database not backed up [since time='sysdate-2']; --backup not backed up since 2 days.
4. backup check logical database; --backup and check logic for database.
5. backup validate check logical database; --only check logic for database.
6. backup database skip [readonly|offline|inaccessible]; --skip some files.

--monitor the progress of RMAN backup validate
select sid, serial#, context, sofar, totalwork, round(sofar/totalwork*100,2) "%_complete"
from v$session_longops
where opname like 'RMAN%'
and opname not like '%aggregate%'
and totalwork != 0
and sofar <> totalwork
/


--list:
list incarnation [of database];
list backup summary;
list backup;
list expired backup;
list expired archivelog all;
list backup by file;
list backup of tablespace;
list backup of datafile;
list archivelog all;
list backup of archivelog all;
list backup of archivelog sequence 1;
list backup of archivelog time between "to_date('2006-08-08 00:00:00','yyyy-mm-dd hh24:mi:ss')"
and "to_date('2006-08-08 23:59:59','yyyy-mm-dd hh24:mi:ss')";
list backup of archivelog scn between 1 and 2;
list backup of controlfile;
list backup of spfile;
list copy;
list copy of database;
list copy of datafile 1;
list copy of archivelog all;
list copy of archivelog all completed after 'sysdate - 1';

--report:
report need backup days=3;  
report need backup redundancy=2;
report need backup recovery window of 2 days;
report unrecoverable;
report obsolete;
report schema;

--crosscheck:
crosscheck backup;
crosscheck backup of datafile 1;
crosscheck backup of tablespace users;
crosscheck backup of controlfile;
crosscheck backup tag='';
crosscheck backup completed after 'sysdate - 2';
crosscheck backup completed between 'sysdate - 5' and 'sysdate - 2';
crosscheck backup device type sbt;
crosscheck archivelog all;
crosscheck archivelog like '%ARC00012.001';
crosscheck archivelog from sequence 1;
crosscheck archivelog until sequence 2;

--change:
change backupset 117,118 delete;
change backuppiece 1304 delete;
change archivelog until logseq =544 delete;
change backupset 31 keep forever logs;
change backupset 32 keep until time 'sysdate + 7' logs;
change backupset 33 unavailable;

--delete:
DELETE BACKUPPIECE 101;
DELETE CONTROLFILECOPY '/tmp/control01.ctl';
DELETE BACKUP OF TABLESPACE users DEVICE TYPE sbt;
DELETE EXPIRED BACKUP;
DELETE NOPROMPT OBSOLETE;
DELETE OBSOLETE REDUNDANCY = 3;
DELETE OBSOLETE RECOVERY WINDOW OF 7 DAYS;
DELETE NOPROMPT ARCHIVELOG UNTIL SEQUENCE = 300;

--set:
set echo on | off;
set DBID dbidn;
--these set commands can only be used inside RUN blocks:
run
{
set newname to;
set maxcorrupt for datafile to;
set archivelog destination to;
set until time "to_date('2006/08/08 00:00:00','yyyy/mm/dd hh24:mi:ss')";
set backup copies = 2;
}

--restore:
--restore archivelog "all, from, high, like, logseq, low, scn, sequence, time, until"
restore archivelog all preview;
restore archivelog sequence 1 preview;
restore archivelog time between "to_date('2006-08-08 00:00:00','yyyy-mm-dd hh24:mi:ss')"
and "to_date('2006-08-08 23:59:59','yyyy-mm-dd hh24:mi:ss')" preview;
restore archivelog like '%18%'; -- can only be used in catalog mode
restore archivelog from sequence 1 until sequence 2 thread=1;
restore archivelog from logseq 1 until logseq 2;
restore archivelog low sequence 1 high sequence 2;
restore archivelog low logseq 1 high logseq 2;
restore archivelog sequence=1 from tag='TAG20081118T232646';
restore database validate;

run
{
allocate channel c1 type sbt_tape;
SEND 'NB_ORA_SERV=NBU_SERVER,NB_ORA_CLIENT=s851';
set archivelog destination to '/archiv/rman_s852/';
restore archivelog from logseq 264258 until logseq 264260 thread 2;
release channel c1;
}

run
{
allocate channel c1 type 'sbt_tape' parms
'ENV=(TDPO_OPTFILE=/usr/tivoli/tsm/client/oracle/bin64/tdpo.opt)';
recover database until sequence 69840;
}

run
{ allocate channel t1 device type disk;
backup as compressed backupset
skip offline
skip inaccessible
tag full_backup
format '/VOL4/lpar4/subta/rman/subta_full_backup_%U_%p_%T.bk'
(database include current controlfile);
sql 'alter system archive log current';
backup as compressed backupset
format '/VOL4/lpar4/subta/rman/subta_archlog_%U_%T.bk'
(archivelog until time 'SYSDATE'
delete input);
release channel t1;
}
resync catalog;

--incomplete recovery:
run
{
set until time "to_date('20081118 15:00:00','yyyymmdd hh24:mi:ss')";
restore database;
recover database;
alter database open resetlogs;
}

startup mount;
restore database UNTIL SCN 10000;
recover database UNTIL SCN 10000;
alter database open resetlogs;

startup mount;
restore database UNTIL SEQUENCE 100 thread 1;
recover database UNTIL SEQUENCE=100 thread=1;
alter database open resetlogs;

1. recover database using backup controlfile
2. recover database until cancel
3. recover database using backup controlfile until cancel;
4. recover database until cancel using backup controlfile;

RMAN> run {
allocate channel ch00 type disk;
allocate channel ch01 type disk;
allocate channel ch02 type disk;
allocate channel ch03 type disk;
allocate channel ch04 type disk;
sql 'alter session set NLS_DATE_FORMAT="YYYY-MM-DD:HH24:MI:SS"';
set until time '2010-06-23:15:30:00';
recover database until time '2010-06-23:15:30:00';
release channel ch00;
release channel ch01;
release channel ch02;
release channel ch03;
release channel ch04;
}

%c the copy number of the backup piece
%d the database name
%D the day of the month (DD)
%M the month of the year (MM)
%F a DBID-based unique name of the form c-IIIIIIIIII-YYYYMMDD-QQ, where IIIIIIIIII is the DBID of the database,
YYYYMMDD is the date, and QQ is a sequence number from 1 to 256
%n the database name, padded on the right to a maximum of eight characters
%u an eight-character name derived from the backup set number and its creation time
%p the backup piece number within the backup set, starting at 1 and increasing with each piece created
%U a unique file name, equivalent to %u_%p_%c
%s the backup set number
%t the backup set timestamp
%T the date in YYYYMMDD format
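
--Illustrative example (the path is hypothetical) combining several of the elements above:
RMAN> backup as compressed backupset database
      format '/orabak/db_%d_%T_%s_%p.bkp' tag 'FULL_DB';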


--rman trace:
1. Enable debugging within RMAN with:
$ rman target <un/pw@target_db> catalog <un/pw@catalog_db> debug all trace=rman.trc log=rman.log
2. Enable tracing for ALL channels:
RMAN> allocate channel......... debug=6 trace 5;


--recovery:
select dbid, name, to_char(created, 'DD-MON-YYYY HH24:MI:SS') created, open_mode, log_mode,
to_char(checkpoint_change#, '999999999999999') as checkpoint_change#, controlfile_type,
to_char(controlfile_change#, '999999999999999') as controlfile_change#,
to_char(controlfile_time, 'DD-MON-YYYY HH24:MI:SS') controlfile_time,
to_char(resetlogs_change#, '999999999999999') as resetlogs_change#,
to_char(resetlogs_time, 'DD-MON-YYYY HH24:MI:SS') resetlogs_time
from v$database;

select * from v$log where status='CURRENT';

select status, checkpoint_change#, to_char(checkpoint_time, 'DD-MON-YYYY HH24:MI:SS') as checkpoint_time, count(*)
from v$datafile_header
group by status, checkpoint_change#, checkpoint_time
order by status, checkpoint_change#, checkpoint_time;

select * from v$backup where status = 'ACTIVE';

set pagesize 20000
set linesize 180
set pause off
set serveroutput on
set feedback on
set echo on
set numformat 999999999999999
Spool recovery_info.txt
select substr(name, 1, 50), status from v$datafile;
select substr(name,1,40), recover, fuzzy, checkpoint_change# from v$datafile_header;
select GROUP#,substr(member,1,60) from v$logfile;
select * from v$recover_file;
select distinct status from v$backup;
select hxfil FILENUMBER,fhsta STATUS,fhscn SCN,fhrba_Seq SEQUENCE from x$kcvfh;
select distinct (fuzzy) from v$datafile_header;
spool offexit

/*Force open*/
SQL> startup mount;
SQL> alter session set events '10015 trace name adjust_scn level 1'; -- Note:30681.1
SQL> alter system set events 'immediate trace name adjust_scn level 10';
_minimum_giga_scn can be used to raise the SCN in multiples of 1G

*.event='10513 trace name context forever,level 2:10061 trace name context forever, level 10:10015 trace name context forever,level 1:10013 trace name context forever,level 1'
*.db_block_checksum=false
*._db_always_check_system_ts=false

SQL> startup mount;
SQL> oradebug setmypid
SQL> oradebug DUMPvar SGA kcsgscn_
kcslf kcsgscn_ [7000000100120C0, 7000000100120F0) = 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 00000000 07000000
SQL> col CURRENT_SCN for 999999999999999
SQL> select current_scn from v$database;
CURRENT_SCN
----------------
12622610288579
SQL> oradebug poke 0x7000000100120C 8 0xC7AEE33CFC3
SQL> alter database open read only;

/************************************************************
--RAC:
************************************************************/
/************************************************************
--de-configuring and re-configuring the clusterware :
************************************************************/
cd $GI_HOME/crs/install
./rootcrs.pl -deconfig -force

followed by
cd $GI_HOME
./root.sh
/************************************************************
--RAC addNode.sh:
************************************************************/
## 11g
export IGNORE_PREADDNODE_CHECKS=Y
[grid@rac111 ~]$ /u01/app/11.2.0/grid/oui/bin/addNode.sh "CLUSTER_NEW_NODES={rac112}" "CLUSTER_NEW_VIRTUAL_HOSTNAMES={rac112-vip}" "CLUSTER_NEW_PRIVATE_NODE_NAMES={rac112-priv}"
[root@rac112 grid]# /u01/app/11.2.0/grid/root.sh

[oracle@rac111 ~]$ $ORACLE_HOME/oui/bin/addNode.sh -silent "CLUSTER_NEW_NODES={rac112}"
[root@rac112 dbhome_1]# /u01/app/oracle/product/11.2.0/dbhome_1/root.sh
[oracle@rac111 ~]$ dbca -silent -addInstance -nodeList rac112 -gdbName racdb -instanceName racdb2 -sysDBAUserName sys -sysDBAPassword oracle

## 12c
addnode.sh -help
-ignoreSysPrereqs
For ignoring the results of the system pre-requisite checks.
-ignorePrereq
To ignore running the prerequisite checks.
IGNORE_PREADDNODE_CHECKS does not work in 12.1 (Doc ID 1995563.1)

/************************************************************
--RAC deleteNode.sh:
************************************************************/
[oracle@rac111 ~]$ dbca -silent -deleteInstance -nodeList rac112 -gdbName racdb -instanceName racdb2 -sysDBAUserName sys -sysDBAPassword oracle
[oracle@rac112 ~]$ srvctl disable listener -l listener_name -n rac112
[oracle@rac112 ~]$ srvctl stop listener -l LISTENER -n rac112
[oracle@rac112 ~]$ $ORACLE_HOME/oui/bin/runInstaller -updateNodeList ORACLE_HOME=/u01/app/oracle/product/11.2.0/dbhome_1 "CLUSTER_NODES={rac112}" -local
[oracle@rac112 ~]$ $ORACLE_HOME/deinstall/deinstall -local
[oracle@rac111 ~]$ $ORACLE_HOME/oui/bin/runInstaller -updateNodeList ORACLE_HOME=/u01/app/oracle/product/11.2.0/dbhome_1 "CLUSTER_NODES={rac111}"

[grid@rac111 ~]$ olsnodes
[oracle@rac112 ~]$ emctl stop dbconsole
[root@rac112 install]# /u01/app/11.2.0/grid/crs/install/rootcrs.pl -deconfig -force
[root@rac111 install]# /u01/app/11.2.0/grid/bin/crsctl delete node -n rac112

[grid@rac112 ~]$ /u01/app/11.2.0/grid/oui/bin/runInstaller -updateNodeList ORACLE_HOME=/u01/app/11.2.0/grid "CLUSTER_NODES={rac112}" CRS=TRUE -silent -local
[grid@rac112 ~]$ /u01/app/11.2.0/grid/deinstall/deinstall -local

[grid@rac111 ~]$ /u01/app/11.2.0/grid/oui/bin/runInstaller -updateNodeList ORACLE_HOME=/u01/app/11.2.0/grid "CLUSTER_NODES={rac111}" CRS=TRUE -silent


## diagcollection
How do I collect the Cluster Health Monitor data?

As the grid user, the command "<GI_HOME>/bin/diagcollection.pl --collect --chmos" will produce output for all data that is collected in the repository. There may be too much data and it may take a long time, so the suggestion is to limit the query to an interesting time interval.

For example, issue "<GI_HOME>/bin/diagcollection.pl --collect --crshome $ORA_CRS_HOME --chmos --incidenttime <start time of interesting time period> --incidentduration 05:00"

The above outputs the report that covers 5 hours from the time specified by incidenttime.
The incidenttime must be in MM/DD/YYYYHH:MN:SS format, where MM is month, DD is date, YYYY is year, HH is hour in 24-hour format, MN is minute, and SS is second. For example, if you want the incident time to start from 10:15 PM on June 01, 2011, the incident time is 06/01/201122:15:00. The incidenttime and incidentduration can be changed to capture more data.

Alternatively, use 'oclumon dumpnodeview -allnodes -v -last "11:59:59" > your-filename' if diagcollection.pl fails for any reason. This will generate a report from the repository covering up to the last 12 hours. The -last value can be changed to get more or less data.
Another example of using oclumon is 'oclumon dumpnodeview -allnodes -v -s "2012-06-01 22:15:00" -e "2012-06-02 03:15:00" > /tmp/chm.log'. The difference in this command is that it specifies the start (-s flag) and end time (-e flag).

$GRID_HOME/bin/diagcollection.sh --chmos --incidenttime 05/21/201508:00:00 --incidentduration 02:00

--diagwait:
It is important that the Clusterware stack is down on all nodes when changing diagwait. The following steps provide step-by-step instructions on setting diagwait.

Execute as root
#crsctl stop crs
#<CRS_HOME>/bin/oprocd stop
Ensure that Clusterware stack is down on all nodes by executing
#ps -ef |egrep "crsd.bin|ocssd.bin|evmd.bin|oprocd"
This should return no processes. If there are clusterware processes running and you proceed to the next step, you will corrupt your OCR. Do not continue until the clusterware processes are down on all the nodes of the cluster.
From one node of the cluster, change the value of the "diagwait" parameter to 13 seconds by issuing the command as root:
#crsctl set css diagwait 13 -force
Check whether diagwait was set successfully by executing the following command. It should return 13. If diagwait is not set, the following message will be returned: "Configuration parameter diagwait is not defined"
#crsctl get css diagwait
Restart the Oracle Clusterware on all the nodes by executing:
#crsctl start crs
Validate that the node is running by executing:
#crsctl check crs

./runcluvfy.sh stage -pre crsinst -n rac1,rac2 -verbose
./runcluvfy.sh comp nodecon -i eth1 -n rac1,rac2 -verbose
./runcluvfy.sh stage -pre crsinst -n rac1,rac2 -networks "eth*" -verbose

Trace file analysis

/************************************************************
--RAC_OFF:
************************************************************/
1. Go to $ORACLE_HOME/rdbms/lib
2. Run the following command
nm -r libknlopt.a | grep -c kcsm.o

3. If the above command returns 0, then RAC is not linked. If it returns more than 0, then it is linked.

Solution
If you find that RAC is NOT linked into the RDBMS home, then do the following steps to relink with RAC on:
1. Shut down any database instances that are running from this home.
2. cd to $ORACLE_HOME/rdbms/lib directory.
3. Issue the following command as the oracle software owner:
make -f ins_rdbms.mk rac_on ioracle

$ORACLE_HOME/bin/skgxpinfo
nm /u01/app/oracle/product/11.2.0.4/dbhome_1/lib/libskgxp11.so | grep rds_enabled

$ cd $ORACLE_HOME/rdbms/lib
$ make -f ins_rdbms.mk ipc_rds ioracle
$ cd $ORACLE_HOME/rdbms/lib
$ make -f ins_rdbms.mk ipc_g ioracle


On Linux/UNIX except AIX:
ar -t $ORACLE_HOME/rdbms/lib/libknlopt.a|grep kcsm.o

On AIX:
ar -X32_64 -t $ORACLE_HOME/rdbms/lib/libknlopt.a|grep kcsm.o
How to Check Whether Oracle Binary/Instance is RAC Enabled and Relink Oracle Binary in RAC (Doc ID 284785.1)

HP-UX
#/sbin/init.d/init.cssd stop

--CRS administration components:
vip : Virtual Internet Protocol address
gsd : Global Services Daemon
ons : Oracle Notification Service

--CRS daemon processes:
ocssd : oracle Cluster Synchronization Services
crsd : Cluster Ready Services
evmd : Event Management
ons : Oracle Notification Service
racg : RACG is a behind-the-scenes process (or thread) that extends clusterware to support Oracle-specific requirements
and complex resources.
Runs server callout scripts when FAN events occur.
Runs as processes (or threads), not as a service (racgmain.exe, racgimon.exe)
oprocd : Process monitor for the cluster

--RACHA and his components
The VIP
ONS/FAN -- Fast Application Notification (FAN) events
Resources
RACG

GCS : the Global Cache Service
GES : Global Enqueue Service
GRD : Global Resource Directory

--The Oracle RAC processes and their identifiers are as follows:
LMS : Global Cache Service Process
LMD : Global Enqueue Service Daemon
LMON : Global Enqueue Service Monitor
LCK0 : Instance Enqueue Process
GCS maps to the LMS processes
GES maps to the LMON process

The LMON timeout detection threshold defaults to 70s and is controlled by the following hidden parameter:

select nam.ksppinm NAME, val.KSPPSTVL VALUE
from x$ksppi nam, x$ksppsv val
where nam.indx = val.indx
and nam.ksppinm like '%_lm_rcvr_hang_allow_time%'
order by 1;

NAME                           VALUE
------------------------------ -----------------------------------
_lm_rcvr_hang_allow_time       70   --receiver hang allow time in seconds

This can also be seen in the LMON and LMHB trace files:
LMON (ospid: 21889074) has no heartbeats for 123 sec. (threshold 70 sec)


--Initialize ocr and vote disk
dd if=/dev/zero of=/dev/rdisk/disk80 bs=8192 count=2560 --OCR
dd if=/dev/zero of=/dev/rdisk/disk81 bs=8192 count=12800 --VOTE DISK

--start CRS:
/etc/init.crs start
$ORA_CRS_HOME/crsctl start crs

--stop CRS:
/etc/init.crs stop
$ORA_CRS_HOME/crsctl stop crs

--disable auto starting:
/etc/init.crs disable

--enable auto starting:
/etc/init.crs enable

--check CRS status:
$ORA_CRS_HOME/crsctl check crs
$ORA_CRS_HOME/crs_stat -t
$ORA_CRS_HOME/crs_stat -p
$ORA_CRS_HOME/crs_stat -v

--start all resources:
$ORA_CRS_HOME/bin/crs_start -all

--stop all resources:
$ORA_CRS_HOME/bin/crs_stop -all

--check OCR(Oracle Configuration Repository):
$ORA_CRS_HOME/ocrcheck

--backup OCR:
$ORA_CRS_HOME/ocrconfig -export /oradata/ocr.data

--show backup of OCR:
$ORA_CRS_HOME/ocrconfig -showbackup

--import OCR backup:
$ORA_CRS_HOME/ocrconfig -import /oradata/ocr.data

--dump OCR:
$ORA_CRS_HOME/ocrdump -backupfile MYFILE

--restore OCR:
$ORA_CRS_HOME/ocrconfig -restore $ORA_CRS_HOME/cdata/crs/week.ocr

--modify ocr
./crsctl status resource "ora.cssd" -f | grep AUTO_START
./crsctl modify resource ora.diskmon -init -attr "AUTO_START=never"
./crsctl modify resource ora.diskmon -init -attr "AUTO_START=always"

--Note :
Don't use "AUTO_START=1" use AUTO_START=always instead !

[oracle@orars12 ~]$ crsctl modify resource "ora.cssd" -attr "AUTO_START=1"
CRS-4995: The command 'Modify resource' is invalid in crsctl. Use srvctl for this command.
[oracle@orars12 ~]$ crsctl status resource "ora.cssd" -f | grep AUTO_START
AUTO_START=never
[oracle@orars12 ~]$ crsctl modify resource ora.cssd -init -attr "AUTO_START=always"

--Deconfig:
##Reconfiguring & Recreating The 11gR2 or 12cR1 Restart or OHAS or SIHA Stack Configuration (Standalone) How to Reconfigure Oracle Restart (文档 ID 1422517.1)
roothas.pl -deconfig -force
##How to Deconfigure/Reconfigure(Rebuild OCR) or Deinstall Grid Infrastructure (文档 ID 1377349.1)
On all remote nodes, as root execute:
# <$GRID_HOME>/crs/install/rootcrs.pl -deconfig -force -verbose
Once the above command finishes on all remote nodes, on local node, as root execute:
# <$GRID_HOME>/crs/install/rootcrs.pl -deconfig -force -verbose -lastnode

./cluvfy comp ssa -verbose

/************************************************************
--Voting disk
The voting disk records node membership information, e.g. which nodes are cluster members and the records of node additions/removals; its size is 20MB.
Check the voting disk location: crsctl query css votedisk
$ crsctl query css votedisk
If the CRS installation fails and has to be redone, the voting disk must be reinitialized, either with dd or by recreating the volume:
dd if=/dev/zero of=/dev/rhdisk3 bs=8192 count=2560
Back up the votedisk: dd if=/dev/rhdisk3 of=/tmp/votedisk.bak bs=4096
Restore the votedisk: dd if=/tmp/votedisk.bak of=/dev/rhdisk3
Add a voting disk mirror:
crsctl add css votedisk /dev/rhdisk7 -force
Remove a voting disk mirror:
crsctl delete css votedisk /dev/rhdisk7 -force

--OCR
OCR records the configuration information of the cluster members, i.e. the configuration of CRS resources such as database, ASM, instance, listener and VIP. It can be stored on raw devices or on a cluster file system;
the recommended size is 100MB. With RAW storage, carve out one RAW device, e.g. /dev/rhdisk2. If the CRS installation fails and has to be redone, the OCR device (RAW) must be reinitialized,
either with dd or by recreating the volume:
dd if=/dev/zero of=/dev/rhdisk2 bs=8192 count=12800
OCR is backed up automatically every four hours and three versions are kept, but only on one node.
$ ocrconfig -showbackup
Restore OCR: ocrconfig -restore /u01/app/oracle/product/10.2.0/crs/cdata/crs/backup01.ocr
Manual OCR export: ocrconfig -export /tmp/ocr_bak
Manual OCR import: ocrconfig -import /tmp/ocr_bak
Add an OCR mirror:
1. Stop the CRS services with crsctl stop crs
2. Create the RAW device used to mirror the OCR, e.g. /dev/rhdisk6
3. Export the OCR contents with ocrconfig -export
4. Edit /etc/oracle/ocr.loc and add the ocrmirrorconfig_loc line
$ cat ocr.loc
ocrconfig_loc=/dev/rhdisk2
ocrmirrorconfig_loc=/dev/rhdisk6
local_only=FALSE
5. Import the OCR contents with ocrconfig -import
6. Check the OCR configuration
$ ocrcheck
7. Finally start the CRS services with crsctl start crs
************************************************************/


--modify some attributes from CRS:

./crs_stat -p ora.rac1.LISTENER_RAC1.lsnr >> /tmp/ora.rac1.LISTENER_RAC1.lsnr.cap
./crs_stat -p ora.rac2.LISTENER_RAC2.lsnr >> /tmp/ora.rac2.LISTENER_RAC2.lsnr.cap
./crs_stat -p ora.rac1.ASM1.asm >> /tmp/ora.rac1.ASM1.asm.cap
./crs_stat -p ora.rac2.ASM2.asm >> /tmp/ora.rac2.ASM2.asm.cap
./crs_stat -p ora.orcl.orcl1.inst >> /tmp/ora.orcl.orcl1.inst.cap
./crs_stat -p ora.orcl.orcl2.inst >> /tmp/ora.orcl.orcl2.inst.cap

--modify items required.

./crs_register -u ora.rac1.LISTENER_RAC1.lsnr -dir /tmp
./crs_register -u ora.rac2.LISTENER_RAC2.lsnr -dir /tmp
./crs_register -u ora.rac1.ASM1.asm -dir /tmp
./crs_register -u ora.rac2.ASM2.asm -dir /tmp
./crs_register -u ora.orcl.orcl1.inst -dir /tmp
./crs_register -u ora.orcl.orcl2.inst -dir /tmp

crs_profile -update resource_name -t application -o as=0/1

--RAC CHECK:
1 Query and check the OCR device
ocrcheck
or
cluvfy comp ocr -n all
cluvfy stage -post hwos -n <node1>,<node2> -verbose

- Run root.sh in debug mode
- cp $ORA_CRS_HOME/install/rootinstall $ORA_CRS_HOME/install/rootinstall.bak
- cp $ORA_CRS_HOME/install/rootconfig $ORA_CRS_HOME/install/rootconfig.bak

Edit rootinstall and rootconfig scripts and include set -x in first line of the script
e.g:
#!/bin/sh -x
#
# rootinstall.sbs for CRS installs

script /tmp/rootsh.log
./root.sh

- If the CRS stack is not starting after initiating root.sh upload the
following

/tmp/crsctl.<pid> and OS Logs for information on why CRS stack can't startup on initial
installation and execution of root.sh.


2 Query and check the Voting Disk devices
crsctl query css votedisk

3 Query and check the network interfaces
oifcfg -getif -global

4 Check the OCR backups
Find the directory where the backups are stored:
ocrconfig -showbackup
cd to that directory; the following files should be listed:
3 four-hourly backups
2 daily backups
2 weekly backups

5 Check the system time on each database node; if the difference exceeds 15 minutes, it should be corrected.

6 Check the CRS and database logs.

7 Check CRS consistency across nodes
cluvfy comp crs -n all -verbose

8 Check configuration consistency across nodes
cluvfy comp peer -n all

9 Check the status of nodeapps, ASM, databases, instances and services
crs_stat -t

--CRS administration
The CRSCTL commands control the CRS services (Oracle Clusterware processes) on the local node.
Stop CRS:
$ crsctl stop crs        -- or
$ /etc/init.crs stop     -- or
$ crs_stop -all
Start CRS:
$ crsctl start crs       -- or
$ /etc/init.crs start
Disable automatic startup of Clusterware after a system reboot:
$ /etc/init.crs disable
Enable automatic startup of Clusterware after a system reboot:
$ /etc/init.crs enable
Check the CRS service status:
$ crsctl check crs
OCR resource management
Once the RAC cluster has started and is running successfully, you can use the crs_stat tool to see the OCR resources:
$ORA_CRS_HOME/bin/crs_stat
Normally all resources should be online. In addition, OCR is backed up every 4 hours, and in some situations a broken OCR can be repaired by restoring one of these OCR backups.
For example, when the following happens:
$ crs_stat -t
CRS-0184: Cannot communicate with the CRS daemon.
$ crsctl check boot
Failure 4 at procr_open_key with status 4.
PROC-4: The cluster registry key to be operated on does not exist.
$ crsctl check crs
Failure 1 contacting CSS daemon
Cannot communicate with CRS
Cannot communicate with EVM
One possible cause of this error is that the OCR configuration has been lost. In that case the automatic backups can be used to restore the earlier configuration.
# /etc/init.crs disable
# /etc/init.crs stop
First make sure that all CRS processes have stopped.
# su - oracle
$ cd $ORA_CRS_HOME/bin
$ ./ocrconfig -restore /opt/app/oracle/crs/cdata/crs/week.ocr
Then switch back to the root user and re-enable CRS:
# /etc/init.crs enable
Finally reboot both nodes.
Introduction to the SRVCTL commands
SRVCTL controls the instances, listeners and services of a RAC database.
SRVCTL is normally run as the oracle user. The srvctl commands are introduced below (the angle-bracket placeholders are illustrative).
1. Start/stop/check all instances with SRVCTL:
$ srvctl start|stop|status database -d <db_name>
2. Start/stop a specific instance:
$ srvctl start|stop|status instance -d <db_name> -i <instance_name>
3. List the configuration of the current RAC database:
$ srvctl config database -d <db_name>
4. Start/stop/check all nodeapps, e.g. VIP, GSD, listener, ONS:
$ srvctl start|stop|status nodeapps -n <node_name>
5. If ASM is used, srvctl can also start/stop the ASM instances:
$ srvctl start|stop asm -n <node_name> [-i <asm_inst_name>] [-o <oracle_home>]
6. Get all environment information:
$ srvctl getenv database -d <db_name> [-i <instance_name>]
7. Set global environment variables:
$ srvctl setenv database -d <db_name> -t LANG=en
8. Remove an existing database from the OCR:
$ srvctl remove database -d <db_name>
9. Add a database to the OCR:
$ srvctl add database -d rac -o /u01/app/oracle/11.2.0/db -p +DATA/rac/spfilerac.ora # -d db_unique_name; -o oracle_home; -p spfile_path
[-m <domain_name>] [-p <spfile>] [-A <vip_name|ip>/netmask] [-r {PRIMARY | PHYSICAL_STANDBY | LOGICAL_STANDBY}] [-s <start_options>]
10. Add a database instance to the OCR:
$ srvctl add instance -d <db_name> -i <instance_name> -n <node_name>
11. Add an ASM instance to the OCR:
$ srvctl add asm -n <node_name> -i <asm_inst_name> -o <oracle_home>
12. Add a service:
$ srvctl add service -d <db_name> -s <service_name> -r <preferred_list> [-a <available_list>] [-P <TAF_policy>] [-u]
13. Modify a service to run on another node:
$ srvctl modify service -d <db_name> -s <service_name> -i <old_instance> -t <new_instance>
14. Relocate a service from one node to another:
srvctl relocate service -d <db_name> -s <service_name> -i <old_instance> -t <new_instance>
Demonstration of common CRS and related service commands
1. Status commands
# su - oracle                                 --switch to the oracle user first
$ crs_stat -t                                 --check the status of CRS and all services
$ crsctl check crs                            --check the CRS-related services with crsctl
$ srvctl status nodeapps -n db01 (db02)       --check whether the services on every node are healthy
$ srvctl status database -d db                --check the database status with srvctl
Commands for starting and stopping the related services
# su - oracle
$ crs_start -all                              --start all CRS-managed resources
$ crs_stop -all                               --stop all CRS-managed resources
$ crsctl start crs                            --start the CRS stack
$ crsctl stop crs                             --stop the CRS stack
$ srvctl start nodeapps -n db01(db02)         --start all applications on a node
$ srvctl stop nodeapps -n db01(db02)          --stop all applications on a node
$ srvctl start listener -n db01(db02)         --start the listener on a node
$ srvctl stop listener -n db01(db02)          --stop the listener on a node
$ srvctl start instance -d db -i db01(db02)   --start a specific instance
$ srvctl stop instance -d db -i db01(db02)    --stop a specific instance
$ srvctl start database -d db                 --start the database
$ srvctl stop database -d db                  --stop the database

select * from gv$cr_block_server;

$ORA_CRS_HOME/bin/oifcfg delif -global en4
$ORA_CRS_HOME/bin/oifcfg setif -global en4/10.161.4.0:public
$ORA_CRS_HOME/bin/oifcfg getif

--CHANGE VIP:
$ORACLE_HOME/bin/srvctl config nodeapps -n njyfxdm -a
crs_register -update ora.rac2.vip -o ov=43.82.127.78
#srvctl modify nodeapps -n njyfxdm -A 10.161.4.35/255.255.255.192/en4
#srvctl modify nodeapps -n njyfxdwm -A 10.161.4.36/255.255.255.192/en4
#ifconfig eth4:1 10.161.4.35/36

<CRS_HOME>/bin/crsctl debug log css "CSSD:5"
<CRS_HOME>/bin/crsctl debug log resource ora.node1.vip:1
<CRS_HOME>/bin/crsctl debug log crs "CRSRTI:5,CRSCOMM:5,CRSMAIN:5,CRSPLACE:5,CRSAPP:5,CRSRES:5,CRSOCR:5,CRSTIMER:5,CRSEVT:5"

ORA_CRSDEBUG=1
export ORA_CRSDEBUG
SRVM_TRACE=TRUE


/************************************************************
--ORACLE DAEMON PROCESS
************************************************************/
SELECT SUBSTR(DEST,1,10) DEST, DESCRIPTION FROM X$MESSAGES ORDER BY DEST;

DBWR database writer process
LGWR log writer process
SMON system monitor process
PMON process monitor process
CKPT checkpoint process

RECO recover process
Jnnn job queue process
ARCn archiver process
QMNn queue monitor process
SNPn snapshot process
RFS remote file server process
RVWR recovery writer -- flashback database
CTWR change-tracking writer process
TRWR trace writer process
MMAN memory manager; when automatic SGA management is enabled, MMAN coordinates the size settings and resize operations of the SGA components.
MMON is the Oracle 10g Manageability Monitor; it is mainly used for AWR and ADDM, and writes the statistics collected in the SGA to the repository tables.
The memory monitor (MMON) process was introduced in 10g and is associated with the Automatic Workload Repository
new features used for automatic problem detection and self-tuning. MMON writes out the required statistics for AWR
on a scheduled basis. M000 is slave process of MMON
MMNL (Memory Monitor Light) is a new background process introduced in Oracle 10g and is one of the AWR components.
The Memory Monitor Light (MMNL) process is a new process in 10g which works with the Automatic Workload Repository
new features (AWR) to write out full statistics buffers to disk as needed.
RBAL rebalancing disk groups process
ORBn rebalance data extent movements
OSMB asm disk group communicates with instance
WMON wakeup monitor process
DMON dataguard broker process
MRP managed recovery process
Dnnn dispatcher process
Pnnn parallel process
Snnn shared server process
CJQn job queue monitor process
LMSn global cache service process
LMON global enqueue Service monitor process
LMDn global enqueue service daemon process
LCKn global enqueue requests and the cross-instance broadcast
DIAG diagnosability daemon process
SMCO coordinates space management work and executes space allocation and reclamation; its Wnnn slaves (named W000, W001, W002, ...) are spawned dynamically by SMCO to carry out these tasks.
SMCO (space management coordinator) process coordinates the execution of various space management related tasks, such as proactive space allocation and space
reclamation. It dynamically spawns slave processes (Wnnn) to implement the task
LMHB global cache/enqueue service heartbeat monitor

 

You can see the Oracle background processes with this query:

select * from v$session where type ='BACKGROUND';

--Here are some of the most important Oracle background processes:

ARCH - (Optional) Archive process writes filled redo logs to the archive log location(s). In RAC, the various ARCH
processes can be utilized to ensure that copies of the archived redo logs for each instance are available to the
other instances in the RAC setup should they be needed for recovery.

CJQ - Job Queue Process (CJQ) - Used for the job scheduler. The job scheduler includes a main program (the coordinator)
and slave programs that the coordinator executes. The parameter job_queue_processes controls how many parallel job
scheduler jobs can be executed at one time.
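
A minimal sketch to see the current limit and what the job queue is running right now:

select value as job_queue_processes from v$parameter where name = 'job_queue_processes';
select * from dba_jobs_running;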

CKPT - Checkpoint process writes checkpoint information to control files and data file headers.

CQJ0 - Job queue controller process wakes up periodically and checks the job log. If a job is due, it spawns Jnnnn
processes to handle jobs.

DBWR - Database Writer or Dirty Buffer Writer process is responsible for writing dirty buffers from the database
block cache to the database data files. Generally, DBWR only writes blocks back to the data files on commit, or when
the cache is full and space has to be made for more blocks. The possible multiple DBWR processes in RAC must be
coordinated through the locking and global cache processes to ensure efficient processing is accomplished.

FMON - The database communicates with the mapping libraries provided by storage vendors through an external non-Oracle
Database process that is spawned by a background process called FMON. FMON is responsible for managing the mapping
information. When you specify the FILE_MAPPING initialization parameter for mapping data files to physical devices
on a storage subsystem, then the FMON process is spawned.
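
A minimal sketch (assuming the vendor mapping libraries are installed) of turning file mapping on and querying the result:

alter system set file_mapping=true;
exec dbms_storage_map.map_all(10000);
select * from v$map_file;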

LGWR - Log Writer process is responsible for writing the log buffers out to the redo logs. In RAC, each RAC instance
has its own LGWR process that maintains that instance’s thread of redo logs.

LMON - Lock Manager process

MMON - The Oracle 10g background process to collect statistics for the Automatic Workload Repository (AWR).

MMNL - This process performs frequent and lightweight manageability-related tasks, such as session history capture and
metrics computation.

MMAN - is used for internal database tasks that manage the automatic shared memory. MMAN serves as the SGA Memory
Broker and coordinates the sizing of the memory components.

PMON - Process Monitor process recovers failed process resources. If MTS (also called Shared Server Architecture) is
being utilized, PMON monitors and restarts any failed dispatcher or server processes. In RAC, PMON’s role as service
registration agent is particularly important.

Pnnn - (Optional) Parallel Query Slaves are started and stopped as needed to participate in parallel query operations.

RBAL - This process coordinates rebalance activity for disk groups in an Automatic Storage Management instance.

SMON - System Monitor process recovers after instance failure and monitors temporary segments and extents. SMON in a
non-failed instance can also perform failed instance recovery for other failed RAC instance.

WMON - The "wakeup" monitor process


--Data Guard/Streams/replication Background processes
DMON - The Data Guard Broker process.

SNP - The snapshot process.

MRP - Managed recovery process - For Data Guard, the background process that applies archived redo log to the standby
database.

ORBn - performs the actual rebalance data extent movements in an Automatic Storage Management instance. There can be
many of these at a time, called ORB0, ORB1, and so forth.

OSMB - is present in a database instance using an Automatic Storage Management disk group. It communicates with the
Automatic Storage Management instance.

RFS - Remote File Server process - In Data Guard, the remote file server process on the standby database receives
archived redo logs from the primary database.

QMN - Queue Monitor Process (QMNn) - Used to manage Oracle Streams Advanced Queuing.
QMNC and Qnnn: Advanced Queuing
QMNC is to AQ tables what CJQ0 is to job tables. QMNC monitors the advanced queues and notifies waiting dequeuers
that a message has become available. QMNC and Qnnn are also responsible for queue propagation, that is, moving
messages that were enqueued in one database into a queue in another database so that they can be dequeued there.
Qnnn processes are to QMNC what Jnnn processes are to CJQ0: QMNC tells the Qnnn processes what work needs to be done,
and the Qnnn processes carry it out.
QMNC and Qnnn are optional background processes. The parameter AQ_TM_PROCESSES can create up to 10 of these processes
(named Q000 through Q009) plus one QMNC process. If AQ_TM_PROCESSES is set to 0, there are no QMNC or Qnnn processes.
Unlike the Jnnn processes used by the job queues, Qnnn processes are persistent: if AQ_TM_PROCESSES is set to 10, you
will see 10 Qnnn processes and one QMNC process at instance startup, and they remain for the life of the instance.
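
A minimal sketch (not from the original note) of enabling the queue monitor slaves and checking that QMNC/Qnnn have started:

-- set the number of queue monitor slaves (0 disables QMNC/Qnnn)
alter system set aq_tm_processes=2;
-- QMNC and the Qnnn slaves show up as background processes
select pid, spid, program from v$process where program like '%QMNC%' or program like '%Q00%';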


--Oracle Real Application Clusters (RAC) Background Processes

The following are the additional processes spawned for supporting the multi-instance coordination:

DIAG: Diagnosability Daemon – Monitors the health of the instance and captures the data for instance process failures.

LCKx - This process manages the global enqueue requests and the cross-instance broadcast. Workload is automatically
shared and balanced when there are multiple Global Cache Service Processes (LMSx).

LMON - The Global Enqueue Service Monitor (LMON) monitors the entire cluster to manage the global enqueues and the
resources. LMON manages instance and process failures and the associated recovery for the Global Cache Service (GCS)
and Global Enqueue Service (GES). In particular, LMON handles the part of recovery associated with global resources.
LMON-provided services are also known as cluster group services (CGS)

LMDx - The Global Enqueue Service Daemon (LMD) is the lock agent process that manages enqueue manager service requests
for Global Cache Service enqueues to control access to global enqueues and resources. The LMD process also handles
deadlock detection and remote enqueue requests. Remote resource requests are the requests originating from another
instance.

LMSx - The Global Cache Service Processes (LMSx) are the processes that handle remote Global Cache Service (GCS)
messages. Real Application Clusters software provides for up to 10 Global Cache Service Processes. The number of
LMSx varies depending on the amount of messaging traffic among nodes in the cluster.

The LMSx handles the acquisition interrupt and blocking interrupt requests from the remote instances for Global
Cache Service resources. For cross-instance consistent read requests, the LMSx will create a consistent read version
of the block and send it to the requesting instance. The LMSx also controls the flow of messages to remote instances.

The LMSn processes handle the blocking interrupts from the remote instance for the Global Cache Service resources by:

Managing the resource requests and cross-instance call operations for the shared resources.

Building a list of invalid lock elements and validating the lock elements during recovery.

Handling global lock deadlock detection and monitoring for lock conversion timeouts.

 

/************************************************************
--ORACLE DUMP
************************************************************/
Oracle internal events, both named events (e.g. ERRORSTACK) and numeric events (e.g. 10046), can be set at the session level.
This section outlines how database administrators can verify which events have been set in a specific session or in the entire system.

oradebug dump events 1;
oradebug dump events 2;
oradebug dump events 4;
Level 1 - Dump session group's event settings
Level 2 - Dump process group's event settings
Level 4 - Dump system group's event settings (Ie the instance wide events)

Oracle 10g / 11g and above:
oradebug eventdump session;
oradebug eventdump process;
oradebug eventdump system;
Note: the argument (session/process/system) needs to be specified in lowercase!

For current Oracle versions, e.g. 11g, you can also use the following commands
to list the set events in a trace file:
SQL> alter session set events 'immediate eventdump(session)';
SQL> alter session set events 'immediate eventdump(process)';
SQL> alter system set events 'immediate eventdump(system)';
SESSION - Dump session group's event settings
PROCESS - Dump process group's event settings
SYSTEM - Dump system group's event settings(Ie the instance wide events)

Enabling or disabling a system-wide trace does not take effect for already-connected sessions; those sessions must be queried and set individually.

 

SYS@AS SYSDBA bip> oradebug eventdump system;
60 trace name ERRORSTACK level 3
10511 trace name CONTEXT level 1, forever
SYS@AS SYSDBA bip> oradebug eventdump process;
10511 trace name CONTEXT level 1, forever
942 trace name ERRORSTACK level 3
600 trace name ERRORSTACK level 3
SYS@AS SYSDBA bip> oradebug eventdump session;
600 trace name ERRORSTACK level 3
942 trace name ERRORSTACK level 3
10511 trace name CONTEXT level 1, forever

SYS@AS SYSDBA bip> oradebug eventdump system;
10511 trace name CONTEXT level 1, forever
SYS@AS SYSDBA bip> oradebug eventdump process;
10511 trace name CONTEXT level 1, forever
942 trace name ERRORSTACK level 3
600 trace name ERRORSTACK level 3
SYS@AS SYSDBA bip> oradebug eventdump session;
600 trace name ERRORSTACK level 3
942 trace name ERRORSTACK level 3
10511 trace name CONTEXT level 1, forever


3. As we can see from this bug, a lot of diagnostics are needed to confirm it:
@ - Please confirm the plan below is what we should give the customer, I've
@ included the commands needed to get the information. It is an expansion of
@ your steps #1,2,3 above:
@ .
@ 1. Configure before the problem reproduces ( in LGWR of each instance)
@
@ Set event 10898 to level 200000 (200 msec, or 200000 micro sec)
@
@ SQL> oradebug setorapname LGWR
@ SQL> oradebug event 10898 trace name context forever, level 200000
@ .
@ - After the problem and after remaining diagnostics have been collected it
@ can be reset on each instance:
@ .
@ SQL> oradebug setorapname LGWR
@ SQL> oradebug event 10898 trace name context off
@ .
@ 2. Collect when the problem reproduces :
@ .
@ 2-1. OSW, as before.
@ 2-2. AWR, as before.
@ 2-3. pstack of LGWR process and all LMS processes 8 times for each, on
@ *each* node.
@ 2-4. event 10046 level 8 of LGWR process and LMS processes for a minute, on
@ *each* instance.
@
@ - Use a different sqlplus session for each process being traced:
@ .
@ SQL> oradebug setorapname LGWR
@ SQL> oradebug event 10046 trace name context forever, level 8
@ .. wait one minute, then switch off the trace..
@ SQL> oradebug event 10046 trace name context off
@ .
@ - For each LMS process ( lms0, lms1, etc, etc)
@ .
@ SQL> oradebug setorapname lms0
@ SQL> oradebug event 10046 trace name context forever, level 8
@ .. wait one minute, then switch off the trace..
@ SQL> oradebug event 10046 trace name context off
@ .
@
@ 2-5. Collect x$kjctfs, x$kjctfr, x$kjctfri on *each* instance.
@ .
@ SQL> select * from x$kjctfs;
@ SQL> select * from x$kjctfr;
@ SQL> select * from x$kjctfri;
@ .
@ 2-6. Collect GES_STATE level 10, oradebug ipc, traffic controller info on
@ *each* instance:
@ .
@ SQL> oradebug setmypid
@ SQL> oradebug dump GES_STATE 10
@ SQL> oradebug ipc
@ SQL> oradebug lkdebug -t
@ SQL> oradebug tracefile_name --> Prints name of trace file to be
@ uploaded.
@ .
@ Note: output for 'oradebug ipc' and 'oradebug lkdebug -t' will be in
@ trace file for each process. Therefore, I need trace files for the
@ all processes.
@ .
@ .
@ 2-7. Collect event trace[GSIPC] disk high for a minute for *each*
@ instance.
@ .
@ SQL> alter system set events 'trace[GSIPC] disk high';
@ System altered.
@
@ (collect for a minute)
@ .
@ SQL> alter system set events 'trace[GSIPC] off';
@ System altered.
@ .
@ 3. processstate dump level 10 of LMS0, LMS1, LMD processes 3 times with
@ interval of 1 minute, on *each* instance.
@
@ - For example for LMS0:
@ .
@ SQL> oradebug setorapname LMS0
@ SQL> oradebug dump processstate 10
@ ..wait 1 minute..
@ SQL> oradebug dump processstate 10
@ ..wait 1 minute..
@ SQL> oradebug dump processstate 10
@ .
@ - Repeat for LMS1 + LMD0.
@ .
@ 4. Provide alert files and trace files of LMS0, LMS1, LMD, LMON, LGWR
@ processes for the *all* instances.


hanganalyze
====================================
select * from dual;
oradebug setmypid
oradebug unlimit
oradebug hanganalyze 3
oradebug tracefile_name

oradebug setinst all
oradebug -g def hanganalyze 3


systemstat
====================================
select * from dual;
oradebug setmypid
oradebug unlimit
oradebug dump systemstate 266;
oradebug tracefile_name

/************************************************************
--HANG ANALYZE:
175006.1
215858.1
--Summary Syntax:
~~~~~~~~~~~~~~
--sqlplus -prelim / as sysdba
--ALTER SESSION SET EVENTS 'immediate trace name HANGANALYZE level LL';
--EVENT="60 trace name HANGANALYZE level 5"
--ORADEBUG hanganalyze LL
--sqlplus -prelim / as sysdba

--sqlplus /nolog
-- set _prelim on
-- connect / as sysdba

Levels:
~~~~~~
--The level determines which processes on the system are asked to dump
--an errorstack. The main levels are:
--
-- 10 Dump all processes (not a good idea)
-- 5 Dump all processes involved in wait chains (can be a lot)
-- 4 Dump leaf nodes in wait chains
-- 3 Dump only processes thought to be in a hang
-- 2 Minimal output
-- 1 Very minimal output
--
************************************************************/
2 Hanganalyze and 2 Systemstate dumps.

Hanganalyze will confirm if the db is really hung or just slow.
Systemstate dump shows what each process on the database is doing.

* Beware of taking systemstates on very large systems with large numbers of processes.
Systemstates can be very slow and the trace file can be very large.

Using SQL*Plus connect as SYSDBA using the following command:

sqlplus " / as sysdba"
Do this 2 times in 2 separate windows, creating 2 sqlplus sessions (SQL1 and SQL2)
In SQL1 gather the hanganalyze by executing the following:

SQL1> oradebug setmypid
SQL1> oradebug unlimit;
SQL1> oradebug hanganalyze 5

In SQL2 gather the systemstates by executing the following:

SQL2> oradebug setmypid
SQL2> oradebug unlimit;
SQL2> oradebug dump systemstate 10

Gather a second hanganalyze after waiting at least 1 minute, to give time for process state changes to be identified.
In SQL1 execute the following:

SQL1> oradebug hanganalyze 5

In SQL2 execute the following to collect a second systemstate dump:
SQL2> oradebug setmypid
SQL2> oradebug unlimit;
SQL2> oradebug dump systemstate 10

If you are using systemstate level 266 and it is taking much longer than expected to generate the dump file,
then end this systemstate dump and try a lower level such as 258 or 10.
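
For example, a lighter-weight dump at level 258 can be taken the same way (a sketch following the commands above):

oradebug setmypid
oradebug unlimit
oradebug dump systemstate 258
oradebug tracefile_name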

If connection to the system is not possible in any form, then please refer to the following article which describes
how to collect systemstates in that situation:

sqlplus -prelim / as sysdba
/*
Using "sqlplus -prelim / as sysdba" is important: when the database is hung you
may not be able to get a SQL*Plus session with "sqlplus / as sysdba", so the
"-prelim" switch is used; it attaches directly to the database SGA without
establishing a session.
*/

Note 121779.1 Taking a SYSTEMSTATE dump when you cannot CONNECT to Oracle.

For RAC environment, you need to use
sqlplus -prelim / as sysdba
oradebug setmypid
oradebug unlimit
oradebug -g all hanganalyze 3
oradebug -g all dump systemstate 266

Event:10708
Text: Print out Tracing information for skxf multi instance comms
---------------------------------------------------------------------------
This short note describes the Oracle EVENT number 10708
Events should NEVER be set by customers unless advised to do so by
Oracle Support Services. See Note 75713.1 for more details.

Cause: When enabled, prints activity of skxf routines
Action: Level indicates details:
1: Print out tracing for each call
2: Print a line on Entry to every skxf routine
10: Include timestamps in the trace

1. Start OS Watcher if possible.
2. Find all LMS processes on all instances:
select pid from v$process where upper(program) like '%LMS%';
3. Then attach to each LMS process and turn on these trace events:
oradebug setorapid < LMS pid>
oradebug Event 10046 trace name context forever, level 12
oradebug Event 10708 trace name context forever, level 103
4. Then reproduce the problem (turn on 10046/10708 tracing for the local session first):
alter session set events '10046 trace name context forever, level 12';
alter session set events '10708 trace name context forever, level 103';
...reproduce the problem...
alter session set events '10708 trace name context off';
alter session set events '10046 trace name context off';
5. Turn off LMS tracing on all instances:
oradebug setorapid < LMS pid>
oradebug Event 10046 trace name context off
oradebug Event 10708 trace name context off

Get 10046 and 10053 trace:
SQL>connect username/password
SQL>alter session set timed_statistics = true;
SQL>alter session set statistics_level=all;
SQL>alter session set max_dump_file_size = unlimited;
SQL>alter session set events '10046 trace name context forever, level 12';
SQL><Run your SQL here;>
SQL>alter session set events '10046 trace name context off';

SQL>connect username/password
SQL>alter session set events '10053 trace name context forever, level 1';
SQL><Run your query here;>
SQL>alter session set events '10053 trace name context off';


SQL>ALTER SESSION SET EVENTS 'immediate trace name global_area level 2';
SQL>ORADEBUG DUMP GLOBAL_AREA 2;

ALTER SESSION SET EVENTS 'IMMEDIATE TRACE NAME HEAPDUMP LEVEL 31';

ALTER SESSION SET EVENTS 'immediate trace name LIBRARY_CACHE level 11';

alter system set events '60 trace name errorstack level 3';

ALTER SYSTEM SET EVENTS='1722 TRACE NAME ERRORSTACK LEVEL 3';
ALTER SYSTEM SET EVENTS='1722 TRACE NAME ERRORSTACK OFF';
ALTER SYSTEM SET EVENTS='600 trace name systemstate level 266'
ALTER SYSTEM SET EVENTS='600 trace name library_cache level 11';

alter system set events '30926 trace name errorstack level 10';

event="10511 trace name context forever, level 1" --smon dont clean offline Undo Segment

event = '10061 trace name context forever, level 10' --stop smon cleanup temporary segment 5890312

ALTER SESSION SET EVENTS='22825 TRACE NAME CONTEXT OFF'
ALTER SESSION SET EVENTS='22825 TRACE NAME CONTEXT FOREVER'

_ALLOW_ERROR_SIMULATION=TRUE
ALTER SESSION SET EVENTS '10015 TRACE NAME ADJUST_SCN LEVEL 1'; -- Note:30681.1

_minimum_giga_scn

SQL> oradebug dumpvar sga ksulomx_
ub4 ksulomx_ [3C3CA90, 3C3CA94) = 00000154

event="10706 trace name context forever, level 10"
_ksi_trace=CF


SET SERVEROUTPUT ON
DECLARE
EVENT_LEVEL NUMBER;
BEGIN
FOR EVENT_NUMBER IN 10000..10999 LOOP
SYS.DBMS_SYSTEM.READ_EV(EVENT_NUMBER, EVENT_LEVEL);
IF (EVENT_LEVEL > 0) THEN
SYS.DBMS_OUTPUT.PUT_LINE('EVENT '||TO_CHAR(EVENT_NUMBER)||' IS SET AT LEVEL '||TO_CHAR(EVENT_LEVEL));
END IF;
END LOOP;
END;
/

SET SERVEROUTPUT ON
DECLARE
err_msg VARCHAR2(120);
BEGIN
dbms_output.enable (1000000);
FOR err_num IN 10000..10999 LOOP
err_msg := SQLERRM (-err_num);
IF err_msg NOT LIKE '%Message '||err_num||' not found%' THEN
dbms_output.put_line (err_msg);
END IF;
END LOOP;
END;
/

event=10000
while [ $event -ne 10999 ]
do
event=`expr $event + 1`
oerr ora $event
done

/************************************************************
--HEAPDUMP:
1. collect info

select name, value from dba_hist_pgastat;

select vses.username||':'||vsst.sid||','||vses.serial# username, vstt.name, max(vsst.value) value
from v$sesstat vsst, v$statname vstt, v$session vses
where vstt.statistic# = vsst.statistic# and vsst.sid = vses.sid and vstt.name in
('session pga memory','session pga memory max','session uga memory','session uga memory max',
'session cursor cache count','session cursor cache hits','session stored procedure space',
'opened cursors current','opened cursors cumulative') and vses.username is not null
group by vses.username, vsst.sid, vses.serial#, vstt.name order by vses.username, vsst.sid, vses.serial#, vstt.name;

2. generate heap dump

SQL> oradebug setospid nnnnn
SQL> oradebug unlimit
SQL> oradebug dump heapdump 536870925
SQL> oradebug tracefile_name

3. Generate the trace automatically when the problem is intermittent

SQL> alter session set events '4030 trace name heapdump level 25';

- init.ora: event="4030 trace name heapdump level 25"

- spfile: issue the command: SQL> ALTER SYSTEM SET EVENT='4030 trace name heapdump level 25' scope=spfile;

4. limit pga and generate ora-600 when pga grows over limitation.

event ='10261 trace name context forever, level 500000:600 trace name heapdump level 536870917:600 trace name errorstack level 3'

536870925 = 536870912+8+4+1

Level Purpose
1 Generate heapdump of top level PGA. Under 10g this will also dump the "top uga" and "top call" heaps.
2 Generate heapdump of top level SGA
4 Generate heapdump of top level UGA
8 Generate heapdump of level Current Call Heap
16 Generate heapdump of level User Call Heap
32 Generate heapdump of large allocation Heap (large pool)
64 Generate heapdump of the streams pool heap. (New with 10g)
128 Generate heapdump of the java pool heap. (New with 10g)

With 10g we can now request that we also dump extra heap statistics by ORing the level used above with the following values :
Level Purpose
0x10000000 (268435456) Print statistics for the five largest subheaps in the heap
0x20000000 (536870912) Print statistics for the five largest subheaps in any subheap starting at the heap specified
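
As another worked example of combining the flags above (an illustration, not from the original note): a dump of the top-level SGA and the large pool, together with the five-largest-subheap statistics, uses level 536870912 + 32 + 2 = 536870946:

-- 536870912 (subheap statistics) + 32 (large pool) + 2 (SGA) = 536870946
oradebug setmypid
oradebug unlimit
oradebug dump heapdump 536870946
oradebug tracefile_name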


-- LISTENER:
TRACE_LEVEL_listener_name=16

trcasst filename.trc>[outfile]

ALTER SYSTEM SET listener_networks='((NAME=ora.ASMNET1LSNR_ASM.lsnr) (LOCAL_LISTENER="(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=10.10.10.1)(PORT=1522)))")(REMOTE_LISTENER="(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=10.10.10.1)(PORT=1522)))"))' SCOPE=MEMORY SID='+ASM1';
Wed Mar 23 01:15:35 2016
ALTER SYSTEM SET local_listener=' (ADDRESS=(PROTOCOL=TCP)(HOST=192.168.1.34)(PORT=1521))' SCOPE=MEMORY SID='+ASM1';
Wed Mar 23 01:21:17 2016
NOTE: [crsd.bin@rac122 (TNS V1-V3) 4209] opening OCR file +DATA.255.4294967295
Wed Mar 23 01:21:50 2016
ALTER SYSTEM SET listener_networks='((NAME=ora.ASMNET1LSNR_ASM.lsnr) (LOCAL_LISTENER="(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=10.10.10.1)(PORT=1522)))")(REMOTE_LISTENER="(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=10.10.10.1)(PORT=1522)))"))','((NAME=ora.ASMNET1LSNR_ASM.lsnr) (REMOTE_LISTENER="(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=10.10.10.2)(PORT=1522)))"))' SCOPE=MEMORY SID='+ASM1';
Wed Mar 23 01:22:09 2016
ALTER SYSTEM SET listener_networks='((NAME=ora.ASMNET1LSNR_ASM.lsnr) (LOCAL_LISTENER="(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=10.10.10.1)(PORT=1522)))")(REMOTE_LISTENER="(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=10.10.10.1)(PORT=1522)))"))','((NAME=ora.ASMNET1LSNR_ASM.lsnr) (REMOTE_LISTENER="(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=10.10.10.2)(PORT=1522)))"))' SCOPE=MEMORY SID='+ASM1';
Wed Mar 23 01:23:02 2016
ALTER SYSTEM SET listener_networks='((NAME=ora.ASMNET1LSNR_ASM.lsnr) (LOCAL_LISTENER="(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=10.10.10.1)(PORT=1522)))")(REMOTE_LISTENER="(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=10.10.10.1)(PORT=1522)))"))','((NAME=ora.ASMNET1LSNR_ASM.lsnr) (REMOTE_LISTENER="(DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=10.10.10.2)(PORT=1522)))"))' SCOPE=MEMORY SID='+ASM1';

-- SQLNET:
DIAG_ADR_ENABLED=off
TRACE_LEVEL_CLIENT=16/off/user/admin/support
TRACE_LEVEL_SERVER=16/off/user/admin/support

Client side: add the following lines to sqlnet.ora.
trace_level_client=16
trace_directory_client=c:\tmp ==> change this to an existing directory
trace_file_client=client
trace_unique_client=on
trace_timestamp_client=on


Server side: add the following lines to sqlnet.ora.
trace_level_server=16
trace_file_server=server
trace_directory_server=/tmp ==> change this to an existing directory
trace_timestamp_server=ON ==> Only in Oracle8i onwards


-- We have added SUBSCRIBE_FOR_NODE_DOWN_EVENT parameter to fix Bug # 5186756.
-- Trying workaround of the bug 4518443 by setting:
'SUBSCRIBE_FOR_NODE_DOWN_EVENT_<listener_name>=OFF', but problem still occurs.
SUBSCRIBE_FOR_NODE_DOWN_EVENT_<listener_name>=OFF
/*setting SUBSCRIBE_FOR_NODE_DOWN_EVENT to OFF will disable TNS listener from connecting to ONS 7580576.994 */
returning error: 28781

LOCAL_OS_AUTHENTICATION_<listener_name> = OFF
Password Not Required When Administering 10g or Newer Listeners Using Lsnrctl Utility [ID 372717.1]

--OVM
DIRECT_HANDOFF_TTC_LISTENER = OFF


2. Increase the listener QUEUESIZE parameter:
LISTENER= (DESCRIPTION=
(ADDRESS=(PROTOCOL=tcp)(HOST=sales-server)(PORT=1521) (QUEUESIZE=300)))
3. Increase the OS parameter somaxconn from 1024 to 8192, as sketched below.
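
A hedged sketch of the OS-side change on Linux (paths and defaults vary by platform):

# temporary change
sysctl -w net.core.somaxconn=8192
# persistent change: add the setting to /etc/sysctl.conf and reload
echo "net.core.somaxconn = 8192" >> /etc/sysctl.conf
sysctl -p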


/************************************************************
--EVENT NAME:
************************************************************/
SET linesize 120
SET feedback off
SET SERVEROUTPUT ON

DECLARE
err_msg VARCHAR2(120);
BEGIN
dbms_output.enable (1000000);
FOR err_num IN 10121..10121 LOOP
err_msg := SQLERRM (-err_num);
IF err_msg NOT LIKE '%Message '||err_num||' not found%' THEN
dbms_output.put_line (err_msg);
END IF;
END LOOP;
END;
/


/************************************************************
--DBMS_STATS:
************************************************************/
alter session set events '38028 trace name context forever';

Related controls:
dbms_stats.get_stats_history_retention, dbms_stats.alter_stats_history_retention
Statistics change history:
dba(all/user)_tab_stats_history
Restoring statistics:
dbms_stats.restore_table_stats
select DBMS_STATS.GET_STATS_HISTORY_RETENTION from dual;
select DBMS_STATS.GET_STATS_HISTORY_AVAILABILITY from dual;
execute DBMS_STATS.ALTER_STATS_HISTORY_RETENTION (60)

# parallel
alter system set events '38028 trace name context forever';

dbms_stats.gather_table_stats(:usr,:tab,method_opt => 'FOR ALL COLUMNS SIZE 1',cascade => TRUE,
estimate_percent => 25,granularity=>'all',degree => 4,no_invalidate => false);

DBMS_STATS.gather_table_stats(ownname => 'TEST',tabname => 'TEST' ,estimate_percent => dbms_stats.auto_sample_size,
method_opt=>'for all indexed columns size auto',cascade => TRUE, degree => 8);

exec dbms_stats.DELETE_TABLE_STATS(ownname=>'TEST',tabname=>'T');

select column_name,num_distinct,num_nulls,num_buckets,density from dba_tab_col_statistics where owner='TEST' and table_name='T'
select * from user_histograms where table_name='T';
exec dbms_stats.gather_table_stats(ownname=>'TEST', tabname=>'T', estimate_percent=>15, method_opt=>'FOR ALL COLUMNS SIZE 1', degree=>8, cascade=>TRUE);
exec dbms_stats.gather_table_stats(ownname=>'TEST', tabname=>'T', estimate_percent=>15, method_opt=>'FOR ALL INDEXED COLUMNS', degree=>8, cascade=>TRUE);
exec dbms_stats.gather_table_stats(ownname=>'TEST', tabname=>'T', estimate_percent=>15, method_opt=>'FOR COLUMNS', degree=>8, cascade=>TRUE);
exec dbms_stats.gather_table_stats(ownname=>'BIP', tabname=>'OG_USER_D01', partname=>'OG_USER_D01_20110827', estimate_percent=>1, method_opt=>'FOR COLUMNS LASTLOGINDATE SIZE 254', degree=>8);
exec dbms_stats.gather_schema_stats(ownname=>'EHRDD', estimate_percent=>5, method_opt=>'FOR ALL COLUMNS SIZE 1', degree=>8, cascade=>TRUE);

EXEC DBMS_STATS.SET_INDEX_STATS(ownname=>'BIP',INDNAME=>'IP_MASTER_INDX',NUMROWS=>100000000,NUMLBLKS=>100000000,NUMDIST=>100000000);
select num_distinct, density, column_name, table_name from user_tab_col_statistics where table_name = 'IP_MASTER';
CREATE unique INDEX IP_MASTER_INDX ON IP_MASTER(START_IP_NO,END_IP_NO) TABLESPACE TBS_OASIS;
EXEC DBMS_STATS.SET_COLUMN_STATS(ownname=>'BIP',tabname=>'IP_MASTER',COLNAME=>'START_IP_NO',DISTCNT=>100000000,DENSITY=>0.000000001);
exec dbms_scheduler.disable('GATHER_STATS_JOB');

execute DBMS_STATS.CREATE_STAT_TABLE (ownname=>'TEST',stattab=>'T14_0129',tblspace=>'SIEB_DATA');
execute DBMS_STATS.EXPORT_TABLE_STATS (ownname=>'SIEBEL',tabname=>'S_PROD_LN',stattab=>'T14_0129',statid=>'S_PROD_LN_0129');

SET SERVEROUT ON
DECLARE VAL VARCHAR2(200);
BEGIN
VAL := DBMS_STATS.GET_PARAM('METHOD_OPT');
DBMS_OUTPUT.PUT_LINE(VAL);
END;
/

 

 

SELECT DBMS_STATS.GET_PARAM('METHOD_OPT') FROM DUAL;
SELECT DBMS_STATS.GET_PARAM('ESTIMATE_PERCENT') FROM DUAL;
SELECT DBMS_STATS.GET_PARAM('DEGREE') FROM DUAL;
SELECT DBMS_STATS.GET_PARAM('GRANULARITY') FROM DUAL;


EXEC DBMS_STATS.SET_PARAM(PNAME=>'METHOD_OPT',PVAL=>'FOR ALL INDEXED COLUMNS SIZE 1');
EXEC DBMS_STATS.SET_PARAM(PNAME=>'ESTIMATE_PERCENT',PVAL=>1);
EXEC DBMS_STATS.SET_PARAM(PNAME=>'DEGREE',PVAL=>8);
EXEC DBMS_STATS.SET_PARAM(PNAME=>'NO_INVALIDATE',PVAL=>'FALSE');
EXEC DBMS_STATS.SET_PARAM(PNAME=>'NO_INVALIDATE',PVAL=>'TRUE');
EXEC DBMS_STATS.SET_PARAM(PNAME=>'NO_INVALIDATE',PVAL => DBMS_STATS.AUTO_INVALIDATE);
EXEC DBMS_STATS.SET_PARAM(PNAME=>'GRANULARITY',PVAL => 'PARTITION');

Doc ID: 725845.1 How to Change Default Parameters for Gathering Statistics
Doc ID: 785110.1 Bad Plan Still Persistent even after Gathering New Fresh Statistics

SELECT NUM_ROWS,AVG_ROW_LEN,BLOCKS,EMPTY_BLOCKS,AVG_SPACE
FROM DBA_TABLES WHERE TABLE_NAME = ? ;

COL LOW_VALUE FOR A20
COL HIGH_VALUE FOR A20
COL DATA_TYPE FOR A10
COL TABLE_NAME FOR A10
COL COLUMN_NAME FOR A10
SELECT TABLE_NAME,COLUMN_NAME,DATA_TYPE,DATA_LENGTH,NUM_DISTINCT,LOW_VALUE,HIGH_VALUE,DENSITY,AVG_COL_LEN
FROM DBA_TAB_COLUMNS WHERE TABLE_NAME= ? AND COLUMN_NAME= ? ;

SELECT NUM_ROWS,LEAF_BLOCKS,DISTINCT_KEYS,AVG_LEAF_BLOCKS_PER_KEY,AVG_DATA_BLOCKS_PER_KEY,CLUSTERING_FACTOR
FROM DBA_INDEXES WHERE INDEX_NAME= ? ;

SELECT PNAME, PVAL1 FROM SYS.AUX_STATS$ WHERE SNAME = 'SYSSTATS_MAIN';


--11g
DBMS_STATS.SET_GLOBAL_PREFS
DBMS_STATS.SET_TABLE_PREFS
DBMS_STATS.GET_PREFS

exec dbms_stats.set_table_prefs(null,'EMP','STALE_PERCENT',1);
exec dbms_stats.set_table_prefs(null,'EMP','STALE_PERCENT',null);
select dbms_stats.get_prefs('STALE_PERCENT',null,'EMP') from dual;
Metalink Note:390737.1


Incremental statistics gathering
Make sure the following preferences are set globally or on the large partitioned tables:
INCREMENTAL=TRUE
PUBLISH=TRUE
GRANULARITY=AUTO
1. Table-level setting
exec DBMS_STATS.SET_TABLE_PREFS('TEST','TEST','INCREMENTAL','TRUE');
select dbms_stats.get_prefs('INCREMENTAL', ownname=>'TEST', tabname=>'TEST') from dual;
2. Global-level setting
exec dbms_stats.set_GLOBAL_prefs('INCREMENTAL','TRUE');
select dbms_stats.get_prefs('INCREMENTAL') from dual;

SQL> select dbms_stats.get_prefs('PUBLISH') from dual;

DBMS_STATS.GET_PREFS('PUBLISH')
--------------------------------------------------------------------------------
TRUE

SQL> select dbms_stats.get_prefs('GRANULARITY') from dual;

DBMS_STATS.GET_PREFS('GRANULARITY')
--------------------------------------------------------------------------------
AUTO

 

exec dbms_stats.set_table_prefs('Schema_name','Table_name','ESTIMATE_PERCENT',1);
SELECT DBMS_STATS.GET_PREFS ('ESTIMATE_PERCENT','TEST','TEST') FROM DUAL;
exec dbms_stats.set_global_prefs('ESTIMATE_PERCENT',1);
SELECT DBMS_STATS.GET_PREFS ('ESTIMATE_PERCENT') FROM DUAL;
SELECT DBMS_STATS.GET_PREFS ('PUBLISH') FROM DUAL;

The automated maintenance tasks infrastructure (known as AutoTask) schedules tasks to run automatically in Oracle Scheduler windows
known as maintenance windows. By default, one window is scheduled for each day of the week. Automatic optimizer statistics collection
runs as part of AutoTask and is enabled by default to run in all predefined maintenance windows.
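
A quick check of which maintenance windows the statistics task will run in (a sketch, assuming the 11g DBA_AUTOTASK_WINDOW_CLIENTS view is available):

select window_name, autotask_status, optimizer_stats from dba_autotask_window_clients;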

If for some reason automatic optimizer statistics collection is disabled, then you can enable it using the ENABLE procedure in the
DBMS_AUTO_TASK_ADMIN package:

BEGIN
DBMS_AUTO_TASK_ADMIN.ENABLE(
client_name => 'auto optimizer stats collection',
operation => NULL,
window_name => NULL);
END;
/

When you want to disable automatic optimizer statistics collection, you can disable it using the DISABLE procedure in the DBMS_AUTO_TASK_ADMIN package:

BEGIN
DBMS_AUTO_TASK_ADMIN.DISABLE(
client_name => 'auto optimizer stats collection',
operation => NULL,
window_name => NULL);
END;
/

select client_name,status from dba_autotask_client;

BEGIN
dbms_auto_task_admin.enable(client_name=>'auto optimizer stats collection',operation=>NULL,window_name=>NULL);
dbms_auto_task_admin.enable(client_name=>'auto space advisor',operation=>NULL,window_name=>NULL);
dbms_auto_task_admin.enable(client_name=>'sql tuning advisor',operation=>NULL,window_name=>NULL);
END;
/


DECLARE
job BINARY_INTEGER := :job;
next_date TIMESTAMP
WITH TIME ZONE := :mydate;
broken BOOLEAN := FALSE;
job_name VARCHAR2(30) := :job_name;
job_subname VARCHAR2(30) := :job_subname;
job_owner VARCHAR2(30) := :job_owner;
job_start TIMESTAMP
WITH TIME ZONE := :job_start;
job_scheduled_start TIMESTAMP
WITH TIME ZONE := :job_scheduled_start;
window_start TIMESTAMP
WITH TIME ZONE := :window_start;
window_end TIMESTAMP
WITH TIME ZONE := :window_end;
chain_id VARCHAR2(14) := :chainid;
credential_owner VARCHAR2(30) := :credown;
credential_name VARCHAR2(30) := :crednam;
destination_owner VARCHAR2(30) := :destown;
destination_name VARCHAR2(30) := :destnam;
job_dest_id VARCHAR2(14) := :jdestid;
log_id NUMBER := :log_id;
BEGIN
BEGIN
dbms_stats.gather_schema_stats( ownname=> '"SGPMS_LN"' , CASCADE=> TRUE, estimate_percent=> NULL, degree=> 100, no_invalidate=> FALSE, granularity=> 'ALL', method_opt=> 'FOR ALL COLUMNS SIZE AUTO', OPTIONS=> 'GATHER');
END;
:mydate := next_date;
IF broken THEN
:b := 1;
ELSE
:b := 0;
END IF;
END;

How to Change Default Parameters for Gathering Statistics in Oracle 11g [ID 1493227.1]
How to Gather Optimizer Statistics on 11g [ID 749227.1]
How to Disable Automatic Statistics Collection in 11g [ID 1056968.1]
How To Collect Statistics On Partitioned Table in 10g and 11g [ID 1417133.1]
How to Use Pending Statistics [ID 1456776.1]

Begin
DBMS_STATS.SET_GLOBAL_PREFS('CONCURRENT','TRUE');
End;
/

exec dbms_stats.set_global_prefs('ESTIMATE_PERCENT',0.0001);
select dbms_stats.get_prefs ('ESTIMATE_PERCENT') from dual;
SQL> select dbms_stats.GET_PARAM ('AUTOSTATS_TARGET') from dual;

DBMS_STATS.GET_PARAM('AUTOSTATS_TARGET')
--------------------------------------------------------------------------------
AUTO

exec dbms_stats.set_table_prefs(ownname=>'TEST',tabname=>'TEST',pname=>'ESTIMATE_PERCENT',pvalue=>0.01);
select dbms_stats.get_prefs (ownname=>'TEST',tabname=>'TEST',pname=>'ESTIMATE_PERCENT') from dual;
select * from dba_tab_stat_prefs where owner='TEST' and table_name='TEST';

Lock statistics on tables whose data grows linearly
exec dbms_stats.lock_table_stats(ownname=>'TEST',tabname=>'TEST');
select table_name,stattype_locked from dba_tab_statistics where owner='TEST' and stattype_locked='ALL';

Lock statistics on partitions whose data grows linearly
exec dbms_stats.lock_partition_stats(ownname => 'TEST', tabname => 'TEST', partname => 'P01');
select table_name,partition_name,stattype_locked from dba_tab_statistics where owner='TEST' and table_name ='TEST' and partition_name ='P01';


/************************************************************
--AGE OUT A PARTICULAR CURSOR:
************************************************************/
GRANT SELECT ON T.TEST TO OTHER_USER;
REVOKE SELECT ON T.TEST FROM OTHER_USER;
DBMS_STATS.SET_TABLE_STATS (ownname=>'T',tabname=>'TEST',no_invalidate=>FALSE);
dbms_shared_pool.purge('address, hash_value','C',0)

/************************************************************
--LOGMINER:
************************************************************/
SELECT DECODE(REQUEST,0,'HOLDER:','WAITER:')||SID SID,ID1,ID2,LMODE,REQUEST,TYPE
FROM V$LOCK
WHERE (ID1,ID2,TYPE) IN
(SELECT ID1,ID2,TYPE
FROM V$LOCK
WHERE REQUEST>0)
ORDER BY ID1,REQUEST;

EXECUTE DBMS_LOGMNR.ADD_LOGFILE(LOGFILENAME => '/crmdb_arch1/crmdb_1_536_693541790.dbf',OPTIONS => DBMS_LOGMNR.NEW);
EXECUTE DBMS_LOGMNR.START_LOGMNR(OPTIONS =>DBMS_LOGMNR.DICT_FROM_ONLINE_CATALOG);
SELECT USERNAME,SQL_REDO,SQL_UNDO FROM V$LOGMNR_CONTENTS WHERE USERNAME='TEST' AND OPERATION='DELETE';

--simple
begin
dbms_logmnr.add_logfile(logfilename=>'/u01/app/oracle/tita/fra/TITA/archivelog/2011_11_18/o1_mf_1_145_7dd32yc8_.arc',options=>dbms_logmnr.new);
end;
/

begin
dbms_logmnr.start_logmnr(options=>16);
end;
/

select username,os_username,machine_name,session_info,sql_redo,timestamp from v$logmnr_contents;

 


/************************************************************
--SPATIAL:
************************************************************/
start ?/md/admin/mdinst.sql

select comp_id,version,status from dba_registry where comp_id in ('JAVAVM','ORDIM','XDB');

select comp_id, control, schema, version, status, comp_name
from dba_registry
where comp_id='SDO';

execute validate_sdo;

select object_name, object_type, status from dba_objects
where owner='MDSYS' and status <> 'VALID'
order by object_name;

/************************************************************
--DATE FUNCTION:
************************************************************/
SELECT SCN_TO_TIMESTAMP(ORA_ROWSCN) FROM TEST;

SELECT NUMTODSINTERVAL(600, 'MINUTE') FROM DUAL;

SELECT TO_CHAR(SYSDATE,'CC WW W D DDD YYYY/MM/DD') FROM DUAL;
where CC is the century,
WW is the week of the year,
W is the week of the month,
D is the day of the week,
DDD is the day of the year


/************************************************************
--SINGLE NODE OR RAC:
************************************************************/
set serveroutput on

declare
inst_tab dbms_utility.instance_table;
inst_cnt NUMBER;
begin
if dbms_utility.is_cluster_database then
dbms_utility.active_instances(inst_tab, inst_cnt);
dbms_output.put_line('-' || inst_tab.FIRST);
dbms_output.put_line(TO_CHAR(inst_cnt));
else
dbms_output.put_line('Not A Clustered Database');
end if;
end;
/

select dbms_utility.port_string from dual;
DBMS_FEATURE_USAGE_REPORT


/************************************************************
--DATAGUARD:
************************************************************/
The most reliable method to assess ARCH performance is to set log_archive_trace to level 128 to
get additional messages printed to the alert log.
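
For example (a minimal sketch; on RAC set it on every instance and reset it afterwards):

alter system set log_archive_trace=128;   -- enable the additional ARCH tracing described above
alter system set log_archive_trace=0;     -- turn it off again once the diagnostics are collected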
_LOG_ARCHIVE_CALLOUT='LOCAL_FIRST=TRUE'

SQL> show parameter standby_file_management

NAME TYPE VALUE
------------------------------------ ----------- ------------------------------
standby_file_management string MANUAL

Alter database create datafile '<new data file under a directory on the primary db>' as '<new data file under a directory on the standby>';
SQL> alter database create datafile '/u01/product/oracle/dbs/UNNAMED00010' as '/data/qis/log_data03.dbf';


alter system set log_archive_dest_2 = 'service="(DESCRIPTION=(ADDRESS_LIST=(ADDRESS=(PROTOCOL=tcp)(HOST=asalkar-pc)
(PORT=1521))(CONNECT_DATA=(SERVICE_NAME=drcl_XPT)(INSTANCE_NAME=drcl)(SERVER=dedicated)))"',
' ARCH SYNC NOAFFIRM delay=0 OPTIONAL max_failure=0 max_connections=5 reopen=300 db_unique_name="drcl"
register net_timeout=180 valid_for=(online_logfile,primary_role)'

--DELETE DATAGUARD ARCHIVELOG:
#!/bin/sh
applied_days=2
cat /dev/null>applied_arch.lst
sqlplus -s "/ as sysdba"<<EOF>/dev/null
set feedback off
set pages 0
set head off
set timing off
set echo off
spool applied_arch.lst
select 'rm -f '||name from v\$archived_log
where DEST_ID=2 and name like '%.dbf'
and SEQUENCE#<(select max(SEQUENCE#) from v\$archived_log where applied='YES')
and COMPLETION_TIME<=sysdate-${applied_days};
spool off;
exit
EOF

## Exec the shell
cat /dev/null>applied_arch.sh
cat applied_arch.lst |grep -v spooling>applied_arch.sh
chmod u+x applied_arch.sh
sh applied_arch.sh
chmod -x applied_arch.sh
mv applied_arch.sh ./log/rm_appl_arc_`date +"%Y%m%d%H%M"`.log

rm -f applied_arch.lst

##remove expired archive log from database
/data/oracle9/product/9.2.0/bin/rman target / nocatalog<<EOF>/dev/null
crosscheck archivelog all;
delete noprompt expired archivelog all;
exit
EOF

--11g:
Standby Database SQL> alter session set STANDBY_MAX_DATA_DELAY=0;

create or replace TRIGGER cmc_taf_service_trg_startup
after startup on database
begin
cmc_taf_service_proc;
end;
/

create or replace TRIGGER cmc_taf_manage_trg_rolechange
after db_role_change on database
begin
cmc_taf_service_proc;
end;
/


/************************************************************
--AWR:
************************************************************/
declare
i number;
cursor c is
select to_char(s.startup_time, 'dd Mon "at" HH24:mi:ss') instart_fmt,
di.instance_name inst_name,
di.instance_number instance_number,
di.db_name db_name,
di.dbid dbid,
lag(s.snap_id, 1, 0) over(partition by di.instance_number order by s.snap_id) begin_snap_id,
s.snap_id end_snap_id,
to_char(s.begin_interval_time, 'yyyymmddhh24mi') beginsnapdat,
to_char(s.end_interval_time, 'yyyymmddhh24mi') endsnapdat,
s.snap_level lvl
from dba_hist_snapshot s,
dba_hist_database_instance di,
gv$instance i,
v$database d
where s.dbid = d.dbid
and di.dbid = d.dbid
and s.instance_number = i.instance_number
and di.instance_number = i.instance_number
and di.dbid = s.dbid
and di.instance_number = s.instance_number
and di.startup_time = s.startup_time
and to_char(s.begin_interval_time, 'yyyymmdd hh24:mi:ss') >=
'20140120 08:00:00'
and to_char(s.begin_interval_time, 'yyyymmdd hh24:mi:ss') <=
'20140120 10:00:00' --end of the time range for which reports are generated
order by di.db_name, i.instance_name, s.snap_id;
begin
i := 0;
for c1 in c loop
if i = 0 then
dbms_output.put_line('set heading off');
dbms_output.put_line('set pages 100000');
dbms_output.put_line('set linesize 1500');
dbms_output.put_line('set trimspool on');
dbms_output.put_line('set trimout on');
dbms_output.put_line('set term off');
dbms_output.put_line('set verify off');
dbms_output.put_line('set feedback off');
end if;
if c1.begin_snap_id > 0 then
dbms_output.put_line('spool D:\awr\' || c1.inst_name || '_' ||
c1.begin_snap_id || '_' || c1.end_snap_id || '_' ||
c1.beginsnapdat || '_' || c1.endsnapdat ||
'.html');
dbms_output.put_line('select output from table(dbms_workload_repository.awr_report_html( ' ||
c1.dbid || ',' || c1.instance_number || ',' ||
c1.begin_snap_id || ',' || c1.end_snap_id ||
',0 ));');
dbms_output.put_line('spool off');
end if;
i := i + 1;
end loop;
end;
/

awr snapshot
======================================
exec dbms_workload_repository.create_snapshot;
exec dbms_scheduler.disable('SYS.GATHER_STATS_JOB');
exec dbms_scheduler.enable('SYS.GATHER_STATS_JOB');

alter system set "_optimizer_autostats_job"=false scope=spfile;
alter system set "_optimizer_autostats_job"=true scope=spfile;

SELECT max(snap_id) FROM WRM$_SNAPSHOT;
select snap_interval, retention from dba_hist_wr_control;

begin
dbms_workload_repository.modify_snapshot_settings (
interval => 60,
retention => 10*24*60
);
end;
/

dbms_workload_repository.drop_snapshot_range(low_snap_id => , high_snap_id => , dbid => );

execute dbms_workload_repository.create_baseline (start_snap_id =>1,end_snap_id =>100, baseline_name => 'baseline name', dbid => yourdbid);

exec DBMS_SWRF_INTERNAL.AWR_EXTRACT(dmpfile => 'awr.dmp',dmpdir => 'TMP_DIR',bid => 3, eid => 5);
$ORACLE_HOME/rdbms/admin/awrextr.sql

exec DBMS_SWRF_INTERNAL.AWR_LOAD(SCHNAME => 'AWR',dmpfile => 'awr.dmp', dmpdir => 'TMP_DIR');
exec DBMS_SWRF_INTERNAL.MOVE_TO_AWR(SCHNAME => 'AWR');
$ORACLE_HOME/rdbms/admin/awrload.sql

select * from table(DBMS_WORKLOAD_REPOSITORY.AWR_REPORT_TEXT(dbid,inst_id,b_id,e_id));


/************************************************************
--SCHEDULERS:
************************************************************/
SELECT OCCUPANT_NAME,MOVE_PROCEDURE FROM V$SYSAUX_OCCUPANTS;

BEGIN
DBMS_SCHEDULER.SET_ATTRIBUTE('AUTO_SPACE_ADVISOR_JOB','LOGGING_LEVEL',DBMS_SCHEDULER.LOGGING_FULL);
END;
/

BEGIN
DBMS_SCHEDULER.SET_ATTRIBUTE('AUTO_SPACE_ADVISOR_JOB','AUTO_DROP',FALSE);
END;
/

BEGIN
DBMS_SCHEDULER.SET_SCHEDULER_ATTRIBUTE ('log_history','90');
END;
/

SELECT OWNER, JOB_NAME, JOB_CLASS, OPERATION, ADDITIONAL_INFO FROM USER_SCHEDULER_JOB_LOG;

select schema_user,job,last_date,last_sec,this_sec,next_date,next_sec,total_time,broken,interval,what,instance
from dba_jobs;

--CHANGE INSTANCE:
SQL> SHOW USER
USER is "SYS"
SQL> SELECT JOB, LOG_USER, INSTANCE FROM DBA_JOBS WHERE JOB = 11;
JOB LOG_USER INSTANCE
---------- ------------------------------ ----------
11 ZHEJIANG 2

SQL> EXEC DBMS_IJOB.INSTANCE(11, 1, FALSE)
PL/SQL procedure successfully completed.

SQL> SELECT JOB, LOG_USER, INSTANCE FROM DBA_JOBS WHERE JOB = 11;
JOB LOG_USER INSTANCE
---------- ------------------------------ ----------
11 ZHEJIANG 1

EXEC DBMS_SCHEDULER.CREATE_JOB (JOB_NAME => 'TEST_JOB', JOB_TYPE => 'PLSQL_BLOCK', JOB_ACTION => 'BEGIN P_TEST; END;', START_DATE => SYSTIMESTAMP, REPEAT_INTERVAL => 'FREQ=MINUTELY; BYSECOND=0', ENABLED => TRUE);
EXEC DBMS_SCHEDULER.RUN_JOB (JOB_NAME => 'TEST_FULL_JOB_DEFINITION',USE_CURRENT_SESSION => FALSE);
EXEC DBMS_SCHEDULER.STOP_JOB (JOB_NAME => 'TEST_FULL_JOB_DEFINITION, TEST_PROG_SCHED_JOB_DEFINITION');
EXECUTE DBMS_SCHEDULER.SET_ATTRIBUTE('WEEKNIGHT_WINDOW','DURATION',INTERVAL '10' HOUR);
EXECUTE DBMS_SCHEDULER.SET_ATTRIBUTE('AUTO_TASKS_JOB_CLASS','SYSTEM',TRUE);
EXECUTE DBMS_SCHEDULER.SET_ATTRIBUTE('JOB_CLASS','SERVICE',VALUE);
EXECUTE DBMS_SCHEDULER.SET_ATTRIBUTE('WEEKNIGHT_WINDOW','REPEAT_INTERVAL','FREQ=DAILY;BYDAY=MON, TUE, WED, THU, FRI;BYHOUR=22;BYMINUTE=0;BYSECOND=0');
EXECUTE DBMS_SCHEDULER.SET_ATTRIBUTE('WEEKEND_WINDOW','REPEAT_INTERVAL','FREQ=DAILY;BYDAY=SAT;BYHOUR=22;BYMINUTE=0;BYSECOND=0');

EXEC DBMS_SCHEDULER.OPEN_WINDOW (WINDOW_NAME => '"SYS"."WEEKNIGHT_WINDOW"',DURATION => INTERVAL '10' MINUTE,FORCE => TRUE);
EXEC DBMS_SCHEDULER.CLOSE_WINDOW (WINDOW_NAME => '"SYS"."WEEKNIGHT_WINDOW"');
EXECUTE DBMS_SCHEDULER.OPEN_WINDOW('WEEKNIGHT_WINDOW', NULL, FALSE);
EXECUTE DBMS_SCHEDULER.CLOSE_WINDOW('WEEKEND_WINDOW');

SELECT * FROM DBA_SCHEDULER_GLOBAL_ATTRIBUTE WHERE ATTRIBUTE_NAME = 'CURRENT_OPEN_WINDOW';
UPDATE SYS.SCHEDULER$_GLOBAL_ATTRIBUTE SET VALUE = '' WHERE OBJ# = (SELECT OBJ# FROM OBJ$ WHERE NAME = 'CURRENT_OPEN_WINDOW');
COMMIT;

EXEC DBMS_SCHEDULER.ENABLE(NAME=>'"SYS"."WEEKNIGHT_WINDOW"');
EXEC DBMS_SCHEDULER.DISABLE(NAME=>'"SYS"."WEEKNIGHT_WINDOW"', FORCE=>TRUE);

EXEC DBMS_SCHEDULER.CREATE_WINDOW (WINDOW_NAME => 'TEST_WINDOW',RESOURCE_PLAN => NULL,START_DATE => SYSTIMESTAMP,REPEAT_INTERVAL => 'FREQ=HOURLY; BYMINUTE=0',END_DATE => NULL,DURATION => INTERVAL '60' MINUTE,WINDOW_PRIORITY => 'LOW',COMMENTS => 'WINDOW WITH AN INLINE SCHEDULE.');
BEGIN
DBMS_SCHEDULER.CREATE_WINDOW(
WINDOW_NAME=>'WEEKNIGHT_WINDOW',
RESOURCE_PLAN=>NULL,
REPEAT_INTERVAL=>'FREQ=DAILY;BYDAY=MON,TUE,WED,THU,FRI;BYHOUR=3;BYMINUTE=0; BYSECOND=0',
DURATION=>INTERVAL '480' MINUTE,
COMMENTS=>'WEEKNIGHT WINDOW FOR MAINTENANCE TASK');
END;
/

EXEC DBMS_SCHEDULER.DROP_WINDOW (WINDOW_NAME=>'"SYS"."WEEKNIGHT_WINDOW"', FORCE=>TRUE);

EXEC DBMS_SCHEDULER.SET_ATTRIBUTE(NAME=>'WEEKEND_WINDOW',ATTRIBUTE=>'START_DATE',VALUE=>SYSDATE+10)

EXEC DBMS_SCHEDULER.PURGE_LOG (LOG_HISTORY => 10,WHICH_LOG => 'JOB_AND_WINDOW_LOG',JOB_NAME => 'TEST_FULL_JOB_DEFINITION');

 

/************************************************************
--RESOURCE PLAN:
************************************************************/
select * from V$RSRC_PLAN_HISTORY order by SEQUENCE#;


/************************************************************
--ASM:
************************************************************/
select TYPE from v$asm_diskgroup;
select name, header_status, mount_status, state, path from v$asm_disk;
select name,FAILGROUP from v$asm_disk;

select disk_kffxp disk#,
XNUM_KFFXP extent#,
case lxn_kffxp
when 0 then 'Primary Copy'
when 1 then 'Mirrored Copy'
when 2 then '2nd Mirrored Copy or metadata'
else 'Unknown' END TYPE
from x$kffxp
--where
--number_kffxp=295
--and xnum_kffxp!=65534
order by 2;

On SLES10:
# /etc/init.d/boot.udev stop
# /etc/init.d/boot.udev start

On RHEL5/OEL5/OL5:
# /sbin/udevcontrol reload_rules
# /sbin/start_udev
On RHEL6/OL6:
#/sbin/udevadm control --reload-rules
#/sbin/start_udev

# UDEV 99-oracle-asmdevices.rules
KERNEL=="sdb1", SUBSYSTEM=="block", NAME="asm1", OWNER="grid", GROUP="dba", MODE="0660"
KERNEL=="sdc1", SUBSYSTEM=="block", NAME="asm2", OWNER="grid", GROUP="dba", MODE="0660"
KERNEL=="sdd1", SUBSYSTEM=="block", NAME="asm3", OWNER="grid", GROUP="dba", MODE="0660"
KERNEL=="sde1", SUBSYSTEM=="block", NAME="asm4", OWNER="grid", GROUP="dba", MODE="0660"
KERNEL=="sdf1", SUBSYSTEM=="block", NAME="asm5", OWNER="grid", GROUP="dba", MODE="0660"
KERNEL=="sdg1", SUBSYSTEM=="block", NAME="asm6", OWNER="grid", GROUP="dba", MODE="0660"

mkdir -p /u01/app/12.1.0/grid
mkdir -p /u01/app/grid
mkdir -p /u01/app/oracle
chown -R grid:oinstall /u01
chown oracle:oinstall /u01/app/oracle
chmod -R 775 /u01/

Shared_pool_size=128M
Db_cache_size=256M
Processes=100
Asm_power_limit=5
_asm_ausize
_asm_stripesize
create diskgroup DATA external redundancy disk
'/dev/rdsk/c3t19d16s4',
'/dev/rdsk/c3t19d17s4',
'/dev/rdsk/c3t19d18s4';
localconfig add

ksh
cd /dev
for d in `ls asm*`
do
kfed read $d > /tmp/$d.kfed
done

<ASM Oracle Home>/bin/amdu -diskstring '/dev/oracleasm/disks/*' -dump 'DR_ARCHIVE'

alter diskgroup...check all repair
alter diskgroup...check all norepair
Bug 5100163 - ASM diskgroup created in 10g can have unformatted blocks - may become unmountable (文档 ID 5100163.8)

select COMPATIBILITY,DATABASE_COMPATIBILITY from v$asm_diskgroup;

alter DISKGROUP data SET ATTRIBUTE 'COMPATIBLE.ASM' = '11.2.0.0.0';
alter DISKGROUP data SET ATTRIBUTE 'COMPATIBLE.RDBMS' = '11.2.0.0.0';
ALTER DISKGROUP data SET ATTRIBUTE 'access_control.enabled' = 'true';
ALTER DISKGROUP data SET ATTRIBUTE 'access_control.umask' = '026';

srvctl modify database -d yxscdb -a "DATA1,DATA1,DATA3"


/************************************************************
--Flex:
************************************************************/
asmcmd showclustermode
srvctl status asm -detail
srvctl config asm
srvctl modify asm -count 4

crsctl get cluster mode status
crsctl set cluster mode flex
crsctl stop crs
crsctl start crs -wait

[root@seed bin]# ./crsctl get cluster mode status
Cluster is running in "standard" mode
[grid@rac1 bin]$ ./asmcmd showclustermode
Connected to an idle instance.
ASM cluster : Flex mode disabled


[root@seed grid]# crsctl get cluster mode status
Cluster is running in "standard" mode
[grid@rac1 ~]$ asmcmd showclustermode
ASM cluster : Flex mode enabled

/************************************************************
--AUDIT:
************************************************************/
DB/TRUE: enable auditing and store the audit records in the database table SYS.AUD$
OS: enable auditing and write the audit records to operating-system audit files; this requires setting the AUDIT_FILE_DEST parameter
DB_EXTENDED: same as DB/TRUE, but additionally populates the SQLBIND and SQLTEXT columns of AUD$
NONE/FALSE: disable auditing
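
For example (a minimal sketch; AUDIT_TRAIL is a static parameter, so a restart is needed):

alter system set audit_trail=DB scope=spfile;   -- or DB_EXTENDED to also capture SQL text and binds
shutdown immediate
startup
show parameter audit_trail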

SQLPLUS> @$ORACLE_HOME/rdbms/admin/cataudit.sql

select * from sys.aud$;
select * from all_def_audit_opts;
select * from dba_obj_audit_opts;
select * from dba_stmt_audit_opts;
select username,priv_used,ses_actions from dba_audit_object;
SELECT action_name,obj_name,sessionid,username,ses_actions,timestamp from dba_audit_trail;

The SES_ACTIONS column is a session summary: a string of 16 characters, one for each action type in the order ALTER, AUDIT,
COMMENT, DELETE, GRANT, INDEX, INSERT, LOCK, RENAME, SELECT, UPDATE, REFERENCES, EXECUTE, READ. (Positions 15 and 16 are
reserved for future use.) The characters are: - for none, S for success, F for failure, and B for both.
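
A minimal sketch of decoding individual action flags from SES_ACTIONS by position (positions follow the order listed above, so 4 = DELETE, 7 = INSERT, 10 = SELECT, 11 = UPDATE):

SELECT username, obj_name,
       SUBSTR(ses_actions, 4, 1)  AS del_flag,   -- '-', 'S', 'F' or 'B'
       SUBSTR(ses_actions, 7, 1)  AS ins_flag,
       SUBSTR(ses_actions, 10, 1) AS sel_flag,
       SUBSTR(ses_actions, 11, 1) AS upd_flag
  FROM dba_audit_object;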

If auditing was set up BY SESSION, the ACTION_NAME appears as 'SESSION REC' in each case, which shows
that auditing is being done at the session level.

If auditing was set at the access level, we see a listing of every insert and delete, along with the timestamp
of the action, even though several inserts were performed on the same table in the same session.

You can see that you get much more detail with BY ACCESS auditing. But, of course, there is a price to pay. Every audited
action adds a row to the AUD$ table (which DBA_AUDIT_TRAIL is a view of), and this table will get huge if there is a lot of
audited activity. So, it will need to be watched and probably pruned occasionally.
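
One hedged way to prune the standard audit trail on 11g and later is the DBMS_AUDIT_MGMT package (a sketch; the 90-day retention here is only an example):

BEGIN
  -- one-time initialization of audit-trail cleanup
  DBMS_AUDIT_MGMT.INIT_CLEANUP(
    audit_trail_type         => DBMS_AUDIT_MGMT.AUDIT_TRAIL_AUD_STD,
    default_cleanup_interval => 24);
  -- only rows older than this timestamp will be purged
  DBMS_AUDIT_MGMT.SET_LAST_ARCHIVE_TIMESTAMP(
    audit_trail_type  => DBMS_AUDIT_MGMT.AUDIT_TRAIL_AUD_STD,
    last_archive_time => SYSTIMESTAMP - INTERVAL '90' DAY);
  DBMS_AUDIT_MGMT.CLEAN_AUDIT_TRAIL(
    audit_trail_type        => DBMS_AUDIT_MGMT.AUDIT_TRAIL_AUD_STD,
    use_last_arch_timestamp => TRUE);
END;
/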


--CHANGE DEFAULT TABLESPACE(SYSTEM)
sql>connect / as sysdba;
sql>alter table aud$ move tablespace <new tablespace>;
sql>alter index I_aud1 rebuild online tablespace <new tablespace>;
SQL> alter table audit$ move tablespace <new tablespace>;
SQL> alter index i_audit rebuild online tablespace <new tablespace>;
SQL> alter table audit_actions move tablespace <new tablespace>;
SQL> alter index i_audit_actions rebuild online tablespace <new tablespace>;

SQL> connect system/manager
SQL> grant audit system to scott;
SQL> connect scott/tiger
SQL> audit session;
SQL> noaudit session;

AUDIT ROLE;
AUDIT ROLE WHENEVER SUCCESSFUL;
AUDIT ROLE WHENEVER NOT SUCCESSFUL;
AUDIT SELECT TABLE, UPDATE TABLE;
AUDIT SELECT TABLE, UPDATE TABLE BY hr, oe;
AUDIT DELETE ANY TABLE;
AUDIT CREATE ANY DIRECTORY;
AUDIT DIRECTORY;
AUDIT READ ON DIRECTORY bfile_dir;
AUDIT SELECT ON hr.employees;
AUDIT SELECT ON hr.employees WHENEVER SUCCESSFUL;
AUDIT SELECT ON hr.employees WHENEVER NOT SUCCESSFUL;
AUDIT INSERT, UPDATE ON oe.customers;
AUDIT ALL ON hr.employees_seq;
AUDIT ALTER, GRANT, INSERT, UPDATE, DELETE ON DEFAULT;
AUDIT TABLE;
AUDIT SESSION;

AUDIT INSERT ON TEST.TEST BY SESSION;
AUDIT SELECT ON TEST.TEST BY ACCESS;
AUDIT SELECT TABLE, UPDATE TABLE BY scott, blake;

AUDIT DROP ANY TABLE;
AUDIT DROP ANY TABLE BY TEST;
AUDIT CREATE VIEW;
AUDIT TRUNCATE TABLE;
AUDIT ALL STATEMENTS BY MMOG BY ACCESS WHENEVER SUCCESSFUL;


/************************************************************
--FGA:
************************************************************/
BEGIN
DBMS_FGA.ADD_POLICY (
object_schema => 'PERSONAL',
object_name => 'CMP_REIMBURSE_PAY_INSTR',
policy_name => 'AUDIT_CMP_REIMBURSE_PAY_INSTR',
audit_condition => NULL,
audit_column => 'TRANSTATS,TRXSQNB',
handler_schema => NULL,
handler_module => NULL,
enable => TRUE,
statement_types => 'UPDATE');
END;
/

BEGIN
dbms_fga.drop_policy(object_schema =>'PERSONAL',
object_name =>'CMP_REIMBURSE_PAY_INSTR',
policy_name =>'AUDIT_CMP_REIMBURSE_PAY_INSTR');
END;
/

BEGIN
DBMS_FGA.ADD_POLICY(
object_schema => 'T',
object_name => 'TT',
policy_name => 'chk_t',
audit_condition => 'sys_context(''userenv'',''session_user'')=''TT''',
enable => TRUE,
statement_types => 'INSERT, UPDATE, SELECT, DELETE',
audit_trail => DBMS_FGA.DB);
END;
/

begin
DBMS_FGA.DROP_POLICY(
object_schema => 'T',
object_name => 'T',
policy_name => 'chk_t');
end;
/

SELECT POLICY_NAME FROM DBA_AUDIT_POLICIES;
SELECT DBUID, LSQLTEXT FROM SYS.FGA_LOG$ WHERE POLICYNAME='';
select * from DBA_FGA_AUDIT_TRAIL;


/************************************************************
--ORACLE TEXT:
************************************************************/
Select pre_name, pre_object from ctx_preferences;

 

/************************************************************
--OEM AND GRID CONTROL:
************************************************************/
emca -deconfig dbcontrol db -repos drop -cluster
emca -config dbcontrol db -repos create -cluster
emctl unsecure dbconsole
emctl secure dbconsole
Enter Enterprise Manager Root password : ******
Enter a Hostname for this OMS :
emctl start dbconsole

$ORACLE_HOME\db_1\$HOSTNAME\sysman\config\emd.properties
$ORACLE_HOME\db_1\sysman\admin\supportedtzs.lst

emctl resetTZ agent
emctl config agent getTZ

agentTZRegion=Asia/Shanghai

exec mgmt_admin.cleanup_agent('kdyya1:3872');


Create an EM repository
emca -repos create
Recreate an EM repository
emca -repos recreate
Drop an EM repository
emca -repos drop
Configure Database Control for the database
emca -config dbcontrol db
Deconfigure Database Control for the database
emca -deconfig dbcontrol db
Reconfigure the Database Control ports (default port is 1158)
emca -reconfig ports
emca -reconfig ports -dbcontrol_http_port 1160
emca -reconfig ports -agent_port 3940
Set the ORACLE_SID environment variable first, then start the EM console service

Steps to configure dbconsole
emca -repos create
emca -config dbcontrol db
emctl start dbconsole
Steps to reconfigure dbconsole
emca -repos drop
emca -repos create
emca -config dbcontrol db
emctl start dbconsole


/************************************************************
--OS DIAGNOSE:
************************************************************/

*** CTUDOSE 10/30/12 02:51 pm ***
@ Therefore, the action plan will change into:
@ 1) truss/strace the startup session by using the provided script.
@ #!/bin/sh
@ while [ -z "$prc" ]; do
@ prc=`ps -f -C oracle | grep oracleSTS3 | awk '{print $2}'`
@ done
@ truss -f -o /tmp/truss.log -p $prc
@ .
@ 2. Please also truss the MMON process when it starts.
@ .
@ $ps -ef | grep ora_mmon_STS3
@ $truss -eafo /tmp/mmon.out -p <pid of mmon process for STS3 instance>
@ .
@ 3. collect the systemstate dump at level 266 for any hang seen.
@ .
@ oradebug setmypid
@ oradebug unlimit
@ oradebug -g all hanganalyze 1
@ oradebug -g all dump systemstate 267
@ <wait for 1 min>
@ oradebug -g all hanganalyze 1
@ oradebug -g all dump systemstate 267
@ <wait for 1 min>
@ oradebug -g all hanganalyze 1
@ oradebug -g all dump systemstate 267
@ .
@ 4. we should also collect the OS message logs
@ .
@ $errpt -a >> errpt.log

--WINDOWS COMMAND
for /L %G in (1,1,50) do start /b sqlplus test/test@orcl

truss -o ls.truss ls -al: trace the execution of ls -al and write the output to the file ls.truss.
strace -f -o vim.strace vim: trace vim and its child processes and write the output to the file vim.strace.
ltrace -p 234: trace an already-running process with pid 234.

truss -f -d sqlplus / as sysdba

truss -af sqlplus username/password@service

[oracle@rac1 ~]$ strace sqlplus "/ as sysdba"
execve("/home/oracle/10.2.0.4/db/bin/sqlplus", ["sqlplus", "/ as sysdba"], [/* 33 vars */]) = 0
brk(0) = 0x18dba000
mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b2c62655000
uname({sys="Linux", node="rac1", ...}) = 0
access("/etc/ld.so.preload", R_OK) = -1 ENOENT (No such file or directory)
open("/home/oracle/10.2.0.4/db/lib/tls/x86_64/libsqlplus.so", O_RDONLY) = -1 ENOENT (No such file or directory)
stat("/home/oracle/10.2.0.4/db/lib/tls/x86_64", 0x7fff48451080) = -1 ENOENT (No such file or directory)
open("/home/oracle/10.2.0.4/db/lib/tls/libsqlplus.so", O_RDONLY) = -1 ENOENT (No such file or directory)
stat("/home/oracle/10.2.0.4/db/lib/tls", 0x7fff48451080) = -1 ENOENT (No such file or directory)
open("/home/oracle/10.2.0.4/db/lib/x86_64/libsqlplus.so", O_RDONLY) = -1 ENOENT (No such file or directory)
stat("/home/oracle/10.2.0.4/db/lib/x86_64", 0x7fff48451080) = -1 ENOENT (No such file or directory)
open("/home/oracle/10.2.0.4/db/lib/libsqlplus.so", O_RDONLY) = 3
read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\0\252\2\0\0\0\0\0"..., 832) = 832
fstat(3, {st_mode=S_IFREG|0644, st_size=997409, ...}) = 0
mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b2c62656000
mmap(NULL, 2043464, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3, 0) = 0x2b2c62657000
mprotect(0x2b2c6273d000, 1048576, PROT_NONE) = 0
mmap(0x2b2c6283d000, 53248, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0xe6000) = 0x2b2c6283d000
close(3) = 0
open("/home/oracle/10.2.0.4/db/lib/libclntsh.so.10.1", O_RDONLY) = 3
read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\340F&\0\0\0\0\0"..., 832) = 832
fstat(3, {st_mode=S_IFREG|0750, st_size=21188349, ...}) = 0
mmap(NULL, 21949888, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3, 0) = 0x2b2c6284a000
mprotect(0x2b2c63a7a000, 2093056, PROT_NONE) = 0
mmap(0x2b2c63c79000, 675840, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x122f000) = 0x2b2c63c79000
mmap(0x2b2c63d1e000, 110016, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x2b2c63d1e000
close(3) = 0
open("/home/oracle/10.2.0.4/db/lib/libnnz10.so", O_RDONLY) = 3
read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\200\275\f\0\0\0\0\0"..., 832) = 832
fstat(3, {st_mode=S_IFREG|0644, st_size=3796601, ...}) = 0
mmap(NULL, 4848040, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3, 0) = 0x2b2c63d39000
mprotect(0x2b2c64028000, 1044480, PROT_NONE) = 0
mmap(0x2b2c64127000, 724992, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x2ee000) = 0x2b2c64127000
mmap(0x2b2c641d8000, 2472, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x2b2c641d8000
close(3) = 0
open("/home/oracle/10.2.0.4/db/lib/libdl.so.2", O_RDONLY) = -1 ENOENT (No such file or directory)
open("/home/oracle/10.2.0.4/db/lib/libdl.so.2", O_RDONLY) = -1 ENOENT (No such file or directory)
open("/lib/tls/x86_64/libdl.so.2", O_RDONLY) = -1 ENOENT (No such file or directory)
stat("/lib/tls/x86_64", 0x7fff48450ff0) = -1 ENOENT (No such file or directory)
open("/lib/tls/libdl.so.2", O_RDONLY) = -1 ENOENT (No such file or directory)
stat("/lib/tls", 0x7fff48450ff0) = -1 ENOENT (No such file or directory)
open("/lib/x86_64/libdl.so.2", O_RDONLY) = -1 ENOENT (No such file or directory)
stat("/lib/x86_64", 0x7fff48450ff0) = -1 ENOENT (No such file or directory)
open("/lib/libdl.so.2", O_RDONLY) = 3
read(3, "\177ELF\1\1\1\0\0\0\0\0\0\0\0\0\3\0\3\0\1\0\0\0P\332`\0004\0\0\0"..., 832) = 832
close(3) = 0
stat("/lib", {st_mode=S_IFDIR|0755, st_size=4096, ...}) = 0
open("/usr/lib/tls/x86_64/libdl.so.2", O_RDONLY) = -1 ENOENT (No such file or directory)
stat("/usr/lib/tls/x86_64", 0x7fff48450ff0) = -1 ENOENT (No such file or directory)
open("/usr/lib/tls/libdl.so.2", O_RDONLY) = -1 ENOENT (No such file or directory)
stat("/usr/lib/tls", 0x7fff48450ff0) = -1 ENOENT (No such file or directory)
open("/usr/lib/x86_64/libdl.so.2", O_RDONLY) = -1 ENOENT (No such file or directory)
stat("/usr/lib/x86_64", 0x7fff48450ff0) = -1 ENOENT (No such file or directory)
open("/usr/lib/libdl.so.2", O_RDONLY) = -1 ENOENT (No such file or directory)
stat("/usr/lib", {st_mode=S_IFDIR|0755, st_size=65536, ...}) = 0
open("/usr/local/lib/tls/x86_64/libdl.so.2", O_RDONLY) = -1 ENOENT (No such file or directory)
stat("/usr/local/lib/tls/x86_64", 0x7fff48450ff0) = -1 ENOENT (No such file or directory)
open("/usr/local/lib/tls/libdl.so.2", O_RDONLY) = -1 ENOENT (No such file or directory)
stat("/usr/local/lib/tls", 0x7fff48450ff0) = -1 ENOENT (No such file or directory)
open("/usr/local/lib/x86_64/libdl.so.2", O_RDONLY) = -1 ENOENT (No such file or directory)
stat("/usr/local/lib/x86_64", 0x7fff48450ff0) = -1 ENOENT (No such file or directory)
open("/usr/local/lib/libdl.so.2", O_RDONLY) = -1 ENOENT (No such file or directory)
stat("/usr/local/lib", {st_mode=S_IFDIR|0755, st_size=4096, ...}) = 0
open("/etc/ld.so.cache", O_RDONLY) = 3
fstat(3, {st_mode=S_IFREG|0644, st_size=145661, ...}) = 0
mmap(NULL, 145661, PROT_READ, MAP_PRIVATE, 3, 0) = 0x2b2c641d9000
close(3) = 0
open("/lib64/libdl.so.2", O_RDONLY) = 3
read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\20\16@b9\0\0\0"..., 832) = 832
fstat(3, {st_mode=S_IFREG|0755, st_size=23360, ...}) = 0
mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b2c641fd000
mmap(0x3962400000, 2109696, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3, 0) = 0x3962400000
mprotect(0x3962402000, 2097152, PROT_NONE) = 0
mmap(0x3962602000, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x2000) = 0x3962602000
close(3) = 0
open("/home/oracle/10.2.0.4/db/lib/libm.so.6", O_RDONLY) = -1 ENOENT (No such file or directory)
open("/home/oracle/10.2.0.4/db/lib/libm.so.6", O_RDONLY) = -1 ENOENT (No such file or directory)
open("/lib/libm.so.6", O_RDONLY) = 3
read(3, "\177ELF\1\1\1\0\0\0\0\0\0\0\0\0\3\0\3\0\1\0\0\0\20\344\\\0004\0\0\0"..., 832) = 832
close(3) = 0
open("/usr/lib/libm.so.6", O_RDONLY) = -1 ENOENT (No such file or directory)
open("/usr/local/lib/libm.so.6", O_RDONLY) = -1 ENOENT (No such file or directory)
open("/lib64/libm.so.6", O_RDONLY) = 3
read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0`>\200b9\0\0\0"..., 832) = 832
fstat(3, {st_mode=S_IFREG|0755, st_size=615136, ...}) = 0
mmap(0x3962800000, 2629848, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3, 0) = 0x3962800000
mprotect(0x3962882000, 2093056, PROT_NONE) = 0
mmap(0x3962a81000, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x81000) = 0x3962a81000
close(3) = 0
open("/home/oracle/10.2.0.4/db/lib/libpthread.so.0", O_RDONLY) = -1 ENOENT (No such file or directory)
open("/home/oracle/10.2.0.4/db/lib/libpthread.so.0", O_RDONLY) = -1 ENOENT (No such file or directory)
open("/lib/libpthread.so.0", O_RDONLY) = 3
read(3, "\177ELF\1\1\1\0\0\0\0\0\0\0\0\0\3\0\3\0\1\0\0\0000\210_\0004\0\0\0"..., 832) = 832
close(3) = 0
open("/usr/lib/libpthread.so.0", O_RDONLY) = -1 ENOENT (No such file or directory)
open("/usr/local/lib/libpthread.so.0", O_RDONLY) = -1 ENOENT (No such file or directory)
open("/lib64/libpthread.so.0", O_RDONLY) = 3
read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\200W\300b9\0\0\0"..., 832) = 832
fstat(3, {st_mode=S_IFREG|0755, st_size=145592, ...}) = 0
mmap(0x3962c00000, 2204528, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3, 0) = 0x3962c00000
mprotect(0x3962c16000, 2093056, PROT_NONE) = 0
mmap(0x3962e15000, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x15000) = 0x3962e15000
mmap(0x3962e17000, 13168, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x3962e17000
close(3) = 0
open("/home/oracle/10.2.0.4/db/lib/libnsl.so.1", O_RDONLY) = -1 ENOENT (No such file or directory)
open("/home/oracle/10.2.0.4/db/lib/libnsl.so.1", O_RDONLY) = -1 ENOENT (No such file or directory)
open("/lib/libnsl.so.1", O_RDONLY) = 3
read(3, "\177ELF\1\1\1\0\0\0\0\0\0\0\0\0\3\0\3\0\1\0\0\0 \301\227\0004\0\0\0"..., 832) = 832
close(3) = 0
open("/usr/lib/libnsl.so.1", O_RDONLY) = -1 ENOENT (No such file or directory)
open("/usr/local/lib/libnsl.so.1", O_RDONLY) = -1 ENOENT (No such file or directory)
open("/lib64/libnsl.so.1", O_RDONLY) = 3
read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0\240@\200i9\0\0\0"..., 832) = 832
fstat(3, {st_mode=S_IFREG|0755, st_size=114352, ...}) = 0
mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x2b2c641fe000
mmap(0x3969800000, 2194096, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3, 0) = 0x3969800000
mprotect(0x3969815000, 2093056, PROT_NONE) = 0
mmap(0x3969a14000, 8192, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x14000) = 0x3969a14000
mmap(0x3969a16000, 6832, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x3969a16000
close(3) = 0
open("/home/oracle/10.2.0.4/db/lib/libc.so.6", O_RDONLY) = -1 ENOENT (No such file or directory)
open("/home/oracle/10.2.0.4/db/lib/libc.so.6", O_RDONLY) = -1 ENOENT (No such file or directory)
open("/lib/libc.so.6", O_RDONLY) = 3
read(3, "\177ELF\1\1\1\0\0\0\0\0\0\0\0\0\3\0\3\0\1\0\0\0\320\257I\0004\0\0\0"..., 832) = 832
close(3) = 0
open("/usr/lib/libc.so.6", O_RDONLY) = -1 ENOENT (No such file or directory)
open("/usr/local/lib/libc.so.6", O_RDONLY) = -1 ENOENT (No such file or directory)
open("/lib64/libc.so.6", O_RDONLY) = 3
read(3, "\177ELF\2\1\1\0\0\0\0\0\0\0\0\0\3\0>\0\1\0\0\0p\332\1b9\0\0\0"..., 832) = 832
fstat(3, {st_mode=S_IFREG|0755, st_size=1713088, ...}) = 0
mmap(0x3962000000, 3494168, PROT_READ|PROT_EXEC, MAP_PRIVATE|MAP_DENYWRITE, 3, 0) = 0x3962000000
mprotect(0x396214c000, 2097152, PROT_NONE) = 0
mmap(0x396234c000, 20480, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_DENYWRITE, 3, 0x14c000) = 0x396234c000
mmap(0x3962351000, 16664, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_FIXED|MAP_ANONYMOUS, -1, 0) = 0x3962351000
close(3) = 0


/************************************************************
--ORACLE TOOLS:
************************************************************/
--copy
sqlplus -S /nolog >> result.log <<EOF
set heading off feedback off pagesize 0 verify off echo off arraysize 5000 copycommit 2
conn system/AZF5t5m4
copy from sgpms_cx/cxtest@yxdb to sgpm/aoeuid1@sgpms append A_RCVED_FLOW using select * from A_RCVED_FLOW
exit
EOF

select dbms_utility.data_block_address_file(16799532) "file",
dbms_utility.data_block_address_block(16799532) "block"
from dual;

--FIND THE OBJECT FROM THE SEGMENTS/EXTENTS:
SELECT SEGMENT_NAME, SEGMENT_TYPE FROM DBA_EXTENTS
WHERE FILE_ID = <file_number> and <block_number> BETWEEN BLOCK_ID
AND BLOCK_ID + BLOCKS - 1;
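--A hedged one-step variant of the two queries above (reuses the example data block address 16799532; substitute your own value):
SELECT SEGMENT_NAME, SEGMENT_TYPE
FROM DBA_EXTENTS
WHERE FILE_ID = DBMS_UTILITY.DATA_BLOCK_ADDRESS_FILE(16799532)
AND DBMS_UTILITY.DATA_BLOCK_ADDRESS_BLOCK(16799532) BETWEEN BLOCK_ID AND BLOCK_ID + BLOCKS - 1;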

SELECT DBMS_ROWID.ROWID_BLOCK_NUMBER(ROWID),DBMS_ROWID.ROWID_RELATIVE_FNO(ROWID) FROM T_DUMP;


/******expdp and impdp******/
set lines 150 pages 100 numwidth 7
col program for a38
col username for a10
col spid for a7
select to_char(sysdate,'YYYY-MM-DD HH24:MI:SS') "DATE", s.program, s.sid, s.serial#,
s.status, s.username, d.job_name, p.spid, s.serial#, p.pid
from v$session s, v$process p, dba_datapump_sessions d
where p.addr=s.paddr and s.saddr=d.saddr;

expdp system/system directory=expdp_dir network_link=rgst dumpfile=rgst.dmp logfile=rgst.log parallel=4 full=y
expdp system/system directory=expdp_dir network_link=scr dumpfile=scr.dmp logfile=scr.log parallel=4 full=y

expdp test/test directory=wkdir dumpfile=test1.dmp logfile=test1.log tables=test trace=480300
expdp test/test directory=wkdir dumpfile=test1.dmp logfile=test1.log METRICS=y
impdp \'/ as sysdba\' directory=expdir network_link=nddz schemas=og TABLE_EXISTS_ACTION=skip
impdp \'/ as sysdba\' directory=expdir network_link=nddz tables=og.GOODS_ROLELOCK_DETAIL query=og.GOODS_ROLELOCK_DETAIL:\"WHERE recdate\<to_date\(\'20110401\',\'yyyymmdd\'\)\" CONTENT=DATA_ONLY TABLE_EXISTS_ACTION=APPEND

/******ORA-00600 Troubleshooter******/
Note.561152.1 P1-914CU. Int/Pub Testing version (for ZR project) of ORA-600/7445 lookup (153788.1)
Note.153788.1 P1-914CU. Ext/Pub Troubleshoot an ORA-600 or ORA-7445 Error Using the Error Lookup Tool
Note.7445.1 P1-914CU. Ext/Pub Troubleshoot an ORA-600 or ORA-7445 Error Using the Error Lookup Tool
Note.600.1 P1-914CU. Ext/Pub Troubleshoot an ORA-600 or ORA-7445 Error Using the Error Lookup Tool

./route.pl -m 1 -v 10.2 /route/hnyyb1_ora_15575.trc

--relink:
To relink Oracle executables:
1. Verify whether ORACLE_HOME is correctly set.
2. Set LD_LIBRARY_PATH to include
$ORACLE_HOME/lib.
3. Execute the appropriate relink command:
– relink all (relinks all Oracle executables)
– relink oracle (relinks the database executables)
– relink network (relinks listener, cman, names)
– relink oemagent (relinks the Intelligent Agent)
– relink utilities (relinks sqlplus, imp, exp)
– relink ctx (relinks Oracle Text)

Using the Oracle Relink Utility
Oracle Corporation does not distribute complete executables. Instead, it distributes
individual object files, library archives of object files, and some source files which then get
“linked” at the operating system level during installation to create usable executables. This
guarantees a reliable integration with functions provided by the OS system libraries. If the
OS or Oracle object files change, then Oracle executables should be “relinked” to create
updated executables that are properly integrated with the operating system.
Relinking occurs automatically when a patch set is installed, but must be performed
manually when:
- An OS upgrade has occurred
- A change has been made to the OS system libraries. This can occur during the
application of an OS patch.
- A new install failed during the relinking phase
- An individual Oracle patch has been applied (explicit relink instructions are usually
included in the README or integrated into the patch install script)
To relink, you may use the relink utility as shown above, or manually relink with the
make command. Unlike OPatch, the relink utility does not return positive feedback. Unless
you receive an error message, the relink succeeded.


/************************************************************
--SOME UTILITIES:
************************************************************/
--materialized view:
Run $ORACLE_HOME/rdbms/admin/utlxmv.sql to create MV_CAPABILITIES_TABLE
EXEC dbms_mview.explain_mview('your mv name', '111');
SELECT capability_name, possible, related_text,msgtxt
FROM mv_capabilities_table
WHERE statement_id = '111' ORDER BY seq;

--query rewrite:
Run @?/rdbms/admin/utlxrw.sql to create REWRITE_TABLE
exec dbms_mview.explain_rewrite(query_text,'MV_C','111');
SELECT message FROM rewrite_table WHERE statement_id = '111'
ORDER BY sequence;

select distinct
t2.spid,
t1.sid,
t1.serial#,
t5.event,
t1.machine,
t4.value CPU_USAGE,
t1.osuser,
t1.username,
t1.logon_time,
t1.status,
'alter system kill session '||chr(39)||t1.sid||','||t1.serial#||chr(39)||';',
t3.hash_value,
t3.sql_text
from v$session t1,
v$process t2,
v$sql t3,
v$sesstat t4,
v$session_wait t5
where t1.paddr = t2.addr and
t1.sql_hash_value = t3.hash_value and
t1.sid = t4.sid and
t1.sql_address = t3.address and
t1.sid = t5.sid and
t1.username is not null and
t1.status = 'ACTIVE' and
t4.statistic# = 12
-- and t1.sid in (72,80)
-- and t2.spid = '4157462'
-- and t1.username = 'SYS'
-- and t5.event not like '%message%'
-- and t5.event not like '%Net%'
-- and t5.event not like '%pipe%'
order by to_number(t2.spid);


/************************************************************
--ALERT ERROR:
************************************************************/
COL REASON FORMAT A30
COL OBJECT_TYPE FORMAT A20
COL CREATION_TIME FORMAT A30
COL TIME_SUGGESTED FORMAT A30
COL SUGGESTED_ACTION FORMAT A30
SELECT OBJECT_TYPE,MESSAGE_TYPE,REASON,CREATION_TIME,TIME_SUGGESTED,SUGGESTED_ACTION FROM DBA_OUTSTANDING_ALERTS;

Block recovery occurs in the following circumstances:
> A user issues the BLOCKRECOVER command from RMAN
> A server process finds a block corrupt in memory; if it is a current block, it tries to get a good
image of the block and recover it from the current redo
> An error is reported while modifying the block and, after repeated attempts, the block still cannot be modified due to
OS-related errors or because the session limit was exceeded
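--Hedged quick check for blocks currently flagged as corrupt (the view is populated by RMAN BACKUP/BACKUP VALIDATE, so it can be empty even when corruption exists):
SELECT FILE#, BLOCK#, BLOCKS, CORRUPTION_CHANGE#, CORRUPTION_TYPE FROM V$DATABASE_BLOCK_CORRUPTION;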

 


/************************************************************
--FLASHBACK:
************************************************************/
SYS.SMON_SCN_TIME http://www.itpub.net/thread-612742-1-1.html
SELECT FLASHBACK_ON FROM V$DATABASE;
SHOW PARAMETER DB_FLASHBACK_RETENTION_TARGET;
SELECT * FROM FLASHBACK_TRANSACTION_QUERY;
SELECT * FROM V$FLASHBACK_DATABASE_STAT;
SELECT * FROM V$FLASHBACK_DATABASE_LOG;
SELECT * FROM V$RECOVERY_FILE_DEST;

FLASHBACK TABLE TABLENAME TO SCN XXX;
FLASHBACK TABLE TEST TO TIMESTAMP TO_TIMESTAMP('2005-05-07 15:00:00','YYYY-MM-DD HH24:MI:SS');
FLASHBACK TABLE TEST_DROP TO BEFORE DROP;

SELECT * FROM TEST AS OF TIMESTAMP TO_TIMESTAMP('02-MAY-07 10.55.20.000000');
SELECT * FROM TEST VERSIONS BETWEEN TIMESTAMP TO_TIMESTAMP ('06172009:17:52:00','MMDDYYYY:HH24:MI:SS') AND TO_TIMESTAMP ('06172009:18:14:00','MMDDYYYY:HH24:MI:SS');
SELECT * FROM TEST AS OF TIMESTAMP(SYSTIMESTAMP-INTERVAL'2' DAY);

BACKUP RECOVERY AREA;
/*To free space in the FRA, take a backup of the Flash Recovery Area with the BACKUP RECOVERY AREA command.
This command backs up all the files in the FRA to tape only. Afterwards, the space occupied by those files
in the FRA is marked as reclaimable*/

ALTER SYSTEM SET DB_RECOVERY_FILE_DEST_SIZE=10G;
SELECT * FROM V$FLASH_RECOVERY_AREA_USAGE;
SELECT * FROM V$RECOVERY_FILE_DEST;

RMAN> crosscheck archivelog all;
RMAN> delete expired archivelog all;


/************************************************************
--DISTRIBUTED TRANSACTION:
************************************************************/
1. Find the local transaction ID (LOCAL_TRAN_ID):
Select 'Rollback force '''||LOCAL_TRAN_ID||'''' from sys.pending_trans$;

2. COMMIT FORCE or ROLLBACK FORCE it:
rollback force LOCAL_TRAN_ID;
commit force LOCAL_TRAN_ID;

3. If that fails, forcibly delete the transaction record from the data dictionary:
set transaction use rollback segment system;
delete from dba_2pc_pending where local_tran_id = LOCAL_TRAN_ID;
delete from pending_sessions$ where local_tran_id = LOCAL_TRAN_ID;
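--Hedged worked example of steps 1-2, assuming pending_trans$ returned the hypothetical transaction ID '1.21.17';
--DBMS_TRANSACTION.PURGE_LOST_DB_ENTRY is the documented alternative to the manual deletes in step 3:
ROLLBACK FORCE '1.21.17';
--or: COMMIT FORCE '1.21.17';
EXEC DBMS_TRANSACTION.PURGE_LOST_DB_ENTRY('1.21.17');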


/************************************************************
--11G:
************************************************************/
Please provide me with a diagnostic package of the issue using the Automatic Diagnostic Repository Command Interface (ADRCI)

ADRCI is the new tool in 11g and gathers all diagnostic data and packages it into a ZIP file ready to upload to Oracle Support.

Steps :
1. With 11g ORACLE_HOME and PATH set,from the OS prompt enter
%]adrci
adrci tool is started.
2.
adrci> show home
diag/rdbms/v11/V11 (example)
This will list all the ADR homes. Note the home path for the current database or instance.
3.
Set the home path (for example):
adrci> set homepath diag/rdbms/v11/V11
4.
adrci> show problem
This will list the most recent problems encountered by the database.
The Oracle Error (i.e. ORA-600, ORA-7445, ORA-1578) you are experiencing will be listed.
You will also see a PROBLEM_ID , Problem_key and Date associated with the problem.
5.
Package the trace files
adrci> ips pack problem <problem id> in /tmp
Package will be generated in /tmp.
6.
Upload this package to the SR .
Reference : 738732.1 - ADR How to Package Diagnostic Information in 11g


/************************************************************
--STREAMS:
************************************************************/
SELECT streams_pool_size_for_estimate, streams_pool_size_factor,
estd_spill_count, estd_spill_time, estd_unspill_count,
estd_unspill_time
FROM v$streams_pool_advice;

BEGIN
DBMS_AQADM.ALTER_QUEUE_TABLE (
queue_table => 'NY1_QUEUE_TABLE',
primary_instance => 1,
secondary_instance => 2) ;
end;
/

In 10.2 and above, the GV$STREAMS_TRANSACTION dynamic performance view can be used to assess any long running transactions when asynchronous
capture is used. An example query would be:
select streams_name "Streams Name",
streams_type "Streams Type",
xidusn||'.'||xidslt||'.'||xidsqn XID,
to_number (sysdate-last_message_time)*1440
"Time Since Last Message (min)"
from GV$STREAMS_TRANSACTION
where to_number (sysdate-last_message_time)*1440 >= 20
order by "Time Since Last Message (min)";
A typical alert log entry is similar to:

C001: long running txn detected, xid: 0x0001.001.00008b0f

Ignoring a Long-Running Transaction :

execute dbms_apply_adm.stop_apply('<apply_name>');
execute dbms_apply_adm.set_parameter
('<apply_name>','_ignore_transaction','<local_transaction_id>');
execute dbms_apply_adm.start_apply('<apply_name>');

exec dbms_capture_adm.stop_capture('CAPTURE_name');
exec dbms_capture_adm.set_parameter('CAPTURE_name',
'_ignore_transaction','trans_id ');
exec dbms_capture_adm.start_capture('CAPTURE_name');


/************************************************************
--INSTALLATIONS:
************************************************************/
oracle-rdbms-server-11gR2-preinstall
https://public-yum.oracle.com/public-yum-ol6.repo

rpm -q binutils compat-libstdc++ gcc glibc libaio libgcc libstdc++ \
make sysstat unixodbc

mkdir OL6U5Repo
mount -t iso9660 -o loop /net/oel/oel65x86-64/OracleLinux-R6-U5-Server-x86_64-dvd.iso /OL6U5Repo/
mount -t iso9660 -o loop /dbfs/FS1/OEL5.10_64.V40139-01.iso /media

cd /etc/yum.repos.d/

vi ol6u5.repo
[OracleLinux6U5]
name="Oracle Linux 6 Update 5"
baseurl=file:///OL6U5Repo
enabled=1
gpgcheck=0

yum update
yum install tigervnc-server -y

[public_ol6_latest]
name=Oracle Linux $releasever Latest ($basearch)
baseurl=http://public-yum.oracle.com/repo/OracleLinux/OL6/latest/$basearch/
gpgkey=http://public-yum.oracle.com/RPM-GPG-KEY-oracle-ol6
gpgcheck=1
enabled=1

[public_ol6_ga_base]
name=Oracle Linux $releasever GA installation media copy ($basearch)
baseurl=http://public-yum.oracle.com/repo/OracleLinux/OL6/0/base/$basearch/
gpgkey=http://public-yum.oracle.com/RPM-GPG-KEY-oracle-ol6
gpgcheck=1
enabled=0

[public_ol6_u1_base]
name=Oracle Linux $releasever Update 1 installation media copy ($basearch)
baseurl=http://public-yum.oracle.com/repo/OracleLinux/OL6/1/base/$basearch/
gpgkey=http://public-yum.oracle.com/RPM-GPG-KEY-oracle-ol6
gpgcheck=1
enabled=0

[public_ol6_u2_base]
name=Oracle Linux $releasever Update 2 installation media copy ($basearch)
baseurl=http://public-yum.oracle.com/repo/OracleLinux/OL6/2/base/$basearch/
gpgkey=http://public-yum.oracle.com/RPM-GPG-KEY-oracle-ol6
gpgcheck=1
enabled=0

[public_ol6_u3_base]
name=Oracle Linux $releasever Update 3 installation media copy ($basearch)
baseurl=http://public-yum.oracle.com/repo/OracleLinux/OL6/3/base/$basearch/
gpgkey=http://public-yum.oracle.com/RPM-GPG-KEY-oracle-ol6
gpgcheck=1
enabled=0

[public_ol6_u4_base]
name=Oracle Linux $releasever Update 4 installation media copy ($basearch)
baseurl=http://public-yum.oracle.com/repo/OracleLinux/OL6/4/base/$basearch/
gpgkey=http://public-yum.oracle.com/RPM-GPG-KEY-oracle-ol6
gpgcheck=1
enabled=0

[public_ol6_u5_base]
name=Oracle Linux $releasever Update 5 installation media copy ($basearch)
baseurl=http://public-yum.oracle.com/repo/OracleLinux/OL6/5/base/$basearch/
gpgkey=http://public-yum.oracle.com/RPM-GPG-KEY-oracle-ol6
gpgcheck=1
enabled=0

[public_ol6_UEK_latest]
name=Latest Unbreakable Enterprise Kernel for Oracle Linux $releasever ($basearch)
baseurl=http://public-yum.oracle.com/repo/OracleLinux/OL6/UEK/latest/$basearch/
gpgkey=http://public-yum.oracle.com/RPM-GPG-KEY-oracle-ol6
gpgcheck=1
enabled=$uek

[public_ol6_UEKR3_latest]
name=Unbreakable Enterprise Kernel Release 3 for Oracle Linux $releasever ($basearch)
baseurl=http://public-yum.oracle.com/repo/OracleLinux/OL6/UEKR3/latest/$basearch/
gpgkey=http://public-yum.oracle.com/RPM-GPG-KEY-oracle-ol6
gpgcheck=1
enabled=$uekr3

[public_ol6_UEK_base]
name=Unbreakable Enterprise Kernel for Oracle Linux $releasever ($basearch)
baseurl=http://public-yum.oracle.com/repo/OracleLinux/OL6/UEK/base/$basearch/
gpgkey=http://public-yum.oracle.com/RPM-GPG-KEY-oracle-ol6
gpgcheck=1
enabled=0


lsattr -El sys0|grep maxuproc
chdev -l sys0 -a maxuproc='5000'
lsuser -f root | grep capabilities
chuser capabilities=CAP_BYPASS_RAC_VMM,CAP_PROPAGATE,CAP_NUMA_ATTACH oracle
chuser capabilities=CAP_BYPASS_RAC_VMM,CAP_PROPAGATE,CAP_NUMA_ATTACH root
chuser data='-1' fsize='-1' core='-1' cpu='-1' rss='-1' stack='-1' nofiles='-1' oracle
chuser data='-1' fsize='-1' core='-1' cpu='-1' rss='-1' stack='-1' nofiles='-1' root

/****************************
vi /etc/security/limits
root:
nofiles = -1
fsize = -1
core = -1
cpu = -1
data = -1
rss = -1
stack = -1
oracle:
fsize = -1
core = -1
cpu = -1
data = -1
rss = -1
stack = -1
nofiles = -1
****************************/


/****************************
vmo -p -o lru_file_repage=0
vmo -p -o minperm%=10
vmo -p -o maxperm%=20
vmo -p -o maxclient%=15
vmo -p -o v_pinshm=0
vmo -p -o minfree=4096
vmo -p -o maxfree=8192
****************************/


/****************************
no -a | grep 'rfc1323'
no -a | grep 'ipqmaxlen'
no -a | grep 'sb_max'
no -a | grep 'udp_sendspace'
no -a | grep 'udp_recvspace'
no -a | grep 'tcp_sendspace'
no -a | grep 'tcp_recvspace'
-----------------------------
no -p -o ipqmaxlen=512
no -p -o rfc1323=1
no -p -o sb_max=41943040
no -p -o udp_recvspace=20971520
no -p -o udp_sendspace=2097152
no -p -o tcp_sendspace=262144
no -p -o tcp_recvspace=262144
****************************/


/****************************
# smitty aio
maxreqs 16384 Maximum number of REQUESTS True
Select change / Show Characteristics of Asynchronous I/O
Set STATE to “available”
maxreqs=16384
maxservers=300
minservers=150
****************************/


/****************************
instfix -i | grep 'All filesets for IY'
lslpp -l
-----------------------------
bos.cifs_fs.rte (recommended; otherwise the Oracle installer reports an error)
bos.adt.base
bos.adt.lib
bos.adt.libm
bos.perf.perfstat
bos.perf.libperfstat
bos.perf.proctools
rsct.basic.rte (RAC only; V2.4.7.3 + IZ01838, or V2.4.8.1 and later)
rsct.compat.clients.rte (For RAC only)
xlC.aix50.rte:7.0.0.4 (or later)
xlC.rte:7.0.0.1 (or later)
****************************/


/****************************
hagsuser
/etc/group
lsgroup ALL
lslpp -l cluster.*
****************************/


/****************************
/etc/hosts
ifconfig -l
ifconfig -a
lsattr -El en0
****************************/


/****************************
vi /etc/hosts.equiv
vi $HOME/.rhosts (root and oracle)
****************************/


/****************************
ORACLE_BASE=/oracle/app/oracle
ORACLE_HOME=$ORACLE_BASE/product/10.2.0/db
ORA_CRS_HOME=$ORACLE_BASE/product/10.2.0/crs
ORACLE_SID=oraIPS1
ORACLE_TERM=vt100
ORACLE_OWNER=oracle
export ORACLE_BASE ORACLE_HOME ORA_CRS_HOME ORACLE_SID ORACLE_TERM ORACLE_OWNER

LD_LIBRARY_PATH=/oracle/app/oracle/product/10.2.0/db/lib32:/oracle/app/oracle/product/10.2.0/db/lib
NLS_LANG=American_America.ZHS16GBK
ORA_NLS33=/oracle/app/oracle/product/10.2.0/db/nls/data
PATH=/usr/bin:/etc:/usr/sbin:/usr/ucb:/usr/bin/X11:/sbin:/usr/java14/jre/bin:/usr/java14/bin:/usr/bin:/etc:/usr/sbin:/usr/ucb:/usr/bin/X11:/sbin:/usr/java14/jre/bin:/usr/java14/bin:/oracle/app/oracle/product/10.2.0/db/OPatch:/oracle/app/oracle/product/10.2.0/db/bin:/oracle/app/oracle/product/10.2.0/crs/bin:/bin:/usr/ccs/bin:/usr/bin/X11:.:/oracle/app/oracle/product/10.2.0/db/OPatch:/oracle/app/oracle/product/10.2.0/db/bin:/oracle/app/oracle/product/10.2.0/crs/bin:/bin:/usr/ccs/bin:/usr/bin/X11:.
TMP=/tmp
TMPDIR=/tmp
export LD_LIBRARY_PATH NLS_LANG ORA_NLS33 PATH TMP TMPDIR OPATCH_PLATFORM_ID
export AIXTHREAD_SCOPE=S
umask 022
export DISPLAY=22.188.216.71:0.0
set -o vi
export EDITOR=vi
****************************/

--LONG OPS:
SELECT s.sid,
s.serial#,
s.machine,sl.opname,sl.target,sl.message,s.program,sl.sql_hash_value,
TRUNC(sl.elapsed_seconds/60) || ':' || MOD(sl.elapsed_seconds,60) elapsed,
TRUNC(sl.time_remaining/60) || ':' || MOD(sl.time_remaining,60) remaining,
ROUND(sl.sofar/sl.totalwork*100, 2) progress_pct
FROM v$session s,
v$session_longops sl
WHERE s.sid = sl.sid
AND s.serial# = sl.serial#
AND sl.totalwork>0
AND sl.sofar<>sl.totalwork;

 

 

/************************************************************
--ORACLE CONCEPT:
************************************************************/
Situations that trigger a checkpoint (and which of them are complete checkpoints):
1. When a redo log group switch occurs
2. When the LOG_CHECKPOINT_TIMEOUT, LOG_CHECKPOINT_INTERVAL, FAST_START_IO_TARGET or FAST_START_MTTR_TARGET parameter settings are reached
3. When ALTER SYSTEM SWITCH LOGFILE is run
4. When ALTER SYSTEM CHECKPOINT is run
5. When ALTER TABLESPACE xxx BEGIN BACKUP / END BACKUP is run
6. When a tablespace or datafile is taken offline


Conditions that trigger an LGWR write:
1) Every three seconds
2) When the log buffer is one-third full
3) On commit
4) At a checkpoint
5) When DBWn is about to write dirty buffers

# control file block size
select cfbsz from X$kcccf;

# redo file block size
select lebsz from X$kccle;
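# On newer releases the documented views expose the same sizes (hedged assumption: v$controlfile.block_size from 10gR2, v$log.blocksize from 11gR2)
select block_size from v$controlfile;
select blocksize from v$log;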


IOPS

- The standard unit of measurement for I/O operations per second.
Includes all reads and writes.
- This is how you rate an I/O stack component's ability to process small-block random I/O requests.
- Used to describe I/O-rate-driven applications (OLTP, random I/O).

Mbytes/s

- Mega"Bytes" per second.
- Used to measure large-block sequential transfer rates, with no response time characterization.
- Used to describe data-rate-driven applications (DSS, OLAP).
- Transactions/s - it is anything you claim it to be.

Oracle query to gather this data from your database:

set lines 250
set pages 9999
spool sysmetric_outp.log
alter session set nls_date_format='dd-mm-yyyy hh24:mi';
select min(begin_time), max(end_time),
sum(case metric_name when 'Physical Read Total Bytes Per Sec' then average end)
Physical_Read_Total_Bps,
sum(case metric_name when 'Physical Write Total Bytes Per Sec' then average end)
Physical_Write_Total_Bps,
sum(case metric_name when 'Redo Generated Per Sec' then average end)
Redo_Bytes_per_sec,
sum(case metric_name when 'Physical Read Total IO Requests Per Sec' then average end)
Physical_Read_IOPS,
sum(case metric_name when 'Physical Write Total IO Requests Per Sec' then average end)
Physical_write_IOPS,
sum(case metric_name when 'Redo Writes Per Sec' then average end) Physical_redo_IOPS,
sum(case metric_name when 'Current OS Load' then average end) OS_Load,
sum(case metric_name when 'CPU Usage Per Sec' then average end)
DB_CPU_Usage_per_sec,
sum(case metric_name when 'Host CPU Utilization (%)' then average end) Host_CPU_util, -- NOTE: 100% = 1 loaded RAC node
sum(case metric_name when 'Network Traffic Volume Per Sec' then average end)
Network_bytes_per_sec,
snap_id
from dba_hist_sysmetric_summary
group by snap_id
order by snap_id;
spool off

--WALLET:
--sqlnet.ora:
ENCRYPTION_WALLET_LOCATION=(SOURCE=(METHOD=FILE)(METHOD_DATA=(DIRECTORY=/home/oracle/wallet)))
alter system set encryption key identified by "wallet";
ALTER SYSTEM SET ENCRYPTION WALLET CLOSE IDENTIFIED BY "wallet";
ALTER SYSTEM SET ENCRYPTION WALLET OPEN IDENTIFIED BY "wallet";
alter table t modify (id encrypt);


--service
execute dbms_service.modify_service (service_name => 'ora11g' -
, aq_ha_notifications => true -
, failover_method => dbms_service.failover_method_basic -
, failover_type => dbms_service.failover_type_select -
, failover_retries => 180 -
, failover_delay => 5 -
, clb_goal => dbms_service.clb_goal_long);

srvctl add service -d ora11g -s ora11g -r ora11g1,ora11g2 -P basic -e select -m basic
srvctl modify service -d rgstdg -s dbo8i -P basic -e select -m basic
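--Hedged check that the service attributes took effect (DBA_SERVICES columns as of 10gR2/11g):
select name, failover_method, failover_type, failover_retries, failover_delay, clb_goal
from dba_services where name = 'ora11g';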


select a.table_owner, a.table_name, a.inserts,a.updates,a.deletes ,b.num_rows
from dba_tab_modifications a, dba_tables b
where a.table_name = b.table_name
and table_owner not in
('SYS', 'SYSTEM', 'SYSMAN', 'DMSYS', 'OLAPSYS', 'XDB',
'EXFSYS', 'CTXSYS', 'WMSYS', 'DBSNMP', 'ORDSYS',
'OUTLN', 'TSMSYS', 'MDSYS')
and inserts > 0 and partitioned='NO' and a.inserts/decode(b.num_rows,0,1,b.num_rows)>=0.1
or a.table_name = b.table_name
and table_owner not in
('SYS', 'SYSTEM', 'SYSMAN', 'DMSYS', 'OLAPSYS', 'XDB',
'EXFSYS', 'CTXSYS', 'WMSYS', 'DBSNMP', 'ORDSYS',
'OUTLN', 'TSMSYS', 'MDSYS')
and updates > 0 and partitioned='NO' and a.updates/decode(b.num_rows,0,1,b.num_rows)>=0.1 or
a.table_name = b.table_name
and table_owner not in
('SYS', 'SYSTEM', 'SYSMAN', 'DMSYS', 'OLAPSYS', 'XDB',
'EXFSYS', 'CTXSYS', 'WMSYS', 'DBSNMP', 'ORDSYS',
'OUTLN', 'TSMSYS', 'MDSYS')
and deletes > 0 and partitioned='NO' and a.deletes/decode(b.num_rows,0,1,b.num_rows)>=0.1 ;
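--Note (hedged): *_TAB_MODIFICATIONS is refreshed only periodically; flush the in-memory DML counters first to see current figures:
EXEC DBMS_STATS.FLUSH_DATABASE_MONITORING_INFO;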


# Partition tables:
create table test
(
dt date,
cc number(38,0),
zc number(38,0),
id number(38,0)
)
partition by range (dt)
interval(numtodsinterval(1,'day'))
SUBPARTITION BY list(CC)
SUBPARTITION TEMPLATE(
SUBPARTITION sp1 VALUES(1),
SUBPARTITION sp2 VALUES(2),
SUBPARTITION sp3 VALUES(9))
(
PARTITION SYS_001 VALUES LESS THAN (TO_DATE('20141101','yyyymmdd'))
);

numtoyminterval
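--Hedged sketch of the NUMTOYMINTERVAL variant (monthly interval partitions; the table name test_m is made up for illustration):
create table test_m
(
dt date,
id number(38,0)
)
partition by range (dt)
interval(numtoyminterval(1,'month'))
(
PARTITION P_FIRST VALUES LESS THAN (TO_DATE('20141101','yyyymmdd'))
);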

/************************************************************
--INMEMORY :
************************************************************/
SELECT * from V$INMEMORY_AREA;
SELECT OBJD, TSN, ALLOCATED_LEN, NUM_ROWS, NUM_COLS FROM V$IM_HEADER;
SELECT INMEMORY, INMEMORY_PRIORITY, INMEMORY_COMPRESSION, INMEMORY_DISTRIBUTE, INMEMORY_DUPLICATE FROM USER_TABLES;
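--Hedged example of enabling the column store for a table (the compression/priority values here are only illustrative):
ALTER TABLE test INMEMORY MEMCOMPRESS FOR QUERY LOW PRIORITY HIGH;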


--LINKS
http://www.juliandyke.com
http://www.orainternals.com
http://www.databasejournal.com
http://blog.tanelpoder.com/
http://psoug.org/reference/hints.html

http://www.jlcomp.demon.co.uk/
http://jonathanlewis.wordpress.com/
http://www.jlcomp.demon.co.uk/cbo_book/ind_book.html

--old version binary
sac.us.oracle.com

 

 


Here is an explanation of when data needs to spill to the temporary tablespace.

For example, a sort operator uses a work area (sometimes called the sort area) to perform the in-memory sort of a set of rows. Similarly, a hash-join operator
uses a work area (also called the hash area) to build a hash table from its left input. If the amount of data to be processed by these two operators does not
fit into a work area, the input data is divided into smaller pieces. This enables some data pieces to be processed in memory while the rest are spilled to
temporary disk storage to be processed later. Although bitmap operators do not spill to disk when their associated work area is too small, their complexity is
inversely proportional to the size of their work area. Thus, these operators run faster with a larger work area.
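--A hedged way to see how often work areas ran fully in memory versus spilling (one-pass / multi-pass executions):
select low_optimal_size/1024 low_kb, (high_optimal_size+1)/1024 high_kb,
optimal_executions, onepass_executions, multipasses_executions
from v$sql_workarea_histogram
where total_executions <> 0;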

 


/************************************************************
--CHECK DB:
************************************************************/
spool current.lst

alter session set nls_date_format = 'DD-MON-YYYY HH24:MI:SS';
set feedback on
set heading on
set echo on
set pagesize 1000
set linesize 200
set numwidth 50
col checkpoint_change# format 999999999999999999999

select status, checkpoint_change#, fuzzy,
to_char(checkpoint_time, 'DD-MON-YYYY HH24:MI:SS') as checkpoint_time, count(*)
from v$datafile_header
group by status, checkpoint_change#, fuzzy, checkpoint_time
order by status, checkpoint_change#, fuzzy, checkpoint_time;

col current_scn format 999999999999999999999

select dbid, name, database_role, switchover_status, created, resetlogs_change#, resetlogs_time, open_mode, log_mode, checkpoint_change#,
controlfile_type, controlfile_change#, controlfile_time
from v$database;

select dbid,name, open_mode, controlfile_type, current_scn from v$database;

select dbid,name,open_mode, controlfile_type from v$database;

select dbid, name, created, resetlogs_change#, resetlogs_time, open_mode, log_mode, checkpoint_change#,
controlfile_type, controlfile_change#, controlfile_time from v$database;

select sum(bytes)/1024/1024 DB_IN_MG from v$datafile;


select * from v$database_incarnation;

col file# format 9999
col status format a10
col name format a75

select file#, status, name, tablespace_name, ts# from v$datafile_header where status <> 'ONLINE';

select file#, status, name, ts# from v$datafile where status <> 'ONLINE';

select file#, ts#, name, enabled, status, checkpoint_time from v$datafile order by file#;

select file#, creation_time, checkpoint_time, resetlogs_time from v$datafile_header order by file#;

select ts#, name from v$tablespace order by ts#;

col hxfil format 9999
col fhscn format 999999999999999999999999
col fhtnm format a30
col fhrba_Seq format 9999999999999
col FHBCP_THR format 9999
set numwidth 50

select hxfil FILE#, fhsta STATUS, fhscn SCN, fhrba_Seq SEQUENCE, fhtnm TABLESPACE, FHBCP_THR Thread from x$kcvfh;

select fhsta, count(*)
from x$kcvfh
group by fhsta
order by fhsta;

select min(fhrba_Seq), max(fhrba_Seq) from X$KCVFH;

select df.enabled, dh.error, dh.fuzzy, dh.status, dh.checkpoint_change#,
to_char(dh.checkpoint_time, 'DD-MON-YYYY HH24:MI:SS') as checkpoint_time,
count(*)
from v$datafile_header dh, v$datafile df
where dh.file# = df.file#
group by df.enabled, dh.error, dh.fuzzy, dh.status, dh.checkpoint_change#, dh.checkpoint_time
order by df.enabled, dh.checkpoint_change#, dh.checkpoint_time;


archive log list;
show parameter recover
show parameter control_file
show parameter name
show parameter cluster

alter session set nls_date_format = 'DD-MON-YYYY HH24:MI:SS';
col member format a75
col sequence format 99999
col group# format 99
col status format a15
col first_change# format 99999999999999999
col next_change# format 9999999999999999999
col members format 99
set linesize 250

select f.member, v.group#, v.sequence#, v.thread#, v.status from v$log v, v$logfile f
where v.group#=f.group# order by v.group#;

select * from v$log order by first_change#;

select * from v$logfile;

select * from v$backup where status = 'ACTIVE';

select * from v$recover_file;

col space_limit format 999999999999
col space_used format 999999999999
col SPACE_RECLAIMABLE format 999999999999

SELECT name, space_limit/1024/1024 LIMITmg, space_used/1024/1024 USEDmg, SPACE_RECLAIMABLE, NUMBER_OF_FILES FROM V$RECOVERY_FILE_DEST;

SELECT * FROM V$FLASH_RECOVERY_AREA_USAGE;

select * from V$FLASHBACK_DATABASE_LOG;

select * from x$krfblog;

archive log list;

show parameter flash
show parameter recover
show parameter undo

select flashback_on from v$database;

col name format a20
col scn format 999999999999999999
col database_incarnation# format 999
col guarantee_flashback_database format a3
col storage_size format 999999999999999

select name,scn,time,database_incarnation#,guarantee_flashback_database,storage_size from v$restore_point;

spool off

 

/************************************************************
--Force open:
************************************************************/
ACTION PLAN
============
The note below provides steps to force open a database when using Automatic Undo. Please remember that once you force open a database it is unsupported by Oracle. You must export, create a new clean database with new files, and import to become supported again.

Add the following events in the init.ora. Do not edit the spfile.
EVENT="10231 trace name context forever, level 10:10233 trace name context forever, level 10"

The events will skip corrupt blocks during export. Make sure to list any other events together with a colon (:) between each event, otherwise some may be missed.

When a block is skipped, any data in the block is totally ignored.

Please refer to Note:33405.1 Extracting Data from a Corrupt Table using SKIP_CORRUPT_BLOCKS or Event 10231 for more information on the event.

ORA-8103 Troubleshooting, Diagnostic and Solution (Doc ID 268302.1)

To create a pfile from an spfile that you can edit and use for force open:

Example:

create pfile= '/tmp/initora11g.ora' from spfile;

SIDE Note: For a partial database restore/recover, make sure the SYSTEM, UNDO, SYSAUX, and the tablespaces containing your tables are in place. These tablespaces are required. You can OFFLINE DROP the other files that are part of the original database.

AUTOMATIC UNDO
----------------
Note 283945.1 TECH: Summary For Forcing The Database Open With '_ALLOW_RESETLOGS_CORRUPTION` with Automatic Undo Management

Warning
~~~~~~~
The following instructions should only be used under the explicit direction
of Oracle Support. These steps should only be used when all other conventional
means of recovering the database have failed. Please note that there is no
guarantee that this method will succeed.

IF THE STEPS BELOW DO ALLOW YOU TO OPEN YOUR DATABASE THEN IT IS ESSENTIAL THAT
THE DATABASE BE REBUILT AS IT IS NO LONGER SUPPORTED. FAILURE TO DO SO MAY LEAD
TO DATA DICTIONARY INCONSISTENCIES, INTERNAL ERRORS AND CORRUPTIONS.

** Note: The steps here apply to Oracle 9i or higher, and only when Automatic
Undo Management is being used. **

Steps to attempt to force the database open:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

1) Backup the database while the database is closed.

THE INSTRUCTIONS HERE ARE DESTRUCTIVE. YOU ARE STRONGLY ADVISED TO BACKUP THE DATABASE BEFORE PROCEEDING. IF YOU DO NOT DO THIS YOU MAY LOSE THE CHANCE TO TRY OTHER OPTIONS.


2) If your datafiles are from different points in time, it is best to try to use system tablespace datafiles at a similar timestamp to the OLDEST files you have online in the database. This reduces the chance that you will get problems during the bootstrap phase of opening the database.


3) Edit your *init<sid>.ora file to change undo_management and add two parameters.

* Change UNDO_MANAGEMENT=AUTO to

UNDO_MANAGEMENT=MANUAL

* Remove or comment out UNDO_TABLESPACE and UNDO_RETENTION.

* Add

_ALLOW_RESETLOGS_CORRUPTION = TRUE
_CORRUPTED_ROLLBACK_SEGMENTS =(comma separated list of Automatic Undo segments)

Example:

_corrupted_rollback_segments=(_SYSSMU1$, _SYSSMU2$, _SYSSMU3$, _SYSSMU4$, _SYSSMU5$, _SYSSMU6$, _SYSSMU7$, _SYSSMU8$, _SYSSMU9$, _SYSSMU10$, _SYSSMU11$, _SYSSMU12$, _SYSSMU13$, _SYSSMU14$, _SYSSMU15$, _SYSSMU16$, _SYSSMU17$, _SYSSMU18$, _SYSSMU19$, _SYSSMU20$, _SYSSMU21$)

You have to list EVERY undo segments known to your database in a hidden parameter (_corrupted_rollback_segments) - if you miss just one, then Oracle will attempt to use it for transaction recovery as soon as the open is done and the database will then crash.

Note, sometimes the alert log will tell you what Automatic Undo segments are in use. Search the alert log for SYSS. If the alert log does not contain that information then use _SYSSMU1$ through _SYSSMU10$ as shown in the example above.

In UNIX you can issue this command to get the undo segment names:

$ strings system01.dbf | grep _SYSSMU | cut -d $ -f 1 | sort -u

From the output of the strings command above, add a $ to end of each _SYSSMU undo segment name.

For windows, you can download the 'strings' command from here:

- http://technet.microsoft.com/en-us/sysinternals/bb897439.aspx

Grep can be found here:

- http://www.wingrep.com/resources/binaries/WindowsGrep23.exe

If you don't want to install these tools, you can either:

- If you have a unix box somewhere, transfer the SYSTEM datafile and run the commands there.
or
- Try setting the first 21 undo segments as listed above.

* If you only have a spfile available, you can from the closed, nomount or the mount stage create an init<sid>.ora file as follows:

SQL> CREATE PFILE FROM SPFILE;

Do NOT edit the SPFILE.


4) Invoke SQL*Plus, startup mount, check that correct init<sid>.ora was used and all datafiles are in the status of online or system.

$ sqlplus "/as sysdba"

SQL> startup mount pfile = (full path / file name to init<sid>.ora)

Confirm that the hidden parameters from step 3 were used:

SQL> show parameters corrupt

You should see both hidden parameters listed. If not, the wrong init<sid>.ora may have been modified. Do not continue until "show parameters corrupt" shows both hidden parameters.

SQL> show parameters undo

You should see undo management set to manual. If not, the wrong init<sid>.ora may have been modified. Do not continue until "show parameters undo" shows undo management as manual.

Check that all files you want to open with are listed as ONLINE or as SYSTEM.

SQL> select name, file#, status from v$datafile where status not in ('SYSTEM', 'ONLINE');

If any rows are returned from the query above, bring the file(s) online with:

SQL> ALTER DATABASE DATAFILE file# ONLINE;

and repeat until there are no files in an OFFLINE status.


5) Perform a fake incomplete recovery then open the database with resetlogs.

SQL> recover database until cancel;

or

SQL> recover database using backup controlfile until cancel;

WHEN PROMPTED FOR AN ARCHIVELOG FILE TYPE cancel THEN PRESS ENTER.

SQL> ALTER DATABASE OPEN RESETLOGS;

NOTE: EVEN IF YOU GET AN ERROR ON CANCEL THAT THE FILE NEEDS MORE MEDIA RECOVERY TRY TO OPEN ANYWAY.

6) If the database opens try selecting from a table. For example:

SQL> SELECT SYSDATE FROM DUAL;

If you get a row back, the database is open and "functional". If you wish, you may try to select from other tables to make sure the database is functional enough for the required export.

With the database open and functional you should attempt to export the database IMMEDIATELY. Once you have an export the database MUST be recreated from scratch. This means dropping and deleting ALL datafiles and creating a new database from scratch.

A database which has been opened in this way but not rebuilt will not be supported by Oracle. Any delay in exporting the contents or any attempt to use the system may cause irreparable damage.

NOTE: BE SURE TO REVERSE / REMOVE THE INIT.ORA PARAMETERS ADDED IN STEP 3 OTHERWISE YOU MAY ACCIDENTALLY CORRUPT ANY NEW DATABASE CREATED USING THE SAME INIT<sid>.ORA FILE.


set pagesize 20000
set linesize 180
set pause off
set serveroutput on
set feedback on
set echo on
set numformat 999999999999999
Spool recover.lst
select substr(name, 1, 50), status from v$datafile;
select controlfile_type from v$database;
select substr(name,1,50), recover, fuzzy, checkpoint_change# from v$datafile_header;
select * from v$backup;
select name, open_mode, checkpoint_change#, ARCHIVE_CHANGE# from v$database;
select GROUP#,THREAD#,SEQUENCE#,MEMBERS,ARCHIVED,STATUS,FIRST_CHANGE# from v$log;
select GROUP#,substr(member,1,60) from v$logfile;
select * from v$log_history;
select * from v$recover_file;
select * from v$recovery_log;
select HXFIL File_num,substr(HXFNM,1,40) File_name,FHTYP Type,HXERR Validity,
FHSCN SCN, FHTNM TABLESPACE_NAME,FHSTA status ,FHRBA_SEQ Sequence from X$KCVFH;
spool off


/************************************************************
--Reliable message :
************************************************************/
SR 3-9796047571 : much "reliable message" wait on Exadata X4-2
alter system set "_clusterwide_global_transactions"=false;
High rdbms ipc reply and DFS lock handle in 11gR2 RAC With XA Enabled Application (Doc ID 1361615.1)

select CHANNEL,sum(wait_count) sum_wait_count from GV$CHANNEL_WAITS
group by CHANNEL order by sum(wait_count);

Bug 19557279 : EXCESSIVELY HIGH RELIABLE MESSAGE
alter system set result_cache_max_size=0;
alter system set parallel_force_local=TRUE;
alter system set "_optimizer_ads_use_result_cache"=false;
@ if above parameter reduces these waits, you should apply below patch for
@ permanent fix
@ 18416368

Very High Waits For 'Reliable Message' After Upgrade to 11.2.0.4 When Using Result Cache (Doc ID 1951729.1)

/************************************************************
--Random value from Enum:
************************************************************/
with temp as (
select id from
(select 1 id from dual union all
select 2 id from dual union all
select 9 id from dual)
order by dbms_random.value)
select * from temp where rownum=1;

 


/************************************************************
--Log file sync :
************************************************************/
Starting with 10gR2, Oracle provides a new parameter, COMMIT_WRITE, to control how Oracle writes redo after a client commits. The parameter value has four combinations:
immediate , wait
immediate , nowait
batch , wait
batch , nowait

New parameters in 11g:
COMMIT_LOGGING can be set to BATCH or IMMEDIATE.
COMMIT_WAIT can be set to WAIT or NOWAIT.

_use_adaptive_log_file_sync
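--Hedged session-level examples of these parameters (BATCH/NOWAIT trades commit durability for throughput; use with care):
ALTER SESSION SET COMMIT_WRITE = 'BATCH,NOWAIT';  -- 10gR2
ALTER SESSION SET COMMIT_LOGGING = BATCH;         -- 11g
ALTER SESSION SET COMMIT_WAIT = NOWAIT;           -- 11g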


-- NAME: LFSDIAG.SQL
-- ------------------------------------------------------------------------
-- AUTHOR: Michael Polaski - Oracle Support Services
-- ------------------------------------------------------------------------
-- PURPOSE:
-- This script is intended to provide a user friendly guide to troubleshoot
-- log file sync waits. The script will look at important parameters involved
-- in log file sync waits, log file sync wait histogram data, and the script
-- will look at the worst average log file sync times in the active session
-- history data and AWR data and dump information to help determine why those
-- times were the highest. The script will create a file called
-- lfsdiag_<timestamp>.out in your local directory.

set echo off
set feedback off
column timecol new_value timestamp
column spool_extension new_value suffix
select to_char(sysdate,'Mondd_hh24mi') timecol,
'.out' spool_extension from sys.dual;
column output new_value dbname
select value || '_' output
from v$parameter where name = 'db_name';
spool lfsdiag_&&dbname&&timestamp&&suffix
set trim on
set trims on
set lines 140
set pages 100
set verify off
alter session set optimizer_features_enable = '10.2.0.4';

PROMPT LFSDIAG DATA FOR &&dbname&&timestamp
PROMPT Note: All timings are in milliseconds (1000 milliseconds = 1 second)

PROMPT
PROMPT IMPORTANT PARAMETERS RELATING TO LOG FILE SYNC WAITS:
column name format a40 wra
column value format a40 wra
select inst_id, name, value from gv$parameter
where ((value is not null and name like '%log_archive%') or
name like '%commit%' or name like '%event=%' or name like '%lgwr%')
and name not in (select name from gv$parameter where (name like '%log_archive_dest_state%'
and value = 'enable') or name = 'log_archive_format')
order by 1,2,3;

PROMPT
PROMPT ASH THRESHOLD...
PROMPT
PROMPT This will be the threshold in milliseconds for average log file sync
PROMPT times. This will be used for the next queries to look for the worst
PROMPT 'log file sync' minutes. Any minutes that have an average log file
PROMPT sync time greater than the threshold will be analyzed further.
column threshold_in_ms new_value threshold format 999999999.999
select min(threshold_in_ms) threshold_in_ms
from (select inst_id, to_char(sample_time,'Mondd_hh24mi') minute,
avg(time_waited)/1000 threshold_in_ms
from gv$active_session_history
where event = 'log file sync'
group by inst_id,to_char(sample_time,'Mondd_hh24mi')
order by 3 desc)
where rownum <= 10;

PROMPT
PROMPT ASH WORST MINUTES FOR LOG FILE SYNC WAITS:
PROMPT
PROMPT APPROACH: These are the minutes where the avg log file sync time
PROMPT was the highest (in milliseconds).
column minute format a12 tru
column event format a30 tru
column program format a40 tru
column total_wait_time format 999999999999.999
column avg_time_waited format 999999999999.999
select to_char(sample_time,'Mondd_hh24mi') minute, inst_id, event,
sum(time_waited)/1000 TOTAL_WAIT_TIME , count(*) WAITS,
avg(time_waited)/1000 AVG_TIME_WAITED
from gv$active_session_history
where event = 'log file sync'
group by to_char(sample_time,'Mondd_hh24mi'), inst_id, event
having avg(time_waited)/1000 > &&threshold
order by 1,2;

PROMPT
PROMPT ASH LFS BACKGROUND PROCESS WAITS DURING WORST MINUTES:
PROMPT
PROMPT APPROACH: What is LGWR doing when 'log file sync' waits
PROMPT are happening? LMS info may be relevent for broadcast
PROMPT on commit and LNS data may be relevant for dataguard.
PROMPT If more details are needed see the ASH DETAILS FOR WORST
PROMPT MINUTES section at the bottom of the report.
column inst format 999
column minute format a12 tru
column event format a30 tru
column program format a40 wra
select to_char(sample_time,'Mondd_hh24mi') minute, inst_id inst, program, event,
sum(time_waited)/1000 TOTAL_WAIT_TIME , count(*) WAITS,
avg(time_waited)/1000 AVG_TIME_WAITED
from gv$active_session_history
where to_char(sample_time,'Mondd_hh24mi') in (select to_char(sample_time,'Mondd_hh24mi')
from gv$active_session_history
where event = 'log file sync'
group by to_char(sample_time,'Mondd_hh24mi'), inst_id
having avg(time_waited)/1000 > &&threshold and sum(time_waited)/1000 > 1)
and (program like '%LGWR%' or program like '%LMS%' or
program like '%LNS%' or event = 'log file sync')
group by to_char(sample_time,'Mondd_hh24mi'), inst_id, program, event
order by 1,2,3,5 desc, 4;

PROMPT
PROMPT HISTOGRAM DATA FOR LFS AND OTHER RELATED WAITS:
PROMPT
PROMPT APPROACH: Look at the wait distribution for log file sync waits
PROMPT by looking at "wait_time_milli". Look at the high wait times then
PROMPT see if you can correlate those with other related wait events.
column event format a40 wra
select inst_id, event, wait_time_milli, wait_count
from gv$event_histogram
where event in ('log file sync','gcs log flush sync',
'log file parallel write','wait for scn ack',
'log file switch completion','gc cr grant 2-way',
'gc buffer busy','gc current block 2-way') or
event like '%LGWR%' or event like '%LNS%'
order by 2 desc,1,3;

PROMPT
PROMPT ORDERED BY WAIT_TIME_MILLI
select inst_id, event, wait_time_milli, wait_count
from gv$event_histogram
where event in ('log file sync','gcs log flush sync',
'log file parallel write','wait for scn ack',
'log file switch completion','gc cr grant 2-way',
'gc buffer busy','gc current block 2-way')
or event like '%LGWR%' or event like '%LNS%'
order by 3,1,2 desc;

PROMPT
PROMPT REDO WRITE STATS
PROMPT
PROMPT "redo write time" in centiseconds (100 per second)
PROMPT 11.1: "redo write broadcast ack time" in centiseconds (100 per second)
PROMPT 11.2: "redo write broadcast ack time" in microseconds (1000 per millisecond)
column value format 99999999999999999999
column milliseconds format 99999999999999.999
select v.version, ss.inst_id, ss.name, ss.value,
decode(substr(version,1,4),
'11.1',decode (name,'redo write time',value*10,
'redo write broadcast ack time',value*10),
'11.2',decode (name,'redo write time',value*10,
'redo write broadcast ack time',value/1000),
decode (name,'redo write time',value*10)) milliseconds
from gv$sysstat ss, v$instance v
where name like 'redo write%' and value > 0
order by 1,2,3;

PROMPT
PROMPT AWR WORST AVG LOG FILE SYNC SNAPS:
PROMPT
PROMPT APPROACH: These are the AWR snaps where the average 'log file sync'
PROMPT times were the highest.
column begin format a12 tru
column end format a12 tru
column name format a13 tru
select dhs.snap_id, dhs.instance_number inst, to_char(dhs.begin_interval_time,'Mondd_hh24mi') BEGIN,
to_char(dhs.end_interval_time,'Mondd_hh24mi') END,
en.name, se.time_waited_micro/1000 total_wait_time, se.total_waits,
se.time_waited_micro/1000 / se.total_waits avg_time_waited
from dba_hist_snapshot dhs, wrh$_system_event se, v$event_name en
where (dhs.snap_id = se.snap_id and dhs.instance_number = se.instance_number)
and se.event_id = en.event_id and en.name = 'log file sync' and
dhs.snap_id in (select snap_id from (
select se.snap_id, se.time_waited_micro/1000 / se.total_waits avg_time_waited
from wrh$_system_event se, v$event_name en
where se.event_id = en.event_id and en.name = 'log file sync'
order by avg_time_waited desc)
where rownum < 4)
order by 1,2;

PROMPT
PROMPT AWR REDO WRITE STATS
PROMPT
PROMPT "redo write time" in centiseconds (100 per second)
PROMPT 11.1: "redo write broadcast ack time" in centiseconds (100 per second)
PROMPT 11.2: "redo write broadcast ack time" in microseconds (1000 per millisecond)
column stat_name format a30 tru
select v.version, ss.snap_id, ss.instance_number inst, sn.stat_name, ss.value,
decode(substr(version,1,4),
'11.1',decode (stat_name,'redo write time',value*10,
'redo write broadcast ack time',value*10),
'11.2',decode (stat_name,'redo write time',value*10,
'redo write broadcast ack time',value/1000),
decode (stat_name,'redo write time',value*10)) milliseconds
from wrh$_sysstat ss, wrh$_stat_name sn, v$instance v
where ss.stat_id = sn.stat_id
and sn.stat_name like 'redo write%' and ss.value > 0
and ss.snap_id in (select snap_id from (
select se.snap_id, se.time_waited_micro/1000 / se.total_waits avg_time_waited
from wrh$_system_event se, v$event_name en
where se.event_id = en.event_id and en.name = 'log file sync'
order by avg_time_waited desc)
where rownum < 4)
order by 1,2,3;

PROMPT
PROMPT AWR LFS AND OTHER RELATED WAITS FOR WORST LFS AWRs:
PROMPT
PROMPT APPROACH: These are the AWR snaps where the average 'log file sync'
PROMPT times were the highest. Look at related waits at those times.
column name format a40 tru
select se.snap_id, se.instance_number inst, en.name,
se.total_waits, se.time_waited_micro/1000 total_wait_time,
se.time_waited_micro/1000 / se.total_waits avg_time_waited
from wrh$_system_event se, v$event_name en
where se.event_id = en.event_id and (en.name in
('log file sync','gcs log flush sync',
'log file parallel write','wait for scn ack',
'log file switch completion','gc cr grant 2-way',
'gc buffer busy','gc current block 2-way')
or en.name like '%LGWR%' or en.name like '%LNS%')
and se.snap_id in (select snap_id from (
select se.snap_id, se.time_waited_micro/1000 / se.total_waits avg_time_waited
from wrh$_system_event se, v$event_name en
where se.event_id = en.event_id and en.name = 'log file sync'
order by avg_time_waited desc)
where rownum < 4)
order by 1, 6 desc;

PROMPT
PROMPT AWR HISTOGRAM DATA FOR LFS AND OTHER RELATED WAITS FOR WORST LFS AWRs:
PROMPT Note: This query won't work on 10.2 - ORA-942
PROMPT
PROMPT APPROACH: Look at the wait distribution for log file sync waits
PROMPT by looking at "wait_time_milli". Look at the high wait times then
PROMPT see if you can correlate those with other related wait events.
select eh.snap_id, eh.instance_number inst, en.name, eh.wait_time_milli, eh.wait_count
from wrh$_event_histogram eh, v$event_name en
where eh.event_id = en.event_id and
(en.name in ('log file sync','gcs log flush sync',
'log file parallel write','wait for scn ack',
'log file switch completion','gc cr grant 2-way',
'gc buffer busy','gc current block 2-way')
or en.name like '%LGWR%' or en.name like '%LNS%')
and snap_id in (select snap_id from (
select se.snap_id, se.time_waited_micro/1000 / se.total_waits avg_time_waited
from wrh$_system_event se, v$event_name en
where se.event_id = en.event_id and en.name = 'log file sync'
order by avg_time_waited desc)
where rownum < 4)
order by 1,3 desc,2,4;

PROMPT
PROMPT ORDERED BY WAIT_TIME_MILLI
PROMPT Note: This query won't work on 10.2 - ORA-942
select eh.snap_id, eh.instance_number inst, en.name, eh.wait_time_milli, eh.wait_count
from wrh$_event_histogram eh, v$event_name en
where eh.event_id = en.event_id and
(en.name in ('log file sync','gcs log flush sync',
'log file parallel write','wait for scn ack',
'log file switch completion','gc cr grant 2-way',
'gc buffer busy','gc current block 2-way')
or en.name like '%LGWR%' or en.name like '%LNS%')
and snap_id in (select snap_id from (
select se.snap_id, se.time_waited_micro/1000 / se.total_waits avg_time_waited
from wrh$_system_event se, v$event_name en
where se.event_id = en.event_id and en.name = 'log file sync'
order by avg_time_waited desc)
where rownum < 4)
order by 1,4,2,3 desc;

PROMPT
PROMPT ASH DETAILS FOR WORST MINUTES:
PROMPT
PROMPT APPROACH: If you cannot determine the problem from the data
PROMPT above, you may need to look at the details of what each session
PROMPT is doing during each 'bad' snap. Most likely you will want to
PROMPT note the times of the high log file sync waits, look at what
PROMPT LGWR is doing at those times, and go from there...
column program format a45 wra
column sample_time format a25 tru
column event format a30 tru
column time_waited format 999999.999
column p1 format a40 tru
column p2 format a40 tru
column p3 format a40 tru
select sample_time, inst_id inst, session_id, program, event, time_waited/1000 TIME_WAITED,
p1text||': '||p1 p1,p2text||': '||p2 p2,p3text||': '||p3 p3
from gv$active_session_history
where to_char(sample_time,'Mondd_hh24mi') in (select
to_char(sample_time,'Mondd_hh24mi')
from gv$active_session_history
where event = 'log file sync'
group by to_char(sample_time,'Mondd_hh24mi'), inst_id
having avg(time_waited)/1000 > &&threshold)
and time_waited > 0.5
order by 1,2,3,4,5;

select to_char(sysdate,'Mondd hh24:mi:ss') TIME from dual;

spool off

PROMPT
PROMPT OUTPUT FILE IS: lfsdiag_&&dbname&&timestamp&&suffix
PROMPT


/************************************************************
--12C multi lgwr :
************************************************************/
_use_single_log_writer=ADAPTIVE
_max_outstanding_log_writes=2

taskset -pc 0,1,4,5 `pid of lgwr`


/************************************************************
--SQL*Loader and Spool:
************************************************************/
Configure the Oracle data export tool sqluldr2 on Linux.


/************************************************************
--Security:
************************************************************/
alter system set encryption key identified by "super5281";


/************************************************************
--EM12c:
************************************************************/
EM12c API:
http://docs.oracle.com/cd/E24628_01/doc.121/e38530/toc.htm
http://docs.oracle.com/cd/E24628_01/em.121/e17786/toc.htm
http://docs.oracle.com/cd/E24628_01/doc.121/e28814/cloud_part5.htm#sthref1388

EM12c MIB:
12c Cloud Control: Steps to Configure SNMP Trap Notification Method to Send Traps to a Third Party SNMP Console (Doc ID 1532325.1)
http://docs.oracle.com/cd/E24628_01/doc.121/e24473/appdx_mib.htm#EMADM14470
http://docs.oracle.com/cd/E24628_01/doc.121/e24473/appdx_mib_defn.htm#EMADM12892
http://docs.oracle.com/cd/E24628_01/doc.121/e24473/appdx_snmp_trap_map.htm#EMADM12894


/************************************************************
--Exalogic Apex Rest:
************************************************************/
Exalogic:
https://docs.oracle.com/cd/E24628_01/doc.121/e35776/toc.htm

Apex:
http://www.oracle.com/technetwork/developer-tools/apex/documentation/index.html

Rest:
http://www.oracle.com/technetwork/developer-tools/rest-data-services/documentation/index.html

/************************************************************
--Oracle join path:
************************************************************/
Nested loop join (NESTED LOOP JOIN)
Cluster join (CLUSTER JOIN)
Sort merge join (SORT MERGE JOIN)
Cartesian join (CARTESIAN JOIN)
Hash join (HASH JOIN), ORACLE 7.3
Index join (INDEX JOIN), ORACLE 8
In the execution plan, the table on top is the outer (driving) table and the table below it is the inner table (see the sketch below).
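
A minimal sketch to see which join path the optimizer picked; the USE_HASH hint and the scott.emp/scott.dept demo tables are only examples:
explain plan for
select /*+ use_hash(e d) */ e.ename, d.dname
from scott.emp e, scott.dept d
where e.deptno = d.deptno;
select * from table(dbms_xplan.display);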


/************************************************************
--Support.oracle.com:
************************************************************/
1) RAC and DB Support Tools Bundle (Doc ID 1594347.1)

2) Do you have the OSWatcher utility running?

If the OSWatcher utility has been installed, archive the subset of the OSWatcher files covering the crash date/time.

OSWatcher Black Box User Guide (Includes: [Video]) (Doc ID 301137.1)
OSWatcher Black Box User Guide (Note 1531223.1)
OSWatcher (Includes: [Video]) (Doc ID 301137.1)

Go to the oswbb directory

$ cd archive
$ tar -cvf archive_23.tar */*_15.11.13.2300.dat

Please upload the created file

3) Please have your Linux administrator configure "kdump", which creates a Linux kernel dump (/vmcore) when the system crashes.

Please see

Linux OS Service 'kdump' (Doc ID 550097.1)

4) For CRS/RAC environments there is an audit tool (ORAchk) that verifies system and Oracle settings against the documented requirements and best practices.

ORAchk - Health Checks for the Oracle Stack (Doc ID 1268927.2)


/************************************************************
--JDBC:
************************************************************/
The JDBC connection string must be in the correct format.

JDBC connection string examples:

SID syntax:
jdbc:oracle:thin:[USER/PASSWORD]@[HOST][:PORT]:SID

sample connect string (with SID orcl):
String url = "jdbc:oracle:thin:@myhost:1521:orcl";

service name syntax:
jdbc:oracle:thin:[USER/PASSWORD]@//[HOST][:PORT]/SERVICE

sample connect string (with service name orcl):
String url = "jdbc:oracle:thin:@//myhost:1521/orcl";


/************************************************************
--QUIESCE:
************************************************************/
ALTER SYSTEM QUIESCE RESTRICTED;
ALTER SYSTEM UNQUIESCE;
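
ALTER SYSTEM QUIESCE RESTRICTED requires the Database Resource Manager to have been active since instance startup; the current state can be checked with:
select instance_name, active_state from v$instance;   -- NORMAL, QUIESCING or QUIESCED
show parameter resource_manager_plan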

/************************************************************
--DBCA:
************************************************************/
dbca -silent -createDatabase -templateName General_Purpose.dbc \
-gdbName omc -sid omc \
-sysPassword welcome1 -systemPassword welcome1 \
-emConfiguration NONE \
-datafileDestination /u01/app/oracle/oradata \
-redoLogFileSize 128 \
-recoveryAreaDestination /u01/app/oracle/flash_recovery_area \
-storageType FS \
-characterSet UTF8 -nationalCharacterSet AL16UTF16 \
-listeners LISTENER \
-memoryPercentage 40 \
-databaseType OLTP

dbca -silent -createDatabase -templateName New_Database.dbt -gdbName orcl -sid orcl -sysPassword welcome1 -systemPassword welcome1 -emConfiguration NONE -redoLogFileSize 8192 -storageType ASM -asmsnmpPassword welcome1 -diskGroupName DATAC1 -recoveryGroupName FAST -nodelist cnx4db01,cnx4db02,cnx4db03,cnx4db04 -characterSet AL32UTF8 -nationalCharacterSet UTF8 -initParams "_enable_numa_support"=FALSE,"_file_size_increase_increment"=2143289344,processes=1024 -databaseType MULTIPURPOSE
dbca -silent -deleteDatabase -sourceDB orcl

dbca -silent -createDatabase -templateName /u01/app/oracle/product/11.2.0.4/dbhome_1/assistants/dbca/templates/General_Purpose.dbc -gdbName orcl11g -sid orcl11g -SysPassword change_on_install -SystemPassword manager -emConfiguration NONE -storageType ASM -asmSysPassword change_on_install -diskGroupName DGDATA -characterSet AL32UTF8 -memoryPercentage 20

asmca -silent -diskGroupName DATA -diskList '/dev/asm1,/dev/asm2,/dev/asm3,/dev/asm4' -redundancy NORMAL -diskString '/dev/asm*' -configureLocalASM -passwordFileLocation +DATA/orapwASM -au_size 1
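
A quick post-creation sanity check from SQL*Plus (on RAC, srvctl status database -d <db_unique_name> gives the cluster-wide view):
select name, open_mode, log_mode from v$database;
select instance_name, status, host_name from v$instance;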

Step 1: Check the Oracle 10g prerequisite packages:
On 32-bit Linux:
rpm -q binutils compat-db compat-libstdc++-33 glibc glibc-devel glibc-headers gcc gcc-c++ libXp libstdc++ cpp make libaio ksh elfutils-libelf sysstat libaio libaio-devel setarch --qf '%{name}.%{arch}\n'|sort
On 64-bit Linux:
rpm -q binutils compat-db compat-libstdc++-33 glibc glibc-devel glibc-headers gcc gcc-c++ libXp libstdc++ cpp make libaio ksh elfutils-libelf sysstat libaio libaio-devel setarch --qf '%{name}.%{arch}\n'|sort


# yum install oracle-validated
Step 2: Create the oracle user and groups
Oracle cannot be installed as the root user, so a dedicated oracle user must be created.
groupadd oinstall
groupadd dba
mkdir -p /u01/app/oracle/product/10.2.0/db_1
useradd -g oinstall -G dba -d /home/oracle oracle
passwd oracle
chown -R oracle:oinstall /u01
chmod -R 775 /u01

Step 3: Silent (response file) installation
A response file predefines the parameters that would otherwise have to be typed in during an interactive (X Window) installation, so the installer can read them directly. If an X GUI is available, following the installer prompts step by step achieves the same result.
Unzipping 10201_database_linux32.zip produces the database directory.

Under /home/oracle/database/response there are three response files, enterprise.rsp, standard.rsp and custom.rsp, corresponding to the Enterprise Edition, Standard Edition and custom installations.
Taking enterprise.rsp as an example, most of the <Value Unspecified> entries must be filled in, while some can be left unset:
UNIX_GROUP_NAME="oinstall"
ORACLE_HOME_NAME="hefaproduct"
ORACLE_HOME="/u01/app/oracle/product/10.2.0/db_1"
SHOW_INSTALL_PROGRESS_PAGE= true
SHOW_END_OF_INSTALL_MSGS= true
COMPONENT_LANGUAGES={"zh_CN"}
s_nameForDBAGrp= "dba"
s_nameForOPERGrp="dba"
INSTALL_TYPE="EE"
n_configurationOption=3
RESTART_SYSTEM=false
RESTART_REMOTE_SYSTEM=false
s_dbSid= "orcl"
s_dbRetChar="Unicode standard UTF-8 AL32UTF8"

Start the installation

Log in as the oracle user (the response file path below depends on where you edited it):
./runInstaller -silent -ignoreSysPrereqs -responseFile /home/oracle/database/response/enterprise.rsp

Installation in progress (Mon Sep 21 23:37:00 CST 2009)
............................................................... 18% Done.
............................................................... 36% Done.
............................................................... 54% Done.
............................................................... 72% Done.
................ 76% Done.
Install successful
Linking in progress (Mon Sep 21 23:39:49 CST 2009)
Link successful
Setup in progress (Mon Sep 21 23:43:11 CST 2009)
.............. 100% Done.
Setup successful
End of install phases.(Mon Sep 21 23:43:20 CST 2009)
WARNING:A new inventory has been created in this session. However, it has not yet been registered as the central inventory of this system.
To register the new inventory please run the script '/u01/app/oracle/oraInventory/orainstRoot.sh' with root privileges.
If you do not register the inventory, you may not be able to update or patch the products you installed.
The following configuration scripts
/u01/app/oracle/product/10.2.0/db_1/root.sh
need to be executed as root for configuring the system. If you skip the execution of the configuration tools, the configuration will not be complete and the product wont function properly. In order to get the product to function properly, you will be required to execute the scripts and the configuration tools after exiting the OUI.
The installation of Oracle Database 10g was successful. (Congratulations, it worked. If this message does not appear, check the log it mentions and fix the reported issues.)
Please check '/u01/app/oracle/oraInventory/logs/silentInstall2009-09-21_11-36-22PM.log' for more details.

Next, follow the prompts and run the following as root:
/u01/app/oracle/oraInventory/orainstRoot.sh
/u01/app/oracle/product/10.2.0/db_1/root.sh

Step 4: Create a database instance
Edit /home/oracle/database/response/dbca.rsp (it is in the unpacked installation directory).
Edit different database options depending on how the database is to be created.
For example, the following parameters were set during this installation (adjust them to your environment rather than copying them verbatim; each option is documented in the original file):
RESPONSEFILE_VERSION = "10.0.0"
OPERATION_TYPE = "createDatabase"
GDBNAME = "orcl"
SID = "orcl"
TEMPLATENAME = "New_Database.dbt"
SYSPASSWORD = "xiao"
SYSTEMPASSWORD = "xiao"
DATAFILEDESTINATION ="/u01/app/oracle/oradata/"
RECOVERYAREADESTINATION="/u01/app/oracle/flash_recovery_area"
STORAGETYPE=FS
CHARACTERSET = "UTF8"
MEMORYPERCENTAGE = "40"
SCRIPTDESTINATION ="/u01/app/oracle/admin/ora10g/scripts"
EMCONFIGURATION="LOCAL"
SYSMANPASSWORD = "xiao"
DBSNMPPASSWORD = "xiao"
The above is only a partial reference; only the options enclosed by the createDatabase comments need to be modified, and the comments state whether each option is mandatory.
The database can now be created in SILENT mode.

dbca -silent -createDatabase -responseFile /home/oracle/dbca.rsp
dbca -silent -createDatabase -templateName /u01/app/oracle/product/10.2.0/db_1/assistants/dbca/templates/Transaction_Processing.dbc -gdbName test10g -sid test10g -responseFile NO_VALUE -characterSet ZHS16GBK
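
Once a silent creation run completes successfully, the character set passed to dbca (ZHS16GBK here) can be confirmed with:
select parameter, value from nls_database_parameters
where parameter in ('NLS_CHARACTERSET','NLS_NCHAR_CHARACTERSET');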


[oracle@oracle10g ~]$ dbca -silent -createDatabase -templateName /u01/app/oracle/product/10.2.0/db_1/assistants/dbca/templates/Transaction_Processing.dbc -gdbName test10g -sid test10g -responseFile NO_VALUE -characterSet ZHS16GBK

An unexpected exception has been detected in native code outside the VM.
Unexpected Signal : 11 occurred at PC=0xF7604D04
Function=cfree+0x34
Library=/lib/libc.so.6

Check /etc/hosts

After the final message reports that creation succeeded,

run the following script as root:
/u01/app/oracle/oraInventory/orainstRoot.sh

Note: the path /home/oracle/database/response/ depends on where dbca.rsp is actually located.

This varies by environment; locate the file under the Oracle home directory.

Step 5: Configure Oracle to start automatically
Edit /etc/oratab and change N to Y.
As the oracle user (su - oracle), edit .bash_profile in the home directory
and set ORACLE_HOME and ORACLE_SID.
Edit /etc/rc.local and add the following:
su - oracle -c 'lsnrctl start'
su - oracle -c 'dbstart'
Edit dbstart under $ORACLE_HOME/bin and change:
ORATAB=/etc/oratab


Errors encountered during the process:
1. Template General Purpose does not exist. Please specify an existing template for the database creation operation.
Solution: change General Purpose to General_Purpose.dbc in the dbca.rsp file.
2. SQL> startup
ORA-01078: failure in processing system parameters
LRM-00109: could not open parameter file '/usr/local/oracle/app/oracle/product/10.2.0/db_1/dbs/initorcl.ora'
Solution:
cp /usr/local/oracle/app/oracle/product/10.2.0/db_1/dbs/init.ora /usr/local/oracle/app/oracle/product/10.2.0/db_1/dbs/initorcl.ora
3. SQL> startup;
ORA-00371: not enough shared pool memory, should be atleast 62198988 bytes
Solution:
Edit $ORACLE_HOME/dbs/initORCL.ora and change shared_pool_size = 35000000 to 62198988.
4. Error in the DBCA log: ORA-27123: unable to attach to shared memory segment
Solution:
On 32-bit machines, edit /etc/sysctl.conf and change kernel.shmmax to 2G; bigger is not necessarily better.
5. ORA-01102: cannot mount database in EXCLUSIVE mode
Solution: fuser -k lkORCL. The file is in the dbs directory under ORACLE_HOME; the error occurs because the file is still held, so it has to be released.
6. The install log contains "Caught UnknownHostException".
Edit /etc/hosts and add an entry for the host name used as ORACLE_HOME_NAME in the configuration; it can be mapped to 127.0.0.1.

Pasted from: <http://surelei.iteye.com/blog/1537516>
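
As a follow-up to items 3 and 4 in the error list above, the effective shared pool sizing can be checked once the instance is open (view contents vary by version):
show parameter shared_pool_size
select name, round(bytes/1024/1024) mb from v$sgainfo where name in ('Shared Pool Size','Maximum SGA Size');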

./dbca -silent -createDatabase -templateName /u01/app/oracle/product/10.2.0/db_1/assistants/dbca/templates/Transaction_Processing.dbc -gdbName test10g -sid test10g -responseFile NO_VALUE -characterSet ZHS16GBK

[oracle@oracle10g response]$ cat enterpriise1.rsp
####################################################################
## Copyright(c) Oracle Corporation 1998,2005. All rights reserved.##
## ##
## Specify values for the variables listed below to customize ##
## your installation. ##
## ##
## Each variable is associated with a comment. The comment ##
## identifies the variable type. ##
## ##
## Please specify the values in the following format: ##
## ##
## Type Example ##
## String "Sample Value" ##
## Boolean True or False ##
## Number 1000 ##
## StringList {"String value 1","String Value 2"} ##
## ##
## The values that are given as <Value Required> need to be ##
## specified for a silent installation to be successful. ##
## ##
## ##
## This response file is generated by Oracle Software ##
## Packager. ##
####################################################################


RESPONSEFILE_VERSION=2.2.1.0.0

#------------------------------------------------------------------------------
#Name : UNIX_GROUP_NAME
#Datatype : String
#Description: Unix group to be set for the inventory directory.
#Example : UNIX_GROUP_NAME = "install"
#------------------------------------------------------------------------------
UNIX_GROUP_NAME=oinstall

#------------------------------------------------------------------------------
#Name : FROM_LOCATION
#Datatype : String
#Description: Complete path to the products.xml.
#Example : FROM_LOCATION = "../stage/products.xml"
#------------------------------------------------------------------------------
FROM_LOCATION="../stage/products.xml"

#------------------------------------------------------------------------------
#Name : NEXT_SESSION_RESPONSE
#Datatype : String
#Description: Optionally specifies the full path of the next session's response
# file. If only a file name is specified, the response file is
# retrieved from the <TEMP>/oraInstall directory. This variable is
# only active if NEXT_SESSION is set to true.
#Example: NEXT_SESSION_RESPONSE = "nextinstall.rsp"
#------------------------------------------------------------------------------
NEXT_SESSION_RESPONSE=<Value Unspecified>

#------------------------------------------------------------------------------
#Name : ORACLE_HOME
#Datatype : String
#Description: Complete path of the Oracle Home.
#Example : ORACLE_HOME = "/product/10.2.0/db_1"
#------------------------------------------------------------------------------
ORACLE_HOME="/u01/app/oracle/product/10.2.0/db_1"

#------------------------------------------------------------------------------
#Name : ORACLE_HOME_NAME
#Datatype : String
#Description: Oracle Home Name. Used in creating folders and services.
#Example : ORACLE_HOME_NAME = "OraDbHome1"
#------------------------------------------------------------------------------
ORACLE_HOME_NAME="OraDbHome1"

#------------------------------------------------------------------------------
#Name : TOPLEVEL_COMPONENT
#Datatype : StringList
#Description: The top level component to be installed in the current session.
#Example : TOPLEVEL_COMPONENT = {"oracle.server","10.2.0.1.0"}
#------------------------------------------------------------------------------
TOPLEVEL_COMPONENT={"oracle.server","10.2.0.1.0"}

#------------------------------------------------------------------------------
#Name : DEINSTALL_LIST
#Datatype : StringList
#Description: List of components to be deinstalled during a deinstall session.
#Example : DEINSTALL_LIST = {"oracle.server","10.2.0.1.0"}
#------------------------------------------------------------------------------
DEINSTALL_LIST={"oracle.server","10.2.0.1.0"}

#------------------------------------------------------------------------------
#Name : SHOW_SPLASH_SCREEN
#Datatype : Boolean
#Description: Set to true if the initial splash screen in OUI needs to be
# shown.
#Example : SHOW_SPLASH_SCREEN = true
#------------------------------------------------------------------------------
SHOW_SPLASH_SCREEN=false

#------------------------------------------------------------------------------
#Name : SHOW_WELCOME_PAGE
#Datatype : Boolean
#Description: Set to true if the Welcome page in OUI needs to be shown.
#Example : SHOW_WELCOME_PAGE = false
#------------------------------------------------------------------------------
SHOW_WELCOME_PAGE=false

#------------------------------------------------------------------------------
#Name : SHOW_NODE_SELECTION_PAGE
#Datatype : Boolean
#Description: Set to true if the node selection page in OUI needs to be shown.
#
# This is only relevent when performing an upgrade of Oracle
# Clusterware.
#
#Example : SHOW_NODE_SELECTION_PAGE = false
#------------------------------------------------------------------------------
SHOW_NODE_SELECTION_PAGE=false

#------------------------------------------------------------------------------
#Name : SHOW_SUMMARY_PAGE
#Datatype : Boolean
#Description: Set to true if the summary page in OUI needs to be shown.
#
# The summary page shows the list of components that will be
# installed in this session.
#
#Example : SHOW_SUMMARY_PAGE = true
#------------------------------------------------------------------------------
SHOW_SUMMARY_PAGE=false

#------------------------------------------------------------------------------
#Name : SHOW_INSTALL_PROGRESS_PAGE
#Datatype : Boolean
#Description: Set to true if the install progress page in OUI needs to be
# shown.
#
# This page shows the current status in the installation. The
# current status includes the product being installed and the file
# being copied.
#
#Example : SHOW_INSTALL_PROGRESS_PAGE = true
#------------------------------------------------------------------------------
SHOW_INSTALL_PROGRESS_PAGE=false

#------------------------------------------------------------------------------
#Name : SHOW_CONFIG_TOOL_PAGE
#Datatype : Boolean
#Description: Set to true if the config assistants page in OUI needs to be
# shown.
#
# This page shows the list of configuration assistants that are
# part of this installation and are configured to launch
# automatically. It shows the status of each assistant, including
# any failures with detailed information on why it failed.
#
#Example : SHOW_CONFIG_TOOL_PAGE = true
#------------------------------------------------------------------------------
SHOW_CONFIG_TOOL_PAGE=false

#------------------------------------------------------------------------------
#Name : SHOW_XML_PREREQ_PAGE
#Datatype : Boolean
#Description: This variable determines whether or not to show the prereq page.
#Example : SHOW_XML_PREREQ_PAGE = true
#------------------------------------------------------------------------------
SHOW_XML_PREREQ_PAGE=false

#------------------------------------------------------------------------------
#Name : SHOW_ROOTSH_CONFIRMATION
#Datatype : Boolean
#Description: Set to true if the Confirmation dialog asking to run the root.sh
# script in OUI needs to be shown.
#Example: SHOW_ROOTSH_CONFIRMATION = false
#------------------------------------------------------------------------------
SHOW_ROOTSH_CONFIRMATION=true

#------------------------------------------------------------------------------
#Name : SHOW_END_SESSION_PAGE
#Datatype : Boolean
#Description: Set to true if the end of session page in OUI needs to be shown.
#
# This page shows if the installation is successful or not.
#
#Example : SHOW_END_SESSION_PAGE = true
#------------------------------------------------------------------------------
SHOW_END_SESSION_PAGE=false

#------------------------------------------------------------------------------
#Name : SHOW_EXIT_CONFIRMATION
#Datatype : Boolean
#Description: Set to true if the confirmation when exiting OUI needs to be shown.
#Example : SHOW_EXIT_CONFIRMATION = true
#------------------------------------------------------------------------------
SHOW_EXIT_CONFIRMATION=false

#------------------------------------------------------------------------------
#Name : NEXT_SESSION
#Datatype : Boolean
#Description: Set to true to allow users to go back to the File Locations page
# for another installation. This flag also needs to be set to true
# in order to process another response file
# (see NEXT_SESSION_RESPONSE).
#Example : NEXT_SESSION = true
#------------------------------------------------------------------------------
NEXT_SESSION=false

#------------------------------------------------------------------------------
#Name : NEXT_SESSION_ON_FAIL
#Datatype : Boolean
#Description: Set to true to allow users to invoke another session even if
# current install session has failed. This flag is only relevant
# if NEXT_SESSION is set to true.
#Example : NEXT_SESSION_ON_FAIL = true
#------------------------------------------------------------------------------
NEXT_SESSION_ON_FAIL=false

#------------------------------------------------------------------------------
#Name : SHOW_DEINSTALL_CONFIRMATION
#Datatype : Boolean
#Description: Set to true if deinstall confimation is needed during a deinstall
# session.
#Example : SHOW_DEINSTALL_CONFIRMATION = true
#------------------------------------------------------------------------------
SHOW_DEINSTALL_CONFIRMATION=false

#------------------------------------------------------------------------------
#Name : SHOW_DEINSTALL_PROGRESS
#Datatype : Boolean
#Description: Set to true if deinstall progress is needed during a deinstall
# session.
#Example : SHOW_DEINSTALL_PROGRESS = true
#------------------------------------------------------------------------------
SHOW_DEINSTALL_PROGRESS=false

#------------------------------------------------------------------------------
#Name : RESTART_SYSTEM
#Datatype : Boolean
#Description: Set to true to allow automatic restart of the system, if set to
# false then installer will exit without restarting, no exit
# confirmation dialog is shown
#Example : RESTART_SYSTEM = false
#------------------------------------------------------------------------------
RESTART_SYSTEM=false

#------------------------------------------------------------------------------
#Name : RESTART_REMOTE_SYSTEM
#Datatype : Boolean
#Description: Set to true to allow automatic restart of the remote systems, if
# set to false then installer will not restart the remote systems,
# no exit confirmation dialog is shown
#Example : RESTART_REMOTE_SYSTEM = false
#------------------------------------------------------------------------------
RESTART_REMOTE_SYSTEM=false

#------------------------------------------------------------------------------
#Name : REMOVE_HOMES
#Datatype : StringList
#Description: List of the homes to be removed during a deinstall session. Each
# home is represented by its full path.
#Example : REMOVE_HOMES = {<full_path_of_home1>,<full_path_of_home2>, ...}
#------------------------------------------------------------------------------
REMOVE_HOMES=<Value Unspecified>

#------------------------------------------------------------------------------
#Name : ORACLE_HOSTNAME
#Datatype : String
#Description: This variable holds the hostname of the system as set by the
# user.
#
# It can be used to force the installation to use an alternative
# hostname rather than using the first hostname found on the
# system (e.g., for systems with multiple hostnames and network
# interfaces).
#
#Example : ORACLE_HOSTNAME =
#------------------------------------------------------------------------------
ORACLE_HOSTNAME=<Value Unspecified>

#------------------------------------------------------------------------------
#Name : SHOW_END_OF_INSTALL_MSGS
#Datatype : Boolean
#Description: Set to true if the text on end of install screen is to be shown.
# The text is always available under
# <Oracle Home>/install/readme.txt.
#Example : SHOW_END_OF_INSTALL_MSGS = true
#------------------------------------------------------------------------------
SHOW_END_OF_INSTALL_MSGS=false

#------------------------------------------------------------------------------
#Name : CLUSTER_NODES
#Datatype : StringList
#Description: This variable represents the cluster node names selected by the
# user for installation.
#Example : CLUSTER_NODES = {"node1","node2"}
#------------------------------------------------------------------------------
CLUSTER_NODES=<Value Unspecified>

#------------------------------------------------------------------------------
#Name : COMPONENT_LANGUAGES
#Datatype : StringList
#Description: Languages in which the components will be installed.
#
# The following choices are available. The value should contain
# only one of these choices.
# en, : English
# fr, : French
# ar, : Arabic
# bn, : Bengali
# pt_BR,: Brazilian Portuguese
# bg, : Bulgarian
# fr_CA,: Canadian French
# ca, : Catalan
# hr, : Croatian
# cs, : Czech
# da, : Danish
# nl, : Dutch
# ar_EG,: Egyptian
# en_GB,: English (United Kingdom)
# et, : Estonian
# fi, : Finnish
# de, : German
# el, : Greek
# iw, : Hebrew
# hu, : Hungarian
# is, : Icelandic
# in, : Indonesian
# it, : Italian
# ja, : Japanese
# ko, : Korean
# es, : Latin American Spanish
# lv, : Latvian
# lt, : Lithuanian
# ms, : Malay
# es_MX,: Mexican Spanish
# no, : Norwegian
# pl, : Polish
# pt, : Portuguese
# ro, : Romanian
# ru, : Russian
# zh_CN,: Simplified Chinese
# sk, : Slovak
# sl, : Slovenian
# es_ES,: Spanish
# sv, : Swedish
# th, : Thai
# zh_TW,: Traditional Chinese
# tr, : Turkish
# uk, : Ukrainian
# vi, : Vietnamese
#
#Example : COMPONENT_LANGUAGES = {"en"}
#------------------------------------------------------------------------------
COMPONENT_LANGUAGES={"en"}

#------------------------------------------------------------------------------
#Name : INSTALL_TYPE
#Datatype : String
#Description: Installation type of the component.
#
# The following choices are available. The value should contain
# only one of these choices.
# EE : Enterprise Edition
# SE : Standard Edition
# Custom : Custom
#Example : INSTALL_TYPE = "EE"
#------------------------------------------------------------------------------
INSTALL_TYPE="EE"


###############################################################################
# #
# Dialog: PRIVILEGED OPERATING SYSTEM GROUPS #
# ------------------------------------------ #
# Provide values for the OS groups you wish to grant OSDBA and OSOPER #
# privileges. If the user performing the install is a member of the group #
# "dba", then that will be used unless specified otherwise below. #
# #
###############################################################################

#------------------------------------------------------------------------------
# Name : s_nameForDBAGrp
# Datatype : String
# Description: The OS group to be granted OSDBA privileges
# Example : s_nameForDBAGrp = "dba"
#------------------------------------------------------------------------------
s_nameForDBAGrp=dba

#------------------------------------------------------------------------------
# Name : s_nameForOPERGrp
# Datatype : String
# Description: The OS group to be granted OSOPER privileges
# Example : s_nameForDBAGrp = "oper"
#------------------------------------------------------------------------------
s_nameForOPERGrp=dba


###############################################################################
# #
# Dialog: SELECT CONFIGURATION OPTION #
# ----------------------------------- #
# Please note that you must specificy values for all three entries below in #
# order to supress the Configuration Option Dialog. Even if you choose not #
# to configure an ASM instance, you must specify values for s_ASMSYSPassword #
# and s_ASMSYSPasswordAgain in order for the dialog not to be displayed. If #
# you choose to create a database or perform a software-only installation, #
# simply leave set these passwords to "". They will be ignored, but are #
# required for proper screen supression. #
# #
###############################################################################

#------------------------------------------------------------------------------
# Name : n_configurationOption
# Datatype : Number
# Description: Determines the type of configuration to perform for the session.
#
# This entry should be specified as an number. The valid values
# that you can use map to the following options:
# 1 - Create a Database
# 2 - Configure an ASM instance
# 3 - Install Software Only
#
# Example : n_configurationOption=1
#------------------------------------------------------------------------------
n_configurationOption=3

#------------------------------------------------------------------------------
# Name : s_ASMSYSPassword
# Datatype : String
# Description: ASM SYS Password
#------------------------------------------------------------------------------
s_ASMSYSPassword=""

#------------------------------------------------------------------------------
# Name : s_ASMSYSPasswordAgain
# Datatype : String
# Description: ASM Sys password confirmation
#------------------------------------------------------------------------------
s_ASMSYSPasswordAgain=""


###############################################################################
# #
# Dialog: SELECT DATABASE CONFIGURATION #
# ------------------------------------- #
# If you chose n_performUpgrade=0 (Do not Upgrade an existing database) and #
# n_configurationOption=1 (Create a Database) then you need to specify the #
# type of database you wish to create. This can be done by selecting one of #
# the three starter database types or by choosing the Advanced configuration #
# option wish allows you to completely customize your database using the #
# DBCA. #
# #
# If you did not choose n_performUpgrade=0 and n_configurationOption=1 #
# then you need not specify values for n_dbType or s_responseFileName. #
# #
###############################################################################

#------------------------------------------------------------------------------
# Name : n_dbType
# Datatype : Number
# Description: Determines the type of database to create.
#
# This entry should be specified as a number. The valid values
# you can specify map to the following options:
# 1 - General Purpose Starter Database
# 2 - Transaction Processing Starter Database
# 3 - Data Warehouse Starter Database
# 4 - Advanced Configuration
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
#
# Example : n_dbType=1
#------------------------------------------------------------------------------
n_dbType=<Value Required>

#------------------------------------------------------------------------------
# Name : s_responseFileName
# Datatype : String
# Description: Stores the location of the DBCA response file for an Advanced
# Configuration.
#
# If you chose to perform a silent Advanced Configuration by
# specifying n_dbType=4 above, then you need to specify a number
# of additional Advanced Configuration parameters in a response
# file that is used by the DBCA. A template of this response file
# can be found on the installation media in response/dbca.rsp. Use
# this file to completely customize the silent database
# configuration, and set s_responseFileName to the location of
# this response file.
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType=4
#
# Example : oracle.assistants.server:s_responseFileName = "/tmp/dbca.rsp"
#------------------------------------------------------------------------------
oracle.assistants.server:s_responseFileName=<Value Unspecified>


###############################################################################
# #
# Dialog: SPECIFY DATABASE CONFIGURATION OPTIONS #
# ---------------------------------------------- #
# If you chose n_performUpgrade=0 (Do Not Upgrade an existing database), #
# n_configurationOption=1 (Create a Database), and n_dbType = 1, 2 or 3 #
# (i.e., you chose to create a Starter Database), then you can specify the #
# following additional properties of your database here: #
# 1. Database SID and Global Database Name (s_dbSid, s_globalDBName) #
# 2. Database Character Set (s_dbRetChar) #
# 3. Whether you want Example Schemas (b_loadExampleSchemas) #
# #
###############################################################################

#------------------------------------------------------------------------------
# Name : s_globalDBName
# Datatype : String
# Description: The Starter Database Global Database Name
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
#
# Example : s_globalDBName = "mydb.mydomain.com"
#------------------------------------------------------------------------------
s_globalDBName=<Value Required>

#------------------------------------------------------------------------------
# Name : s_dbSid
# Datatype : String
# Description: The Starter Database SID.
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
#
# Example : s_dbSid = "orcl"
#------------------------------------------------------------------------------
s_dbSid=<Value Required>

#------------------------------------------------------------------------------
# Name : s_dbRetChar
# Datatype : String
# Description: The Starter Database Characterset
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
#
# Example : s_dbRetChar = "WE8ISO8859P1"
#------------------------------------------------------------------------------
s_dbRetChar="WE8ISO8859P1"

#------------------------------------------------------------------------------
# Name : b_loadExampleSchemas
# Datatype : Boolean
# Description: This variable controls whether to load Example Schemas onto the
# starter database or not.
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
#
# Example : b_loadExampleSchemas=false
#------------------------------------------------------------------------------
b_loadExampleSchemas=false


###############################################################################
# #
# Dialog: SELECT DATABASE MANAGEMENT OPTION #
# ----------------------------------------- #
# If you chose n_performUpgrade=0 (Do Not Upgrade an existing database), #
# n_configurationOption=1 (Create a Database), and n_dbType = 1, 2 or 3 #
# (i.e., you chose to create a Starter Database), then you can specify the #
# following management options for your Starter Database: #
# 1. Whether to use Database Control or Grid Control to manage your database #
# (b_useDBControl) #
# 2. If you choose to use Database Control, you can choose to receive Email #
# Notifications for critical alerts (b_receiveEmailNotification, #
# s_dlgEMSMTPServer, and s_dlgEMEmailAddress) #
# 3. If you choose to use Grid Control you must specify the Management #
# service that you wish to use. (s_dlgEMCentralAgentSelected) #
# #
###############################################################################

#------------------------------------------------------------------------------
# Name : b_useDBControl
# Datatype : Boolean
# Description: Determines whether to use Database Control or Grid Control to
# manage the database.
#
# This variable also controls the following variables depending on
# its value:
# true: - b_receiveEmailNotification
# - s_dlgEMSMTPServer
# - s_dlgEMEmailAddress
# false:- s_dlgEMCentralAgentSelected
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
#
# Example : b_useDBControl=true
#------------------------------------------------------------------------------
b_useDBControl=true

#------------------------------------------------------------------------------
# Name : b_receiveEmailNotification
# Datatype : Boolean
# Description: Determines whether to receive email notification for critical
# alerts when using DB control.
#
# If set to true, this variable also requires the following
# variables to be set:
# - s_dlgEMSMTPServer
# - s_dlgEMEmailAddress
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# b_useDBControl=true
#
# Example : b_receiveEmailNotification=false
#------------------------------------------------------------------------------
b_receiveEmailNotification=false

#------------------------------------------------------------------------------
# Name : s_dlgEMSMTPServer
# Datatype : String
# Description: Determines the SMTP server used for email notifications.
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# b_useDBControl=true
# b_receiveEmailNotification=true
#
# Example : s_dlgEMSMTPServer = "mysmtp.mydomain.com"
#------------------------------------------------------------------------------
s_dlgEMSMTPServer=<Value Unspecified>

#------------------------------------------------------------------------------
# Name : s_dlgEMEmailAddress
# Datatype : String
# Description: Determines the email address the notifications are sent to.
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# b_useDBControl=true
# b_receiveEmailNotification=true
#
# Example : s_dlgEMEmailAddress = "myemail@mydomain.com"
#------------------------------------------------------------------------------
s_dlgEMEmailAddress=<Value Unspecified>

#------------------------------------------------------------------------------
# Name : s_dlgEMCentralAgentSelected
# Datatype : String
# Description: Determines the Management Service to use if Grid Control is
# selected to manage the database.
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# b_useDBControl=false
#
# Example : s_dlgEMCentralAgentSelected = "http://myem.mydomain.com:4889/em/upload"
#------------------------------------------------------------------------------
s_dlgEMCentralAgentSelected=<Value Unspecified>


###############################################################################
# #
# Dialog: SPECIFY DATABASE STORAGE OPTION #
# --------------------------------------- #
# If you chose n_performUpgrade=0 (Do Not Upgrade an existing database), #
# n_configurationOption=1 (Create a Database), and n_dbType = 1, 2 or 3 #
# (i.e., you chose to create a Starter Database), then you can specify the #
# type of storage options for your Starter Database using the entries below. #
# #
###############################################################################

#------------------------------------------------------------------------------
# Name : n_dbStorageType
# Datatype : Number
# Description: Determines the type of storage to use for the the database.
#
# This entry should be specified as an number. The valid values
# that you can use map to the following options:
# 1 - Place the data files on a file system
# 2 - Use Automatic Storage Management (ASM)
# 3 - Place the datafiles on raw partitions
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
#
#------------------------------------------------------------------------------
n_dbStorageType=<Value Unspecified>

#------------------------------------------------------------------------------
# Name : s_mountPoint
# Datatype : String
# Description: Location on the file system where you want to place your data
# files
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# n_dbStorageType = 1
#
#------------------------------------------------------------------------------
s_mountPoint=<Value Unspecified>

#------------------------------------------------------------------------------
# Name : s_rawDeviceMapFileLocation
# Datatype : String
# Description: Specifies the location of a the raw devices mapping file.
#
# If you are using raw storage, you must place each tablespace,
# redo log, control file, etc. on a separate raw partition. The
# raw devices mapping file contains a mapping that tells the the
# Database Configuration Assistant (DBCA) which tablespace, redo
# log, etc. to put in which raw device. See the installation
# guide for more information on the raw devices mapping file.
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# n_dbStorageType = 3
#
#------------------------------------------------------------------------------
s_rawDeviceMapFileLocation=<Value Unspecified>


###############################################################################
# #
# Dialog: SPECIFY BACKUP AND RECOVERY OPTIONS #
# ------------------------------------------- #
# If you chose n_performUpgrade=0 (Do Not Upgrade an existing database), #
# n_configurationOption=1 (Create a Database), and n_dbType = 1, 2 or 3 #
# (i.e., you chose to create a Starter Database), then you can specify the #
# out-of-box backup and recovery options for your database using the entries #
# below. #
# #
###############################################################################

#------------------------------------------------------------------------------
# Name : b_enableAutoBackup
# Datatype : Boolean
# Description: Determines whether to enable automated backups or not.
#
# If you set this entry to TRUE, then values for the the following
# additional entries also need to be set:
# - b_useFileSystemForRecovery
# - s_dlgRBORecoveryLocation
# - s_dlgRBOUsername
# - s_dlgRBOPassword
#
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
#
# Example : b_enableAutoBackup=true
#------------------------------------------------------------------------------
b_enableAutoBackup=false

#------------------------------------------------------------------------------
# Name : b_useFileSystemForRecovery
# Datatype : Boolean
# Description: Determines whether to use a file system or ASM for storing files
# related to backup and recovery of your database.
#
# If set to TRUE, then the file system will be used. If set to
# FALSE, then ASM will be used. This entry is only relevant if
# b_enableAutoBackup is set to TRUE above.
#
# If you set this entry to TRUE (use the file system), then you
# should also specify a value for:
# - s_dlgRBORecoveryLocation
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# b_enableAutoBackup=true
#
# Example : b_useFileSystemForRecovery=true
#------------------------------------------------------------------------------
b_useFileSystemForRecovery=true

#------------------------------------------------------------------------------
# Name : s_dlgRBORecoveryLocation
# Datatype : String
# Description: If using a file system for storing backup and recovery files,
# this entry is used for specifying the location on the file
# system for storing these files..
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# b_enableAutoBackup=true
# b_useFileSystemForRecovery=true
#
# Example : s_dlgRBORecoveryLocation="/u01/app/oracle/flash_recovery_area"
#------------------------------------------------------------------------------
s_dlgRBORecoveryLocation=<Value Unspecified>

#------------------------------------------------------------------------------
# Name : s_dlgRBOUsername
# Datatype : String
# Description: Regardless of the type of storage you choose for backup and
# recovery, if you decide to enable automated backups, a job will
# be scheduled to run daily at 2:00 AM to backup your database.
#
# This job will run as the operating system user that you specify
# using this entry.
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# b_enableAutoBackup=true
#
# Example : s_dlgRBOUsername = "oracle"
#------------------------------------------------------------------------------
s_dlgRBOUsername=<Value Unspecified>

#------------------------------------------------------------------------------
# Name : s_dlgRBOPassword
# Datatype : String
# Description: Regardless of the type of storage you choose for backup and
# recovery, if you decide to enable automated backups, a job will
# be scheduled to run daily at 2:00 AM to backup your database.
#
# This job will run as the operating system user specified by
# the above entry s_dlgRBOUsername. This entry stores the
# the password for the above operating system user.
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# b_enableAutoBackup=true
#
#------------------------------------------------------------------------------
s_dlgRBOPassword=<Value Unspecified>


###############################################################################
# #
# Dialog: CONFIGURE AUTOMATIC STORAGE MANAGEMENT #
# ---------------------------------------------- #
# If you chose n_performUpgrade=0 (Do Not Upgrade an existing database), #
# n_configurationOption=1 (Create a Database), n_dbType = 1, 2 or 3, and #
# n_dbStorageType=2 (use ASM) #
# #
# -- OR -- #
# #
# If you chose n_performUpgrade=0 (Do Not Upgrade an existing database), #
# n_configurationOption=1 (Create a Database), n_dbType = 1, 2 or 3, #
# b_enableAutoBackup=true, and b_useFileSystemForRecovery=false (use ASM #
# for backup and recovery) #
# #
# -- OR -- #
# #
# If you chose n_performUpgrade=0 and n_configurationOption=2 (configure #
# Automatic Storage Management) #
# #
# Then you need to specify configuration parameters for your Automatic #
# Storage Management instance. Do so using the entries below. #
# #
# -------------- #
# #
# NOTE: For single instance configurations of ASM, Cluster #
# Synchronization Services (CSS) must be configured as root prior to the #
# the creation of the ASM instance using the DBCA. Thus, for silent #
# configurations of Automatic Storage Management, the DBCA will not be #
# launched automatically at the end of the installation. This will give #
# you the opportunity to run root.sh after the installation portion is #
# is complete but before configuration begins. #
# #
# Instead, the values you enter here are stored for later use. After you run #
# root.sh, you can execute the file: #
# #
# $ORACLE_HOME/cfgtoollogs/configToolAllCommands #
# #
# This will launch the configuration tools using the values saved from this #
# response file. #
# #
###############################################################################

#------------------------------------------------------------------------------
#Name : s_dlgASMCfgDiskGroupName
#Datatype : String
#Description: Stores the name of the disk group you are creating within ASM
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# n_dbStorageType = 2
#
# -- OR --
#
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# b_enableAutoBackup=true
# b_useFileSystemForRecovery=false
#
# -- OR --
#
# n_configurationOption=2
# n_performUpgrade=0
#
# Example : s_dlgASMCfgDiskGroupName="DATA"
#------------------------------------------------------------------------------
s_dlgASMCfgDiskGroupName="DATA"

#------------------------------------------------------------------------------
#Name : s_dlgASMCfgDiskDiscoveryString
#Datatype : String
#Description: The Disk Discovery path contains a set of regular expressions
# that are used to identify candidate disks for ASM to use.
#
# By default, ASM searches in a platform-specific location for
# candidate disks. See the installation guide for more details
# on the Disk Discovery Path. If disks are located in the
# operating system specific default location, you need not change
# the value of this variable.
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# n_dbStorageType = 2
#
# -- OR --
#
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# b_enableAutoBackup=true
# b_useFileSystemForRecovery=false
#
# -- OR --
#
# n_configurationOption=2
# n_performUpgrade=0
#
# Example : s_dlgASMCfgDiskDiscoveryString="/dev/raw/*"
#------------------------------------------------------------------------------
s_dlgASMCfgDiskDiscoveryString=<Value Unspecified>

#------------------------------------------------------------------------------
#Name : n_dlgASMCfgRedundancySelected
#Datatype : Number
#Description: Specifies the redundancy level that Automatic Storage Management
# will use.
#
# This entry must be specified as one of the following numbers
# which map to different redundancy levels:
# 1 - External redundancy (no mirroring)
# 2 - Normal redundancy (two-way mirroring)
# 3 - High redundancy (three-way mirroring)
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# n_dbStorageType = 2
#
# -- OR --
#
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# b_enableAutoBackup=true
# b_useFileSystemForRecovery=false
#
# -- OR --
#
# n_configurationOption=2
# n_performUpgrade=0
#
# Example : n_dlgASMCfgRedundancySelected=2
#------------------------------------------------------------------------------
n_dlgASMCfgRedundancySelected=<Value Unspecified>

#------------------------------------------------------------------------------
# Name : sl_dlgASMCfgSelectableDisks
# Datatype : StringList
# Description: Specifies the disks that can be made part of the disk group
# created during the installation.
#
# This list of disks contains the path to the disk in question
# and the size of the disk in MB. Specify the disks you
# wish to use for ASM storage as alternative pairs of
# <path to disk>, <size of disk>.
#
# The set of disks you wish to use should be specified in two
# places - this variable as well as the following variable:
# - sl_dlgASMCfgDiskSelections
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# n_dbStorageType = 2
#
# -- OR --
#
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# b_enableAutoBackup=true
# b_useFileSystemForRecovery=false
#
# -- OR --
#
# n_configurationOption=2
# n_performUpgrade=0
#
# Example : sl_dlgASMCfgSelectableDisks={"/dev/raw/asm1","1800","/dev/raw/asm2","1800","/dev/raw/asm3","1800","/dev/raw/asm4","1800"}
#------------------------------------------------------------------------------
sl_dlgASMCfgSelectableDisks=<Value Unspecified>

#------------------------------------------------------------------------------
# Name : sl_dlgASMCfgDiskSelections
# Datatype : StringList
# Description: Specifies the disks that will be made part of the disk group
# created during the installation.
#
# This list of disks contains the path to the disk in question
# and the size of the disk in MB. Specify the disks you
# wish to use for ASM storage as alternative pairs of
# <path to disk>, <size of disk>.
#
# The set of disks you wish to use should be specified in two
# places - this variable as well as the following variable:
# - sl_dlgASMCfgSelectableDisks
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# n_dbStorageType = 2
#
# -- OR --
#
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# b_enableAutoBackup=true
# b_useFileSystemForRecovery=false
#
# -- OR --
#
# n_configurationOption=2
# n_performUpgrade=0
#
# Example : sl_dlgASMCfgDiskSelections={"/dev/raw/asm1","1800","/dev/raw/asm2","1800","/dev/raw/asm3","1800","/dev/raw/asm4","1800"}
#------------------------------------------------------------------------------
sl_dlgASMCfgDiskSelections=<Value Unspecified>


###############################################################################
# #
# Dialog: SELECT ASM DISK GROUP #
# ----------------------------- #
# If you chose n_performUpgrade=0 (Do Not Upgrade an existing database), #
# n_configurationOption=1 (Create a Database), n_dbType = 1, 2 or 3, and #
# n_dbStorageType=2 (use ASM) #
# #
# -- OR -- #
# #
# If you chose n_performUpgrade=0 (Do Not Upgrade an existing database), #
# n_configurationOption=1 (Create a Database), n_dbType = 1, 2 or 3, #
# b_enableAutoBackup=true, and b_useFileSystemForRecovery=false (use ASM #
# for backup and recovery) #
# #
# -- AND -- #
# #
# If you are installing on a system where an ASM instance already exists #
# #
# Then you can use the existing ASM instance for storage. As part of the #
# the installation, you cannot create a new disk group in this case. You can #
# only use one of the existing Disk groups in your existing ASM instance. #
# #
# If there is not enough free space in your existing disk group to store #
# the additional data that you wish to store, then the installer will allow #
# you to add existing disks to the disk group you select to use. This can #
# be done silently by specifying the values for: #
# #
# - s_dlgASMCfgDiskGroupName #
# - s_dlgASMCfgDiskDiscoveryString #
# - n_dlgASMCfgRedundancySelected #
# - sl_dlgASMCfgSelectableDisks #
# - sl_dlgASMCfgDiskSelections #
# #
# in the section above. #
# #
# If you wish to create a new database, store database or backup and #
# recovery files in an existing ASM instance, and create new disks groups #
# for this purpose (rather than having to use an existing disk group, then #
# you should perform a software only installation and then use the Database #
# Configuruation Assistant (DBCA) to create disk groups in an existing ASM #
# instance. #
# #
###############################################################################

#------------------------------------------------------------------------------
# Name : sl_dlgASMDskGrpSelectedGroup
# Datatype : StringList
# Description: Specifies the existing disk group in an existing ASM instance
# that you want to use for storage.
#
# The disk group must be specified as a set of entries in a
# string list. The entries in the string list consist of the
# 1. disk group name
# 2. the total space in the disk group (in MB)
# 3. the available space in the disk group (in MB)
# 4. the redundancy level of the disk group
#
# These values can be obtained easily by executing the following
# command from the Oracle Home where your existing ASM instance
# is running (you must set the ORACLE_HOME environment variable
# prior to running the command):
#
# $ORACLE_HOME/bin/kfod op=groups _asmsid='+ASM'
#
#
# Pre-requsites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# n_dbStorageType = 2
#
# -- OR --
#
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# b_enableAutoBackup=true
# b_useFileSystemForRecovery=false
#
# Example : sl_dlgASMDskGrpSelectedGroup={"DATA","3600","3498","NORMAL"}
#------------------------------------------------------------------------------
sl_dlgASMDskGrpSelectedGroup={" "," "," "," "}


###############################################################################
# #
# Dialog: SELECT DATABASE MANAGEMENT OPTION (ASM only configuration) #
# ------------------------------------------------------------------ #
# If you chose n_performUpgrade=0 (Do Not Upgrade an existing database), #
# and n_configurationOption=2 (Confiugure an ASM instance) and if you already #
# have installed an Enterprise Manager Grid Control Agent on the system, you #
# have the option of centrally managing your ASM instance. You can do so by #
# setting the variables below. #
# #
# NOTE: If you choose to Configure an ASM instance in its own ORACLE_HOME, #
# (which is recommended) there is no option to manage this instance using #
# Database Control. Grid Control Management is an option that you can #
# configure from the installation if you already have an Grid Control Agent #
# installed. You can always install a Grid Control Agent after configuration #
# which will discover and allow you to manage this instance. In addition, #
# you can manage the ASM instance using Database Control once you configure #
# a database that uses the ASM instance (using the Database Control that is #
# is configured with that database.) #
# #
###############################################################################

#------------------------------------------------------------------------------
# Name : b_centrallyManageASMInstance
# Datatype : Boolean
# Description: Governs whether the ASM instance is to be managed by Grid
# Control or not.
#
# Note that this option is only relevant if you choose
# n_configurationOption=2 (Configure an ASM instance), and if
# you already have a Grid Control Agent on the system that can
# be used for central management. If you have such an Agent, and
# you wish to use it to centrally manage your ASM instance, then
# you must also set the following variable as documented above,
# which specifies which Management Service you wish to use:
# - s_dlgEMCentralAgentSelected
#
# Example : b_centrallyManageASMInstance=true
#------------------------------------------------------------------------------
b_centrallyManageASMInstance=true


###############################################################################
# #
# Dialog: SPECIFY DATABASE SCHEMA PASSWORDS #
# ----------------------------------------- #
# If you chose n_performUpgrade=0 (Do Not Upgrade an existing database), #
# n_configurationOption=1 (Create a Database), and n_dbType = 1, 2 or 3 #
# (i.e., you chose to create a Starter Database), then you can specify the #
# the passwords for the following four schemas in your starter database: #
# SYS #
# SYSTEM #
# SYSMAN (used by Enterprise Manager) #
# DBSNMP (used by Enterprise Manager) #
# #
# You can choose to use the same password for all accounts (not recommended) #
# or you can specify different passwords for each account (recommended) #
# #
###############################################################################

#------------------------------------------------------------------------------
# Name : b_useSamePassword
# Datatype : Boolean
# Description: Determines whether the same password is set for each database
# schema or whether different passwords should be specified.
#
# If b_useSamePassword=true, then you must specify the password
# that will be used for all accounts using:
# - s_superAdminSamePasswd
# - s_superAdminSamePasswdAgain
#
# If b_useSamePassword=false, then you must specify the passwords
# that will be used for each account using:
# - sl_superAdminPasswds
# - sl_superAdminPasswdsAgain
#
# Pre-requisites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
#
# Example : b_useSamePassword=false
#------------------------------------------------------------------------------
b_useSamePassword=true

#------------------------------------------------------------------------------
# Name : s_superAdminSamePasswd
# Datatype : String
# Description: Stores the password to be used for all accounts.
#
# NOTE: In order to perform the silent install successfully, you
# must specify the same password in both variables:
# - s_superAdminSamePasswd
# - s_superAdminSamePasswdAgain
#
# Pre-requisites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# b_useSamePassword=true
#
#------------------------------------------------------------------------------
s_superAdminSamePasswd=<Value Unspecified>

#------------------------------------------------------------------------------
# Name : s_superAdminSamePasswdAgain
# Datatype : String
# Description: Stores the password to be used for all accounts.
#
# NOTE: In order to perform the silent install successfully, you
# must specify the same password in both variables:
# - s_superAdminSamePasswd
# - s_superAdminSamePasswdAgain
#
# Pre-requisites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# b_useSamePassword=true
#
#------------------------------------------------------------------------------
s_superAdminSamePasswdAgain=<Value Unspecified>

#------------------------------------------------------------------------------
# Name : sl_superAdminPasswds
# Datatype : StringList
# Description: Stores the passwords to be used for each of the database schemas
# SYS, SYSTEM, SYSMAN, and DBSNMP.
#
# NOTE: In order to perform the silent install successfully, you
# must specify the same passwords for each schema using both:
# - sl_superAdminPasswds
# - sl_superAdminPasswdsAgain
#
# Pre-requisites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# b_useSamePassword=false
#
# Example : sl_superAdminPasswds = {"<password for SYS>","<password for SYSTEM>","<password for SYSMAN>","<password for DBSNMP>"}
#------------------------------------------------------------------------------
sl_superAdminPasswds=<Value Unspecified>

#------------------------------------------------------------------------------
# Name : sl_superAdminPasswdsAgain
# Datatype : StringList
# Description: Stores the passwords to be used for each of the database schemas
# SYS, SYSTEM, SYSMAN, and DBSNMP.
#
# NOTE: In order to perform the silent install successfully, you
# must specify the same passwords for each schema using both:
# - sl_superAdminPasswds
# - sl_superAdminPasswdsAgain
#
# Pre-requisites for setting this variable:
# n_configurationOption=1
# n_performUpgrade=0
# n_dbType = 1, 2, or 3
# b_useSamePassword=false
#
# Example : sl_superAdminPasswdsAgain = {"<password for SYS>","<password for SYSTEM>","<password for SYSMAN>","<password for DBSNMP>"}
#------------------------------------------------------------------------------
sl_superAdminPasswdsAgain=<Value Unspecified>


###############################################################################
# #
# Dialog: UPGRADE AN EXISTING DATABASE #
# ------------------------------------ #
# Databases prior to 10.2.0.1.0 may be upgraded after the installation using #
# the Database Upgrade Assistant. If you have databases prior to 10g #
# Release 2 (10.2.0.1.0) on your system, then the installation displays a #
# dialog allowing you to upgrade one of those databases directly from the #
# installation. If any of your databases use ASM for storage, you can choose #
# to upgrade just the ASM instance, just the database, or both. #
# #
###############################################################################

#------------------------------------------------------------------------------
# Name : n_performUpgrade
# Datatype : Number
# Description: Determines whether to perform an upgrade during the installation
# or not.
#
# If n_performUpgrade=1, then an upgrade will be performed at the
# end of the installation. In this case, you must also specify
# the values of one or both of the following:
# - n_upgradeDB
# - n_upgradeASM
#
# IMPORTANT INFO: If n_performUpgrade=0, then an upgrade will not
# be performed, and you can choose to create a new database using
# the variables as documented above. However, if you set
# n_performUpgrade=0, then use the following values ONLY for the
# respective variables:
# - n_upgradeDB=0
# - s_dbOHSelectedForUpgrade=""
# - s_dbSIDSelectedForUpgrade=""
# - n_upgradeASM=0
# - s_dbSelectedUsesASM="no"
#
# Example : n_performUpgrade=0
#------------------------------------------------------------------------------
n_performUpgrade=0

#------------------------------------------------------------------------------
# Name : n_upgradeDB
# Datatype : Number
# Description: Specifies whether the user wishes to perform an upgrade of a
# database (in addition to or as opposed to upgrading an ASM
# instance)
#
# This value must be specified as a number, which maps to the
# following behavior:
# 1 - Upgrade a database
# 0 - Do Not Upgrade a Database
#
# If n_upgradeDB=1, then you must also specify values for the
# following variables:
# - s_dbOHSelectedForUpgrade
# - s_dbSIDSelectedForUpgrade
#
# Pre-requisites for setting this variable:
# n_performUpgrade=1
#
# Example : n_upgradeDB = 1
#------------------------------------------------------------------------------
n_upgradeDB=0

#------------------------------------------------------------------------------
# Name : s_dbOHSelectedForUpgrade
# Datatype : String
# Description: Specifies the Oracle Home of the database you wish to upgrade.
#
# If s_dbOHSelectedForUpgrade is set to something other than "",
# then you must also specify values for the following variable:
# - s_dbSelectedUsesASM
#
# Pre-requisites for setting this variable:
# n_performUpgrade=1
# n_upgradeDB=1
#
# Example : s_dbOHSelectedForUpgrade = "/u01/app/oracle/9.2.0/db"
#------------------------------------------------------------------------------
s_dbOHSelectedForUpgrade=""

#------------------------------------------------------------------------------
# Name : s_dbSIDSelectedForUpgrade
# Datatype : String
# Description: Specifies the SID (for single instance) or database name (for
# RAC) of the database you wish to upgrade.
#
# Pre-requisites for setting this variable:
# n_performUpgrade=1
# n_upgradeDB=1
#
# Example : s_dbSIDSelectedForUpgrade = "orcl"
#------------------------------------------------------------------------------
s_dbSIDSelectedForUpgrade=""

#------------------------------------------------------------------------------
# Name : n_upgradeASM
# Datatype : Number
# Description: Specifies whether the user wishes to perform an upgrade of the
# ASM instance (in addition to or as opposed to upgrading a
# database)
#
# This value must be specified as a number, which maps to the
# following behavior:
# 1 - Upgrade ASM
# 0 - Do Not Upgrade ASM
#
# Pre-requisites for setting this variable:
# n_performUpgrade=1
#
# Example : n_upgradeASM = 1
#------------------------------------------------------------------------------
n_upgradeASM=0

#------------------------------------------------------------------------------
#Name : s_dbSelectedUsesASM
#Datatype : String
#Description: "Yes" if db selected uses ASM, else "No"
#Component : oracle.server
# Pre req:
# n_performUpgrade=1
# n_upgradeDB=1
#------------------------------------------------------------------------------
s_dbSelectedUsesASM="no"

 

###############################################################################
# #
# ENTRIES LISTED IN THIS SECTION SHOULD NOT BE MODIFIED. THEY ARE NEEDED IN #
# THE RESPONSE FILE FOR PROPER FUNCTIONING OF THE SILENT INSTALLATION, BUT #
# THEY SHOULD NOT BE MODIFIED. #
# #
###############################################################################

#------------------------------------------------------------------------------
#Name : sl_upgradableSIDBInstances
# sl_upgradableRACDBInstances
#Datatype : StringList
#Description: Contains the upgradable DB instances
#Component : oracle.server
#------------------------------------------------------------------------------
############################################
# DO NOT MODIFY THE VALUE OF THIS VARIABLE #
############################################

sl_upgradableSIDBInstances={}
sl_upgradableRACDBInstances={}

#------------------------------------------------------------------------------
#Name : n_dbSelection
#Datatype : Number
#Description: row selected in the spread table of upgradable DB instances
#Component : oracle.server
#------------------------------------------------------------------------------
############################################
# DO NOT MODIFY THE VALUE OF THIS VARIABLE #
############################################

n_dbSelection=0

#------------------------------------------------------------------------------
#Name : b_stateOfUpgradeASMCheckbox
#Datatype : Boolean
#Description: stores the state of checkbox in UI
#Component : oracle.server
#------------------------------------------------------------------------------
############################################
# DO NOT MODIFY THE VALUE OF THIS VARIABLE #
############################################

b_stateOfUpgradeASMCheckbox=false

#------------------------------------------------------------------------------
#Name : b_stateOfUpgradeDBCheckbox
#Datatype : Boolean
#Description: stores the state of checkbox in UI
#Component : oracle.server
#------------------------------------------------------------------------------
############################################
# DO NOT MODIFY THE VALUE OF THIS VARIABLE #
############################################

b_stateOfUpgradeDBCheckbox=false

#------------------------------------------------------------------------------
#Name : b_oneClick
#Datatype : Boolean
#Description: Governs one-click behavior
#------------------------------------------------------------------------------
############################################
# DO NOT MODIFY THE VALUE OF THIS VARIABLE #
############################################

b_oneClick=false

#------------------------------------------------------------------------------
#Name : s_dlgASMCfgRedundancyValue
#Datatype : String
#Description: Redundancy Value
#Component : oracle.server
#------------------------------------------------------------------------------
############################################
# DO NOT MODIFY THE VALUE OF THIS VARIABLE #
############################################

s_dlgASMCfgRedundancyValue=""

#------------------------------------------------------------------------------
#Name : s_dlgASMCfgExistingFreeSpace
#Datatype : String
#Description: Existing Free Space
#Component : oracle.server
#------------------------------------------------------------------------------
############################################
# DO NOT MODIFY THE VALUE OF THIS VARIABLE #
############################################

s_dlgASMCfgExistingFreeSpace="0"

#------------------------------------------------------------------------------
#Name : s_dlgASMCfgNewDisksSize
#Datatype : String
#Description: New Disks Size
#Component : oracle.server
#------------------------------------------------------------------------------
############################################
# DO NOT MODIFY THE VALUE OF THIS VARIABLE #
############################################

s_dlgASMCfgNewDisksSize=""

#------------------------------------------------------------------------------
#Name : s_dlgASMCfgAdditionalSpaceNeeded
#Datatype : String
#Description: Additional Space Needed for ASM configuration
#Component : oracle.server
#------------------------------------------------------------------------------
############################################
# DO NOT MODIFY THE VALUE OF THIS VARIABLE #
############################################

s_dlgASMCfgAdditionalSpaceNeeded="0 MB"

event="28401 trace name context forever, level 1"

 


1. Update these kernel parameters (the ephemeral port range) with the following ndd commands:

# ndd -set /dev/udp udp_smallest_anon_port 9000
# ndd -set /dev/udp udp_largest_anon_port 65500
# ndd -set /dev/tcp tcp_smallest_anon_port 9000
# ndd -set /dev/tcp tcp_largest_anon_port 65500


/************************************************************
--CLUSTERWARE ADMIN:
************************************************************/
$ export ORACLE_HOME=/u01/app/12.1.0/grid
$ $ORACLE_HOME/bin/srvctl modify listener -endpoints tcp:12345

 

/************************************************************
--SVG EDITOR:
************************************************************/
SVG editor: https://inkscape.org/en/
China vector map (blank provinces) download: https://commons.wikimedia.org/wiki/File:China_provinces_blank.svg


/************************************************************
--RAC load balance:
************************************************************/
Server-level load balancing not working due to bug 6442431 (Doc ID 840898.1)
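A minimal sanity check of the server-side load balancing setup (a sketch; assumes the usual remote_listener/service configuration discussed in that note):

-- PMON can only post load information to the listeners named in remote_listener
select inst_id, name, value from gv$parameter
where name in ('local_listener','remote_listener');
-- connection load balancing and runtime goals per service
select name, goal, clb_goal from dba_services;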


/************************************************************
--IOSTAT:
************************************************************/
1. Any scripts/utilities to calculate the average and peak IO/sec from the database? (See the sketch after the links below.)

http://www.databasejournal.com/features/oracle/article.php/3726391/Measuring-Disk-IO.htm

http://www.databasejournal.com/features/oracle/article.php/3731961/Measuring-Disk-IOA-Vendor-View.htm

http://www.lsi.com/DistributionSystem/AssetDocument/documentation/storage/business_solutions/25034-00_RevA_Oracle_Opitmal_Config.pdf

http://kevinclosson.wordpress.com/2006/12/11/a-tip-about-the-orion-io-generator-tool/

http://mradomski.wordpress.com/2008/01/19/benchmark-tools-part-i-disk-io/

http://www.cuddletech.com/blog/pivot/entry.php?id=820 - good intro
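A quick starting point (a sketch only): the counters below are cumulative since instance startup, so average/peak IO/sec has to be derived from the delta between two samples (or from AWR snapshots, as in point 3 below).

select name, value
from v$sysstat
where name in ('physical read total IO requests',
               'physical write total IO requests',
               'physical read total bytes',
               'physical write total bytes');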




2. Calculate the I/Os separately per disk group.
Configuring and using Calibrate I/O [ID 727062.1]. This is only available from 11g onwards for analysing the I/O; a sketch of running it follows below. Alternatively, you can also use Orion or other third-party tools.
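A minimal sketch of running it (assumes 11g+, TIMED_STATISTICS=TRUE and asynchronous I/O on the datafiles; the disk count and latency target below are placeholders to adjust):

set serveroutput on
declare
  l_max_iops  pls_integer;
  l_max_mbps  pls_integer;
  l_latency   pls_integer;
begin
  dbms_resource_manager.calibrate_io(
    num_physical_disks => 4,    -- placeholder: disks behind the database files
    max_latency        => 10,   -- placeholder: acceptable latency in ms
    max_iops           => l_max_iops,
    max_mbps           => l_max_mbps,
    actual_latency     => l_latency);
  dbms_output.put_line('max_iops='||l_max_iops||' max_mbps='||l_max_mbps||' latency='||l_latency);
end;
/
-- previous calibration results are kept in DBA_RSRC_IO_CALIBRATE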

3. I have used the physical reads/writes values from AWR reports but am looking for something that can give better values.

The I/O figures from AWR are good enough for capacity planning; at least they worked for me (a per-snapshot sketch follows below). Go through the Database Journal links above for a holistic approach to I/O design.
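A per-snapshot sketch from the AWR history views (assumes Diagnostics Pack licensing and that the AWR retention covers the period of interest; ignore deltas that span an instance restart):

select s.snap_id, s.end_interval_time, st.stat_name,
       round( (st.value - lag(st.value) over
                 (partition by st.dbid, st.instance_number, st.stat_name order by s.snap_id))
            / ((cast(s.end_interval_time as date) - cast(s.begin_interval_time as date))*86400), 1) io_per_sec
from dba_hist_snapshot s, dba_hist_sysstat st
where st.snap_id = s.snap_id
and st.dbid = s.dbid
and st.instance_number = s.instance_number
and st.stat_name in ('physical read total IO requests','physical write total IO requests')
order by st.stat_name, s.instance_number, s.snap_id;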

4. Redo generation rate.

Now this is tricky. You cannot get exact redo generation rate values; it is only a ballpark figure, but good enough for analysing and tuning the network for redo transport.

Below is the process I follow >>>>
1. Get the details for number of log switches per hour/day/total

CLEAR COLUMNS BREAKS COMPUTES
set linesize 300
COLUMN DAY FORMAT a15 HEADING 'Day / Time' ENTMAP off
COLUMN H00 FORMAT 999 HEADING '00' ENTMAP off
COLUMN H01 FORMAT 999 HEADING '01' ENTMAP off
COLUMN H02 FORMAT 999 HEADING '02' ENTMAP off
COLUMN H03 FORMAT 999 HEADING '03' ENTMAP off
COLUMN H04 FORMAT 999 HEADING '04' ENTMAP off
COLUMN H05 FORMAT 999 HEADING '05' ENTMAP off
COLUMN H06 FORMAT 999 HEADING '06' ENTMAP off
COLUMN H07 FORMAT 999 HEADING '07' ENTMAP off
COLUMN H08 FORMAT 999 HEADING '08' ENTMAP off
COLUMN H09 FORMAT 999 HEADING '09' ENTMAP off
COLUMN H10 FORMAT 999 HEADING '10' ENTMAP off
COLUMN H11 FORMAT 999 HEADING '11' ENTMAP off
COLUMN H12 FORMAT 999 HEADING '12' ENTMAP off
COLUMN H13 FORMAT 999 HEADING '13' ENTMAP off
COLUMN H14 FORMAT 999 HEADING '14' ENTMAP off
COLUMN H15 FORMAT 999 HEADING '15' ENTMAP off
COLUMN H16 FORMAT 999 HEADING '16' ENTMAP off
COLUMN H17 FORMAT 999 HEADING '17' ENTMAP off
COLUMN H18 FORMAT 999 HEADING '18' ENTMAP off
COLUMN H19 FORMAT 999 HEADING '19' ENTMAP off
COLUMN H20 FORMAT 999 HEADING '20' ENTMAP off
COLUMN H21 FORMAT 999 HEADING '21' ENTMAP off
COLUMN H22 FORMAT 999 HEADING '22' ENTMAP off
COLUMN H23 FORMAT 999 HEADING '23' ENTMAP off
COLUMN TOTAL FORMAT 999,999 HEADING 'Total' ENTMAP off
BREAK ON report
COMPUTE sum LABEL Total: avg label Average: OF total ON report
SELECT
SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH:MI:SS'),1,8) DAY
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'00',1,0)) H00
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'01',1,0)) H01
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'02',1,0)) H02
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'03',1,0)) H03
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'04',1,0)) H04
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'05',1,0)) H05
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'06',1,0)) H06
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'07',1,0)) H07
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'08',1,0)) H08
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'09',1,0)) H09
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'10',1,0)) H10
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'11',1,0)) H11
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'12',1,0)) H12
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'13',1,0)) H13
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'14',1,0)) H14
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'15',1,0)) H15
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'16',1,0)) H16
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'17',1,0)) H17
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'18',1,0)) H18
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'19',1,0)) H19
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'20',1,0)) H20
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'21',1,0)) H21
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'22',1,0)) H22
, SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'23',1,0)) H23
, COUNT(*) TOTAL
FROM
v$log_history a
where SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH:MI:SS'),1,8) like ('%07/04/11%')
GROUP BY SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH:MI:SS'),1,8)
ORDER BY SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH:MI:SS'),1,8)
/
NOTE: gv$ views are not required here even in RAC; do NOT use gv$ for log history or archived logs, since v$log_history and v$archived_log are populated from the controlfile and already cover all threads.

2. Identify the size of the redo log files in the database (for example, with the query below).
A total of 4 log switches per hour does not always mean 4 * 512MB (log size) = 2GB of redo generation per hour; log switches can occur even when a redo log is not full.
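A quick check of the configured log sizes against v$log:

select thread#, group#, bytes/1024/1024 size_mb, members, status
from v$log
order by thread#, group#;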

3. Get the total redo generation size per day and total log switches.

per-day calculations >>>>
select count(1),round(sum(blocks*block_size)/1024/1024/1024,2) Gb from v$archived_log
where completion_time between sysdate-1 and sysdate;
select count(1),round(sum(blocks*block_size)/1024/1024/1024,2) Gb from v$archived_log
where completion_time between sysdate-2 and sysdate-1;
select count(1),round(sum(blocks*block_size)/1024/1024/1024,2) Gb from v$archived_log
where completion_time between sysdate-3 and sysdate-2;
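The three per-day queries above can also be written as one grouped report from the same view (a sketch):

select trunc(completion_time) day,
       count(*) archived_logs,
       round(sum(blocks*block_size)/1024/1024/1024,2) gb
from v$archived_log
group by trunc(completion_time)
order by 1;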

MB/sec calculations >>>
-- average MB/sec across the whole day (86400 seconds); v$archived_log has no inst_id column,
-- so thread# is used here to split the figure per RAC instance
select round(sum(blocks*block_size)/1024/1024/24/60/60,2) MB_PER_SEC from v$archived_log
where substr(to_char(COMPLETION_TIME,'DD/MM/YY HH24:MI:SS'),1,8) like ('06/07/11') and thread#=1;
select round(sum(blocks*block_size)/1024/1024/24/60/60,2) MB_PER_SEC from v$archived_log
where substr(to_char(COMPLETION_TIME,'DD/MM/YY HH24:MI:SS'),1,8) like ('06/07/11') and thread#=2;
select round(sum(blocks*block_size)/1024/1024/24/60/60,2) MB_PER_SEC from v$archived_log
where substr(to_char(COMPLETION_TIME,'DD/MM/YY HH24:MI:SS'),1,8) like ('06/07/11') and thread#=3;
You can also get good details from MOS (Metalink): Determine How Much Disk Space is Needed for the Archive Files [ID 122555.1]