Oracle: Finding the SQL Statements Behind a Surge in Archived Redo Logs (repost)

Original: https://www.modb.pro/db/1717539062243467264

Reference: https://www.cnblogs.com/lei-z/p/16467177.html

I. Create tables and simulate data changes

1. Create three tables

CREATE TABLE t1 (
    id NUMBER,
    name VARCHAR2(200),
    info VARCHAR2(200),
    row_create_time TIMESTAMP,
    row_lastupdate_time TIMESTAMP
);

CREATE TABLE t2 (
    id NUMBER,
    name VARCHAR2(200),
    info VARCHAR2(200),
    row_create_time TIMESTAMP,
    row_lastupdate_time TIMESTAMP
);

CREATE TABLE t3 (
    id NUMBER,
    name VARCHAR2(200),
    info VARCHAR2(200),
    row_create_time TIMESTAMP,
    row_lastupdate_time TIMESTAMP
);


2. Initialize the data

-- Insert data in a loop, committing every 10,000 iterations
-- (each iteration inserts one row into each of the three tables)
DECLARE
    commit_count NUMBER := 0;
BEGIN
    FOR i IN 1..50000 LOOP
        INSERT INTO t1 (id, name, info, row_create_time, row_lastupdate_time)
        VALUES (i, 't1_' || i, 'info ' || i, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP);
        
        INSERT INTO t2 (id, name, info, row_create_time, row_lastupdate_time)
        VALUES (i, 't2_' || i, 'info ' || i, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP);
        
        INSERT INTO t3 (id, name, info, row_create_time, row_lastupdate_time)
        VALUES (i, 't3_' || i, 'info ' || i, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP);
        
        commit_count := commit_count + 1;
        
        IF commit_count = 10000 THEN
            COMMIT;
            commit_count := 0;
        END IF;
    END LOOP;
    COMMIT;
END;
/




3. Write a stored procedure to run UPDATE or DELETE


CREATE OR REPLACE PROCEDURE update_or_delete_data (
    p_action VARCHAR2,
    p_table_name VARCHAR2,
    p_total_rows NUMBER,
    p_commit_interval NUMBER
) AS
    v_counter NUMBER := 0;
    v_start_time TIMESTAMP := CURRENT_TIMESTAMP;

BEGIN
    IF p_action = 'update' THEN
        -- Run the updates
        WHILE v_counter < p_total_rows LOOP
            EXECUTE IMMEDIATE 'UPDATE ' || p_table_name || '
            SET name = ''Updated Name '' || DBMS_RANDOM.VALUE,
                info = ''Updated Info '' || DBMS_RANDOM.VALUE,
                row_lastupdate_time = CURRENT_TIMESTAMP
            WHERE id = :id' USING v_counter + 1;

            v_counter := v_counter + 1;

            IF MOD(v_counter, p_commit_interval) = 0 THEN
                COMMIT;
            END IF;
        END LOOP;

    ELSIF p_action = 'delete' THEN
        -- Run the deletes
        WHILE v_counter < p_total_rows LOOP
            EXECUTE IMMEDIATE 'DELETE FROM ' || p_table_name || '
            WHERE id = :id' USING v_counter + 1;

            v_counter := v_counter + 1;

            IF MOD(v_counter, p_commit_interval) = 0 THEN
                COMMIT;
            END IF;
        END LOOP;
    ELSE
        DBMS_OUTPUT.PUT_LINE('Invalid action. Use "update" or "delete" as the first parameter.');
    END IF;

    COMMIT;

    -- Compute and print the total elapsed time
    DBMS_OUTPUT.PUT_LINE('Total Elapsed Time: ' || (CURRENT_TIMESTAMP - v_start_time));
    DBMS_OUTPUT.PUT_LINE('Table: ' || p_table_name || ', Action: ' || p_action || ', Total Rows Changed: ' || p_total_rows);
END;
/







4. Run the stored procedure to simulate data changes


-- Call the procedure to update 10,000 rows in table t1, committing every 1,000 rows
call update_or_delete_data('update', 't1', 10000, 1000);

-- Call the procedure to delete 20,000 rows from table t2, committing every 2,000 rows
call update_or_delete_data('delete', 't2', 20000, 2000);
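
To get a rough sense of how much redo these calls generate, one option (a sketch, not part of the original article) is to read the session-level 'redo size' statistic before and after the workload and take the difference:

-- Redo generated so far by the current session; run this before and after the
-- procedure calls and subtract the two values
SELECT sn.name, ms.value
  FROM v$mystat ms
  JOIN v$statname sn ON sn.statistic# = ms.statistic#
 WHERE sn.name = 'redo size';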

 

II. Three ways to find the source of archived log growth

  • Query the AWR history views with SQL
  • Locate it via the AWR report
  • Mine the archived logs with dbms_logmnr


-- Method 1: query the AWR history views with SQL

1. Query DML changes over the last three hours

with aa as 
(SELECT IID,
       USERNAME,
       to_char(BEGIN_TIME,'mm/dd hh24:mi') begin_time,
       SQL_ID,
       decode(COMMAND_TYPE,3,'SELECT',2,'INSERT',6,'UPDATE',7,'DELETE',189,'MERGE INTO','OTH') "SQL_TYPE",
       executions "EXEC_NUM",
       rows_processed "Change_NUM"
  FROM (SELECT s.INSTANCE_NUMBER IID,
               PARSING_SCHEMA_NAME USERNAME,COMMAND_TYPE,
               cast(BEGIN_INTERVAL_TIME as date) BEGIN_TIME,
               s.SQL_ID,
               executions_DELTA executions,
               rows_processed_DELTA rows_processed,
               (IOWAIT_DELTA) /
               1000000 io_time,
               100*ratio_to_report(rows_processed_DELTA) over(partition by s.INSTANCE_NUMBER, BEGIN_INTERVAL_TIME) RATIO,
               sum(rows_processed_DELTA) over(partition by s.INSTANCE_NUMBER, BEGIN_INTERVAL_TIME) totetime,
               elapsed_time_DELTA / 1000000 ETIME,
               CPU_TIME_DELTA / 1000000 CPU_TIME,
               (CLWAIT_DELTA+APWAIT_DELTA+CCWAIT_DELTA+PLSEXEC_TIME_DELTA+JAVEXEC_TIME_DELTA)/1000000 OTIME,
               row_number() over(partition by s.INSTANCE_NUMBER,BEGIN_INTERVAL_TIME order by rows_processed_DELTA desc) TOP_D
                   FROM dba_hist_sqlstat s, dba_hist_snapshot sn,dba_hist_sqltext s2
         where s.snap_id = sn.snap_id
           and s.INSTANCE_NUMBER = sn.INSTANCE_NUMBER 
           and rows_processed_DELTA is not null
           and s.sql_id = s2.sql_id and COMMAND_TYPE in (2,6,7,189)
           and sn.BEGIN_INTERVAL_TIME > sysdate - nvl(180,1)/1440
           and PARSING_SCHEMA_NAME<>'SYS')
 WHERE TOP_D <= nvl(20,1)
  )
select aa.*,s.sql_fulltext "FULL_SQL" from aa left join  v$sql s on  aa.sql_id=s.sql_id ORDER BY IID, BEGIN_TIME desc,"Change_NUM" desc;




-- If the sql_id has already been flushed from the shared pool, the full SQL text can still be retrieved from dba_hist_sqltext

select * from dba_hist_sqltext where sql_id='baak64bs12085';
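
For a sql_id that has aged out of the shared pool, the historical execution plan can also be pulled from the AWR repository with DBMS_XPLAN.DISPLAY_AWR (a sketch reusing the sql_id above):

-- Historical execution plan stored in AWR for this sql_id
SELECT * FROM TABLE(DBMS_XPLAN.DISPLAY_AWR('baak64bs12085'));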







2. Query DML changes in a specified time window
with aa as 
(SELECT IID,
       USERNAME,
       to_char(BEGIN_TIME,'mm/dd hh24:mi') begin_time,
       SQL_ID,
       decode(COMMAND_TYPE,3,'SELECT',2,'INSERT',6,'UPDATE',7,'DELETE',189,'MERGE INTO','OTH') "SQL_TYPE",
       executions "EXEC_NUM",
       rows_processed "Change_NUM"
  FROM (SELECT s.INSTANCE_NUMBER IID,
               PARSING_SCHEMA_NAME USERNAME,COMMAND_TYPE,
               cast(BEGIN_INTERVAL_TIME as date) BEGIN_TIME,
               s.SQL_ID,
               executions_DELTA executions,
               rows_processed_DELTA rows_processed,
               (IOWAIT_DELTA) /
               1000000 io_time,
               100*ratio_to_report(rows_processed_DELTA) over(partition by s.INSTANCE_NUMBER, BEGIN_INTERVAL_TIME) RATIO,
               sum(rows_processed_DELTA) over(partition by s.INSTANCE_NUMBER, BEGIN_INTERVAL_TIME) totetime,
               elapsed_time_DELTA / 1000000 ETIME,
               CPU_TIME_DELTA / 1000000 CPU_TIME,
               (CLWAIT_DELTA+APWAIT_DELTA+CCWAIT_DELTA+PLSEXEC_TIME_DELTA+JAVEXEC_TIME_DELTA)/1000000 OTIME,
               row_number() over(partition by s.INSTANCE_NUMBER,BEGIN_INTERVAL_TIME order by rows_processed_DELTA desc) TOP_D
                   FROM dba_hist_sqlstat s, dba_hist_snapshot sn,dba_hist_sqltext s2
         where s.snap_id = sn.snap_id
           and s.INSTANCE_NUMBER = sn.INSTANCE_NUMBER 
           and rows_processed_DELTA is not null
           and s.sql_id = s2.sql_id and COMMAND_TYPE in (2,6,7,189)
           and sn.BEGIN_INTERVAL_TIME >= TO_DATE('2023-10-23 21:00:00', 'YYYY-MM-DD HH24:MI:SS')
           and sn.BEGIN_INTERVAL_TIME < TO_DATE('2023-10-23 22:30:00', 'YYYY-MM-DD HH24:MI:SS')
           and PARSING_SCHEMA_NAME<>'SYS')
 WHERE TOP_D <= nvl(20,1)
  )
select aa.*,s.sql_fulltext "FULL_SQL" from aa left join  v$sql s on  aa.sql_id=s.sql_id ORDER BY IID, BEGIN_TIME desc,"Change_NUM" desc;





3. Sample run of the DML-change query:

with aa as
(SELECT IID,
       USERNAME,
       to_char(BEGIN_TIME,'mm/dd hh24:mi') begin_time,
       SQL_ID,
       decode(COMMAND_TYPE,3,'SELECT',2,'INSERT',6,'UPDATE',7,'DELETE',189,'MERGE INTO','OTH') "SQL_TYPE",
       executions "EXEC_NUM",
       rows_processed "Change_NUM"
  FROM (SELECT s.INSTANCE_NUMBER IID,
               PARSING_SCHEMA_NAME USERNAME,COMMAND_TYPE,
               cast(BEGIN_INTERVAL_TIME as date) BEGIN_TIME,
               s.SQL_ID,
               executions_DELTA executions,
               rows_processed_DELTA rows_processed,
               (IOWAIT_DELTA) /
               1000000 io_time,
               100*ratio_to_report(rows_processed_DELTA) over(partition by s.INSTANCE_NUMBER, BEGIN_INTERVAL_TIME) RATIO,
               sum(rows_processed_DELTA) over(partition by s.INSTANCE_NUMBER, BEGIN_INTERVAL_TIME) totetime,
               elapsed_time_DELTA / 1000000 ETIME,
               CPU_TIME_DELTA / 1000000 CPU_TIME,
               (CLWAIT_DELTA+APWAIT_DELTA+CCWAIT_DELTA+PLSEXEC_TIME_DELTA+JAVEXEC_TIME_DELTA)/1000000 OTIME,
               row_number() over(partition by s.INSTANCE_NUMBER,BEGIN_INTERVAL_TIME order by rows_processed_DELTA desc) TOP_D
                   FROM dba_hist_sqlstat s, dba_hist_snapshot sn,dba_hist_sqltext s2
         where s.snap_id = sn.snap_id
           and s.INSTANCE_NUMBER = sn.INSTANCE_NUMBER
           and rows_processed_DELTA is not null
           and s.sql_id = s2.sql_id and COMMAND_TYPE in (2,6,7,189)
           and sn.BEGIN_INTERVAL_TIME > sysdate - nvl(180,1)/1440
           and PARSING_SCHEMA_NAME<>'SYS')
 WHERE TOP_D <= nvl(20,1)
  )
select aa.*,s.sql_fulltext "FULL_SQL" from aa left join  v$sql s on  aa.sql_id=s.sql_id ORDER BY IID, BEGIN_TIME desc,"Change_NUM" desc;

       IID USERNAME                       BEGIN_TIME  SQL_ID        SQL_TYPE     EXEC_NUM Change_NUM FULL_SQL
---------- ------------------------------ ----------- ------------- ---------- ---------- ---------- --------------------------------------------------------------------------------
         1 TEST                           10/23 21:00 baak64bs12085 DELETE          20000      20000 DELETE FROM t2
                                                                                                                 WHERE id = :id


         1 TEST                           10/23 21:00 6j1kx9yh18amp UPDATE          10000      10000 UPDATE t1
                                                                                                                 SET name = 'Updated Name ' || DBMS_RAN
                                                                                                     DOM.VALUE,


-- Method 2: locate it via the AWR report

sqlplus / as sysdba 
@?/rdbms/admin/awrrpt.sql
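
If an interactive session is not convenient, the same report can be generated with a query against DBMS_WORKLOAD_REPOSITORY (a sketch; the dbid, instance number, and snapshot IDs are placeholders you would look up in v$database and dba_hist_snapshot):

-- Find the snapshots that cover the time window of interest
SELECT snap_id, begin_interval_time, end_interval_time
  FROM dba_hist_snapshot
 ORDER BY snap_id;

-- Generate the AWR report text between two snapshots (all four values are placeholders)
SELECT output
  FROM TABLE(DBMS_WORKLOAD_REPOSITORY.AWR_REPORT_TEXT(
         l_dbid     => 1234567890,   -- v$database.dbid
         l_inst_num => 1,            -- instance number
         l_bid      => 100,          -- begin snap_id
         l_eid      => 101));        -- end snap_id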


1. Heavy redo generation always comes from a large number of block changes. Find the segments with the most block changes from the AWR views

select * from (
SELECT to_char(dhs.begin_interval_time, 'YYYY_MM_DD HH24:MI') snap_time,
       dhsso.object_name,
       SUM(db_block_changes_delta)
  FROM dba_hist_seg_stat     dhss,
       dba_hist_seg_stat_obj dhsso,
       dba_hist_snapshot     dhs
 WHERE dhs.snap_id = dhss.snap_id
   AND dhs.instance_number = dhss.instance_number
   AND dhss.obj# = dhsso.obj#
   AND dhss.dataobj# = dhsso.dataobj#
   AND dhs.begin_interval_time >= to_date('2023-10-23 21:00:00', 'YYYY-MM-DD HH24:MI:SS')
   AND dhs.begin_interval_time < to_date('2023-10-23 22:30:00', 'YYYY-MM-DD HH24:MI:SS')
 GROUP BY to_char(dhs.begin_interval_time, 'YYYY_MM_DD HH24:MI'),
          dhsso.object_name
 order by 3 desc)
 where rownum<=5;
 

2. From the AWR views, find the SQL statements that touched the top objects from step 1

SELECT to_char(begin_interval_time, 'YYYY_MM_DD HH24:MI'),
       dbms_lob.substr(sql_text, 4000, 1),
       dhss.instance_number,
       dhss.sql_id,
       executions_delta,
       rows_processed_delta
  FROM dba_hist_sqlstat dhss, dba_hist_snapshot dhs, dba_hist_sqltext dhst
 WHERE UPPER(dhst.sql_text) LIKE '%T1%'
   AND dhss.snap_id = dhs.snap_id
   AND dhss.instance_Number = dhs.instance_number
   AND dhss.sql_id = dhst.sql_id;
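
The lookup above scans every snapshot; in practice it is usually worth restricting it to the same window used in step 1 (a sketch using the example window from earlier; '%T1%' is the demo table):

-- Same lookup, limited to the snapshots in the suspect time window
SELECT to_char(dhs.begin_interval_time, 'YYYY_MM_DD HH24:MI') snap_time,
       dhss.instance_number,
       dhss.sql_id,
       executions_delta,
       rows_processed_delta,
       dbms_lob.substr(dhst.sql_text, 4000, 1) sql_text
  FROM dba_hist_sqlstat dhss, dba_hist_snapshot dhs, dba_hist_sqltext dhst
 WHERE UPPER(dhst.sql_text) LIKE '%T1%'
   AND dhss.snap_id = dhs.snap_id
   AND dhss.instance_number = dhs.instance_number
   AND dhss.sql_id = dhst.sql_id
   AND dhs.begin_interval_time >= to_date('2023-10-23 21:00:00', 'YYYY-MM-DD HH24:MI:SS')
   AND dhs.begin_interval_time <  to_date('2023-10-23 22:30:00', 'YYYY-MM-DD HH24:MI:SS');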
 
 
 
3. Use the ASH views to find the sessions, modules, and machines that ran these SQL statements.

select d.sample_time,d.sql_id,d.sql_opname,d.program,d.module,d.machine from dba_hist_active_sess_history d WHERE sql_id = '6j1kx9yh18amp';
select a.sample_time,a.sql_id,a.sql_opname,a.program,a.module,a.machine from v$active_session_history a where sql_id = '6j1kx9yh18amp';
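
When there are many samples, it can be easier to aggregate them first (a sketch using the same sql_id):

-- Count ASH samples per program/module/machine for the suspect statement
SELECT program, module, machine, COUNT(*) AS samples
  FROM dba_hist_active_sess_history
 WHERE sql_id = '6j1kx9yh18amp'
 GROUP BY program, module, machine
 ORDER BY samples DESC;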

 

 

-- Method 3: mine the archived logs

1. Count the archived log files generated per hour over the last few days
SELECT SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH:MI:SS'),1,5) Day,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'00',1,0)) H00,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'01',1,0)) H01,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'02',1,0)) H02,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'03',1,0)) H03,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'04',1,0)) H04,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'05',1,0)) H05,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'06',1,0)) H06,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'07',1,0)) H07,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'08',1,0)) H08,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'09',1,0)) H09,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'10',1,0)) H10,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'11',1,0)) H11,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'12',1,0)) H12,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'13',1,0)) H13,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'14',1,0)) H14,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'15',1,0)) H15,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'16',1,0)) H16,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'17',1,0)) H17,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'18',1,0)) H18,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'19',1,0)) H19,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'20',1,0)) H20,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'21',1,0)) H21,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'22',1,0)) H22,
       SUM(DECODE(SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH24:MI:SS'),10,2),'23',1,0)) H23,
       COUNT(*) TOTAL
FROM v$log_history a
WHERE first_time >= sysdate - 10
GROUP BY SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH:MI:SS'),1,5)
ORDER BY SUBSTR(TO_CHAR(first_time, 'MM/DD/RR HH:MI:SS'),1,5) DESC;
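
Counting files only tells part of the story when log sizes differ; the archived volume itself can be summed from v$archived_log (a sketch, grouped by day):

-- Archived redo volume per day over the last 10 days, in GB
SELECT TRUNC(completion_time) AS day,
       ROUND(SUM(blocks * block_size) / 1024 / 1024 / 1024, 2) AS gb
  FROM v$archived_log
 WHERE completion_time >= SYSDATE - 10
 GROUP BY TRUNC(completion_time)
 ORDER BY day DESC;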



2. Mine the archived logs to confirm what changed


-- Run this as SYS or as a user with the required privileges; a GUI tool such as Toad also works
-- The following scripts only need to be run the first time; later mining sessions can skip them
@?/rdbms/admin/dbmslm.sql
@?/rdbms/admin/dbmslmd.sql
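
Before mining, a quick sanity check that the database is in archivelog mode, and whether minimal supplemental logging is enabled, can save time (not strictly required for dict_from_online_catalog, but it affects how much detail the redo records carry) -- a sketch:

-- Confirm archivelog mode and the supplemental logging setting
SELECT log_mode, supplemental_log_data_min FROM v$database;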
 

 
-- Generate the add_logfile commands for the archived logs in the window to analyze.
-- Only the first command should keep options=>dbms_logmnr.new; remove it from the rest before running them.


SELECT 'execute dbms_logmnr.add_logfile(logfilename=>''' || name || ''', options=>dbms_logmnr.new);' as ddl
FROM v$archived_log
 WHERE completion_time >= TO_DATE('2023-10-22 20:00:00', 'YYYY-MM-DD HH24:MI:SS')
   AND completion_time <= TO_DATE('2023-10-23 23:00:00', 'YYYY-MM-DD HH24:MI:SS');



-- Full log mining procedure

-- Register the archived logs to mine; the first call uses options=>dbms_logmnr.new,
-- the remaining calls simply add more files (all files are added before starting LogMiner)
execute dbms_logmnr.add_logfile(logfilename=>'/data/oracle/archivelog/1_167_1106703090.dbf', options=>dbms_logmnr.new);
execute dbms_logmnr.add_logfile(logfilename=>'/data/oracle/archivelog/1_168_1106703090.dbf');
execute dbms_logmnr.add_logfile(logfilename=>'/data/oracle/archivelog/1_169_1106703090.dbf');
execute dbms_logmnr.add_logfile(logfilename=>'/data/oracle/archivelog/1_170_1106703090.dbf');
execute dbms_logmnr.add_logfile(logfilename=>'/data/oracle/archivelog/1_171_1106703090.dbf');
execute dbms_logmnr.add_logfile(logfilename=>'/data/oracle/archivelog/1_172_1106703090.dbf');
execute dbms_logmnr.add_logfile(logfilename=>'/data/oracle/archivelog/1_173_1106703090.dbf');
execute dbms_logmnr.add_logfile(logfilename=>'/data/oracle/archivelog/1_174_1106703090.dbf');

-- Start LogMiner, using the local online data dictionary to resolve object names
execute dbms_logmnr.start_logmnr(options=>dbms_logmnr.dict_from_online_catalog);
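
Optionally, confirm which files LogMiner has registered before querying the contents (a sketch):

-- List the log files currently registered with LogMiner and their time ranges
SELECT log_id, filename, low_time, high_time FROM v$logmnr_logs;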




-- Capture the mined change records into a staging table (run while the LogMiner session is still active)
create table test.logmnr_contents_1023 as select * from v$logmnr_contents;

-- Finally, end the LogMiner session to release the PGA memory it holds
execute dbms_logmnr.end_logmnr;



-- Query the staging table to find which tables changed most often

select to_char(TIMESTAMP,'YYYY-MM-DD HH24') TIME_1,
          seg_owner,
          table_name,
          operation,
          count(*)
from test.logmnr_contents_1023
where seg_owner not in ('SYS')
group by to_char(TIMESTAMP,'YYYY-MM-DD HH24'),seg_owner,table_name,operation
order by seg_owner,table_name,to_char(TIMESTAMP,'YYYY-MM-DD HH24'),operation;
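
Once a hot table is identified, the actual statements can be pulled from the same staging table via sql_redo (a sketch; TEST.T1 is the demo table from part I):

-- Sample the reconstructed SQL for one suspect table
SELECT timestamp, operation, sql_redo
  FROM test.logmnr_contents_1023
 WHERE seg_owner = 'TEST'
   AND table_name = 'T1'
   AND ROWNUM <= 20;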