The Oracle Database LogMiner utility lets you read the contents of your redo and archived log files, so you can see what was running in your database during a specific time period.
-- The following script ships with a fresh installation of Oracle Database. You can find it
-- under your ORACLE_HOME/rdbms/admin directory. The script simply creates the DBMS_LOGMNR_D
-- package under the SYS schema.
--@?/rdbms/admin/dbmslogmnrd.sql;

-- Contents of the mined archived logs can be examined via the following view.
SELECT * FROM V$LOGMNR_CONTENTS;

-- To determine which redo logs are being analyzed in the current LogMiner session:
SELECT * FROM V$LOGMNR_LOGS;

-- Shows information about the LogMiner dictionary file, provided the dictionary was
-- created using the STORE_IN_FLAT_FILE option. The information shown includes the
-- database name and status information.
SELECT * FROM V$LOGMNR_DICTIONARY;
SELECT * FROM V$LOGMNR_PARAMETERS;
/*
V$LOGMNR_CONTENTS shows, among other things:

The type of change made to the database: INSERT, UPDATE, DELETE, or DDL (OPERATION column).
The SCN at which a change was made (SCN column).
The SCN at which a change was committed (COMMIT_SCN column).
The transaction to which a change belongs (XIDUSN, XIDSLT, and XIDSQN columns).
The table and schema name of the modified object (SEG_NAME and SEG_OWNER columns).
The name of the user who issued the DDL or DML statement to make the change (USERNAME column).
Reconstructed SQL statements showing SQL that is equivalent (but not necessarily identical) to the SQL used to generate the redo records (SQL_REDO column). If a password is part of the statement in a SQL_REDO column, the password is encrypted.
Reconstructed SQL statements showing the SQL needed to undo the change (SQL_UNDO column). SQL_UNDO columns that correspond to DDL statements are always NULL. Similarly, the SQL_UNDO column may be NULL for some datatypes and for rolled-back operations.

You will need to add supplemental logging to mine your logs. With supplemental logging,
the database logs updates in terms of the updated row data rather than on a ROWID basis.
Logical update information is more meaningful than ROWID-based update statements: if a
primary key or a unique index exists on the table, the mined data identifies rows by
those key values; in their absence, all column values of the row are logged to identify
the updated data.

Another key factor: if you mine your archived logs in a database other than the production
database that generated them, ROWID information is worthless, since a ROWID is only valid
in its own database. In that situation you need the logical statements to mine meaningfully.
*/
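-- A minimal sketch tying the columns above together (assumes an active LogMiner session;
-- the SCOTT filter is illustrative):
SELECT SCN, COMMIT_SCN,
       XIDUSN || '.' || XIDSLT || '.' || XIDSQN AS XID,
       SEG_OWNER, SEG_NAME, USERNAME, OPERATION, SQL_REDO, SQL_UNDO
  FROM V$LOGMNR_CONTENTS
 WHERE SEG_OWNER = 'SCOTT';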
----------------------------------------
-- adding supplemental logging and types
----------------------------------------
-- minimal supplemental logging
-- minimal supplemental logging
ALTER DATABASE ADD SUPPLEMENTAL LOG DATA;

-- Identification key logging enables database-wide before-image logging of primary keys
-- or unique indexes (in the absence of primary keys) for all updates. With this type of
-- logging, an application can identify updated rows logically rather than resorting to
-- ROWIDs (e.g. a logical standby).
ALTER DATABASE ADD SUPPLEMENTAL LOG DATA (PRIMARY KEY, UNIQUE INDEX) COLUMNS;
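-- A quick check of what is enabled (these columns exist in V$DATABASE):
SELECT SUPPLEMENTAL_LOG_DATA_MIN, SUPPLEMENTAL_LOG_DATA_PK, SUPPLEMENTAL_LOG_DATA_UI
  FROM V$DATABASE;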
ALTER DATABASE DROP SUPPLEMENTAL LOG DATA;

-- table supplemental logging
-- With ALWAYS, these columns are logged every time an UPDATE statement is executed
-- on scott.emp, regardless of whether or not the update affected them.
ALTER TABLE scott.emp ADD SUPPLEMENTAL LOG GROUP emp_parttime (empno, ename, deptno) ALWAYS;

-- Without ALWAYS (an alternative to the group above; a log group name must be unique
-- per table), before-images of the columns are logged only if at least one of the
-- columns is updated.
ALTER TABLE scott.emp ADD SUPPLEMENTAL LOG GROUP emp_parttime (empno, ename, deptno);
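-- To verify the log groups defined on the table (the ALWAYS column shows ALWAYS
-- or CONDITIONAL):
SELECT LOG_GROUP_NAME, TABLE_NAME, ALWAYS
  FROM DBA_LOG_GROUPS
 WHERE OWNER = 'SCOTT';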
ALTER TABLE scott.emp DROP SUPPLEMENTAL LOG GROUP emp_parttime;

-- to create all LogMiner objects in the specified tablespace
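-- The target tablespace must already exist; a minimal sketch, assuming a hypothetical
-- datafile path and size:
CREATE TABLESPACE logmnrts$ DATAFILE '/oracle/database/logmnrts01.dbf' SIZE 25M AUTOEXTEND ON;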
EXECUTE DBMS_LOGMNR_D.SET_TABLESPACE('logmnrts$');

------------------------
-- to extract dictionary
------------------------
-- extracting to a flat file
-- You could also specify a filename and location without specifying the STORE_IN_FLAT_FILE
-- option. The result would be the same.
-- UTL_FILE_DIR must point at the target directory first, e.g.
-- ALTER SYSTEM SET UTL_FILE_DIR='/oracle/database/' SCOPE=SPFILE;
-- (a static parameter, so the instance must be restarted for it to take effect)
EXECUTE DBMS_LOGMNR_D.BUILD('dictionary.ora', '/oracle/database/', OPTIONS => DBMS_LOGMNR_D.STORE_IN_FLAT_FILE);
-- to extract the dictionary to the online redo logs
-- the database must be in ARCHIVELOG mode and archiving must be enabled
EXECUTE DBMS_LOGMNR_D.BUILD (OPTIONS=>DBMS_LOGMNR_D.STORE_IN_REDO_LOGS);
SELECT NAME FROM V$ARCHIVED_LOG WHERE DICTIONARY_BEGIN='YES';
SELECT NAME FROM V$ARCHIVED_LOG WHERE DICTIONARY_END='YES';
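-- A sketch of feeding the dictionary-bearing archived logs into a LogMiner session
-- (the file names are hypothetical; use the NAMEs returned by the two queries above):
EXECUTE DBMS_LOGMNR.ADD_LOGFILE(LOGFILENAME => '/oracle/arch/arch_dict_begin.arc', OPTIONS => DBMS_LOGMNR.NEW);
EXECUTE DBMS_LOGMNR.ADD_LOGFILE(LOGFILENAME => '/oracle/arch/arch_dict_end.arc', OPTIONS => DBMS_LOGMNR.ADDFILE);
EXECUTE DBMS_LOGMNR.START_LOGMNR(OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS);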
-- no need to extract the dictionary
-- with this command LogMiner uses the online dictionary, which lives in the SYS schema
-- The online catalog option is not valid with the DDL_DICT_TRACKING option.
EXECUTE DBMS_LOGMNR.START_LOGMNR(OPTIONS => DBMS_LOGMNR.DICT_FROM_ONLINE_CATALOG);
-- tracking DDL statements
-- applies DDL commands seen in the logs to LogMiner's internal dictionary
EXECUTE DBMS_LOGMNR.START_LOGMNR(OPTIONS => DBMS_LOGMNR.DDL_DICT_TRACKING);
-- With this option set, LogMiner applies any DDL statements seen in the redo logs to its internal dictionary.
-- For example, to see all the DDL executed by user SYS, you could issue the following query:
SELECT USERNAME, SQL_REDO FROM V$LOGMNR_CONTENTS WHERE USERNAME = 'SYS' AND OPERATION = 'DDL';
-----------------------------
-- filtering redo_log actions
-----------------------------
-- If long-running transactions are present in the redo logs being analyzed,
-- use of this option may cause an "Out of Memory" error.
EXECUTE DBMS_LOGMNR.START_LOGMNR(OPTIONS => DBMS_LOGMNR.COMMITTED_DATA_ONLY);
EXECUTE DBMS_LOGMNR.START_LOGMNR(OPTIONS => DBMS_LOGMNR.SKIP_CORRUPTION);
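-- To see what was skipped, the STATUS and INFO columns of V$LOGMNR_CONTENTS carry the
-- details (a sketch; the exact INFO text for skipped block ranges varies by version):
SELECT OPERATION, STATUS, INFO FROM V$LOGMNR_CONTENTS WHERE INFO IS NOT NULL;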
EXECUTE DBMS_LOGMNR.START_LOGMNR( DICTFILENAME => '/oracle/dictionary.ora',
STARTTIME => TO_DATE('01-Jan-1998 08:30:00', 'DD-MON-YYYY HH:MI:SS'),
ENDTIME => TO_DATE('01-Jan-1998 08:45:00', 'DD-MON-YYYY HH:MI:SS'));
EXECUTE DBMS_LOGMNR.START_LOGMNR( DICTFILENAME => '/oracle/dictionary.ora',
STARTSCN => 100,
ENDSCN => 150);
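-- A range and options can be combined in one call; a sketch mixing the online catalog,
-- committed-data filtering, and an illustrative SCN window:
EXECUTE DBMS_LOGMNR.START_LOGMNR( STARTSCN => 100, ENDSCN => 150,
OPTIONS => DBMS_LOGMNR.DICT_FROM_ONLINE_CATALOG + DBMS_LOGMNR.COMMITTED_DATA_ONLY);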
-----------------------------
-- querying v$logmnr_contents
-----------------------------
SELECT OPERATION, SQL_REDO, SQL_UNDO FROM V$LOGMNR_CONTENTS
WHERE SEG_OWNER = 'SCOTT' AND SEG_NAME = 'ORDERS' AND
OPERATION = 'DELETE' AND USERNAME = 'RON';
-- MINE_VALUE example
SELECT SQL_REDO FROM V$LOGMNR_CONTENTS
WHERE
SEG_NAME = 'EMP' AND
SEG_OWNER = 'SCOTT' AND
OPERATION = 'UPDATE' AND
DBMS_LOGMNR.MINE_VALUE(REDO_VALUE, 'SCOTT.EMP.SAL') >
2*DBMS_LOGMNR.MINE_VALUE(UNDO_VALUE, 'SCOTT.EMP.SAL');
-- COLUMN_PRESENT example
SELECT
(XIDUSN || '.' || XIDSLT || '.' || XIDSQN) AS XID,
(DBMS_LOGMNR.MINE_VALUE(REDO_VALUE, 'SCOTT.EMP.SAL') -
DBMS_LOGMNR.MINE_VALUE(UNDO_VALUE, 'SCOTT.EMP.SAL')) AS INCR_SAL
FROM V$LOGMNR_CONTENTS
WHERE
DBMS_LOGMNR.COLUMN_PRESENT(REDO_VALUE, 'SCOTT.EMP.SAL') = 1 AND
DBMS_LOGMNR.COLUMN_PRESENT(UNDO_VALUE, 'SCOTT.EMP.SAL') = 1 AND
OPERATION = 'UPDATE';
--------------------
-- a session example
--------------------
ALTER DATABASE ADD SUPPLEMENTAL LOG DATA;
EXECUTE DBMS_LOGMNR.ADD_LOGFILE(LOGFILENAME => 'log1orc1.ora', OPTIONS => DBMS_LOGMNR.NEW);
EXECUTE DBMS_LOGMNR.ADD_LOGFILE(LOGFILENAME => 'log2orc1.ora', OPTIONS => DBMS_LOGMNR.ADDFILE);
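-- a quick sanity check of what the session will read:
SELECT FILENAME, LOW_SCN, NEXT_SCN FROM V$LOGMNR_LOGS;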
EXECUTE DBMS_LOGMNR.START_LOGMNR(DICTFILENAME =>'/oracle/database/dictionary.ora');
EXECUTE DBMS_LOGMNR.START_LOGMNR(OPTIONS => DBMS_LOGMNR.DICT_FROM_ONLINE_CATALOG + DBMS_LOGMNR.COMMITTED_DATA_ONLY);
EXECUTE DBMS_LOGMNR.START_LOGMNR(OPTIONS => DBMS_LOGMNR.DICT_FROM_REDO_LOGS + DBMS_LOGMNR.COMMITTED_DATA_ONLY);
EXECUTE DBMS_LOGMNR.START_LOGMNR(DICTFILENAME => 'orcldict.ora',
STARTTIME => TO_DATE('01-Jan-1998 08:30:00', 'DD-MON-YYYY HH:MI:SS'),
ENDTIME => TO_DATE('01-Jan-1998 08:45:00', 'DD-MON-YYYY HH:MI:SS'));
/*
--START_LOGMNR options
DBMS_LOGMNR.DICT_FROM_ONLINE_CATALOG
-- If you specify DICT_FROM_REDO_LOGS, LogMiner expects to find a dictionary in the redo logs
-- that you specified with the DBMS_LOGMNR.ADD_LOGFILE procedure before starting logmnr
DBMS_LOGMNR.DICT_FROM_REDO_LOGS
DBMS_LOGMNR.COMMITTED_DATA_ONLY
DBMS_LOGMNR.SKIP_CORRUPTION
DBMS_LOGMNR.DDL_DICT_TRACKING
-- DBMS_LOGMNR.ADD_LOGFILE options
DBMS_LOGMNR.NEW, DBMS_LOGMNR.ADDFILE, and DBMS_LOGMNR.REMOVEFILE -- see Specify Redo Logs for Analysis
DBMS_LOGMNR.NO_SQL_DELIMITER
DBMS_LOGMNR.PRINT_PRETTY_SQL
DBMS_LOGMNR.CONTINUOUS_MINE
*/
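-- A sketch combining two of the formatting options above: PRINT_PRETTY_SQL formats
-- SQL_REDO/SQL_UNDO for readability, and NO_SQL_DELIMITER drops the trailing semicolon
-- (useful when the reconstructed SQL is re-executed programmatically):
EXECUTE DBMS_LOGMNR.START_LOGMNR(OPTIONS => DBMS_LOGMNR.DICT_FROM_ONLINE_CATALOG + DBMS_LOGMNR.PRINT_PRETTY_SQL + DBMS_LOGMNR.NO_SQL_DELIMITER);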
SELECT * FROM v$logmnr_contents;
-- table hit statistics
SELECT SEG_OWNER, SEG_NAME, COUNT(*) AS Hits
  FROM V$LOGMNR_CONTENTS
 WHERE SEG_NAME NOT LIKE '%$'
 GROUP BY SEG_OWNER, SEG_NAME;
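-- a similar breakdown by operation type:
SELECT OPERATION, COUNT(*) AS Hits
  FROM V$LOGMNR_CONTENTS
 GROUP BY OPERATION;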
EXECUTE DBMS_LOGMNR.END_LOGMNR;