
Wednesday, July 20, 2016

get file

function get_File ( p_directory varchar2, p_file_name varchar2) return BLOB
is
  l_bf        BFILE;
  l_amount    INTEGER := 32767;
  l_position  INTEGER := 1;
  l_buffer    RAW(32767);
  l_bb        BLOB;
begin

  l_bf := bfilename( p_directory, p_file_name );

  dbms_lob.open( l_bf, dbms_lob.lob_readonly );
  dbms_lob.createtemporary( l_bb, TRUE, dbms_lob.session );

  -- read the BFILE chunk by chunk and append each chunk to the temporary BLOB
  LOOP
    begin
      dbms_lob.read( l_bf, l_amount, l_position, l_buffer );
    exception
      when NO_DATA_FOUND then   -- end of file reached
        EXIT;
    end;
    dbms_lob.writeappend( l_bb, l_amount, l_buffer );
    l_position := l_position + l_amount;
  END LOOP;

  dbms_lob.close( l_bf );

  return l_bb;

end get_File;
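
A quick way to exercise the function (a sketch; MY_DIR and sample.pdf are placeholders, and the directory object must already exist and be readable):

set serveroutput on
declare
  l_blob BLOB;
begin
  l_blob := get_File('MY_DIR', 'sample.pdf');   -- hypothetical directory object / file name
  dbms_output.put_line('read '||dbms_lob.getlength(l_blob)||' bytes');
  dbms_lob.freetemporary(l_blob);               -- the caller is responsible for freeing the temporary LOB
end;
/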

store password hashes in ksh (Korn shell)

log(){
    typeset rundate=$(date +"%F %X")
    printf "${rundate} -- $*\n"
    printf "${rundate} -- $*\n" >> $screen_log
}

do_sql(){
sql=$*
out=$(sqlplus -s '/ as sysdba' <<-EOF
whenever sqlerror exit 1
whenever oserror exit 1
set heading off feedback off serveroutput on trimout on pagesize 0
$sql
EOF
)
ret=$?
printf '%s' "$out"
return $ret
}

store_pwd(){

load_env $destdb

log "checking database open mode..."
sql="select status from v\$instance;"
log "running sql: $sql"
out=$(do_sql "$sql")
ret=$?
[[ $ret -ne 0 ]] && { log "*** failed to query database -- \n$out"; exit 1; }
log "got: ${out}"
typeset local_inst_status=$out
[[ $local_inst_status != "OPEN" ]] && { log "*** $destdb is not OPEN, not able to store passwords; skipping"; return 102; }
sql=$(cat <<-EOF
set echo off
set lines 1000
col lines for a1000
select u.name ||','|| u.spare4 ||','|| u.password ||','|| du.default_tablespace ||','|| du.profile||','||u.defrole||','||du.authentication_type||','||'"'||u.ext_username||'"' lines
from user$ u join dba_users du on (u.name=du.username) where u.name in ('SYS','SYSTEM','ORA_NCYCLE') ;
EOF
)

log "querying password hashes..."
out=$(do_sql "$sql")
ret=$?
[[ $ret -ne 0 ]] && { log "*** failed to query database -- \n$out"; return 1; }

touch $pwd_store
chmod 600 $pwd_store
echo "$out" > $pwd_store
return $?

}
restore_pwd(){
# return 102: file hashes are not there->try with cloakware
# return 1: error

load_env $destdb
log "checking dump file at $pwd_store"

[[ ! -e $pwd_store ]] && { log "dump file with hashes does not exist"; return 102; }

cnt=$(grep -E "SYS|SYSTEM|ORA_NCYCLE" $pwd_store | wc -l)
[[ $cnt -ne 3 ]] && { log "dump file does not contain rows for SYS|SYSTEM|ORA_NCYCLE"; return 102;}

while read line 
do
usr=$(echo $line| awk -F, '{print $1}')
hash10g=$(echo $line| awk -F, '{print $3}')
hash11g=$(echo $line| awk -F, '{print $2}')

if [[ ! -z $hash11g ]];then
sql=$(cat <<-EOF
set feed on
alter user $usr identified by values '$hash11g;$hash10g';
EOF
)
elif [[ ! -z $hash10g ]];then 
sql=$(cat <<-EOF
set feed on
alter user $usr identified by values '$hash10g';
EOF
)
else 
log "password hash not found for user $usr"
return 102
fi

log "setting password for user $usr"
out=$(do_sql "$sql")
ret=$?
if [[ $ret -ne 0 && $out != *ORA-28007* ]]; then
log "*** failed to run sql -- \n$out"
return 1
elif [[ $ret -ne 0 && $out = *ORA-28007* ]]; then
log "password is already set"
else 
log "password set"
fi


done < $pwd_store

rm $pwd_store

return 0

}

run sqlplus in perl

sub mask_pwd {
my $str = shift;
$str =~ s/conn(ect)?\s+(.*?)\/(.*?)([@\s])/connect $2\/(hidden)$4/ig;
$str =~ s/identified\s+by(\s+values)?\s+(.+?)(\s)/identified by$1 (hidden) $3/ig;
return $str;

}

sub run_sql {
# note: the switch/case construct below requires the Switch module ("use Switch;")

my $sql = shift;
my $error_handling = shift;
my $error_mode;
my $exec_sqlfile;

if (defined $error_handling) {

switch ($error_handling) {

case 'IGNORE_SQL_ERROR' {
$error_mode = "WHENEVER OSERROR exit 1; ";
}
case 'IGNORE_OS_ERROR' {
$error_mode = "WHENEVER SQLERROR exit SQL.SQLCODE; ";
}
case 'IGNORE_ALL_ERROR' {
$error_mode = "";
}

}

}
else { # default
$error_mode = "WHENEVER OSERROR exit 1; \nWHENEVER SQLERROR exit SQL.SQLCODE; ";
}


if ( $sql =~ /^\//) {  # starting with '/' -> filename  
$exec_sqlfile = $sql;
}
else {
&xOpen_File_Write( $sqlplusfile );
&xWrite_File( $sql );
&xClose_File;

$exec_sqlfile = $sqlplusfile;
}
 
   
  my $str = qq{ . /cs/oracle/local/dba/bin/basenv.ksh; . /cs/oracle/local/dba/bin/oraenv.ksh $instance;
sqlplus -s \\"/ as sysdba\\" << EOF > $sqlpluslogfile 2>&1
${error_mode}
\@$exec_sqlfile
EOF
  };

$cmd = &xKSH_Call_Out_String( $str );

&xWrite_Log("running:\n$error_mode\n".&mask_pwd($sql) );

my $log            = qx( $cmd );
my $err            = $?>>8;

chomp(my $sqllog         = &xGobble( $sqlpluslogfile ));

if ($err) {
&xWrite_Log( &mask_pwd($sqllog) );
return (-1, $sqllog);
}
else {

if ( $sql !~ /feed off/ ) {
&xWrite_Log("got:\n".&mask_pwd($sqllog));
}

return (0, $sqllog) ;

}

}

dumping system, role and proxy grants for users

      set long 10000000
      set longchunksize 1000000
      set pages 0 head off feed off echo off
      set lines 500
      col lines for a500 word_wrapped
      exec dbms_metadata.set_transform_param(dbms_metadata.SESSION_TRANSFORM,'SQLTERMINATOR',TRUE);
      select '-- SYSTEM GRANTS' from dual;
      select dbms_metadata.get_granted_ddl('SYSTEM_GRANT',du.username) as lines from dba_users du where du.username in (${preserve_grant_quoted}) and exists (select grantee from dba_sys_privs where grantee=du.username);
      select '-- ROLE GRANTS' from dual;
      select dbms_metadata.get_granted_ddl('ROLE_GRANT',du.username) as lines from dba_users du where du.username in (${preserve_grant_quoted}) and exists (select grantee from dba_role_privs where grantee=du.username);
      select '-- DEFAULT ROLES' from dual;
      select dbms_metadata.get_granted_ddl('DEFAULT_ROLE',du.username) as lines from dba_users du where du.username in (${preserve_grant_quoted}) ;
      select '-- PROXY GRANTS' from dual;
      select dbms_metadata.get_granted_ddl('PROXY', pu.client) as lines  from proxy_users pu where pu.client in (${preserve_grant_quoted}) ;
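
Object grants can be dumped the same way if needed (a hedged addition mirroring the queries above; OBJECT_GRANT is a standard dbms_metadata grant type):

      select '-- OBJECT GRANTS' from dual;
      select dbms_metadata.get_granted_ddl('OBJECT_GRANT',du.username) as lines from dba_users du where du.username in (${preserve_grant_quoted}) and exists (select grantee from dba_tab_privs where grantee=du.username);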

How to test a DB-Link as SYS

VARIABLE owner VARCHAR2(30)
VARIABLE link VARCHAR2(128)
set serveroutput on
set define off
set verify off
set feed off
DECLARE 
cur_remote INTEGER;
lv_usr INTEGER;
lv_ignore INTEGER;
lv_db_name VARCHAR2(256);
lv_link_name VARCHAR2(128);

BEGIN

:owner:='$db_link->[0]';
:link:='$db_link->[1]';

FOR x IN ( select dbl.owner, dbl.db_link, dbl.username, dbl.host, du.user_id from 
dba_db_links dbl left join dba_users du on (dbl.owner = du.username)
where dbl.owner=:owner and dbl.db_link=:link
) LOOP

cur_remote := sys.dbms_sys_sql.open_cursor();

IF ( x.owner != 'PUBLIC' ) THEN
dbms_sys_sql.parse_as_user(
cur_remote,
'select property_value db_name
from database_properties\@'||x.db_link||
' where property_name=''GLOBAL_DB_NAME''',
DBMS_SQL.NATIVE,
x.user_id); 
ELSE
dbms_sys_sql.parse(
cur_remote,
'select property_value db_name
from database_properties\@'||x.db_link||
' where property_name=''GLOBAL_DB_NAME''',
DBMS_SQL.NATIVE); 
END IF;
 
dbms_sys_sql.define_column(cur_remote, 1, lv_db_name,256); 

lv_ignore := dbms_sys_sql.execute(cur_remote); 
 
IF dbms_sys_sql.fetch_rows(cur_remote)>0 THEN 
dbms_sys_sql.column_value(cur_remote, 1, lv_db_name); 
END IF;
dbms_sys_sql.close_cursor(cur_remote); 

-- dbms_output.put_line('x.db_link||','||sys_context('userenv','db_name')||','||x.owner||','||regexp_substr(lv_db_name,'[^.]+')||','||x.username);

sys.pk_compliance_sys.log_dblink_compliance (x.db_link, x.owner, regexp_substr(lv_db_name,'[^.]+'),
                             x.username, '', '' );
 
dbms_output.put_line('registered link '||x.db_link||' with target db: '||regexp_substr(lv_db_name,'[^.]+'));

END LOOP;

EXCEPTION 
WHEN OTHERS THEN 
IF dbms_sys_sql.is_open(cur_remote) THEN 
dbms_sys_sql.CLOSE_CURSOR(cur_remote); 
END IF; 
RAISE;  
END;
/

Tuesday, September 18, 2012

db migrations

checklist for database migrations (a few starter queries follow the list):

  • check roles and permissions to roles
  • check TEMP
  • check UNDO (on RAC check UNDO for each instance)
  • check SGA
  • check optimizer parameters
  • check all possible accounts (personal and app)
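
A few of these checks are easy to script; a minimal sketch against the standard DBA views (the UNDO tablespace name pattern is an assumption, adjust it to your naming):

select tablespace_name, round(sum(bytes)/1024/1024) mb from dba_temp_files group by tablespace_name;
select tablespace_name, round(sum(bytes)/1024/1024) mb from dba_data_files where tablespace_name like 'UNDO%' group by tablespace_name;
select name, round(value/1024/1024) mb from v$sga;
select name, value from v$parameter where name like 'optimizer%';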

Tuesday, February 21, 2012

Load SQL Baselines

1. Create a SQL Tuning set
BEGIN
dbms_sqltune.create_sqlset(sqlset_name => 'EOD_856CDNTC4XDRG', description =>'Performance issue eod', sqlset_owner =>'SYS');
END;
/

2. Load the SQL into the set
DECLARE sqlset_cur dbms_sqltune.sqlset_cursor; bf VARCHAR2(37);
BEGIN
bf := 'UPPER(SQL_ID) = ''856CDNTC4XDRG'' ';
OPEN sqlset_cur FOR
SELECT VALUE(P) FROM TABLE(
dbms_sqltune.select_workload_repository( '7485', '7653', bf, NULL, NULL, NULL, NULL, 1, NULL, 'ALL')
) P;
dbms_sqltune.load_sqlset(
sqlset_name=>'EOD_856CDNTC4XDRG'
, populate_cursor=>sqlset_cur
, load_option => 'MERGE'
, update_option => 'ACCUMULATE'
, sqlset_owner=>'SYS');
END;
/


3. Check the SQL Tuning set
SELECT * FROM TABLE(DBMS_SQLTUNE.SELECT_SQLSET( 'EOD_856CDNTC4XDRG' ) );

4. Load the BASELINE from SQL Tuning set
set serveroutput on
DECLARE
l_plans_loaded PLS_INTEGER;
BEGIN
l_plans_loaded := DBMS_SPM.load_plans_from_sqlset(
sqlset_name => 'EOD_856CDNTC4XDRG'
,sqlset_owner => 'SYS');
dbms_output.put_line ('loaded '||l_plans_loaded);
END;
/

5. Use EM Server->SQL Plan Control to manage the baselines
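
Alternatively, the loaded baselines can be checked from SQL*Plus (dba_sql_plan_baselines is the standard view):

select sql_handle, plan_name, origin, enabled, accepted
  from dba_sql_plan_baselines
 order by created desc;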

Tuesday, December 20, 2011

run data pump out of procedure (DBMS_DATAPUMP)

1. create directory
create directory clone_auto as '';
2. create user
create user dba_clone_user identified by test123;
grant connect, create database link, resource, create view to dba_clone_user;
grant unlimited tablespace to dba_clone_user;
grant exp_full_database to dba_clone_user;
grant read,write on directory clone_auto to dba_clone_user;
grant execute on dbms_flashback to dba_clone_user;
grant create table to dba_clone_user;
grant FLASHBACK ANY TABLE to dba_clone_user;


3. create procedure
CREATE OR REPLACE PROCEDURE dba_clone_user.start_export
IS
   hdl_job        NUMBER;
   l_cur_scn      NUMBER;
   l_job_state    VARCHAR2 (20);
   l_status       SYS.ku$_status1010;
   l_job_status   SYS.ku$_jobstatus1010;
BEGIN
   
   begin
    execute immediate 'drop table  DBA_CLONE_USER.AUTO_EXPORT';
   exception when others then null;
   end;
   
   hdl_job := DBMS_DATAPUMP.OPEN ( operation => 'EXPORT', job_mode => 'SCHEMA', job_name => 'AUTO_EXPORT');
   dbms_datapump.set_parameter(hdl_job, 'FLASHBACK_SCN', dbms_flashback.get_system_change_number);
   dbms_datapump.set_parameter(hdl_job, 'COMPRESSION', 'METADATA_ONLY');
   dbms_datapump.metadata_filter (handle => hdl_job, name => 'SCHEMA_LIST', value => '''USER1''');
   dbms_datapump.set_parallel(hdl_job, 5);
  
   DBMS_DATAPUMP.add_file (handle => hdl_job,
                           filename => 'user1%U.dmp',
                           directory => 'CLONE_AUTO',
                           --filesize   IN VARCHAR2 DEFAULT NULL,
                           filetype => DBMS_DATAPUMP.ku$_file_type_dump_file,
                           reusefile => 1
                          );
                         
   DBMS_DATAPUMP.add_file (handle => hdl_job,
                           filename => 'auto_export.log',
                           DIRECTORY => 'CLONE_AUTO',
                           --filesize   IN VARCHAR2 DEFAULT NULL,
                           filetype => DBMS_DATAPUMP.ku$_file_type_log_file,
                           reusefile => 1
                          );
                         
   DBMS_DATAPUMP.start_job (handle => hdl_job);
  
   DBMS_DATAPUMP.wait_for_job (handle => hdl_job, job_state => l_job_state);
  
   DBMS_OUTPUT.put_line ('Job exited with status:' || l_job_state);
  
END;
/
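
To kick off the export and watch it (a sketch; the procedure blocks in wait_for_job, so run the monitoring query from another session -- dba_datapump_jobs is the standard view):

set serveroutput on
exec dba_clone_user.start_export

select job_name, state, degree, attached_sessions
  from dba_datapump_jobs
 where owner_name = 'DBA_CLONE_USER';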

Thursday, July 7, 2011

transfer stats from one schema to another

exporting from schema ORIG_USER and importing into MYUSER

1. create the stattab:
exec DBMS_STATS.CREATE_STAT_TABLE(ownname=>'ORIG_USER',stattab=>'STATS_FOR_TEST');

2. create a table with the list of tables:
create table test_tabs(object_name varchar2(200));
insert into test_tabs (select ...)

3. export the stats:
select 'exec dbms_stats.export_table_stats(ownname=>''ORIG_USER'',tabname=>'''|| object_name ||''',stattab=>''STATS_FOR_TEST'');' cmd from (select distinct object_name from test_tabs) t;

4. delete the stats on the target schema:
select 'exec dbms_stats.delete_table_stats(ownname=>''MYUSER'',tabname=>'''|| object_name ||''',force=>true);' cmd from (select distinct object_name from test_tabs) t;

5. copy the table holding the stats:
create table myuser.STATS_FOR_TEST as (select * from orig_user.STATS_FOR_TEST);

6. modify MYUSER.STATS_FOR_TEST:
update stats_for_test set c5='MYUSER';
update stats_for_test set ch1 = ora_hash(ch1) where ch1 is not null; <== to mask histogram values

7. import the stats:
select 'exec dbms_stats.import_table_stats(ownname=>''MYUSER'',tabname=>'''|| object_name ||''',stattab=>''STATS_FOR_TEST'',force=>true);' cmd from (select distinct object_name from test_tabs) t;

Thursday, March 10, 2011

STATS Locked after import

Metalink: ORA-38029 "Object Statistics Are Locked" - Possible Causes [ID 433240.1]

To prevent import (imp) from locking the table's statistics when importing a table without the rows (rows=n), use statistics=none. To prevent data pump import (impdp) from locking the table's statistics when importing a table without the rows (content=metadata_only), use exclude=(table_statistics,index_statistics).
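
If statistics end up locked anyway, they can be unlocked afterwards (dbms_stats.unlock_table_stats / unlock_schema_stats; the owner and table names below are placeholders):

exec dbms_stats.unlock_table_stats('SCOTT','EMP');
exec dbms_stats.unlock_schema_stats('SCOTT');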

Friday, November 5, 2010

GRANT OPTION

if a user has received a privilege WITH GRANT OPTION, he can pass access to the underlying data on to other users through a view of his own, without the recipient ever being granted anything on the original object:
SQL> grant select on dba_users to dbatest with grant option;
SQL> conn dbatest
SQL> create view da_users as select * from sys.dba_users;
SQL> grant select on da_users to dbatest2;
SQL> conn dbatest2
SQL> select count(*) from dbatest.da_users;

COUNT(*)
----------
40

SQL> select count(*) from sys.dba_users;
select count(*) from sys.dba_users
*
ERROR at line 1:
ORA-00942: table or view does not exist

Monday, October 25, 2010

DG broker connections

set event = '16634 trace name context forever, level 1' and restart the Data Guard broker process. This records all connect strings used by the broker in the drc*.log files, so you can see exactly which connect string the broker is using.
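
One way to set the event dynamically (a sketch; alternatively put it in the EVENT init parameter and bounce the instance):

alter system set events '16634 trace name context forever, level 1';
-- then restart only the broker process (see DG_BROKER_START below)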

metalink: [ID 745201.1]

to start and stop the broker: ALTER SYSTEM SET DG_BROKER_START=[TRUE/FALSE];

Wednesday, September 15, 2010

SQL trace file interpretation

http://wiki.oracle.com/page/Structure+of+an+Extended+SQL+Trace+file


XCTEND rlbk=%d rd_only=%d

XCTEND    a transaction end marker
rlbk      1 if a rollback was performed, 0 if no rollback (commit)
rd_only   1 if the transaction was read only, 0 if changes occurred

Monday, June 14, 2010

prefetch

An execution plan that looks like this:

---------------------------------------------------------------------------
0 SELECT STATEMENT Optimizer=ALL_ROWS (Cost=322 Card=319 Bytes=11803)
1 0 TABLE ACCESS (BY INDEX ROWID) OF 'TARGET' (Cost=2 Card=1 Bytes=29)
2 1 NESTED LOOPS (Cost=322 Card=319 Bytes=11803)
3 2 TABLE ACCESS (FULL) OF 'DRIVER' (Cost=3 Card=319 Bytes=2552)
4 2 INDEX (RANGE SCAN) OF 'T_PK' (UNIQUE) (Cost=1 Card=1)


is using a feature called 'prefetch'.

More about it:
http://www.oracle.com/technology/products/oracle9i/daily/dec17.html
http://forums.oracle.com/forums/message.jspa?messageID=4236898

Thursday, September 24, 2009

to enable the CBO trace

alter session set tracefile_identifier='_10053';
alter session set events '10053 trace name context forever, level 1';
alter session set events '10053 trace name context off';
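
To find the trace file that was produced (v$diag_info is available from 11g on; on older versions look in user_dump_dest):

select value from v$diag_info where name = 'Default Trace File';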


http://www.psoug.org/reference/trace_tkprof.html

Tuesday, August 11, 2009

my old topic...

found my old topic on user groups

the main point, which I had forgotten and was looking for:

-- the remote v$session.process equals the local v$process.spid

Monday, July 27, 2009

incomplete recovery

Even though all of the datafiles may have been restored to the same SCN, some of them can still be left in a 'fuzzy' state.
select substr(name,1,40),recover, fuzzy, to_char(checkpoint_change#) checkpoint_scn from v$datafile_header;
select file#,STATUS, FUZZY from v$datafile_header;

to ignore those files:
ALTER DATABASE DATAFILE [datafile name] OFFLINE DROP;
or, if you need to open the database anyway, use the hidden parameter
_allow_resetlogs_corruption=TRUE
This will open the database even though it is inconsistent; the database must then be rebuilt using export/import.
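
A quick way to generate the OFFLINE DROP statements for the fuzzy files (a sketch based on v$datafile_header; review the output before running it):

select 'alter database datafile '''||name||''' offline drop;' cmd
  from v$datafile_header
 where fuzzy = 'YES';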

Refs:
435201.1

Monday, March 23, 2009

ORA-01114 and tempfile id

this error indicates
ORA-01114: IO error writing block to file 1023 (block # 455946)
Cause: The device on which the file resides is probably offline. If the file is a temporary file, then it is also possible that the device has run out of space. This could happen because disk space of temporary files is not necessarily allocated at file creation time.

but there's no file with id 1023 in the database.

This is actually a tempfile. The reported file id is computed as (db_files + dba_temp_files.file_id).
So in this case the value of the init.ora parameter "db_files" is 1022, which makes the first tempfile id 1023.
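
A quick sketch to map the reported file number back to the actual tempfile, using the formula above:

select tf.file_id + p.db_files reported_file_id, tf.file_name
  from dba_temp_files tf,
       (select to_number(value) db_files from v$parameter where name = 'db_files') p;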

Fuck all those who say to just read the documentation: there is supposed to be a Metalink note that covers this, but it's hard to find thanks to Metalink's screwed-up search engine. I fucking hate Oracle.

Thursday, February 19, 2009

investigating block corruption

step 1. Populate v$database_block_corruption (RMAN command):
backup validate check logical database;
step 2. Check whether there are any corrupt blocks:
select count(1) from V$DATABASE_BLOCK_CORRUPTION;
step 3. investigate which objects these blocks belong to
create table block_corr as select file#,block# from V$DATABASE_BLOCK_CORRUPTION where 1=0;
create unique index bcorr_pk on block_corr(file#,block#);
insert into block_corr select file#,block# from V$DATABASE_BLOCK_CORRUPTION order by file#,block#;

step 4. create a table to store the affected segments:
create table corr_objects as
SELECT de.segment_type, de.owner, de.segment_name, de.PARTITION_NAME
FROM dba_extents de, block_corr bc
WHERE de.file_id = bc.file#
and bc.block# between block_id AND block_id + blocks - 1
group by de.segment_type, de.owner, de.segment_name, de.PARTITION_NAME;

set lines 300 pages 999
col segment_name for a30
col PARTITION_NAME for a30
select * from corr_objects;

step 5. the segment header block itself might be corrupt, in which case dba_extents returns no rows; check dba_segments directly:
select ds.owner, ds.segment_name, ds.segment_type, bc.file#, bc.block#
from dba_segments ds, block_corr bc
where ds.header_file=bc.file# and ds.header_block = bc.block#;

step 6. If the block does not belong to any object, double-check whether it exists in dba_free_space, to see whether the block belongs to free space or to the file space usage bitmap.
select * from dba_free_space where file_id= &AFN
and &CBN between block_id and block_id + blocks -1;
to list all corrupt blocks, showing for each whether it belongs to a segment or to free space:
col is_used_block for a4
col is_free_space for a4
col file_name for a60
col file# for 999

select bc.file#, bc.block#
, DECODE(
(select de.block_id from dba_extents de where de.file_id = bc.file# and bc.block# between de.block_id and de.block_id + de.blocks -1 )
,null,'NO','YES') is_used_block
, DECODE(
(select dfs.block_id from dba_free_space dfs where dfs.file_id = bc.file# and bc.block# between dfs.block_id and dfs.block_id + dfs.blocks -1)
,null,'NO','YES') is_free_space
,ddf.file_name
from block_corr bc, dba_data_files ddf
where bc.file# = ddf.file_id
order by bc.file#, bc.block#;

step 7. if the blocks belong to indexes or index partitions, rebuild them using the ONLINE option (Enterprise Edition):

--to rebuild indexes and index partitions:
select 'alter index '||t.owner||'.'||t.segment_name||' REBUILD ' ||decode(t.segment_type,'INDEX PARTITION','PARTITION '||t.partition_name,'') || ' ONLINE;' from (
SELECT de.segment_type, de.owner, de.segment_name, de.partition_name
FROM dba_extents de, block_corr bc
WHERE de.file_id = bc.file#
and bc.block# between block_id AND block_id + blocks - 1
) t where t.segment_type IN ('INDEX','INDEX PARTITION')
group by t.owner,t.segment_type,t.segment_name,t.partition_name
/


step 8. Note 782137.1 If the block belongs to a datafile of a tablespace which has been dropped, then V$DATABASE_BLOCK_CORRUPTION is not going to be cleared by a subsequent rman backup. This is fixed in 11g. For 10g the workaround is:
execute dbms_backup_restore.resetCfileSection(17); -- clears v$backup_corruption
execute dbms_backup_restore.resetCfileSection(18); -- clears v$copy_corruption


relevant metalink notes:
How to Find All the Corrupted Objects in Your Database.
V$Database_block_corruption Shows Entries for Non-existent Datafile
RMAN Does not Report a Corrupt Block if it is not Part of Any Segment

Tuesday, February 3, 2009

DDL locks

There is no way to run DDL with a wait option; it will always fail with a "resource busy" error (ORA-00054).
11g adds DDL_LOCK_TIMEOUT for this, but before that you have to use brute force:

declare
  resource_busy exception;
  pragma exception_init(resource_busy, -54);   -- ORA-00054: resource busy
  success boolean := false;
begin
  while not success
  loop
    begin
      execute immediate 'alter table t1 drop column n5';
      success := true;
    exception
      when resource_busy then
        dbms_lock.sleep(10);   -- wait a bit and retry
    end;
  end loop;
end;
/
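
In 11g the same thing can be done declaratively (a sketch; DDL_LOCK_TIMEOUT is the session parameter, value in seconds):

alter session set ddl_lock_timeout = 30;
alter table t1 drop column n5;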


ref: