grant select on v_$session to cmru_all;
grant comment any table to cmru_all;
grant lock any table to cmru_all;
grant select any dictionary to cmru_all;
execute dbms_repcat_admin.grant_admin_any_schema('cmru');
Windows 7
setup.exe -IgnoreSysPreReqs
You must add the following lines to all ...\stage\...\refhost.xml files:
<!--Microsoft Windows 7-->
<OPERATING_SYSTEM>
<VERSION VALUE="6.1"/>
</OPERATING_SYSTEM>
Then the installation will succeed.
---------------------------------
replication :
grant select on v_$session to prun_all;
grant comment any table to prun_all;
grant lock any table to prun_all;
grant select any dictionary to prun_all;
execute dbms_repcat_admin.grant_admin_any_schema('prun_all');
expdp system/tslmanager@orcl11g directory=payment_reg dumpfile=TPT_CANE.dmp logfile=TPT_CANEDPE1.log schemas=TPT_CANE
Resource usage :
----------------
select * from v$resource_limit
/
select program,count(*)
from v$session
group by program
order by 2 desc
/
select program,machine, count(*)
from v$session
where machine = 'COURTSFIJI\SRVFIJORACLEAP'
group by program, machine
order by 3 desc
/
select * from v$license
/
Tablespace & datafiles:
-----------------------
grant select on v_$session to uat_all;
-- creating a tablespace
create tablespace tsl_data datafile 'E:\app\Administrator\oradata\orcl11g\tsl_data.ora' size 10000 M ;
create tablespace image_data datafile 'E:\app\Administrator\oradata\orcl11g\image_data.ora' size 10000 M ;
create tablespace cane_data datafile 'E:\app\Administrator\oradata\orcl11g\cane_data.ora' size 10000 M ;
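-- optionally, a datafile can be created with autoextend instead of a fixed size (a sketch using the placeholder style above; path and sizes are illustrative)
create tablespace <tsname> datafile 'E:\app\Administrator\oradata\orcl11g\<datafile_name>.ora' size 1000 M autoextend on next 100 M maxsize unlimited;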
-- creating a temporary tablespace (old-style syntax using a permanent datafile marked TEMPORARY)
create tablespace temp_data datafile 'E:\oracledb\oradata\orclias\temp_data.ora' size 500 M temporary ;
-- preferred syntax with a tempfile
CREATE TEMPORARY TABLESPACE temp_data TEMPFILE 'E:\app\Administrator\oradata\orcl11g\temp_data.ora' SIZE 5000 M;
--------- DROP TABLESPACE -------
DROP TABLESPACE TSL_DATA INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS;
-- Dropping user
drop user <username> cascade;
-- creating a user in required table space and temporary table space and assigning quota
create user <username> identified by <password>
default tablespace <tsname>
temporary tablespace <temp_tsname>
quota unlimited on <tsname>
;
create user ch_mail identified by ch_mail
default tablespace courts_data
temporary tablespace temp_data
quota unlimited on courts_data
;
-- Grant limited rights before importing
grant connect,create table,create synonym,create view,
create sequence,create procedure, create trigger,create any Index,
create any materialized view
to <username1>,<username2>;
-- e.g. to ch_hrm,pay
grant all privileges to ch_all,ch_adm,ch_com,ch_pos,ch_fa,ch_purinv,ch_pay,ch_hrm,ch_sr;
-- Granting all rights after importing
grant all privileges to <username> ;
-- To see available users and tablespaces info
select username,default_tablespace,temporary_tablespace
from dba_users;
-- To see available tablespaces with users
select TABLESPACE_NAME,USERNAME
from DBA_TS_QUOTAS
WHERE username ='CH_SR'
/
-- to see which data file(including path) is being used for tablespace
select TABLESPACE_NAME,substr(file_name,1,50) data_file,sum(bytes)/1024/1024 size_in_MB
from DBA_DATA_FILES
where tablespace_name like 'TSL%'
group by TABLESPACE_NAME,file_name
/
-----------NUMBER OF TABLESPACES FOR USER ----------
select distinct tablespace_name from dba_segments
where owner = 'TPT_COM'
/
-- to see free tablespace information
select tablespace_name,sum(bytes)/1024/1024 free_in_MB
from dba_free_space
--where tablespace_name like 'UAT%'
group by tablespace_name
/
Insert into irmig_all.subgroup_master (comp_code, main_code, cat_code, sgrp_code, sgrp_desc, crtd_by, crtd_dt)
SELECT distinct '10', '10', catg_code, sub_catg_code, upper(sub_catg_desc), 'SATHGURU', SYSDATE
FROM apps_prod_categories a
Where category_description is not null
/
Insert into irmig_all.subsubgrp_mst (comp_code, main_code, cat_code, sgrp_code, ssgrp_code, ssgrp_desc, crtd_by, crtd_dt)
SELECT distinct '10', '10', catg_code, sub_catg_code, sub_sub_catg_code, upper(sub_sub_catg_desc), 'SATHGURU', SYSDATE
FROM apps_prod_categories a
Where category_description is not null
/
--adding datafile to existing tablespace
alter tablespace <tablespace_name> add datafile 'd:\ORAWIN95\DATABASE\<datafile_name.ora>' size 100 M
alter tablespace tsl_data add datafile 'E:\APP\ADMINISTRATOR\ORADATA\DEV2ORCL11G\TSL_DATA1.ora' size 5000 M
-- to extend the size of datafile
alter database datafile 'D:\ORAWIN95\DATABASE\<datafile_name.ora>' resize 200 M;
---------- If DB password is expired --------------
select profile from dba_users where username = '<USERNAME>';
Then you can change the limit so passwords never expire:
alter profile default limit password_life_time UNLIMITED;
To check the current limit first, use:
select resource_name,limit from dba_profiles where profile='DEFAULT';
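-- note: an account whose password has already expired still needs its password reset even after the profile change; the user name and password below are placeholders
alter user <username> identified by <new_password> account unlock;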
export
------
EXP <connect_string> FILE=<test.dmp> OWNER=<source_user> LOG=<log_file_name>
select 'exp system/smcmanager file='||username||' owner='||username||' log='||username||'_e'
from all_users
where user_id>60
/
Import :
---------
IMP <CONNECT STRING> FILE=<FILE_NAME> FROM_USER=<FROM_USER> TO_USER=<to_user> log=<log_file_name>
Rollback Segments:
------------------
step 1
create tablespace rbsbig datafile 'D:\ORAWIN95\DATABASE\rbsbig.ora' size 500 M
Step 2
Create rollback segment rbsbig
TABLESPACE rbsbig
STORAGE (
INITIAL 500k
NEXT 500k
MAXEXTENTS 2000);
before executing Large DML operation from sql prompt give the following commands
alter rollback segment rbsbig online;
set transaction use rollback segment rbsbig
Creating Database Link:
-----------------------
-- creating database link
CREATE DATABASE LINK satlink
CONNECT TO fa IDENTIFIED BY fa
USING 'sat';
-- querying data using database link
select * from config_mst@satlink;
CREATE Public Database Link courtsorcl
Connect To courts_all Identified By courts_all
Using 'ora10g64';
CREATE DATABASE LINK courtsorcl
CONNECT TO ch_all IDENTIFIED BY smcnv
USING 'courtsorcl';
Killing a Session:
------------------
-- finding locked sessions
select
c.owner,
c.object_name,
c.object_type,
b.sid,
b.serial#,
b.status,
b.osuser,
b.machine
from v$locked_object a , v$session b,dba_objects c
where b.sid = a.session_id
and a.object_id = c.object_id
/
CH_SR SR_SERVICEREQUEST_HDR TABLE 434 8389 INACTIVE SYSTEM COURTSFIJI\SRVFIJORACLEAPP
CH_SR SR_SERVICEREQUEST_HDR TABLE 196 3756 INACTIVE SYSTEM COURTSFIJI\SRVFIJORACLEAPP
CH_SR SR_FAULT_DIAGNOSIS_DTL TABLE 246 22888 INACTIVE SYSTEM COURTSFIJI\SRVFIJORACLEAPP
CH_SR SR_FAULT_DIAGNOSIS_DTL TABLE 200 5567 INACTIVE SYSTEM COURTSFIJI\SRVFIJORACLEAPP
CH_HRM BP505_LEVENTRY TABLE 320 9672 INACTIVE SYSTEM COURTSFIJI\SRVFIJORACLEAPP
Select * from v$session ;
Output :
--------
SADDR SID SERIAL# AUDSID
-------- ---------- ---------- ----------
027435DC 12 63 6818
02743E48 13 23 6819
Alter system kill session 'SID,SERIAL#';
E.g.,
Alter system kill session '12,63';
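-- a sketch to generate kill statements for all inactive sessions from one machine (the machine filter is illustrative; review the output before running it)
select 'alter system kill session '''||sid||','||serial#||''';'
from v$session
where machine = 'COURTSFIJI\SRVFIJORACLEAP'
and status = 'INACTIVE'
/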
Global names :
select value from V$parameter where name=lower('global_names');
alter session set global_names = false;
alter database rename global_name to gdsprod.world;
SELECT PROGRAM,COUNT(*) FROM V$SESSION
GROUP BY PROGRAM
ORDER BY 2 DESC
declare
cursor c is
select tname from tab where tabtype='TABLE';
begin
FOR I IN C LOOP
execute immediate 'analyze table '||I.TNAME||' compute STATISTICS';
END LOOP;
end;
/
declare
cursor c is
select index_name from user_indexes;
begin
FOR I IN C LOOP
execute immediate 'analyze index '||I.index_name||' compute STATISTICS';
END LOOP;
end;
/
----------
copy from scott/tiger@odbc:test to scott/tiger@erpp create cost using select * from cost.dbf;
begin
null; -- statements go here
exception
when no_data_found then
disp.errmsg('');
when too_many_rows then
disp.errmsg('');
when others then
disp.errmsg(sqlerrm);
end;
--------
--
-- increasing buffer for messages
set serveroutput on size 1000000;
-- Compiling objects
exec dbms_utility.compile_schema('CH_FA');
exec dbms_utility.compile_schema('CH_PURINV');
exec dbms_utility.compile_schema('CH_COM');
exec dbms_utility.compile_schema('CH_ADM');
exec dbms_utility.compile_schema('CH_PAY');
exec dbms_utility.compile_schema('CH_HRM');
exec dbms_utility.compile_schema('CH_SR');
exec dbms_utility.compile_schema('COURTS_POS');
exec dbms_utility.compile_schema('COURTS_ALL');
exec dbms_utility.compile_schema('SVSMFA');
EXEC DBMS_STATS.GATHER_SCHEMA_STATS('CH_FA');
exec dbms_stats.gather_table_stats('SCOTT', 'EMPLOYEES');
exec dbms_stats.gather_index_stats('CH_POS', 'IDX_INVDTL_DELVGEN');
or
SELECT 'ALTER ' || object_type || ' ' || object_name || ' COMPILE;'
FROM USER_OBJECTS
WHERE STATUS = 'INVALID';
-- unlocking the object statistics
exec DBMS_STATS.UNLOCK_TABLE_STATS('CH_FA','FA_AGEDTLS_TRN');
SELECT 'exec DBMS_STATS.UNLOCK_TABLE_STATS(''CH_FA'','''||TNAME||''''||'); '
FROM TAB
WHERE TABTYPE='TABLE'
---------
You can use this script to create the same job:
CREATE OR REPLACE PROCEDURE ANALIZESCHEMA
IS
BEGIN
DBMS_STATS.GATHER_SCHEMA_STATS('SCHEMA');
END;
And then you can schedule your job like this:
DECLARE jobno number;
BEGIN
DBMS_JOB.SUBMIT(
job => jobno,
what => 'bio_posting(trunc(sysdate));',
next_date => SYSDATE+(1/24),
interval => '/*1:Days*/ sysdate + (1/24)');
commit;
dbms_output.put_line(jobno);
END;
/
-- connect to SYS Or SYSTEM user grant privileges to CH_ALL for Java.
BEGIN
dbms_java.grant_permission('DVG_LIVE_ALL','java.net.SocketPermission','ssd-desk66','resolve');
--for specific directory , for this you need to create directory and grant read, write previleges to users i.e. CH_ALL.
-- DBMS_JAVA.grant_permission ('CH_ALL', 'java.io.FilePermission', 'C:\C_TEMP', 'read ,write, execute, delete');
--for all file
DBMS_JAVA.grant_permission ('DVG_LIVE_ALL', 'java.io.FilePermission','<<ALL FILES>>' ,'read ,write, execute, delete');
DBMS_JAVA.grant_permission ('DVG_LIVE_ALL', 'SYS:java.lang.RuntimePermission', 'writeFileDescriptor', '*');
DBMS_JAVA.grant_permission ('DVG_LIVE_ALL', 'SYS:java.lang.RuntimePermission', 'readFileDescriptor', '*');
commit;
END;
/
EXEC DBMS_STATS.GATHER_SCHEMA_STATS('<SCHEMA_NAME>');
EXEC dbms_stats.gather_schema_stats('SCOTT', cascade=>TRUE);
exec dbms_utility.compile_schema('CH_FA');
exec dbms_utility.compile_schema('CH_PURINV');
exec dbms_utility.compile_schema('CH_COM');
exec dbms_utility.compile_schema('CH_ADM');
exec dbms_utility.compile_schema('CH_PAY');
exec dbms_utility.compile_schema('CH_HRM');
exec dbms_utility.compile_schema('CH_SR');
exec dbms_utility.compile_schema('CH_COSACS');
exec dbms_utility.compile_schema('CH_POS');
exec dbms_utility.compile_schema('CH_ALL');
exec dbms_utility.compile_schema('VH_FA');
exec dbms_utility.compile_schema('VH_COM');
exec dbms_utility.compile_schema('VH_ADM');
exec dbms_utility.compile_schema('VM_ADM');
exec dbms_utility.compile_schema('VM_COM');
exec dbms_utility.compile_schema('VM_FA');
exec dbms_utility.compile_schema('VM_ALL');
exec dbms_utility.compile_schema('VH_ALL');
exec DBMS_STATS.GATHER_SCHEMA_STATS('CH_PURINV', cascade=>TRUE);
exec DBMS_STATS.GATHER_SCHEMA_STATS('CH_FA', cascade=>TRUE);
exec DBMS_STATS.GATHER_SCHEMA_STATS('CH_COM', cascade=>TRUE);
exec DBMS_STATS.GATHER_SCHEMA_STATS('CH_ADM', cascade=>TRUE);
exec DBMS_STATS.GATHER_SCHEMA_STATS('CH_PAY', cascade=>TRUE);
exec DBMS_STATS.GATHER_SCHEMA_STATS('CH_HRM', cascade=>TRUE);
exec DBMS_STATS.GATHER_SCHEMA_STATS('CH_SR', cascade=>TRUE);
exec DBMS_STATS.GATHER_SCHEMA_STATS('COURTS_POS', cascade=>TRUE);
exec DBMS_STATS.GATHER_SCHEMA_STATS('COURTS_ALL', cascade=>TRUE);
exec DBMS_STATS.GATHER_SCHEMA_STATS('CH_COSACS', cascade=>TRUE);
exec DBMS_STATS.GATHER_SCHEMA_STATS('COSACS_MIG', cascade=>TRUE);
exec DBMS_STATS.GATHER_SCHEMA_STATS('CH_POS', cascade=>TRUE);
exec DBMS_STATS.GATHER_SCHEMA_STATS('CH_ALL', cascade=>TRUE);
exec DBMS_STATS.GATHER_SCHEMA_STATS('DINT_FA', cascade=>TRUE);
exec DBMS_STATS.GATHER_SCHEMA_STATS('DINT_COM', cascade=>TRUE);
exec DBMS_STATS.GATHER_SCHEMA_STATS('DINT_ADM', cascade=>TRUE);
exec DBMS_STATS.GATHER_SCHEMA_STATS('DINT_POS', cascade=>TRUE);
exec DBMS_STATS.GATHER_SCHEMA_STATS('DINT_HRM', cascade=>TRUE);
exec DBMS_STATS.GATHER_SCHEMA_STATS('DINT_ALL', cascade=>TRUE);
exec dbms_stats.gather_schema_stats(ownname => 'CH_FA',estimate_percent => DBMS_STATS.AUTO_SAMPLE_SIZE,method_opt => 'FOR ALL COLUMNS SIZE auto',cascade => true);
exec dbms_stats.gather_schema_stats(ownname => 'CH_PURINV',estimate_percent => DBMS_STATS.AUTO_SAMPLE_SIZE,method_opt => 'FOR ALL COLUMNS SIZE auto',cascade => true);
exec dbms_stats.gather_schema_stats(ownname => 'CH_COM',estimate_percent => DBMS_STATS.AUTO_SAMPLE_SIZE,method_opt => 'FOR ALL COLUMNS SIZE auto',cascade => true);
exec dbms_stats.gather_schema_stats(ownname => 'CH_ADM',estimate_percent => DBMS_STATS.AUTO_SAMPLE_SIZE,method_opt => 'FOR ALL COLUMNS SIZE auto',cascade => true);
exec dbms_stats.gather_schema_stats(ownname => 'CH_PAY',estimate_percent => DBMS_STATS.AUTO_SAMPLE_SIZE,method_opt => 'FOR ALL COLUMNS SIZE auto',cascade => true);
exec dbms_stats.gather_schema_stats(ownname => 'CH_HRM',estimate_percent => DBMS_STATS.AUTO_SAMPLE_SIZE,method_opt => 'FOR ALL COLUMNS SIZE auto',cascade => true);
exec dbms_stats.gather_schema_stats(ownname => 'CH_SR',estimate_percent => DBMS_STATS.AUTO_SAMPLE_SIZE,method_opt => 'FOR ALL COLUMNS SIZE auto',cascade => true);
exec dbms_stats.gather_schema_stats(ownname => 'CH_ALL',estimate_percent => DBMS_STATS.AUTO_SAMPLE_SIZE,method_opt => 'FOR ALL COLUMNS SIZE auto',cascade => true);
-- to see resource usage statistics
select count(*) from v$regstat;
select count(*) from v$session;
select * from v$license;
show parameter open_cursors;
show sga
select name, type, value from v$parameter where name like lower('%MAX%');
ALTER SESSION SET parameter_name = value
/
-- windows firewall setting to allow connection to database
Open the registry editor
Go to HKEY_LOCAL_MACHINE\SOFTWARE\ORACLE\HOME
Add USE_SHARED_SOCKET as a new string value (right-click/New/String Value)
Give it the value TRUE
Restart the server
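The same registry change can be scripted; the exact Oracle home key name varies by install (it is often a KEY_* subkey on newer releases), so treat the path below as illustrative:
reg add "HKLM\SOFTWARE\ORACLE\HOME" /v USE_SHARED_SOCKET /t REG_SZ /d TRUE /f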
-- orastack was used to reduce the stack size assigned by default to each session/thread from 1 MB to 500 KB
-- shut down all the Oracle services first, then run from the bin folder of the database home:
orastack oracle.exe 500000
orastack tnslsnr.exe 500000
orastack svrmgrl.exe 500000
orastack sqlplus.exe 500000
--- increasing desktop heapsize
Modify the appropriate variable in the registry. This should not be done
unless:
- A backup has been made of the registry. Any error in modification of the
registry can render the system unusable.
- Solution 1 has been tested and has increased the number of processes
that will run successfully.
The following information applies to Windows NT, Windows 2000.
From the HKEY_LOCAL_MACHINE sub tree, go to the following key:
\System\CurrentControlSet\Control\Session Manager\SubSystems
Select the Windows value. From the Edit menu, choose String. Increase the
SharedSection parameter. SharedSection specifies the system and desktop
heaps using the following format:
SharedSection=xxxx,yyyy,zzzz
The default values are 1024,3072,512
All the values are in kilobytes (KB).
xxxx = System-wide Heapsize. There is no need to modify this value and
it is ignored for the rest of this discussion.
yyyy = IO Desktop Heapsize. This is the heap for memory objects in the
IO Desktop.
zzzz = Non-IO Desktop Heapsize. This is the heap for memory objects in
the Non-IO Desktop.
If you change these values, you must reboot the system.
-- altering sort_area_size
alter session set sort_area_size = 1048576000
-- QUERYING V$SESSION
SELECT SID,SERIAL#,USERNAME,STATUS,SCHEMANAME,PROCESS,MACHINE,TERMINAL,PROGRAM,LOGON_TIME
FROM V$SESSION
--WHERE PROGRAM LIKE 'ORACLE%'
WHERE UPPER(PROGRAM) LIKE 'FRMWEB%'
order by sid
SELECT PROGRAM,COUNT(*) FROM V$SESSION
GROUP BY PROGRAM
ORDER BY 2 DESC
SELECT A.SID, A.USERNAME, B.XIDUSN, B.USED_UREC, B.USED_UBLK
FROM V$SESSION A, V$TRANSACTION B
WHERE A.SADDR=B.SES_ADDR
-- ora-04030 error
-----------------------
--- PGA
------------------------
-- Is there an oracle limit set for pga max memory per session
select
sum(value)/1024/1024 Mb
from
v$sesstat s, v$statname n
where
n.STATISTIC# = s.STATISTIC# and
name = 'session pga memory';
-- The following query can be used to find the total amount of memory allocated to the PGA areas of all sessions
-- to see session page memory in use
select sum(bytes)/1024/1024 Mb from
(select bytes from v$sgastat
union
select value bytes from
v$sesstat s,
v$statname n
where
n.STATISTIC# = s.STATISTIC# and
n.name = 'session pga memory'
);
-- Which process is requesting too much memory?
col name format a30
select sid,name,value/1024/1024
from v$statname n,v$sesstat s
where n.STATISTIC# = s.STATISTIC# and
name like 'session%memory%'
order by 3 asc;
SELECT SID,SUM(VAL) VALINMB FROM (
select sid,name,value/1024/1024 VAL
from v$statname n,v$sesstat s
where n.STATISTIC# = s.STATISTIC# and
name like 'session%memory%')
GROUP BY SID
order by 2 asc
/
-----------
alter system flush shared_pool;
exec dbms_session.free_unused_user_memory;
--- Oracle buffer Cache Hit Ratio - Oracle memory tuning
-- Calculate buffer cache hit ratio in the database. Make sure it is more than 80 for an oltp environment and 99 is the best value.
column "logical_reads" format 99,999,999,999
column "phys_reads" format 999,999,999
column "phy_writes" format 999,999,999
select a.value + b.value "logical_reads",
c.value "phys_reads",
round(100 * ((a.value+b.value)-c.value) /
(a.value+b.value))
"BUFFER HIT RATIO"
from v$sysstat a, v$sysstat b, v$sysstat c
where
a.statistic# = 38
and
b.statistic# = 39
and
c.statistic# = 40;
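-- the statistic# values above are release-dependent; a variant keyed on the statistic names instead (a sketch computing the same ratio)
select a.value "db_block_gets",
b.value "consistent_gets",
c.value "phys_reads",
round(100 * ((a.value + b.value) - c.value) / (a.value + b.value)) "BUFFER HIT RATIO"
from (select value from v$sysstat where name = 'db block gets') a,
(select value from v$sysstat where name = 'consistent gets') b,
(select value from v$sysstat where name = 'physical reads') c;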
-- 6.0 Check Session Level Hit Ratio - Oracle tuning
-- The Hit Ratio should be higher than 90%
select Username,
OSUSER,
Consistent_Gets,
Block_Gets,
Physical_Reads,
100*( Consistent_Gets + Block_Gets - Physical_Reads)/
( Consistent_Gets + Block_Gets ) "Hit Ratio %"
from V$SESSION,V$SESS_IO
where V$SESSION.SID = V$SESS_IO.SID
and ( Consistent_Gets + Block_Gets )>0
and username is not null
order by Username,"Hit Ratio %";
set timing on
exec run_fifo_proc(to_date('27-07-2009','dd-mm-yyyy'));
---
TNS-12518: TNS:listener could not hand off client connection
Workaround: set DIRECT_HANDOFF_TTC_LISTENER=OFF in listener.ora (the suffix after DIRECT_HANDOFF_TTC_ is the listener name; the parameter accepts ON or OFF)
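A minimal listener.ora sketch, assuming the default listener name LISTENER:
DIRECT_HANDOFF_TTC_LISTENER = OFF
then reload the listener without a full restart:
lsnrctl reload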
-- free sga
select POOL, round(bytes/1024/1024,0) FREE_MB
from v$sgastat
where name like '%free memory%';
--- UTL MAIL
----------------------------------------------------------------------------
-- utl_mail setup and configuration
----------------------------------------------------------------------------
Steps to enable Mailing from Database
1. sqlplus '/ as sysdba'
2. @$ORACLE_HOME/rdbms/admin/utlmail.sql
3. @$ORACLE_HOME/rdbms/admin/prvtmail.plb
4. Set smtp_server information in init.ora or spfile.ora
alter system set smtp_out_server = 'SMTP_SERVER_IP_ADDRESS:SMTP_PORT' scope=both;
25 = Default SMTP Port
If the instance was started with an spfile:
eg: alter system set smtp_out_server = '172.25.90.165:25' scope=both;
If the instance was started with a pfile:
alter system set smtp_out_server = '172.25.90.165:25';
Also make the entry below in your initSID.ora:
smtp_out_server = '172.25.90.165:25'
That's it, your database is configured to send emails.
How to send an email
1. sqlplus '/ as sysdba'
2. exec utl_mail.send(sender => 'oraclepitstop@wordpress.com', recipients => 'oraclepitstop@wordpress.com', subject => 'Testing UTL_MAIL Option', message => 'blah blah blah');
3. Check the inbox of the email id, to verify the email receipt.
To enable other DB users to use this functionality, grant execute permission on UTL_MAIL package.
eg: grant execute on utl_mail to apps;
begin
utl_mail.send(
sender => 'sathguru@courts.com.fj',
recipients => 'pavanic@sathguru.com',
subject => 'Testing UTL_MAIL Option',
message => 'Testing UTL_MAIL Option from courts database');
end;
UTL_MAIL.SEND (
sender IN VARCHAR2 CHARACTER SET ANY_CS,
recipients IN VARCHAR2 CHARACTER SET ANY_CS,
cc IN VARCHAR2 CHARACTER SET ANY_CS DEFAULT NULL,
bcc IN VARCHAR2 CHARACTER SET ANY_CS DEFAULT NULL,
subject IN VARCHAR2 CHARACTER SET ANY_CS DEFAULT NULL,
message IN VARCHAR2 CHARACTER SET ANY_CS,
mime_type IN VARCHAR2 DEFAULT 'text/plain; charset=us-ascii',
priority IN PLS_INTEGER DEFAULT NULL);
Here, then, is the "can we please pretend we're running in 10g again" fix. It blows the whole ACL idea out of the water by simply creating an ACL that says everyone can access every networking service, no questions asked. Run it as SYS:
begin
dbms_network_acl_admin.create_acl (
acl => 'networkacl.xml',
description => 'Allow Network Connectivity',
principal => 'PUBLIC',
is_grant => TRUE,
privilege => 'connect',
start_date => SYSTIMESTAMP,
end_date => NULL);
dbms_network_acl_admin.assign_acl (
acl => 'networkacl.xml',
host => '*',
lower_port => NULL,
upper_port => NULL);
commit;
end;
The first bit of code simply creates an ACL and grants rights to it to PUBLIC. The second bit says that the ACL just created doesn’t restrict hosts or port ranges (which effectively means it’s restricting stuff-all). Your old 10g code will now run just fine in 11g once more.
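-- to verify the ACL and its grants afterwards (11g dictionary views):
select acl, host, lower_port, upper_port from dba_network_acls;
select acl, principal, privilege, is_grant from dba_network_acl_privileges;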
-- creating job
DECLARE
jobno number;
BEGIN
DBMS_JOB.SUBMIT(job => jobno,
what => 'CH_FA.VOUCHER_AUTO_REVERSAL;',
next_date => to_date('24-Jul-2009 09:29:00 PM','dd-Mon-yyyy HH:MI:SS AM'),
interval => '/*1:Hr*/ sysdate + 1/24');
END;
/
select project_id,max(id) from cm_function_master
where project_id=6 and substr(id,1,2)<>'26'
group by project_id
/
----
--- creating additional reporting server
-- Open command prompt:
Go to the opmn folder and execute the command below (E:\oracle\FrHome_1\opmn\bin)
C:\>rwserver server=ssderp_ir_repserver start
-- Shut down the opmn if it prompts a popup
-- shut down all
C:\>%OMH%\opmn\bin\opmnctl stopall
-- Add new server target to opmn.xml
C:\>%OMH%\bin\addNewServerTarget.bat ssderp_ir_repserver
--The DOS window will close automatically.
--Open command prompt again:
Update the config with new settings
C:\>%OMH%\dcm\bin\dcmctl.bat updateconfig -ct opmn -v -d
C:\>%OMH%\dcm\bin\dcmctl.bat resyncinstance -v -d
Start the opmn
C:\>%OMH%\opmn\bin\opmnctl startall
INDEXES CREATING, MONITORING & REBUILDING
=========================================
-- Creating and dropping indexes.
CREATE INDEX ix_emp_01
ON emp (deptno)
TABLESPACE index_tbs;
DROP INDEX index_name;
-- Make a index unusable
alter index PK_BF102_BUSUNIT_MST unusable;
-- see the status of the Index
select status from user_indexes
where index_name ='PK_BF102_BUSUNIT_MST';
-- To make the index usable
ALTER INDEX PK_BF102_BUSUNIT_MST REBUILD online TABLESPACE courts_data compute statistics;
select
'ALTER INDEX '||index_name||' REBUILD online ;'
from user_indexes
/
-- to make index invisible and visible
ALTER INDEX <index_name> INVISIBLE;
ALTER INDEX <index_name> VISIBLE;
SELECT INDEX_NAME, VISIBILITY FROM USER_INDEXES
WHERE INDEX_NAME = 'IND1';
-- To rename an index, issue this statement:
ALTER INDEX index_name RENAME TO new_name;
Monitoring Index Usage:
-----------------------
-- To start monitoring the usage of an index
ALTER INDEX <index_name> MONITORING USAGE;
-- stop the monitoring:
ALTER INDEX <index_name> NOMONITORING USAGE;
-- to view the usage
select * from V$OBJECT_USAGE
--- this can only be used on function based indexes -- need to check.
alter index emp_idx disable
MOVING DATAFILES TO A DIFFERENT LOCATION
========================================
For system,undo,temp tablespaces
--------------------------------
- Connect as SYS DBA with
CONNECT / AS SYSDBA command.
- Shutdown the database instance with
SHUTDOWN immediate
- Rename and/or move the datafiles at the operating system level.
- Start Oracle database in mount state with
STARTUP MOUNT
- Modify the name or location of datafiles in Oracle data dictionary using following command syntax:
ALTER DATABASE RENAME FILE 'F:\oracle\product\10.2.0\oradata\orcl\UNDOTBS2.ORA' TO 'E:\oracle\oradata\orcl\UNDOTBS2.ORA';
ALTER DATABASE RENAME FILE 'F:\oracle\product\10.2.0\oradata\orcl\TEMP_DATA.ORA' TO 'E:\oracle\oradata\orcl\TEMP_DATA.ORA';
- Open Oracle database instance completely with
ALTER DATABASE OPEN
For Non system tablespaces :
----------------------------
- Connect as SYS DBA with CONNECT / AS SYSDBA command.
- Make offline the affected tablespace with
ALTER TABLESPACE SMC_DATA OFFLINE;
- Modify the name or location of datafiles in Oracle data dictionary using following command syntax:
ALTER DATABASE RENAME FILE 'F:\oracle\product\10.2.0\oradata\orcl\SMC_DATA.ORA' TO 'E:\oracle\oradata\orcl\SMC_DATA.ORA';
Bring the tablespace online again with
ALTER TABLESPACE SMC_DATA ONLINE;
SHRINKING UNDO TABLESPACE :
==========================
- Create an undo tablespace of the required size
CREATE UNDO TABLESPACE "UNDOTBS2" DATAFILE 'F:\oracle\product\10.2.0\oradata\orcl\UNDOTBS2.ORA' SIZE 100M
AUTOEXTEND ON NEXT 10240K MAXSIZE 32767M ;
- alter the system as sys user pointing to newly created tablespace
alter system set undo_tablespace = UNDOTBS2 scope=both;
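-- to confirm which undo tablespace the instance is now using before dropping the old one:
show parameter undo_tablespace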
- Now drop the old tablespace including the datafiles
drop tablespace "UNDOTBS1" INCLUDING CONTENTS AND DATAFILES ;
ORACLE RECYCLEBIN :
===================
- To see the contents of recyclebin
show recyclebin
- to clear recyclebin
PURGE RECYCLEBIN;
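- a dropped table that is still in the recyclebin can be restored (table name is a placeholder)
FLASHBACK TABLE <table_name> TO BEFORE DROP;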
-
DATAPUMP :
----------
Enter User:/ as sysdba
create directory dump_dir as 'D:\Db_backup';
grant read,write on directory integration_logs to uat_all;
select * from dba_directories
/
EXPDP SYSTEM/SMCMANAGER@ORA10G DIRECTORY=DPUMP_DIR DUMPFILE=CH_DUMP LOGFILE=CH_DUMP SCHEMAS=CH_POS,CH_ADM,CH_COM
impdp scott/tiger@db10g tables=EMP,DEPT directory=TEST_DIR dumpfile=EMP_DEPT.dmp logfile=impdpEMP_DEPT.log
HRAO DATA PUMP :
----------------
sqlplus system/smcmanger@orcl
CREATE DIRECTORY nano AS 'e:\basix_cif\backup'; (Create manually)
GRANT READ,WRITE ON DIRECTORY nano TO nano_ops;
grant create any directory to system;
Data pump export command
-------------------------
C:\> expdp nano_ops/nano_ops@orcl directory = nano tables=nano_ops.bo501_regis_hdr
C:\> expdp nano_ops/nano_ops@orcl directory = nano dumpfile=opsnano.dmp logfile=opsnano.log schemas=nano_ops
C:\> expdp nano_ops/nano_ops@orcl directory = nano dumpfile=opsnano.dmp logfile=opsnano.log content=metadata_only schemas=nano_ops
-- without rows from single tables--
C:\> expdp nano_ops/nano_ops@orcl directory = nano dumpfile=opsnano.dmp logfile=opsnano.log schemas=nano_ops query = bo501_regis_hdr:'"where 2=1"'
-- without rows from all tables--
C:\> expdp nano_ops/nano_ops@orcl directory = nano dumpfile=opsnano.dmp logfile=opsnano.log schemas=nano_ops query = 'where 2=1'
To Export Full Database, give the following command
---------------------------------------------------
$expdp scott/tiger FULL=y DIRECTORY=data_pump_dir DUMPFILE=full.dmp
LOGFILE=myfullexp.log JOB_NAME=myfullJob
The above command will export the full database and it will create the dump file full.dmp in the directory on the server /u01/oracle/my_dump_dir
In some cases where the database is in terabytes, the above command will not be feasible since the dump file size will be larger than the operating system limit, and hence the export will fail. In this situation you can create multiple dump files by typing the following command
$expdp scott/tiger FULL=y DIRECTORY=data_pump_dir DUMPFILE=full%U.dmp
FILESIZE=5G LOGFILE=myfullexp.log JOB_NAME=myfullJob
This will create multiple dump files named full01.dmp, full02.dmp, full03.dmp and so on. The FILESIZE parameter specifies the maximum size of each dump file.
Example of Exporting a Schema
------------------------------
To export all the objects of SCOTT's schema you can run the following export data pump command.
$expdp scott/tiger DIRECTORY=data_pump_dir DUMPFILE=scott_schema.dmp
SCHEMAS=SCOTT
You can omit SCHEMAS since the default mode of Data Pump export is SCHEMAS only.
If you want to export objects of multiple schemas you can specify the following command
$expdp scott/tiger DIRECTORY=data_pump_dir DUMPFILE=scott_schema.dmp
SCHEMAS=SCOTT,HR,ALI
Exporting Individual Tables using Data Pump Export
--------------------------------------------------
You can use Data Pump Export utility to export individual tables. The following example shows the syntax to export tables
$expdp hr/hr DIRECTORY=dpump_dir1 DUMPFILE=tables.dmp
TABLES=employees,jobs,departments
Excluding and Including Objects during Export
---------------------------------------------
You can exclude objects while performing an export by using the EXCLUDE option of the Data Pump utility. For example, if you are exporting a schema and don't want to export tables whose names start with "A", you can type the following command
$expdp scott/tiger DIRECTORY=data_pump_dir DUMPFILE=scott_schema.dmp
SCHEMAS=SCOTT EXCLUDE=TABLE:"like 'A%'"
Then all tables in Scott's schema whose names start with "A" will not be exported.
Similarly you can also use the INCLUDE option to export only certain objects, like this
$expdp scott/tiger DIRECTORY=data_pump_dir DUMPFILE=scott_schema.dmp
SCHEMAS=SCOTT INCLUDE=TABLE:"like 'A%'"
This is the opposite of the EXCLUDE option, i.e. it will export only those tables of Scott's schema whose names start with "A"
Similarly you can also exclude INDEXES, CONSTRAINTS, GRANTS, USER, SCHEMA
Using Query to Filter Rows during Export
----------------------------------------
You can use the QUERY option to export only the required rows. For example, the following will export only those rows of the employees table whose salary is above 10000 and whose dept id is 10.
expdp hr/hr QUERY=emp:'"WHERE dept_id > 10 AND sal > 10000"'
NOLOGFILE=y DIRECTORY=dpump_dir1 DUMPFILE=exp1.dmp
Data pump import command
-------------------------
-- nano_ops -> source user (the user that was exported)
-- nano -> target user (the new user)
C:\> impdp system/smcmanager@orcl directory=test_imp dumpfile=opsnano.dmp logfile=opsnano1.log remap_schema=nano_ops:nano
-- reallocation of tablespaces (if you import the dump without 'transform', the new user's objects
-- keep the source user's tablespace attributes; the command below avoids needing those
-- extra tablespaces)
C:\> impdp system/smcmanager@orcl directory=test_imp dumpfile=opsnano.dmp logfile=opsnano1.log remap_schema=nano_ops:nano transform=segment_attributes:N
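-- an alternative sketch: remap_tablespace moves the imported objects into a different tablespace (the tablespace names below are illustrative)
C:\> impdp system/smcmanager@orcl directory=test_imp dumpfile=opsnano.dmp logfile=opsnano2.log remap_schema=nano_ops:nano remap_tablespace=nano_ops_data:nano_data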
==========
ALTER DATABASE BACKUP CONTROLFILE TO TRACE;
Create pfile from spfile;
MERGE Statement to Perform an Update/Insert
===========================================
MERGE INTO target t
USING source s
ON (t.product_id=s.product_id)
WHEN MATCHED THEN UPDATE SET
t.price=s.price,
t.discount=s.discount
WHEN NOT MATCHED THEN INSERT
(product_id, product_code, product_desc,
product_price, product_discount)
VALUES
(s.product_id, s.product_code, s.product_desc,
s.product_price, s.product_discount);
External tables :
=================
CREATE DIRECTORY ext_data_dir AS 'c:\oradata\ext_data';
GRANT READ, WRITE ON DIRECTORY ext_data_dir TO scott;
--Creating an External Table
CREATE TABLE sales_ext(
product_id NUMBER(6),
sale_date DATE,
store_id NUMBER(8),
quantity_sold NUMBER(8),
unit_cost NUMBER(10,2),
unit_price NUMBER(10,2))
ORGANIZATION EXTERNAL (
TYPE ORACLE_LOADER
DEFAULT DIRECTORY ext_data_dir
ACCESS PARAMETERS
(RECORDS DELIMITED BY NEWLINE
BADFILE log_file_dir:'sales.bad_xt'
LOGFILE log_file_dir:'sales.log_xt'
FIELDS TERMINATED BY "|" LDRTRIM
MISSING FIELD VALUES ARE NULL)
LOCATION ('sales.data'))
REJECT LIMIT UNLIMITED;
JOBS :
======
select
JOB,
LOG_USER,
SCHEMA_USER,
LAST_DATE,
THIS_DATE,
NEXT_DATE,
TOTAL_TIME,
BROKEN,
FAILURES,
INTERVAL,
WHAT
from user_jobs
/
Pfile & Spfile
===================================
-- file location: ORACLE_HOME\database on Windows, ORACLE_HOME/dbs on Unix
create pfile from spfile;
create spfile from pfile ;
startup pfile=c:\initORCL.ora
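-- to check whether the instance is running on an spfile (an empty value means a pfile was used):
select value from v$parameter where name = 'spfile';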
==============
cint temp table deletion
delete from bmsfcint
where BATCHNO=2637
/
delete from temp_bmsfcint;
==================
--CREATE SYNONYMS
SELECT 'CREATE SYNONYM '||TNAME||' FOR '||USER||'.'||TNAME||';'
FROM TAB
/
-- CREATE GRANTS
SELECT 'GRANT SELECT ON '||TNAME||' TO SANJESH ;'
FROM TAB
/
===================
Configuring smtp server ip
First of all we need to configure Reports to use your SMTP server to send emails. The SMTP server address is held in a configuration file called rwbuilder.conf. Follow the steps below:
1. Find this file (usually in <ORACLE_HOME>/reports/conf, or search for it using Windows search).
2. Open it for editing in your favourite editor.
3. Find an entry like 192.168.100.100 (the IP address in the file on your PC will be different to this one).
4. Change the IP address found in the above step to your SMTP server's name or IP address.
5. Save and close this file.
6. Close Oracle Reports if it is up and then restart it (so it can read the changed config file).
The above steps configure Reports to use your SMTP server.
----
-- Finance year rollback
delete from subac_mstob_dtl
where finyr_control=11;
delete from mainac_mstob_dtl
where finyr_control=11;
delete from weeks_mst
where FINYR_CONTROL =11;
DELETE FROM BF149_FINPERIOD_HDR
WHERE FINCTRL_NO=11;
delete from material_detail where serial='11';
delete from fn_year_mst
where fn_year='11';
update fn_year_mst
set this_year='Y'
where fn_year='10';
chwlan pwd : leo10
-- finding non numeric char in a field of varchar2
select * from temp_bmsfcint
--where buff_no=652169972
where length(replace(translate(buff_no,'1234567890','9999999999'),9,null))>0
/
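-- on 10g and later the same check can be written with a regular expression (a sketch against the same table):
select * from temp_bmsfcint
where not regexp_like(buff_no, '^[0-9]+$')
/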
-- reclaim tablespace
alter table "CH_POS"."POS_FINTRANS_DTL" enable row movement
alter table "CH_POS"."POS_FINTRANS_DTL" shrink space
-- Tomcat config for CDB check
-- Location of tns connection file
C:\Program Files\Apache Software Foundation\Tomcat 6.0\webapps\databureau\WEB-INF\classes\config
-- if the config is changed, restart the Apache server
-- Location of application deployed folder
C:\Program Files\Apache Software Foundation\Tomcat 6.0\webapps\
-- Local folder requires two folders
c:\request_files & c:\response_files
-- to check the database connection details
http://10.103.10.11:8585/databureau/checkconnection
-- to check that the Apache server is working
http://10.103.10.11:8585/
---------------------
WEBcahe keepalive :
---------------------
Webcache KeepAlive timeout is reached. (The default value for KeepAlive timeout is 5 seconds)
Solution
To implement the solution, please execute the following steps:
1. Log in to the Webcache administration console:
http://<host>:<admin port>/webcacheadmin
http://srvfijoracleapp:9400/webcacheadmin
2. Under Properties -> Network Timeouts
Edit the KeepAlive time out to a higher value in seconds, for example 65 (default value is 5 secs)
3. Restart the webcache
Tablespace shrinking
---------
The following example shrinks the locally managed temporary tablespace lmtemp1 to a size of 20M.
ALTER TABLESPACE lmtemp1 SHRINK SPACE KEEP 20M;
The following example shrinks the tempfile lmtemp02.dbf of the locally managed temporary tablespace lmtemp2. Because the KEEP clause is omitted, the database attempts to shrink the tempfile to the minimum possible size.
ALTER TABLESPACE lmtemp2 SHRINK TEMPFILE '/u02/oracle/data/lmtemp02.dbf';
----------
alter tablespace COURTS_DATA coalesce;
-----------
ALTER TABLE CH_POS.POS_PAYMENTS_HDR enable ROW movement;
ALTER TABLE CH_POS.POS_PAYMENTS_HDR shrink SPACE;
ALTER TABLE CH_POS.POS_PAYMENTS_HDR disable ROW movement;
ALTER TABLE CH_POS.POS_FINTRANS_DTL enable ROW movement;
ALTER TABLE CH_POS.POS_FINTRANS_DTL shrink SPACE;
ALTER TABLE CH_POS.POS_FINTRANS_DTL disable ROW movement;
starting report server manually
-------------------------------
rwserver server=server_name start -- not working
2.1.2 Starting, Stopping, and Restarting Reports Servers from the Oracle Process Manager and Notification Server
You can use the following command lines to start, stop, and restart Reports Server if it was configured through the Oracle Process Manager and Notification Server:
ORACLE_HOME/opmn/bin/opmnctl startproc ias-component=reports_server_name
ORACLE_HOME/opmn/bin/opmnctl startproc process-type=reports_server_name
ORACLE_HOME/opmn/bin/opmnctl stopproc ias-component=reports_server_name
ORACLE_HOME/opmn/bin/opmnctl restartproc ias-component=reports_server_name
The Reports Server name must match the name in the ias-component id in the opmn.xml file.
You can also query the status of the Oracle Process Manager and Notification Server, by using the following command:
ORACLE_HOME/opmn/bin/opmnctl status
To directly start the in-process server from a URL, enter the following from your Web browser:
http://sureshlaptop:7778/reports/rwservlet/startserver
If Reports Server is running as an in-process server through the Reports Servlet, issue the following URL:
http://sureshlaptop:7778/reports/rwservlet/stopserver
Startup
You can also start Reports Server as a standalone server on Windows using the following command:
rwserver server=server_name
Add the BATCH command line keyword to start up the server without displaying dialog boxes or messages.
rwserver server=server_name batch=yes
rwserver server=rep_localhost_FRHome1 batch=yes
shutdown
To shut down the server normally (that is, finish pending jobs and then stop):
rwserver server=server shutdown=normal authid=username/password
To shut down the server immediately (that is, stop without finishing pending jobs):
rwserver server=server shutdown=immediate authid=username/password
To shut down the server without displaying any related messages:
rwserver server=server shutdown=normal authid=username/password batch=yes
http://sureshlaptop:7778/forms/frmservlet?config=webutil
----------------------------
Submitting Report Using URL
----------------------------
http://ssderp:7778/reports/rwservlet?&server=batch_repserver&destype=mail&desformat=pdf&report=SUPPLIER_STOCK_B.RDF&userid=courts_all/courts_all@ora10g&BACKGROUND=YES&paramform=NO&FN_YR_CODE=12&P_YRSTDT=01-APR-11&P_FRDT=01-JAN-11&P_TODT=30-MAY-11&P_LY_FRDT=01-JAN-10&P_LY_TODT=30-MAY-10&REP_COMP_CODE=55&REP_MAIN_CODE=55&REP_SUP_CODE=MO21&subject=Supplier Report as on 30-MAY-11(MOTIBHAI CO)&desname=sureshp@sathguru.com&cc=pavanic@sathguru.com&from=pavanic@sathguru.com
Creating exp script & user creation script from dba_users
---------------------------------------------------------
select 'create user '||username||' identified by '||username||' default tablespace '||DEFAULT_TABLESPACE||' temporary tablespace '||TEMPORARY_TABLESPACE||' quota unlimited on '||DEFAULT_TABLESPACE|| ';'
from dba_users
where user_id>60
order by user_id
/
select 'exp system/smcmanager@ora10g file='||username||' owner='||username||' log='||username||'_e', CREATED
from dba_users
where user_id>60
order by user_id
/
Finding sql text from sql id:
-----------------------------
select * from v$sqltext
where sql_id='g8kf4h7vhp805'
order by piece
/
Temporary tablespace groups :
-----------------------------
New Data Dictionary View
Oracle 10g introduces a new data dictionary view, dba_tablespace_groups, for temporary tablespace groups. A temporary tablespace that has been assigned to a group appears in the select below; a temporary tablespace that is not in any group does not return a row.
-- to know tablespace groups and tablse spaces
select tablespace_name, group_name
from DBA_TABLESPACE_GROUPS;
-- Create a temporary tablespace and implicitly add it to a temporary tablespace group.
CREATE TEMPORARY TABLESPACE temp01
TEMPFILE '/u02/oradata/temp01.dbs' SIZE 500M
TABLESPACE GROUP tempgroup_a;
-- Create a temporary tablespace without assigning it to a temporary tablespace group.
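-- a minimal sketch (tempfile name illustrative): omitting the TABLESPACE GROUP clause leaves the tablespace ungrouped
CREATE TEMPORARY TABLESPACE temp02
TEMPFILE '/u02/oradata/temp02.dbs' SIZE 500M;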
-- Remove a temporary tablespace from a temporary tablespace group.
ALTER TABLESPACE temp04 TABLESPACE GROUP '';
-- add a temporary tablespace to a temporary tablespace group.
ALTER TABLESPACE temp03 TABLESPACE GROUP tempgroup_b;
-- Assign a user to a default temporary tablespace group.
ALTER USER scott TEMPORARY TABLESPACE tempgroup_A;
-----------------------------
ml
-----------------------------
sankaranand.b@dhanushinfotech.net
technicalshan
------------------------------
====================================================
Moving
====================================================
Relocate Indexes
This script is especially useful after you have performed an import with the FROMUSER TOUSER option, where the original tablespace name is different from the targeted TOUSER tablespace, causing all tables and indexes to be stored within their original owner's default tablespace.
Let's say you do an export for owner=PA where all tables are in the PAD tablespace and all indexes are in the PAX tablespace. Now you do an import into a different schema called XPA (FROMUSER=PA TOUSER=XPA); after the import into XPA all tables are going to be stored in PAD and indexes in PAX. But you would rather have all tables owned by XPA in the XPAD tablespace and all indexes owned by XPA in the XPAX tablespace.
Stored procedure p_rebuild_indexes_to_ts will achieve just that. p_rebuild_indexes_to_ts generates dynamic PL/SQL which rebuilds all the indexes belonging to a given user and moves them into a given tablespace.
idbasolutions.com Apr 2008
This script creates a PL/SQL procedure that generates dynamic sql which rebuilds
all indexes for a given OWNER and moves them into a different tablespace.
Do not run this procedure at peak time.
Disclaimer: Use on databases, especially production databases, is at the DBA's own risk. This procedure was tested on Oracle 9.2.0.4 running Oracle Financials 11.5.9.
Use this procedure as follows, e.g. from the SQL prompt: exec p_rebuild_indexes_to_ts('XPA','XPAX')
where XPA is the owner of the indexes to be moved into the XPAX tablespace. 05-Apr-2007
create or replace procedure
p_rebuild_indexes_to_ts (schema IN dba_indexes.owner%type,
ts_name IN dba_tablespaces.tablespace_name%type)
as
v_cursorid integer;
status integer;
cursor c_dba_indexes is
select owner,index_name
from dba_indexes i
where owner=upper(schema)
and STATUS='VALID'
and index_NAME not like 'SYS_IOT_OVER_%'
and not exists (select 1
from dba_constraints c
where CONSTRAINT_TYPE ='P'
and c.INDEX_NAME=i.index_NAME
and c.INDEX_OWNER=i.OWNER)
and not exists (select 1
from dba_lobs l
where l.INDEX_NAME=i.index_NAME
and l.OWNER=i.OWNER);
v_dba_indexes c_dba_indexes%rowtype;
begin
open c_dba_indexes;
v_cursorid:=dbms_sql.open_cursor;
fetch c_dba_indexes into v_dba_indexes;
if (c_dba_indexes%notfound) then
dbms_output.put_line('Owner '||
upper(schema)||' : '||
'No indexes were to be found for this schema.');
end if;
while ( c_dba_indexes%found ) loop
dbms_sql.parse(v_cursorid,'ALTER INDEX '||v_dba_indexes.owner||'.'||v_dba_indexes.index_name||' REBUILD TABLESPACE '||ts_name,dbms_sql.native);
status:=dbms_sql.execute(v_cursorid);
dbms_output.put_line('Index Rebuild: '||v_dba_indexes.owner||'.'||v_dba_indexes.index_name||' '||ts_name);
fetch c_dba_indexes into v_dba_indexes;
end loop;
close c_dba_indexes;
dbms_sql.close_cursor(v_cursorid);
exception
when others then
dbms_output.put_line('Error... ');
dbms_sql.close_cursor(v_cursorid);
raise;
end p_rebuild_indexes_to_ts;
http://www.idbasolutions.com/scripts/relocate-tables.html
http://www.idbasolutions.com/scripts/relocate-indexes.html
======================================
-- PGA estimation per user
======================================
set pages 999;
column pga_size format 999,999,999
select
1048576+a.value+b.value pga_size --2097152 2mb
from
v$parameter a,
v$parameter b
where
a.name = 'sort_area_size'
and
b.name = 'hash_area_size'
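-- actual PGA usage at the instance level can be cross-checked in v$pgastat (a sketch):
select name, round(value/1024/1024) mb
from v$pgastat
where name in ('aggregate PGA target parameter','total PGA allocated','total PGA inuse');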
===========================================
--DB EM droping and creation
===========================================
go to E:\oracle\product\10.2.0\db_1\bin (emca) using RUN->CMD
Drop the configuration files and repository with the following at the command prompt:
emca -deconfig dbcontrol db -repos drop
then run this to create afresh:
emca -config dbcontrol db -repos create
(alternatively, emca -config dbcontrol db -repos recreate does the drop and recreate in one step)
Ignore the email and SMTP server questions as they're optional and can be entered when you have the EM running.
The port for EM console may have changed so any shortcuts you had will have to be updated.
Check the logs for any issues:
'{ORACLE_HOME}\cfgtoollogs\emca\<ORACLE_SID>\'
-- In case dropping fails (the repository drop hangs), follow these steps to manually remove the repository.
DECLARE
CURSOR c1 IS
SELECT owner, synonym_name name
FROM dba_synonyms
WHERE table_owner = 'SYSMAN';
BEGIN
FOR r1 IN c1
LOOP
IF r1.owner = 'PUBLIC' THEN
EXECUTE IMMEDIATE 'DROP PUBLIC SYNONYM '||r1.name;
ELSE
EXECUTE IMMEDIATE 'DROP SYNONYM '||r1.owner||'.'||r1.name;
END IF;
END LOOP;
END;
/
DROP ROLE mgmt_user;
drop user mgmt_view cascade;
drop user sysman cascade;
==========================================
-- standby database
==========================================
--Create an Oracle service for the standby database on the standby server with
-- command (startmode is manual):
oradim -new -sid dbvisitp -intpwd MYSECRETPASSWORD -startmode M
-- Where dbvisitp is the name of the database.
On the primary server:
Make a standby controlfile on the production database using the following command:
ALTER DATABASE CREATE STANDBY CONTROLFILE AS
'C:\oracle\orabase\admin\dbvisitp\create\STANDBY_DBVISIT_control01.ctl' REUSE;
-- starting the standby db
sqlplus /nolog
SQL> connect / as sysdba ;
SQL> startup nomount
SQL> alter database mount standby database ;
SQL> recover standby database ;
Specify log: {<RET>=suggested | filename | AUTO | CANCEL}
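-- alternatively, logs can be applied automatically with managed recovery (cancel it before a switchover):
SQL> alter database recover managed standby database disconnect from session;
SQL> alter database recover managed standby database cancel;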
-- switching standby to primary
If the standby was open read-only then restart the standby
SQL> SHUTDOWN IMMEDIATE
SQL> STARTUP MOUNT
Switchover the standby database to a primary
SQL> ALTER DATABASE COMMIT TO SWITCHOVER TO PRIMARY;
Open the new primary database:
SQL> ALTER DATABASE OPEN;
Correct any tempfile mismatch
=============================================
--Hierarchical tree query
=============================================
select empno,substr(lpad(' ',3*(level-1),' ')||ename,1,50) ename,mgr, level,sys_CONNECT_BY_PATH(ename, '/') "Path"
from emp
connect by prior empno = mgr
start with mgr is null
ORDER SIBLINGS BY empno
/
=========================
-- Case statement
=========================
SELECT last_name, commission_pct,
(CASE commission_pct
WHEN 0.1 THEN 'Low'
WHEN 0.15 THEN 'Average'
WHEN 0.2 THEN 'High'
ELSE 'N/A'
END ) Commission
FROM employees ORDER BY last_name;
A more complex version is the Searched CASE expression where a comparison expression is used to find a match:
SELECT last_name, job_id, salary,
(CASE
WHEN job_id LIKE 'SA_MAN' AND salary < 12000 THEN '10%'
WHEN job_id LIKE 'SA_MAN' AND salary >= 12000 THEN '15%'
WHEN job_id LIKE 'IT_PROG' AND salary < 9000 THEN '8%'
WHEN job_id LIKE 'IT_PROG' AND salary >= 9000 THEN '12%'
ELSE 'NOT APPLICABLE'
END ) Raise
FROM employees;
======================
-- rollup
======================
-- displays group wise sub totals
select job,deptno,sum(sal)
from emp
GROUP BY ROLLUP (job,deptno)
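-- CUBE works the same way but additionally produces cross-category subtotals (same table, for comparison)
select job,deptno,sum(sal)
from emp
GROUP BY CUBE (job,deptno)
/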
=======================
--MERGE Statement Enhancements in Oracle Database 10g
========================
Optional Clauses
The MATCHED and NOT MATCHED clauses are now optional making all of the following examples valid.
-- Both clauses present.
MERGE INTO test1 a
USING all_objects b
ON (a.object_id = b.object_id)
WHEN MATCHED THEN
UPDATE SET a.status = b.status
WHEN NOT MATCHED THEN
INSERT (object_id, status)
VALUES (b.object_id, b.status);
-- No matched clause, insert only.
MERGE INTO test1 a
USING all_objects b
ON (a.object_id = b.object_id)
WHEN NOT MATCHED THEN
INSERT (object_id, status)
VALUES (b.object_id, b.status);
-- No not-matched clause, update only.
MERGE INTO test1 a
USING all_objects b
ON (a.object_id = b.object_id)
WHEN MATCHED THEN
UPDATE SET a.status = b.status;
Conditional Operations
Conditional inserts and updates are now possible by using a WHERE clause on these statements.
-- Both clauses present.
MERGE INTO test1 a
USING all_objects b
ON (a.object_id = b.object_id)
WHEN MATCHED THEN
UPDATE SET a.status = b.status
WHERE b.status != 'VALID'
WHEN NOT MATCHED THEN
INSERT (object_id, status)
VALUES (b.object_id, b.status)
WHERE b.status != 'VALID';
-- No matched clause, insert only.
MERGE INTO test1 a
USING all_objects b
ON (a.object_id = b.object_id)
WHEN NOT MATCHED THEN
INSERT (object_id, status)
VALUES (b.object_id, b.status)
WHERE b.status != 'VALID';
-- No not-matched clause, update only.
MERGE INTO test1 a
USING all_objects b
ON (a.object_id = b.object_id)
WHEN MATCHED THEN
UPDATE SET a.status = b.status
WHERE b.status != 'VALID';
DELETE Clause
An optional DELETE WHERE clause can be used to clean up after a merge operation. Only those rows which match both the ON clause and the DELETE WHERE clause are deleted.
MERGE INTO test1 a
USING all_objects b
ON (a.object_id = b.object_id)
WHEN MATCHED THEN
UPDATE SET a.status = b.status
WHERE b.status != 'VALID'
DELETE WHERE (b.status = 'VALID');
====================================
-- Generating JOBS script
====================================
declare
what user_jobs.what%TYPE;
interval user_jobs.interval%TYPE;
CURSOR c IS
select what, interval
from user_jobs;
BEGIN
dbms_output.put_line('DECLARE');
dbms_output.put_line('job user_jobs.job%TYPE;');
dbms_output.put_line('BEGIN');
OPEN c;
LOOP
fetch c into what, interval;
exit when c%NOTFOUND;
dbms_output.put_line('dbms_job.submit(');
dbms_output.put_line('job => job,');
dbms_output.put('what => ''');
dbms_output.put(replace(what, '''', ''''''));
dbms_output.put_line(''',');
dbms_output.put_line('next_date => sysdate,');
dbms_output.put('interval => ''');
dbms_output.put(replace(interval, '''', ''''''));
dbms_output.put_line(''');');
dbms_output.put_line('');
END LOOP;
CLOSE c;
dbms_output.put_line('commit;');
dbms_output.put_line('END;');
dbms_output.put_line('/');
END;
ADDM :
======
show parameter statistics_level -- must be set to TYPICAL (or ALL) for ADDM to be enabled
alter system set statistics_level=basic;
alter system set statistics_level=typical;
alter system set statistics_level=all;
SELECT statistics_name,
session_status,
system_status,
activation_level,
session_settable
FROM v$statistics_level
ORDER BY statistics_name;
DBA_ADVISOR_TASKS
This view provides basic information about existing tasks, such as the task Id, task name, and when created.
select * from DBA_ADVISOR_LOG
This view contains the current task information, such as status, progress, error messages, and execution times.
select * from DBA_ADVISOR_RECOMMENDATIONS
This view displays the results of completed diagnostic tasks with recommendations for the problems identified in each run. The recommendations should be looked at in the order of the RANK column, as this reflects the magnitude of the problem for the recommendation. The BENEFIT column gives the benefit to the system you can expect after the recommendation is carried out.
select * from DBA_ADVISOR_FINDINGS
This view displays all the findings and symptoms that the diagnostic monitor encountered along with the specific recommendation.
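-- to generate an ADDM report for a snapshot range, the packaged script can be run from SQL*Plus (it prompts for the begin/end snapshots):
@?/rdbms/admin/addmrpt.sql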
---- ALTER INDEX ------------------
select 'ALTER INDEX '||index_name||' REBUILD TABLESPACE CH_POS_DATA;' from user_indexes
/
ALTER INDEX acct_uk REBUILD TABLESPACE CH_POS_DATA;
--------------- DYNAMIC SCRIPTS FOR ANALYSE AND COMPILE ------------------
Select 'EXEC DBMS_STATS.GATHER_SCHEMA_STATS('||''''||USERNAME||''''||', cascade=>TRUE);'
FROM dba_users
where created >='01-jan-2012'
order by created
/
Select 'exec dbms_utility.compile_schema('||''''||USERNAME||''''||');'
FROM dba_users
where created >='01-jan-2012'
order by created
/
select 'expdp system/VCorclmanager1@vcorcl directory=backup01 dumpfile='||username||'.dmp logfile='||username||
'.log schemas='||username||' '
from dba_users
where created >='01-jan-2012'
order by created
/
--------------------- 11G setting - Avoid password case sensitive ----------------------------
ALTER SYSTEM SET SEC_CASE_SENSITIVE_LOGON=FALSE SCOPE=BOTH
------------------- ANLYZE TABLE ------------------------------
Exec dbms_stats.gather_table_stats(ownname => 'CH_POS', tabname => 'POS_INVOICE_HDR' , estimate_percent => 100, method_opt => 'for all indexed columns size auto')
/
Exec dbms_stats.gather_table_stats(ownname => 'CH_POS', tabname => 'POS_INV_ITEM_DTL' , estimate_percent => 100, method_opt => 'for all indexed columns size auto')
/
Select 'Create synonym '||user||'.'||synonym_name||' for '||Replace(table_owner, 'CH_', 'MC_')||'.'||table_name||';'
From user_synonyms
/
Select 'Drop synonym '||user||'.'||synonym_name||';'
From user_synonyms
/
------------ To check the number of records at particular time ------
select * from sup_mast as of timestamp to_timestamp('17/09/2014','dd/mm/yyyy hh24:mi')
where sup_code like 'T%'
/
---------------- EXPDP OR IMPDP ERROR -----------------
Resolving ORA-31634: job already exists; ORA-31664: unable to construct unique job name when defaulted
The database which I support has a daily export backup, and one day the logs reported the errors
ORA-31634: job already exists
ORA-31664: unable to construct unique job name when defaulted
Reason for the Error:
This error normally occurs when you do not specify a unique JOB_NAME for the DataPump job and for some reason that JOB_NAME already exists in the database, or else if you are running many jobs at the same time (more than 99 jobs) and DataPump cannot create a new job.
Resolution:
To fix this issue you have to cleanup the orphaned datapump jobs.
1) Determine the datapump jobs which exists in the database and the status as NOT RUNNING.
SELECT owner_name, job_name, operation, job_mode,
state, attached_sessions
FROM dba_datapump_jobs
where owner_name='&SCHEMA' and state='NOT RUNNING'
ORDER BY 1,2;
2) Determine the related master tables
SELECT o.status, o.object_id, o.object_type,
o.owner||'.'||object_name "OWNER.OBJECT",o.created,j.state
FROM dba_objects o, dba_datapump_jobs j
WHERE o.owner=j.owner_name AND o.object_name=j.job_name
and o.owner='&SCHEMA' and j.state='NOT RUNNING' ORDER BY 4,2;
3) Drop the tables generated in step 2.
Drop table <tablename>;
This will clear the master tables created by orphaned jobs.
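A sketch, in the spirit of the dynamic scripts above, to generate the drop statements for the orphaned master tables (review the output before running it):
SELECT 'DROP TABLE '||o.owner||'.'||o.object_name||' PURGE;'
FROM dba_objects o, dba_datapump_jobs j
WHERE o.owner=j.owner_name AND o.object_name=j.job_name
AND j.state='NOT RUNNING';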