I am having an unusual issue. Whenever I run this query, my database session disconnects automatically or this error occurs: ORA-12805: parallel query server died unexpectedly.
-- Creates a deduplicated member extract for group 00172414: keeps the single
-- most recent eligibility row per (first name, last name, gender, DOB),
-- choosing the latest EFFDATE and then the latest non-null TERMDATE.
BEGIN
EXECUTE IMMEDIATE
'CREATE TABLE ZZZ_CLAIMS_00172414_MEMID PARALLEL NOLOGGING AS
SELECT MEMID, ENRID, MEMFIRSTNAME, MEMLASTNAME,GENDER,DOB
FROM
(
SELECT /*+parallel(a,4)*/
MEMID,
ENRID,
MEMFIRSTNAME,
MEMLASTNAME,
GENDER,
DOB,
LVLID2,
ROW_NUMBER() OVER(
PARTITION BY
MEMFIRSTNAME, MEMLASTNAME,GENDER,DOB ORDER BY EFFDATE DESC,TERMDATE DESC NULLS LAST) RN
FROM vh_eligibilities a
WHERE LVLID2 = ''00172414''
)
WHERE RN =1
'; --248969 rows expected in the created table
-- NOTE(review): WHEN OTHERS THEN NULL swallows every error -- including the
-- ORA-12805 described above -- so failures are completely silent. Prefer
-- logging the error or re-raising (RAISE) instead.
EXCEPTION WHEN OTHERS THEN NULL;
END;
But when I remove the WHERE condition, i.e. WHERE LVLID2 = ''00172414'', the table is created successfully. I consulted my DBA, and he suggested altering the session before creating the table.
-- DBA-suggested workaround: disable the optimizer's "predicate move-around"
-- transformation for this session only. "_pred_move_around" is a hidden
-- (underscore) parameter; changing such parameters should normally be done
-- only under guidance from Oracle Support.
BEGIN
EXECUTE IMMEDIATE 'ALTER SESSION SET "_pred_move_around" = FALSE';
-- NOTE(review): WHEN OTHERS THEN NULL hides any failure of the ALTER SESSION,
-- so you would not know if the workaround was actually applied.
EXCEPTION WHEN OTHERS THEN NULL;
END;
After altering the session, the table was created successfully. I used this approach for the past two weeks; then today I had to create a similar table, and it was created without the ALTER SESSION statement.
Then I realised I was using the same query with one small difference:
-- Same CTAS as above, except that the outer SELECT also projects LVLID2.
-- Referencing the filtered column in the outer query block changed the plan
-- enough that the statement succeeded without the "_pred_move_around" workaround.
-- Fixed: the markdown emphasis markers (**LVLID2**) that had leaked into the
-- statement were invalid SQL tokens; the annotation is now a proper comment.
BEGIN
EXECUTE IMMEDIATE
'CREATE TABLE ZZZ_CLAIMS_00172414_MEMID PARALLEL NOLOGGING AS
SELECT MEMID, ENRID, MEMFIRSTNAME, MEMLASTNAME,GENDER,DOB,LVLID2 -- LVLID2 is the difference
FROM
(
SELECT /*+parallel(a,4)*/
MEMID,
ENRID,
MEMFIRSTNAME,
MEMLASTNAME,
GENDER,
DOB,
LVLID2,
ROW_NUMBER() OVER(
PARTITION BY
MEMFIRSTNAME, MEMLASTNAME,GENDER,DOB ORDER BY EFFDATE DESC,TERMDATE DESC NULLS LAST) RN
FROM vh_eligibilities a
WHERE LVLID2 = ''00172414''
)
WHERE RN =1
'; --248969
-- NOTE(review): WHEN OTHERS THEN NULL swallows all errors; prefer logging or RAISE.
EXCEPTION WHEN OTHERS THEN NULL;
END;
Can anyone tell me why I was getting this error, and why the table was created successfully after altering the session?
And why, without altering the session, just adding LVLID2 to the SELECT list of the CREATE TABLE statement allowed the table to be created?
Any help would be highly appreciated.
Related
I need to complete multiple inserts in Oracle.
-- Two independent single-row inserts; each is a complete, valid statement.
-- NOTE(review): ORA-00933 when running these together typically means the
-- client sent both statements as a single command -- run them one at a time
-- or as a script, as the surrounding text describes.
insert into table_name (ID, code, date_t)
values (schema_name.SEQ$table_name.NEXTVAL, '232323232323' , to_date('2020-09-01','YYYY-MM-DD'));
insert into table_name (ID, code, date_t)
values (schema_name.SEQ$table_name.NEXTVAL, '242424242424' , to_date('2020-09-01','YYYY-MM-DD'));
I can't run them when trying to use a simple execute, because it gives me an error: ORA-00933: SQL command not properly ended.
But when I use "execute as script" it works fine.
The main problem is that I can't explain to people how to run that script in the console.
Maybe there are other options for running multiple inserts in Oracle?
Use INSERT ... SELECT:
-- A single INSERT ... SELECT: the row set is defined once in a CTE and the
-- sequence is drawn once for each selected row.
INSERT INTO table_name (ID, code, date_t)
WITH new_rows (code, date_t) AS (
    SELECT '232323232323', DATE '2020-09-01' FROM DUAL
    UNION ALL
    SELECT '242424242424', DATE '2020-09-01' FROM DUAL
)
SELECT SEQ$table_name.NEXTVAL, code, date_t
FROM new_rows;
You could also use INSERT ALL; however, you have to jump through some hoops to get it to work with a sequence so I would suggest not using this option:
-- Multitable insert: the driving SELECT produces rows rn = 1 and rn = 2, and
-- each WHEN branch fires for exactly one of them.
-- NOTE(review): per the Oracle SQL reference, in a multitable insert NEXTVAL
-- is incremented once per row returned by the subquery, regardless of which
-- branch fires -- which is why the second branch can safely use CURRVAL.
-- Verify against your Oracle version before relying on this.
INSERT ALL
WHEN rn = 1 THEN
INTO table_name (id, code, date_t)
VALUES (SEQ$table_name.NEXTVAL, '252525252525', DATE '2020-09-01')
WHEN rn = 2 THEN
INTO table_name (id, code, date_t)
VALUES (SEQ$table_name.CURRVAL, '262626262626', DATE '2020-09-01')
-- Row generator: produces rn = 1 and rn = 2.
SELECT LEVEL AS rn
FROM DUAL
CONNECT BY LEVEL <= 2;
I have a stored procedure which should return several results - but it returns only one row. I think it's the last row in result set.
I am not sure, but I think the problem is in this line of code:
select chi.id bulk collect into v_numbers from dual;
and that this line somehow overrides all previous results (there is several of them for each loop). How to insert into v_numbers without overriding previous results? I know that it's also wrong to insert only one row, but I haven't found solution to insert several rows from chi.
-- Question code (intentionally shown with its bug): collects descendant
-- service ids per category and should return them all through P_RESULT,
-- but only ever returns the last one.
PROCEDURE GET_ATTRIBUTES(
P_AUTH_USE_ID IN NUMBER,
P_CATEGORY_ID IN NUMBER,
P_VERSION_ID IN NUMBER,
P_RESULT OUT TYPES.CURSOR_TYPE
) IS
v_numbers sys.odcinumberlist := null;
BEGIN
-- Outer loop: every service directly in P_CATEGORY_ID or in any descendant category.
FOR item IN
(SELECT ID FROM INV_SRV WHERE SRV_CATEGORY_ID IN
(
SELECT id
FROM inv_srv_category
START WITH parent_category_id = P_CATEGORY_ID
CONNECT BY PRIOR id = parent_category_id
) OR SRV_CATEGORY_ID = P_CATEGORY_ID)
LOOP
-- Inner loop: every descendant service of the current service.
for chi in (select s.id
from inv_srv s
start with s.parent_srv_id = item.id
connect by prior s.id = s.parent_srv_id
)
loop
-- BUG: SELECT ... BULK COLLECT INTO replaces the entire collection on every
-- iteration, so each pass discards all previously collected ids. To append
-- instead, use v_numbers.extend and assign v_numbers(v_numbers.last), or
-- replace both loops with a single BULK COLLECT query.
select chi.id bulk collect into v_numbers from dual; --> here I should insert all rows from that loop, but I don't know how
end loop;
END LOOP;
-- Because of the overwrite above, the cursor sees only the last collected id.
OPEN P_RESULT FOR SELECT t.column_value from table(v_numbers) t; --> only one row is returned
END;
Use BULK COLLECT and FORALL for bulk inserts and better performance. The FORALL statement will allow the DML to be run for each row in the collection without requiring a context switch each time, thus improving the overall performance.
-- Collects every descendant service id for a category hierarchy in one
-- set-based query, bulk-inserts them, and returns them via the OUT cursor.
-- Parameters:
--   p_auth_use_id / p_version_id - unused here; kept for interface compatibility.
--   p_category_id - root category whose (direct and descendant) services are wanted.
--   p_result      - ref cursor over the collected ids.
-- Fixed relative to the original answer:
--   * FORALL insert used "VALUES v_numbers ( i )", which is invalid for a
--     scalar collection element; it must be "VALUES (v_numbers(i))".
--   * p_result (OUT) was declared but never opened.
CREATE OR REPLACE PROCEDURE get_attributes (
    p_auth_use_id IN NUMBER,
    p_category_id IN NUMBER,
    p_version_id  IN NUMBER,
    p_result      OUT types.cursor_type
) IS
    v_numbers sys.odcinumberlist := NULL;
BEGIN
    -- One query replaces the nested cursor loops: gather all descendant
    -- service ids in a single round trip.
    SELECT s.id
    BULK COLLECT INTO v_numbers
    FROM inv_srv s
    START WITH s.parent_srv_id IN (
            SELECT id
            FROM inv_srv
            WHERE srv_category_id IN (
                    SELECT id
                    FROM inv_srv_category
                    START WITH parent_category_id = p_category_id
                    CONNECT BY PRIOR id = parent_category_id
                  )
               OR srv_category_id = p_category_id)
    CONNECT BY PRIOR s.id = s.parent_srv_id;

    -- FORALL sends the whole batch with a single context switch.
    FORALL i IN 1 .. v_numbers.COUNT
        INSERT INTO your_table VALUES (v_numbers(i));

    -- Hand the collected ids back through the OUT ref cursor.
    OPEN p_result FOR SELECT t.column_value FROM TABLE(v_numbers) t;
END;
Every time the loop executes v_numbers will be re populated again and again so, either 1) use v_numbers.extend; v_numbers(v_numbers.last) = "Your Value" or write everything in a single bulk collect.
-- Single BULK COLLECT that gathers every descendant service id at once,
-- replacing the per-iteration SELECT inside the nested loops.
SELECT s.id
BULK COLLECT INTO v_numbers
FROM inv_srv s
START WITH s.parent_srv_id IN (
        SELECT id
        FROM inv_srv
        WHERE srv_category_id IN (
                SELECT id
                FROM inv_srv_category
                START WITH parent_category_id = P_CATEGORY_ID
                CONNECT BY PRIOR id = parent_category_id
              )
           OR srv_category_id = P_CATEGORY_ID
      )
CONNECT BY PRIOR s.id = s.parent_srv_id
This may be considered an improper use of PL/SQL loops (often associated with catastrophic performance) in a situation where a pure SQL solution exists.
Why don't you simply define the cursor as follows:
-- Equivalent set-based cursor: returns the same rows as the nested loops,
-- opened directly into the OUT parameter.
-- Fixed: the original hard-coded the category id as the literal 1; it now uses
-- the procedure's P_CATEGORY_ID parameter, consistent with its use of P_RESULT.
OPEN P_RESULT FOR
select s.id
from inv_srv s
start with s.parent_srv_id in
(SELECT ID FROM INV_SRV WHERE SRV_CATEGORY_ID IN
(SELECT id
FROM inv_srv_category
START WITH parent_category_id = P_CATEGORY_ID
CONNECT BY PRIOR id = parent_category_id
) OR SRV_CATEGORY_ID = P_CATEGORY_ID)
connect by prior s.id = s.parent_srv_id
;
The query is constructed from your outer and inner loops so that it returns the same result.
The transformation may not be trivial in the general case and must be carefully tested, but the performance gain can be substantial.
I'm currently migrating data from legacy system to the current system.
I have this INSERT statement inside a stored procedure.
-- Question code: assigns each legacy row a per-PRIMARY_ID sequence number.
-- Why it fails: the whole SELECT is evaluated against TABLE_1 as of the start
-- of the statement (statement-level read consistency), so the correlated
-- COUNT(*) never sees the rows this same INSERT is adding -- every duplicate
-- LEGACY_ID receives the same SEQUENCE_ID.
INSERT INTO TABLE_1
(PRIMARY_ID, SEQUENCE_ID, DESCR)
SELECT LEGACY_ID PRIMARY_ID
, (SELECT COUNT(*) + 1
FROM TABLE_1 T1
WHERE T1.PRIMARY_ID = L1.LEGACY_ID) SEQUENCE_ID
, L1.DESCR
FROM LEGACY_TABLE L1;
However, whenever I have multiple rows with the same LEGACY_ID in LEGACY_TABLE, the SEQUENCE_ID doesn't increment.
Why is this so? I can't seem to find documentation on how an INSERT INTO ... SELECT statement works behind the scenes. I am guessing that it first evaluates the entire SELECT and only then inserts all the rows, which is why the COUNT(*) value doesn't increment?
What other workarounds can I do? I cannot use a SEQUENCE because the SEQUENCE_ID must be based on the number of rows already present for each PRIMARY_ID. Together they form the primary key.
Thanks in advance.
Yes, the SELECT is executed first, and only then does the INSERT happen.
The simple PL/SQL block below is a straightforward approach, though not an efficient one.
-- Row-by-row migration with a periodic commit. Because each row is inserted
-- before the next count is taken, the correlated count DOES see earlier
-- inserts, so SEQUENCE_ID increments correctly (unlike the single
-- INSERT ... SELECT in the question).
-- Fixed relative to the original answer (both were compile errors):
--   * V_COMMIT_LIMIT and V_ITEM_COUNT were declared without a datatype.
--   * The SELECT ... INTO statement was missing its terminating semicolon.
DECLARE
    V_SEQUENCE_ID  NUMBER;
    V_COMMIT_LIMIT CONSTANT PLS_INTEGER := 20000;  -- commit every N rows to bound undo
    V_ITEM_COUNT   PLS_INTEGER := 0;
BEGIN
    FOR REC IN (SELECT LEGACY_ID, DESCR FROM LEGACY_TABLE)
    LOOP
        V_SEQUENCE_ID := 0;
        -- Next sequence value = current row count for this id + 1.
        SELECT COUNT(*) + 1
        INTO V_SEQUENCE_ID
        FROM TABLE_1 T1
        WHERE T1.PRIMARY_ID = REC.LEGACY_ID;
        INSERT INTO TABLE_1
            (PRIMARY_ID, SEQUENCE_ID, DESCR)
        VALUES
            (REC.LEGACY_ID, V_SEQUENCE_ID, REC.DESCR);
        V_ITEM_COUNT := V_ITEM_COUNT + 1;
        IF V_ITEM_COUNT >= V_COMMIT_LIMIT
        THEN
            COMMIT;
            V_ITEM_COUNT := 0;
        END IF;
    END LOOP;
    COMMIT;
END;
/
EDIT: Using CTE:
-- Set-based alternative: pre-compute the existing row count per PRIMARY_ID,
-- then number the incoming legacy rows on top of it with ROW_NUMBER().
-- Fixed relative to the original answer:
--   * In Oracle the WITH clause belongs to the subquery, not before INSERT.
--   * COUNT(*) over the LEFT JOIN returned 1 even when no TABLE_1 row matched
--     (off by one for new ids) and multiplied counts when LEGACY_TABLE
--     contained duplicate ids; counting directly from TABLE_1 avoids both.
--   * Added the explicit INSERT column list and an ANSI join.
INSERT INTO TABLE_1 (PRIMARY_ID, SEQUENCE_ID, DESCR)
WITH table_1_prim_ct (primary_id, sequence_id) AS
(
    -- Existing rows per id; ids not yet in TABLE_1 simply have no row here.
    SELECT t1.primary_id, COUNT(*)
    FROM table_1 t1
    GROUP BY t1.primary_id
)
SELECT l1.legacy_id,
       NVL(cte.sequence_id, 0)
         + ROW_NUMBER() OVER (PARTITION BY l1.legacy_id ORDER BY NULL),
       l1.descr
FROM legacy_table l1
LEFT OUTER JOIN table_1_prim_ct cte
  ON l1.legacy_id = cte.primary_id;
PS: With your Millions of Rows, this is going to create a temp table
of same size, during execution. Do Explain Plan before actual execution.
Wondering if someone can help point me in the right direction with this challenge, or tell me I'm crazy for trying this via sql. If sql would be too challenging, are there any free or inexpensive tools that would help me automate this?
I'm working on testing some data between an old and new Oracle database. What I'd like to do is be able to dynamically generate this query for all tables in a schema.
-- One direction of the comparison: rows present in the new table but absent
-- from the copy on the old server. Run the reverse MINUS as well to find rows
-- that exist only on the old side.
-- NOTE(review): Oracle database links are referenced with @, e.g.
-- Table_1@OLD_SERVER -- the # here looks like a transcription slip; confirm.
Select Column_1, Column_2 FROM Table_1
MINUS
Select Column_1, Column_2 FROM Table_1#"OLD_SERVER"
One catch is that the columns selected for each table should only be columns that do not begin with 'ETL' since those are expected to change with the migration.
To keep this dynamic, can I use the all_tab_columns to loop through each table?
So for a simplified example, let's say this query returned the following results, and you can expect the results from ALL_TAB_COLUMNS to be identical between the OLD and NEW database:
select TABLE_NAME, COLUMN_NAME from ALL_TAB_COLUMNS where owner = 'OWNER1'
TABLE_NAME, COLUMN_NAME
-----------------------
TABLE1, COLUMN_1
TABLE1, COLUMN_2
TABLE1, ETLCOLUMN_3
TABLE2, COLUMN_A
TABLE2, COLUMN_B
TABLE2, ETLCOLUMN_C
How would I write a query that would run a minus between the same table and columns (that do not begin with ETL) on the old and new database, and output the results along with the table name and the date ran, and then loop through to the next table and do the same thing?
First - check out this:
http://docs.oracle.com/cd/E11882_01/server.112/e41481/spa_upgrade.htm#RATUG210
Second - you would like to write a query that issues a query - The problem is that in user_tab_columns each column is a row.
for doing that I would recommend you reading this : http://www.dba-oracle.com/t_converting_rows_columns.htm
The source table for you is USER_TAB_COLUMNS, and when running the query you can add a where that says "where column_name not like 'ETL%' etc.
After that - the query would look something like:
-- Sketch (not runnable as-is): generates one comparison SELECT per table from
-- the dictionary; "listagg....." stands for the LISTAGG pivot from the linked
-- article, which builds the projected column list.
-- NOTE(review): LISTAGG raises ORA-01489 when the result exceeds 4000 bytes,
-- so very wide tables may need a CLOB-based aggregation instead.
select 'select '
|| listagg..... (from the link) || 'from table name' sql
from user_tab_columns
where column_name not like 'ETL%'
and table_name = 'table name'
group by table_name
And by the way — you're not crazy. Before changing a system you need to be able to verify that the upgrade will succeed, and this is the only way to do it.
btw - if you'll describe in more depth the system and the upgrade - I'm sure the community will be able to help you find ways to test it in more depth, and will point you out to things to test.
Testing only the output is not enough in many cases....
GOOD LUCK!
This testing can automated with SQL and PL/SQL. You're not crazy for doing this. Comparison systems like this can be incredibly helpful for testing changes to complex systems. It's not as good as automated unit tests but it can significantly enhance the typical database testing.
The code below is a fully working example. But in the real world there are many gotchas that could easily take several days to resolve. For example, dealing with CLOBs, large tables, timestamps and sequence-based values, etc.
Sample schemas and data differences
-- Test fixture: two schemas whose tables pairwise differ in data (table1,
-- table2), differ only in an excluded ETL% column (table3), or differ in DDL
-- (table4) -- one case per behavior the comparison procedure must handle.
create user schema1 identified by schema1;
create user schema2 identified by schema2;
alter user schema1 quota unlimited on users;
alter user schema2 quota unlimited on users;
--Data in 1, not 2.
create table schema1.table1 as select 1 a, 1 b from dual;
create table schema2.table1(a number, b number);
--Data in 2, not 1.
create table schema1.table2(a number, b number);
create table schema2.table2 as select 1 a, 1 b from dual;
--Same data in both, excluding unused column.
create table schema1.table3 as select 1 a, 1 b, 'asdf' ETL_c from dual;
create table schema2.table3 as select 1 a, 1 b, 'fdsa' ETL_c from dual;
--Table DDL difference.
create table schema1.table4(a number);
create table schema2.table4(b number);
--Privileges can be tricky.
-- Replace <your schema> with the schema that will own print_differences; the
-- grant must be direct (not via a role) because the procedure is invoker's rights.
grant select on dba_tab_columns to <your schema>;
Procedure to print differences script
-- Prints (via DBMS_OUTPUT) a comparison script for every table owned by
-- p_old_schema: for each table, a MINUS-in-both-directions query over all
-- columns except those starting with 'ETL', plus the difference row count.
-- Parameters:
--   p_old_schema, p_new_schema - schema names as stored in the dictionary
--                                (typically upper case).
-- Requires: a direct SELECT grant on dba_tab_columns; authid current_user
-- means the procedure runs with the invoker's privileges (roles do not apply).
create or replace procedure print_differences(
p_old_schema in varchar2,
p_new_schema in varchar2) authid current_user
is
v_table_index number := 0;  -- running table counter for the printed headers
v_row_count number;         -- number of differing rows for the current table
begin
--Print header information.
dbms_output.put_line('--Comparison between '||p_old_schema||' and '||
p_new_schema||', at '||to_char(sysdate, 'YYYY-MM-DD HH24:MI')||'.'||chr(10));
--Create a SQL statement to return the differences for each table.
-- The column list is taken from the OLD schema only; if the NEW schema's
-- columns differ, the generated SQL fails and is reported by the handler below.
for differences in
(
--Return number of differences and SQL statements to view them.
select
'
with old_table as (select '||column_list||' from '||p_old_schema||'.'||table_name||')
, new_table as (select '||column_list||' from '||p_new_schema||'.'||table_name||')
select * from
(
select ''OLD'' old_or_new, old_table.* from old_table minus
select ''OLD'' old_or_new, new_table.* from new_table
)
union all
select * from
(
select ''NEW'' old_or_new, new_table.* from new_table minus
select ''NEW'' old_or_new, old_table.* from old_table
)
' difference_sql, table_name
from
(
select table_name
,listagg(column_name, ',') within group (order by column_id) column_list
from dba_tab_columns
where owner = p_old_schema
and column_name not like 'ETL%'
group by table_name
) column_lists
) loop
begin
--Print table information:
v_table_index := v_table_index+1;
dbms_output.put_line(chr(10)||'--'||lpad(v_table_index, 3, '0')||': '||differences.table_name);
--Count differences.
execute immediate 'select count(*) from ('||differences.difference_sql||')' into v_row_count;
--Print SQL statements to investigate differences.
if v_row_count = 0 then
dbms_output.put_line('--No differences.');
else
dbms_output.put_line('--Differences: '||v_row_count);
dbms_output.put_line(differences.difference_sql||';');
end if;
-- WHEN OTHERS is deliberate here: a DDL mismatch makes the generated SQL
-- fail; report the failing statement and continue with the next table.
exception when others then
dbms_output.put_line('/*Error with this statement, possible DDL difference: '
||differences.difference_sql||dbms_utility.format_error_stack||
dbms_utility.format_error_backtrace||'*/');
end;
end loop;
end;
/
Running the procedure
-- Example invocation. Schema names must match dictionary case (upper case here).
begin
print_differences('SCHEMA1', 'SCHEMA2');
end;
/
Sample output
The procedure does not output the actual differences. If there are differences, it outputs a script that will display the differences. With a decent IDE this will be a much better way to view the data, and it also helps to further analyze the differences.
--Comparison between SCHEMA1 and SCHEMA2, at 2014-03-28 23:44.
--001: TABLE1
--Differences: 1
with old_table as (select A,B from SCHEMA1.TABLE1)
, new_table as (select A,B from SCHEMA2.TABLE1)
select * from
(
select 'OLD' old_or_new, old_table.* from old_table minus
select 'OLD' old_or_new, new_table.* from new_table
)
union all
select * from
(
select 'NEW' old_or_new, new_table.* from new_table minus
select 'NEW' old_or_new, old_table.* from old_table
)
;
--002: TABLE2
--Differences: 1
with old_table as (select A,B from SCHEMA1.TABLE2)
, new_table as (select A,B from SCHEMA2.TABLE2)
select * from
(
select 'OLD' old_or_new, old_table.* from old_table minus
select 'OLD' old_or_new, new_table.* from new_table
)
union all
select * from
(
select 'NEW' old_or_new, new_table.* from new_table minus
select 'NEW' old_or_new, old_table.* from old_table
)
;
--003: TABLE3
--No differences.
--004: TABLE4
/*Error with this statement, possible DDL difference:
with old_table as (select A from SCHEMA1.TABLE4)
, new_table as (select A from SCHEMA2.TABLE4)
select * from
(
select 'OLD' old_or_new, old_table.* from old_table minus
select 'OLD' old_or_new, new_table.* from new_table
)
union all
select * from
(
select 'NEW' old_or_new, new_table.* from new_table minus
select 'NEW' old_or_new, old_table.* from old_table
)
ORA-06575: Package or function A is in an invalid state
ORA-06512: at "JHELLER.PRINT_DIFFERENCES", line 48
*/
I need to update a query so that it checks that a duplicate entry does not exist before insertion. In MySQL I can just use INSERT IGNORE so that if a duplicate record is found it just skips the insert, but I can't seem to find an equivalent option for Oracle. Any suggestions?
If you're on 11g you can use the hint IGNORE_ROW_ON_DUPKEY_INDEX:
SQL> create table my_table(a number, constraint my_table_pk primary key (a));
Table created.
SQL> insert /*+ ignore_row_on_dupkey_index(my_table, my_table_pk) */
2 into my_table
3 select 1 from dual
4 union all
5 select 1 from dual;
1 row created.
Check out the MERGE statement. This should do what you want - it's the WHEN NOT MATCHED clause that will do this.
Due to Oracle's lack of support for a true VALUES() clause, the syntax for a single record with fixed values is pretty clumsy:
-- Insert-if-absent via MERGE: only the WHEN NOT MATCHED branch is defined, so
-- an existing row with the same key is simply left untouched.
-- Fixed: the ON clause referenced "t.the_pke_value", a typo for the
-- "the_pk_value" alias defined in the USING subquery.
MERGE INTO your_table yt
USING (
SELECT 42 as the_pk_value,
'some_value' as some_column
FROM dual
) t on (yt.pk = t.the_pk_value)
WHEN NOT MATCHED THEN
INSERT (pk, the_column)
VALUES (t.the_pk_value, t.some_column);
A different approach (if you are e.g. doing bulk loading from a different table) is to use the "Error logging" facility of Oracle. The statement would look like this:
-- DML error logging: rows that would raise an error (e.g. a duplicate key)
-- are diverted into the ERRLOG table instead of failing the whole statement.
-- The ERRLOG table must be created beforehand with DBMS_ERRLOG.CREATE_ERROR_LOG.
INSERT INTO your_table (col1, col2, col3)
SELECT c1, c2, c3
FROM staging_table
LOG ERRORS INTO errlog ('some comment') REJECT LIMIT UNLIMITED;
See the manual for details
I don't think there is but to save time you can attempt the insert and ignore the inevitable error:
-- Attempt the insert and ignore only duplicate-key failures: DUP_VAL_ON_INDEX
-- is raised solely by unique/primary key violations, so every other error
-- still propagates to the caller.
begin
insert into table_a( col1, col2, col3 )
values ( 1, 2, 3 );
exception when dup_val_on_index then
null;
end;
/
This will only ignore exceptions raised specifically by duplicate primary key or unique key constraints; everything else will be raised as normal.
If you don't want to do this then you have to select from the table first, which isn't really that efficient.
Another variant
-- Insert every person/group combination not already present in my_table.
-- NOTE(review): "from person p, group g" is a comma (Cartesian) join --
-- apparently intentional here to generate all combinations -- and GROUP is a
-- reserved word in Oracle, so that table name would need quoting in practice.
-- Also not concurrency-safe by itself; a unique constraint is still needed.
Insert into my_table (student_id, group_id)
select distinct p.studentid, g.groupid
from person p, group g
where NOT EXISTS (select 1
from my_table a
where a.student_id = p.studentid
and a.group_id = g.groupid)
or you could do
-- Same idea using MINUS: generate all combinations, then subtract the ones
-- that already exist. (The same reserved-word caveat about GROUP applies.)
Insert into my_table (student_id, group_id)
select distinct p.studentid, g.groupid
from person p, group g
MINUS
select student_id, group_id
from my_table
A simple solution
-- Copy rows from t2 that are not yet in t1, matching on id.
-- Fixed: the original "select from t2" had no select list and would raise
-- ORA-00936 (missing expression). This assumes t1 and t2 have compatible
-- column lists; prefer naming the columns explicitly in real code.
insert into t1
select t2.* from t2
where not exists
(select 1 from t1 where t1.id= t2.id)
This one isn't mine, but came in really handy when using sqlloader:
create a view that points to your table:
-- Pass-through view over the target table, used as the SQL*Loader target so
-- that an INSTEAD OF trigger can intercept each inserted row.
CREATE OR REPLACE VIEW test_view
AS SELECT * FROM test_tab
create the trigger:
-- INSTEAD OF trigger: each row inserted into the view is redirected to the
-- base table, and duplicate-key rows are silently discarded via
-- DUP_VAL_ON_INDEX. Note this is per-row exception handling, so loads with
-- many duplicates pay a per-row cost.
CREATE OR REPLACE TRIGGER test_trig
INSTEAD OF INSERT ON test_view
FOR EACH ROW
BEGIN
INSERT INTO test_tab VALUES
(:NEW.id, :NEW.name);
EXCEPTION
WHEN DUP_VAL_ON_INDEX THEN NULL;
END test_trig;
and in the ctl file, insert into the view instead:
-- SQL*Loader control file: load into the VIEW (not the table) so the
-- INSTEAD OF trigger can absorb duplicates. ERRORS=0 makes the load abort on
-- the first error the trigger does not swallow.
OPTIONS(ERRORS=0)
LOAD DATA
INFILE 'file_with_duplicates.csv'
INTO TABLE test_view
FIELDS TERMINATED BY ','
(id, field1)
How about simply adding a unique index on the fields you need to check for duplicates? That saves the extra read.
yet another "where not exists"-variant using dual...
-- Insert only if the name is not already present; the sequence value is
-- consumed only for rows actually produced by the SELECT.
-- NOTE(review): not safe under concurrent sessions on its own -- a unique
-- constraint on unique_name is still required to guarantee no duplicates.
insert into t1(id, unique_name)
select t1_seq.nextval, 'Franz-Xaver' from dual
where not exists (select 1 from t1 where unique_name = 'Franz-Xaver');