Insert using Union/Union All fails to insert all rows - oracle

I am trying to insert the result of a UNION into a table. While I cannot reproduce the original query here, since it lives in a restricted environment, its structure looks similar to this:
WITH temp(X,Y,Z) AS(
SELECT....
)
SELECT X,Y,Z from TEMP --PART A
UNION
SELECT 'A','B','C' FROM DUAL;--PART B
Part A of the query returns about 1000 records, while Part B is just a single record.
When I wrap this whole query inside a procedure, so that the result of the SELECT is inserted into a table, only Part B (a single record) gets inserted into the target table. Here is what it looks like wrapped inside the procedure:
CREATE OR REPLACE PROCEDURE PROC_INSERT
AS
BEGIN
INSERT INTO TABLENAME(COLUMN1,COLUMN2....)
WITH temp(X,Y,Z) AS(
SELECT....
)
SELECT X,Y,Z from TEMP
UNION
SELECT 'A','B','C' FROM DUAL;
COMMIT;
END;
I tried using both UNION and UNION ALL, but somehow I don't get Part A of the result set inserted into the table.
What could be the possible reasons for this? I tried to replicate it, but failed.

You can do something like this:
DECLARE
CURSOR CUR IS SELECT X, Y, Z FROM OG_TABLE UNION SELECT 'A', 'B', 'C' FROM DUAL;
BEGIN
FOR REC IN CUR
LOOP
INSERT INTO NEWTABLE (COL1, COL2, COL3) VALUES (REC.X, REC.Y, REC.Z);
END LOOP;
END;
Alternatively, optimized code using BULK COLLECT and FORALL:
DECLARE
  LIM PLS_INTEGER := 100;
  CURSOR CUR IS
    SELECT X, Y, Z FROM OG_TABLE
    UNION
    SELECT 'A', 'B', 'C' FROM DUAL;
  TYPE OG_REC IS RECORD (X OG_TABLE.X%TYPE, Y OG_TABLE.Y%TYPE, Z OG_TABLE.Z%TYPE);
  TYPE OGT IS TABLE OF OG_REC;
  OG_TAB OGT;
BEGIN
  OPEN CUR;
  LOOP
    FETCH CUR BULK COLLECT INTO OG_TAB LIMIT LIM;
    BEGIN
      FORALL i IN 1..OG_TAB.COUNT SAVE EXCEPTIONS
        INSERT INTO NEWTABLE (COL1, COL2, COL3)
        VALUES (OG_TAB(i).X, OG_TAB(i).Y, OG_TAB(i).Z);
    EXCEPTION
      WHEN OTHERS THEN
        NULL;  -- swallows ORA-24381; inspect SQL%BULK_EXCEPTIONS here if the failed rows matter
    END;
    EXIT WHEN CUR%NOTFOUND;
  END LOOP;
  COMMIT;
  CLOSE CUR;
END;
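If the aim of the row-by-row workarounds is mainly to find out which rows fail, another option is DML error logging: keep the original set-based INSERT ... SELECT and let Oracle capture the rejected rows in a log table. A minimal sketch, with table and column names following the answer above rather than the real (restricted) schema:

-- One-time setup: create the default ERR$_ log table for the target (standard DBMS_ERRLOG package).
BEGIN
  DBMS_ERRLOG.CREATE_ERROR_LOG(dml_table_name => 'TABLENAME');
END;
/

-- The original set-based insert, allowed to skip rows that fail.
INSERT INTO TABLENAME (COLUMN1, COLUMN2, COLUMN3)
SELECT X, Y, Z FROM OG_TABLE
UNION
SELECT 'A', 'B', 'C' FROM DUAL
LOG ERRORS INTO ERR$_TABLENAME ('union insert') REJECT LIMIT UNLIMITED;

Each rejected row then shows up in ERR$_TABLENAME together with the Oracle error, which is usually the quickest way to see why Part A is not being inserted.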

Related

How to assign multiple values to a variable using into clause in oracle query?

I have an oracle stored procedure like this
CREATE OR REPLACE PROCEDURE DEMO (V_IN CHAR, V_OUT VARCHAR2)
IS
BEGIN
FOR ITEM IN LOOP (SELECT DISTINCT (NAME)
FROM TABLE1 INTO V_OUT
WHERE ID = V_IN
LOOP
--CODE TO PRINT V_OUT
END LOOP;
END;
Now how should I create that V_OUT variable so that it can hold all the values coming from the query? I'm doing this in Oracle 12c.
You don't put the INTO clause in the cursor query. And even if you did, you have it in the wrong place in the SQL statement.
You deal with that when you fetch a row from the query:
CREATE OR REPLACE PROCEDURE DEMO (V_IN CHAR, V_OUT OUT VARCHAR2)
IS
BEGIN
  FOR ITEM IN (SELECT DISTINCT NAME
               FROM TABLE1
               WHERE ID = V_IN)
  LOOP
    dbms_output.put_line(item.name);
    v_out := item.name;
  END LOOP;
END;
But then the problem is that we just keep overwriting the previous value, so when your procedure actually exits, v_out only holds the last value assigned. If you truly need a collection of values, you need to declare your output variable as a ref cursor and adjust the code accordingly. I've never actually worked with them, so perhaps someone else will chime in.
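For what it's worth, a minimal ref-cursor sketch (names follow the question; the caller fetches rows from the cursor instead of receiving a single value):

CREATE OR REPLACE PROCEDURE DEMO (V_IN IN CHAR, V_OUT OUT SYS_REFCURSOR)
IS
BEGIN
  -- Hand the caller an open cursor over all matching names.
  OPEN V_OUT FOR
    SELECT DISTINCT NAME
    FROM TABLE1
    WHERE ID = V_IN;
END;
/

The caller then loops with FETCH v_out INTO some_variable (or BULK COLLECTs into a collection) and closes the cursor when done.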
You can work with collections, like this:
--declare the package type
CREATE OR REPLACE PACKAGE PKG_TYPES AS
TYPE LIST_VARCHAR IS TABLE OF VARCHAR2(2000);
END;
--create the proc that will assemble the value list
CREATE OR REPLACE PROCEDURE DEMO ( V_IN IN varchar2, V_OUT IN OUT PKG_TYPES.LIST_VARCHAR) IS
BEGIN
FOR ITEM IN (
SELECT DISTINCT (NAME) name
FROM (SELECT 'X' ID, 'A' name FROM dual
UNION
SELECT 'X' ID, 'b' name FROM dual
UNION
SELECT 'y' ID, 'c' name FROM dual
) TABLE1
WHERE ID= V_IN
)
LOOP
V_OUT.EXTEND;
V_OUT(V_OUT.LAST) := item.name;
--CODE TO PRINT V_OUT
END LOOP;
END;
--use the list. I separated this step but it can be in the demo proc as well
DECLARE
names PKG_TYPES.LIST_VARCHAR := PKG_TYPES.LIST_VARCHAR();
BEGIN
demo('X',names) ;
FOR i IN names.first..names.last LOOP
Dbms_Output.put_line(names(i));
END LOOP;
END;
You will also have to handle the case where the cursor returns no rows (no matching ID): the collection stays empty, names.FIRST is NULL, and the FOR loop over names.first..names.last raises an error.
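A simple guard for that, as a sketch on top of the calling block above:

IF names.COUNT > 0 THEN
  FOR i IN names.FIRST .. names.LAST LOOP
    Dbms_Output.put_line(names(i));
  END LOOP;
ELSE
  Dbms_Output.put_line('No names found for that ID');
END IF;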
If you need a collection variable, you can use a nested table and BULK COLLECT like below.
To be able to return the collection from a procedure, you will need to declare the nested table type in a package or at DB schema level (see the sketch after the block below).
declare
type test_type is table of varchar2(2000);
test_collection test_type;
begin
select distinct(name) bulk collect into test_collection
from (
select 1 id, 'AAA' name from dual
union all
select 1 id, 'BBB' name from dual
union all
select 1 id, 'AAA' name from dual
union all
select 2 id, 'CCC' name from dual
)
where id = 1;
for i in test_collection.first..test_collection.last loop
dbms_output.put_line(test_collection(i));
end loop;
end;
/
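A sketch of the schema-level variant mentioned above (the type name name_list_t is made up for illustration; table1, id and name follow the question):

-- Schema-level collection type, visible to SQL and to any caller.
CREATE OR REPLACE TYPE name_list_t AS TABLE OF VARCHAR2(2000);
/

CREATE OR REPLACE PROCEDURE get_names (p_id IN VARCHAR2, p_names OUT name_list_t)
IS
BEGIN
  SELECT DISTINCT name
  BULK COLLECT INTO p_names
  FROM table1
  WHERE id = p_id;
END;
/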
If you just need a string of concatenated values, you can use LISTAGG to create it like below:
declare
test_str varchar2(4000);
begin
select listagg(name, ', ') within group(order by 1)
into test_str
from (
select distinct name
from (
select 1 id, 'AAA' name from dual
union all
select 1 id, 'BBB' name from dual
union all
select 1 id, 'AAA' name from dual
union all
select 2 id, 'CCC' name from dual
)
where id = 1
);
dbms_output.put_line(test_str);
end;
/
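One caveat on the LISTAGG approach: the result has to fit in the target string (VARCHAR2(4000) here), otherwise you get ORA-01489. On 12.2 and later, as I recall, LISTAGG can be told to truncate instead of raising; a sketch using the same sample data:

declare
  test_str varchar2(4000);
begin
  select listagg(name, ', ' on overflow truncate) within group(order by name)
    into test_str
    from (select distinct name
            from (select 1 id, 'AAA' name from dual union all
                  select 1 id, 'BBB' name from dual)
           where id = 1);
  dbms_output.put_line(test_str);
end;
/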

Oracle Loop Rule 4809

I need to write a procedure that will insert thousands of rows into a table and use the auto-generated id from those rows in other inserts.
I used a FOR loop in which I save the sequence id in a variable and then use it in my inserts.
declare
first_id integer;
BEGIN
FOR texts in (select distinct text from table_texts )
LOOP
first_id := SEQ_IDS_OBJECTID.NEXTVAL;
INSERT INTO table_1(id,some_fields)
VALUES (first_id, 'blablabla');
insert into table_2 (id,text_field)
VALUES (first_id, texts.text);
END LOOP;
commit;
END;
I think that this is not the ideal way to achieve what I need. Also, when I enter the code in TOAD, I get the following warning:
Rule 4809 (A loop that contains DML statements should be refactored to use BULK COLLECT and FORALL)
Is there a better way to do it?
EDIT:
The above code was simplified, but I think I have to show more of it to explain the case:
declare
first_id integer;
second_id integer;
BEGIN
FOR texts in (select distinct text1 , text2 from mdf )
LOOP
first_id := XAKTA.SEQ_IDS_OBJECTID.NEXTVAL;
select id_1 into second_id from table_3 where field_1 =texts.text1 ;
INSERT INTO table_1(id_1,id_2,some_fields)
VALUES (first_id ,second_id ,'blablabla');
insert into table_2 (id,text1,text2)
VALUES (first_id, texts.text1,texts.text2);
END LOOP;
commit;
END;
You can use FORALL to insert batches of items from your cursor:
DECLARE
TYPE texts_tab IS TABLE OF table_texts.text%TYPE;
TYPE ids_tab IS TABLE OF table_2.id%TYPE;
p_texts texts_tab;
p_ids ids_tab;
CURSOR c IS
SELECT DISTINCT text FROM table_texts;
BEGIN
OPEN c;
LOOP
FETCH c BULK COLLECT INTO p_texts LIMIT 100;
FORALL i IN 1 .. p_texts.COUNT
INSERT INTO table_2 ( id, text_field )
VALUES ( SEQ_IDS_OBJECTID.NEXTVAL, p_texts(i) )
RETURNING id BULK COLLECT INTO p_ids;
FORALL i IN 1 .. p_ids.COUNT
INSERT INTO table_1( id, some_fields )
VALUES ( p_ids(i), 'blablabla' );
EXIT WHEN c%NOTFOUND;
END LOOP;
CLOSE c;
COMMIT;
END;
/
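The table_3 lookup added in the edit can usually be folded into the cursor so the whole thing stays batched. A sketch under the assumption that field_1 identifies exactly one id_1 (as the SELECT ... INTO in the question implies):

DECLARE
  TYPE text1_tab  IS TABLE OF mdf.text1%TYPE;
  TYPE text2_tab  IS TABLE OF mdf.text2%TYPE;
  TYPE old_id_tab IS TABLE OF table_3.id_1%TYPE;
  TYPE new_id_tab IS TABLE OF table_2.id%TYPE;
  p_text1  text1_tab;
  p_text2  text2_tab;
  p_second old_id_tab;
  p_first  new_id_tab;
  -- Join the source rows to table_3 once, instead of a SELECT INTO per row.
  CURSOR c IS
    SELECT DISTINCT m.text1, m.text2, t3.id_1
    FROM mdf m
    JOIN table_3 t3 ON t3.field_1 = m.text1;
BEGIN
  OPEN c;
  LOOP
    FETCH c BULK COLLECT INTO p_text1, p_text2, p_second LIMIT 100;
    FORALL i IN 1 .. p_text1.COUNT
      INSERT INTO table_2 ( id, text1, text2 )
      VALUES ( XAKTA.SEQ_IDS_OBJECTID.NEXTVAL, p_text1(i), p_text2(i) )
      RETURNING id BULK COLLECT INTO p_first;
    -- p_first(i) lines up with p_second(i) because RETURNING preserves FORALL order.
    FORALL i IN 1 .. p_first.COUNT
      INSERT INTO table_1 ( id_1, id_2, some_fields )
      VALUES ( p_first(i), p_second(i), 'blablabla' );
    EXIT WHEN c%NOTFOUND;
  END LOOP;
  CLOSE c;
  COMMIT;
END;
/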

Data not inserting to destination table

I have the following block of PL/SQL code in Oracle:
DECLARE TAB VARCHAR(100);
COL VARCHAR(100);
CURSOR C_COLS IS
select DISTINCT table_name, column_name
from all_tab_columns
where OWNER = 'MyDB' AND DATA_TYPE LIKE '%VARCHAR%';
BEGIN
OPEN C_COLS;
LOOP
FETCH C_COLS INTO TAB, COL;
EXIT WHEN C_COLS%notfound;
INSERT INTO TargetTable (TABLE_NAME, COLUMN_NAME, COLUMN_VALUE)
SELECT DISTINCT TAB,
COL,
(SELECT COL FROM TAB)
FROM TAB
WHERE REGEXP_LIKE(COL, '([ABCDEFGHIJKLMNOPQRSTUVWXYZ])\d\d\d\d\d\d([ABCDEFGHIJKLMNOPQRSTUVWXYZ])', 'ix');
END LOOP;
CLOSE C_COLS;
END;
The idea is to determine which tables in my rather large database contain a certain pattern of data, and where.
So I want three columns in the result: TableName, ColumnName, Value of ColumnName.
The above runs but inserts no data, and I can't understand why. The query in the cursor returns results, and if I hard-code the table and column names into a simple SELECT statement containing my regex, I get results. I just want one result set that contains the thousands of rows I expect.
Could it be the (SELECT COL FROM TAB) I'm using to dynamically find the column_value? I wasn't sure if I could express it this way.
If you want to select columns dynamically you may wish to try dynamic SQL.
DECLARE
w_sql VARCHAR2(32767);
BEGIN
DBMS_OUTPUT.enable(32767);
FOR s_cols IN (
select DISTINCT
table_name
, column_name
from all_tab_columns
where owner = 'MyDB'
AND data_type LIKE '%VARCHAR%'
)
LOOP
w_sql := q'!
INSERT
INTO TargetTable (TABLE_NAME, COLUMN_NAME, COLUMN_VALUE)
SELECT DISTINCT
':TAB'
, ':COL'
, :COL
FROM :TAB
WHERE REGEXP_LIKE(:COL, '([ABCDEFGHIJKLMNOPQRSTUVWXYZ])\d\d\d\d\d\d([ABCDEFGHIJKLMNOPQRSTUVWXYZ])', 'ix')
!';
w_sql := REPLACE(w_sql, ':TAB', s_cols.table_name);
w_sql := REPLACE(w_sql, ':COL', s_cols.column_name);
EXECUTE IMMEDIATE w_sql;
END LOOP;
COMMIT;
EXCEPTION
WHEN OTHERS THEN
DBMS_OUTPUT.put_line('Error for SQL :'|| w_sql ||'; error is :'|| SQLERRM);
END;

Bulk Collect Twice over same nested table

Is there any way that, after the second bulk collect, the data from the first bulk collect does not get overwritten? I don't want to iterate in a loop.
DECLARE
TYPE abc IS RECORD (p_id part.p_id%TYPE);
TYPE abc_nt
IS
TABLE OF abc
INDEX BY BINARY_INTEGER;
v_abc_nt abc_nt;
BEGIN
SELECT p_id
BULK COLLECT
INTO v_abc_nt
FROM part
WHERE p_id IN ('E1', 'E2');
SELECT p_id
BULK COLLECT
INTO v_abc_nt
FROM part
WHERE p_id IN ('E3', 'E4');
FOR i IN v_abc_nt.FIRST .. v_abc_nt.LAST
LOOP
DBMS_OUTPUT.put_line (
'p_id is ' || v_abc_nt (i).p_id
);
END LOOP;
END;
OUTPUT:
p_id is E3
p_id is E4
Note: E1 and E2 are present in the part table.
You can't simply add the data to the collection, no.
You can, however, do a BULK COLLECT into a separate collection and then combine the collections, assuming you really just need/want a nested table rather than an associative array...
DECLARE
TYPE abc IS RECORD (p_id part.p_id%TYPE);
TYPE abc_nt
IS
TABLE OF abc;
v_abc_nt abc_nt;
v_abc_nt2 abc_nt;
BEGIN
SELECT p_id
BULK COLLECT
INTO v_abc_nt
FROM part
WHERE p_id IN ('E1', 'E2');
SELECT p_id
BULK COLLECT
INTO v_abc_nt2
FROM part
WHERE p_id IN ('E3', 'E4');
v_abc_nt := v_abc_nt MULTISET UNION v_abc_nt2;
FOR i IN v_abc_nt.FIRST .. v_abc_nt.LAST
LOOP
DBMS_OUTPUT.put_line (
'p_id is ' || v_abc_nt (i).p_id
);
END LOOP;
END;
If you really want to use an associative array, you would need to write some code because there is no way for Oracle to know automatically how to remap the associations of one array when you combine it with another associative array that has some of the same keys.
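For completeness, a sketch of that remapping, appending the second array after the first (it assumes both associative arrays are densely indexed from 1, which is what BULK COLLECT produces):

DECLARE
  TYPE abc IS RECORD (p_id part.p_id%TYPE);
  TYPE abc_aa IS TABLE OF abc INDEX BY BINARY_INTEGER;
  v_first  abc_aa;
  v_second abc_aa;
BEGIN
  SELECT p_id BULK COLLECT INTO v_first
  FROM part WHERE p_id IN ('E1', 'E2');

  SELECT p_id BULK COLLECT INTO v_second
  FROM part WHERE p_id IN ('E3', 'E4');

  -- Re-key the second array so its elements land after the first one's last index.
  FOR i IN 1 .. v_second.COUNT LOOP
    v_first(v_first.COUNT + 1) := v_second(i);
  END LOOP;

  FOR i IN 1 .. v_first.COUNT LOOP
    DBMS_OUTPUT.put_line('p_id is ' || v_first(i).p_id);
  END LOOP;
END;
/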
You can write it like this. A bad example first (the second BULK COLLECT simply overwrites the first collection):
declare
type t_numb is record(
numb number);
type t_numb_list is table of t_numb;
v_numb_list t_numb_list;
begin
with q as
(select 1 a from dual union select 2 from dual union select 3 from dual)
select q.a bulk collect into v_numb_list from q;
with w as
(select 4 a from dual union select 5 from dual union select 6 from dual)
select w.a bulk collect into v_numb_list from w;
for r in 1 .. v_numb_list.count loop
dbms_output.put_line(v_numb_list(r).numb);
end loop;
end;
and this works as intended:
declare
type t_numb is record(
numb number);
type t_numb_list is table of t_numb;
v_numb_list t_numb_list := t_numb_list();
v_numb t_numb;
begin
for q in (select 1 a
from dual
union
select 2
from dual
union
select 3
from dual) loop
v_numb.numb := q.a;
v_numb_list.extend;
v_numb_list(v_numb_list.count) := v_numb;
end loop;
for w in (select 4 a
from dual
union
select 5
from dual
union
select 6
from dual) loop
v_numb.numb := w.a;
v_numb_list.extend;
v_numb_list(v_numb_list.count) := v_numb;
end loop;
for r in 1 .. v_numb_list.count loop
dbms_output.put_line(v_numb_list(r).numb);
end loop;
end;

refactoring large cursor queries by splitting into multiple cursors

Another PL/SQL refactoring question!
I have several cursors that are of the general simplified form:
cursor_1 is
with X as (select col1, col2 from TAB where col1 = '1'),
Y as (select col1, col2 from TAB where col2 = '3'),
/*main select*/
select count(X.col1), ...
from X inner join Y on...
group by rollup (X.col1, ...
cursor_2 is
with X as (select col1, col2 from TAB where col1 = '7' and col2 = '9' and col3 = 'TEST'),
Y as (select col1, col2 from TAB where col3 = '6'),
/*main select*/
select count(X.col1), ...
from X inner join Y on...
group by rollup (X.col1, ...
cursor_2 is
with X as (select col1, col2 from TAB where col1 IS NULL ),
Y as (select col1, col2 from TAB where col2 IS NOT NULL ),
/*main select*/
select count(X.col1), ...
from X inner join Y on...
group by rollup (X.col1, ...
...
begin
for r in cursor_1 loop
print_report_results(r);
end loop;
for r in cursor_2 loop
print_report_results(r);
end loop;
...
end;
Basically, all of these cursors (there's more than 3) are the same summary/reporting queries. The difference is in the factored subqueries. There are always 2 factored subqueries, "X" and "Y", and they always select the same columns to feed into the main reporting query.
The problem is that the main reporting query is VERY large, about 70 lines. This itself isn't so bad, but it was copy-pasted for ALL of the reporting queries (I think there's over a dozen).
Since the only difference is in the factored subqueries (and they all return the same columns, it's really just a difference in the tables they select from and their conditions) I was hoping to find a way to refactor all this so that there is ONE query for the giant report and smaller ones for the various factored subqueries so that when changes are made to the way the report is done, I only have to do it in one place, not a dozen. Not to mention a much easier-to-navigate (and read) file!
I just don't know how to properly refactor something like this. I was thinking pipelined functions? I'm not sure they're appropriate for this though, or if there's a simpler way...
On the other hand, I also wonder if performance would be significantly worse by splitting out the reporting query. Performance (speed) is an issue for this system. I'd rather not introduce changes for developer convenience if it adds significant execution time.
I guess what I'd ultimately like is something that looks sort of like this (I'm just not sure how to do this so that it will actually compile):
cursor main_report_cursor (in_X, in_Y) is
with X as (select * from in_X),
Y as (select * from in_Y)
/*main select*/
select count(X.col1), ...
from X inner join Y on...
group by rollup (X.col1, ...
cursor x_1 is
select col1, col2 from TAB where col1 = '1';
cursor y_1 is
select col1, col2 from TAB where col2 = '3'
...
begin
for r in main_report_cursor(x_1,y_1) loop
print_report_results(r);
end loop;
for r in main_report_cursor(x_2,y_2) loop
print_report_results(r);
end loop;
...
(Using Oracle 10g)
Use a pipelined function. For example:
drop table my_tab;
create table my_tab
(
col1 number,
col2 varchar2(10),
col3 char(1)
);
insert into my_tab values (1, 'One', 'X');
insert into my_tab values (1, 'One', 'Y');
insert into my_tab values (2, 'Two', 'X');
insert into my_tab values (2, 'Two', 'Y');
insert into my_tab values (3, 'Three', 'X');
insert into my_tab values (4, 'Four', 'Y');
commit;
-- define types
create or replace package refcur_pkg is
--type people_tab is table of people%rowtype;
type my_subquery_tab is table of my_tab%rowtype;
end refcur_pkg;
Create the pipelined function:
-- create pipelined function
create or replace function get_tab_data(p_cur_num in number, p_cur_type in char)
return REFCUR_PKG.my_subquery_tab pipelined
IS
v_ret REFCUR_PKG.my_subquery_tab;
begin
if (p_cur_num = 1) then
if (upper(p_cur_type) = 'X') then
for rec in (select * from my_tab where col1=1 and col3='X')
loop
pipe row(rec);
end loop;
elsif (upper(p_cur_type) = 'Y') then
for rec in (select * from my_tab where col1=1 and col3='Y')
loop
pipe row(rec);
end loop;
else
return;
end if;
elsif (p_cur_num = 2) then
if (upper(p_cur_type) = 'X') then
for rec in (select * from my_tab where col1=2 and col3='X')
loop
pipe row(rec);
end loop;
elsif (upper(p_cur_type) = 'Y') then
for rec in (select * from my_tab where col1=2 and col3='Y')
loop
pipe row(rec);
end loop;
else
return;
end if;
end if;
return;
end;
MAIN procedure example
-- main procedure/usage
declare
cursor sel_cur1 is
with X as (select * from table(get_tab_data(1, 'x'))),
Y as (select * from table(get_tab_data(1, 'y')))
select X.col1, Y.col2 from X,Y where X.col1 = Y.col1;
begin
for rec in sel_cur1
loop
dbms_output.put_line(rec.col1 || ',' || rec.col2);
end loop;
end;
All of your various subqueries are reduced to a call to a single pipelined function, which determines the rows to return.
EDIT:
To combine all needed types and functions into 1 procedure, and also to use variables for subquery function parameters, I'm adding the following example:
create or replace procedure my_pipe
IS
-- define types
type my_subquery_tab is table of my_tab%rowtype;
type ref_cur_t is ref cursor;
v_ref_cur ref_cur_t;
-- define vars
v_with_sql varchar2(4000);
v_main_sql varchar2(32767);
v_x1 number;
v_x2 char;
v_y1 number;
v_y2 char;
v_col1 my_tab.col1%type;
v_col2 my_tab.col2%type;
-- define local functions/procs
function get_tab_data(p_cur_num in number, p_cur_type in char)
return my_subquery_tab pipelined
IS
v_ret my_subquery_tab;
begin
if (p_cur_num = 1) then
if (upper(p_cur_type) = 'X') then
for rec in (select * from my_tab where col1=1 and col3='X')
loop
pipe row(rec);
end loop;
elsif (upper(p_cur_type) = 'Y') then
for rec in (select * from my_tab where col1=1 and col3='Y')
loop
pipe row(rec);
end loop;
else
return;
end if;
elsif (p_cur_num = 2) then
if (upper(p_cur_type) = 'X') then
for rec in (select * from my_tab where col1=2 and col3='X')
loop
pipe row(rec);
end loop;
elsif (upper(p_cur_type) = 'Y') then
for rec in (select * from my_tab where col1=2 and col3='Y')
loop
pipe row(rec);
end loop;
else
return;
end if;
end if;
return;
end;
BEGIN
---------------------------------
-- Setup SQL for cursors
---------------------------------
-- this will have different parameter values for subqueries
v_with_sql := q'{
with X as (select * from table(get_tab_data(:x1, :x2))),
Y as (select * from table(get_tab_data(:y1, :y2)))
}';
-- this will stay the same for all cursors
v_main_sql := q'{
select X.col1, Y.col2 from X,Y where X.col1 = Y.col1
}';
---------------------------------
-- set initial subquery parameters
---------------------------------
v_x1 := 1;
v_x2 := 'x';
v_y1 := 1;
v_y2 := 'y';
open v_ref_cur for v_with_sql || v_main_sql using v_x1, v_x2, v_y1, v_y2;
loop
fetch v_ref_cur into v_col1, v_col2;
exit when v_ref_cur%notfound;
dbms_output.put_line(v_col1 || ',' || v_col2);
end loop;
close v_ref_cur;
---------------------------------
-- change subquery parameters
---------------------------------
v_x1 := 2;
v_x2 := 'x';
v_y1 := 2;
v_y2 := 'y';
open v_ref_cur for v_with_sql || v_main_sql using v_x1, v_x2, v_y1, v_y2;
loop
fetch v_ref_cur into v_col1, v_col2;
exit when v_ref_cur%notfound;
dbms_output.put_line(v_col1 || ',' || v_col2);
end loop;
close v_ref_cur;
end;
Note the benefit now is that even if you have many different cursors, you only need to define the main query and subquery SQL once; after that, you're just changing variables. (One caveat: as far as I know, a pipelined function referenced from SQL, including dynamic SQL, has to live at schema or package level, so get_tab_data may need to remain a standalone function rather than a local one as sketched here.)
Cheers
--Create views that will be replaced by common table expressions later.
--The column names have to be the same, the actual content doesn't matter.
create or replace view x as select 'wrong' col1, 'wrong' col2 from dual;
create or replace view y as select 'wrong' col1, 'wrong' col2 from dual;
--Put the repetitive logic in one view
create or replace view main_select as
select count(x.col1) total, x.col2
from X inner join Y on x.col1 = y.col1
group by rollup (x.col1);
--Just querying the view produces the wrong results
select * from main_select;
--But when you add the common table expressions X and Y they override
--the dummy views and produce the real results.
declare
cursor cursor_1 is
with X as (select 'right' col1, 'right' col2 from dual),
Y as (select 'right' col1, 'right' col2 from dual)
select total, col2 from main_select;
--... repeat for each cursor, just replace X and Y as necessary
begin
for r in cursor_1 loop
dbms_output.put_line(r.col2);
end loop;
null;
end;
/
This solution is a little weirder than the pipelined approach, and requires 3 new objects for the views, but it will probably run faster since there is less context switching between SQL and PL/SQL.
One possibility you could consider is using 2 Global Temporary Tables (GTTs) for X and Y. Then you just need one cursor, but you have to clear and re-populate the 2 GTTs several times - and if data volumes are large you may want to get optimiser stats on the GTTs each time too.
This is the sort of thing I mean:
cursor_gtt is
select count(X.col1), ...
from GTT_X inner join GTT_Y on...
group by rollup (X.col1, ...
begin
insert into gtt_x select col1, col2 from TAB where col1 = '1';
insert into gtt_y select col1, col2 from TAB where col2 = '3';
-- maybe get stats for gtt_x and gtt_y here
for r in cursor_gtt loop
print_report_results(r);
end loop;
delete gtt_x;
delete gtt_y;
insert into gtt_x select col1, col2 from TAB where col1 = '7' and col2 = '9' and col3 = 'TEST';
insert into gtt_y select col1, col2 from TAB where col3 = '6';
-- maybe get stats for gtt_x and gtt_y here
for r in cursor_gtt loop
print_report_results(r);
end loop;
...
end;
So the same 2 GTTs are re-populated and the same cursor is used each time.
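For completeness, the two GTTs would be created once up front, something like this (the column types are assumptions, since TAB's definition isn't shown; ON COMMIT PRESERVE ROWS matches the delete/re-populate pattern above):

CREATE GLOBAL TEMPORARY TABLE gtt_x (
  col1 VARCHAR2(100),
  col2 VARCHAR2(100)
) ON COMMIT PRESERVE ROWS;

CREATE GLOBAL TEMPORARY TABLE gtt_y (
  col1 VARCHAR2(100),
  col2 VARCHAR2(100)
) ON COMMIT PRESERVE ROWS;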
What about creating a view for the main query? That pretties up your code and centralizes the main query to boot.
