I have a requirement where I get the table name and column name dynamically. I need to fetch the data and insert/update a table using BULK COLLECT. As far as I can tell, I cannot use FORALL with dynamic SQL that uses a dynamic table/column name. Please suggest a workaround to bulk-insert the data held in a collection.
declare
  type type_xx is table of varchar2(200);
  lv_coll   type_xx := type_xx();
  lv_coll2  sys_refcursor;
  lv_tab    varchar2(200) := 'C_Sample_1';
  lv_col    varchar2(200) := 'SHORT_NAME';
  out_tab   varchar2(200) := 'Test';
begin
  open lv_coll2 for 'Select ' || lv_col || ' from ' || lv_tab;
  loop
    fetch lv_coll2 bulk collect into lv_coll limit 100;
    exit when lv_coll.count < 100;
    forall i in lv_coll.first .. lv_coll.last
      execute immediate 'insert into ' || out_tab || ' values (' || lv_coll(i) || ')';
  end loop;
end;
It gives the error:
ORA-06550: line 16, column 17:
PLS-00801: internal error [*** ASSERT
at file pdw4.c, line 620; Unknown expression Expr = 283.;
Xanon__0x2b21bbdd8__AB[16, 17]]
Database version is 11.2.0.4.0
Probably not the preferred solution, but you can put your whole bulk-insert PL/SQL block inside an EXECUTE IMMEDIATE statement. Here is a way to do that:
declare
  type t_ntt is table of test1%rowtype index by pls_integer;
  l_ntt       t_ntt;
  c_limit     INTEGER := 100;
  sqltext     VARCHAR2(1000);
  table_name  VARCHAR2(30) := 'test1';
  column_name VARCHAR2(30) := 'A';
  c_cursor    sys_refcursor;
begin
  open c_cursor for 'select ' || column_name || ' from ' || table_name;
  loop
    fetch c_cursor bulk collect into l_ntt limit c_limit;
    exit when l_ntt.count = 0;
    dbms_output.put_line(l_ntt.count);
    forall i in indices of l_ntt
      insert into test values l_ntt(i);
  end loop;
  close c_cursor;
end;
I have not included a COMMIT in it.
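Applied to the original question, the same idea would look roughly like the sketch below: build the entire inner block as a string, concatenate the (validated) table and column names into it, and run it with one EXECUTE IMMEDIATE. Once the names are part of the text, the inner block is ordinary static PL/SQL, so FORALL works as usual. This is only a sketch; it assumes, as in the question, that the target table Test has a single VARCHAR2 column.

declare
  l_block    varchar2(4000);
  l_src_tab  varchar2(30) := 'C_Sample_1';  -- source table from the question
  l_src_col  varchar2(30) := 'SHORT_NAME';  -- source column from the question
  l_out_tab  varchar2(30) := 'Test';        -- target table from the question
begin
  -- the inner block becomes static PL/SQL once the names are concatenated in,
  -- so FORALL compiles and runs normally inside it
  l_block := '
    declare
      type t_vc is table of varchar2(200);
      l_vals t_vc;
      cursor c is select ' || l_src_col || ' from ' || l_src_tab || ';
    begin
      open c;
      loop
        fetch c bulk collect into l_vals limit 100;
        exit when l_vals.count = 0;
        forall i in 1 .. l_vals.count
          insert into ' || l_out_tab || ' values (l_vals(i));
      end loop;
      close c;
    end;';
  execute immediate l_block;
end;
/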
Related
For operating on millions of records I want to use a limit of 500, but the following code gives an error.
Error report:
ORA-06550: line 6, column 49:
PLS-00103: Encountered the symbol "LIMIT" when expecting one of the following:
DECLARE
  TYPE EMP_T IS TABLE OF NUMBER;
  EMP_ID EMP_T;
  QRY VARCHAR2(4000) := 'SELECT EMPLOYEE_ID FROM EMPLOYEES';
BEGIN
  EXECUTE IMMEDIATE QRY BULK COLLECT INTO EMP_ID LIMIT 500;
END;
That's not the way to use the LIMIT clause: you can't use LIMIT with BULK COLLECT in an EXECUTE IMMEDIATE statement (see "BULK COLLECT LIMIT in EXECUTE IMMEDIATE"). Use an explicit cursor instead.
Example:
DECLARE
  TYPE EMP_T IS TABLE OF NUMBER;
  EMP_ID EMP_T;
  CURSOR c_data IS SELECT EMPLOYEE_ID FROM EMPLOYEES;
BEGIN
  OPEN c_data;
  LOOP
    FETCH c_data
      BULK COLLECT INTO EMP_ID LIMIT 100;
    EXIT WHEN EMP_ID.count = 0;
    -- Process contents of collection here.
    DBMS_OUTPUT.put_line(EMP_ID.count || ' rows');
  END LOOP;
  CLOSE c_data;
END;
/
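If the statement itself has to stay dynamic (as in the question), a common workaround is to open a ref cursor with OPEN ... FOR over the query string and fetch from it with BULK COLLECT ... LIMIT, which is allowed there. A sketch reusing the question's query and the desired limit of 500:

DECLARE
  TYPE EMP_T IS TABLE OF NUMBER;
  EMP_ID EMP_T;
  QRY    VARCHAR2(4000) := 'SELECT EMPLOYEE_ID FROM EMPLOYEES';
  C_DATA SYS_REFCURSOR;
BEGIN
  OPEN C_DATA FOR QRY;   -- dynamic statement, ref cursor
  LOOP
    FETCH C_DATA BULK COLLECT INTO EMP_ID LIMIT 500;   -- LIMIT is allowed here
    EXIT WHEN EMP_ID.COUNT = 0;
    -- process the batch here
    DBMS_OUTPUT.PUT_LINE(EMP_ID.COUNT || ' rows');
  END LOOP;
  CLOSE C_DATA;
END;
/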
Background
I'm trying to make a re-usable PL/SQL procedure to move data from one
database to another.
For this purpose, I'm using dynamic SQL.
The procedure executes perfectly if I use a REPLACE with placeholders.
However, for security reasons, I want to use bind variables.
Question
How can I make an entire PL/SQL code block dynamic (with bind
variables)? If I use a REPLACE instead of the bind variables, it works
fine.
How to replicate
To replicate this in your database, create the following procedure as it is:
create or replace procedure move_data(i_schema_name in varchar2, i_table_name in varchar2, i_destination in varchar2) as
  l_sql          varchar2(32767);
  l_cursor_limit pls_integer := 500;
  l_values_list  varchar2(32767);
begin
  select listagg('l_to_be_moved(i).' || column_name, ', ') within group (order by column_id)
    into l_values_list
    from all_tab_cols
   where owner = i_schema_name
     and table_name = i_table_name
     and virtual_column = 'NO';

  l_sql := q'[
    declare
      l_cur_limit pls_integer := :l_cursor_limit;
      cursor c_get_to_be_moved is
        select :i_table_name.*, :i_table_name.rowid
          from :i_table_name;
      type tab_to_be_moved is table of c_get_to_be_moved%rowtype;
      l_to_be_moved tab_to_be_moved;
    begin
      open c_get_to_be_moved;
      loop
        fetch c_get_to_be_moved
          bulk collect into l_to_be_moved limit l_cur_limit;
        exit when l_to_be_moved.count = 0;

        for i in 1 .. l_to_be_moved.count loop
          begin
            insert into :i_table_name@:i_destination values (:l_values_list);
          exception
            when others then
              dbms_output.put_line(sqlerrm);
              l_to_be_moved.delete(i);
          end;
        end loop;

        forall i in 1 .. l_to_be_moved.count
          delete from :i_table_name
           where rowid = l_to_be_moved(i).rowid;

        for i in 1 .. l_to_be_moved.count loop
          if (sql%bulk_rowcount(i) = 0) then
            raise_application_error(-20001, 'Could not find ROWID to delete. Rolling back...');
          end if;
        end loop;

        commit;
      end loop;
      close c_get_to_be_moved;
    exception
      when others then
        rollback;
        dbms_output.put_line(sqlerrm);
    end;]';

  execute immediate l_sql using l_cursor_limit, i_table_name, i_destination, l_values_list;
exception
  when others then
    rollback;
    dbms_output.put_line(sqlerrm);
end;
/
And then you can execute the procedure with the following:
begin
move_data('MySchemaName', 'MyTableName', 'MyDatabaseLinkName');
end;
/
For many reasons (the inability to generate an appropriate execution plan, security checking, etc.), Oracle does not allow binding identifiers (table names, schema names, column names and so on). So if it's really necessary, the only way is to hard-code those identifiers after some sort of validation (to prevent SQL injection).
If I understand correctly, you could try a trick: use dynamic SQL inside dynamic SQL.
Setup:
create table tab100 as select level l from dual connect by level <= 100;
create table tab200 as select level l from dual connect by level <= 200;
create table tabDest as select * from tab100 where 1 = 2;
This will not work:
create or replace procedure testBind (pTableName in varchar2) is
  vSQL varchar2(32000);
begin
  vSQL := 'insert into tabDest select * from :tableName';
  execute immediate vSQL using pTableName;
end;
But this will do the trick:
create or replace procedure testBind2 (pTableName in varchar2) is
  vSQL varchar2(32000);
begin
  vSQL := q'[declare
               vTab  varchar2(30) := :tableName;
               vSQL2 varchar2(32000) := 'insert into tabDest select * from ' || vTab;
             begin
               execute immediate vSQL2;
             end;]';
  execute immediate vSQL using pTableName;
end;
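With the setup tables above, you could exercise the procedure like this; afterwards tabDest should contain the 100 rows copied from tab100:

begin
  testBind2('TAB100');
end;
/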
I think you can do it more simply:
create or replace procedure move_data(i_schema_name in varchar2, i_table_name in varchar2, i_destination in varchar2) as
  l_sql varchar2(32767);
begin
  l_sql := 'insert into ' || i_table_name || '@' || i_destination ||
           ' select * from ' || i_schema_name || '.' || i_table_name;
  execute immediate l_sql;
end;
If you are concerned about SQL injection, have a look at the package DBMS_ASSERT. This PL/SQL package provides functions to validate properties of input values.
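For example, DBMS_ASSERT.SIMPLE_SQL_NAME returns its argument unchanged when it is a legal simple SQL name and raises ORA-44003 otherwise, so the schema, table and link names could be passed through it before being concatenated into l_sql. A small illustrative block (the string values here are made up):

declare
  l_name varchar2(128);
begin
  l_name := dbms_assert.simple_sql_name('MY_TABLE');         -- returns 'MY_TABLE'
  dbms_output.put_line('ok: ' || l_name);
  l_name := dbms_assert.simple_sql_name('t; drop table t');  -- raises ORA-44003
exception
  when others then
    dbms_output.put_line(sqlerrm);
end;
/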
If we have a column of type NUMBER in a table, how can we store the result of a select query on that column in an array?
This sample uses a nested table of numbers to achieve this, because I find those collections much more handy:
CREATE OR REPLACE TYPE numberlist AS TABLE OF NUMBER;
DECLARE
  v_numberlist numberlist;
BEGIN
  SELECT intval numbercolumn
    BULK COLLECT INTO v_numberlist
    FROM lookup;

  FOR i IN 1..v_numberlist.count LOOP
    dbms_output.put_line( v_numberlist(i) );
  END LOOP;
END;
Create a type which stores numbers:
CREATE OR REPLACE TYPE varray IS TABLE OF NUMBER;
Then write your SELECT query inside the FOR loop (here I am generating the rows via LEVEL):
declare
  p varray := varray();
begin
  for i in (select level from dual connect by level <= 10) loop
    p.extend;
    p(p.count) := i.level;
  end loop;

  for xarr in (select column_value from table(cast(p as varray))) loop
    dbms_output.put_line(xarr.column_value);
  end loop;
end;
Output:
1
2
3
4
5
6
7
8
9
10
Just an option that uses a collection type already declared in the DBMS_SQL package. Hope it helps.
SET SERVEROUTPUT ON;
DECLARE
  lv_num_tab DBMS_SQL.NUMBER_TABLE;
BEGIN
  SELECT LEVEL BULK COLLECT INTO lv_num_tab FROM DUAL CONNECT BY LEVEL < 10;

  FOR i IN lv_num_tab.FIRST .. lv_num_tab.LAST LOOP
    dbms_output.put_line(lv_num_tab(i));
  END LOOP;
END;
You may also want to put the whole result of the SELECT into a collection. You can use BULK COLLECT into an array:
CREATE OR REPLACE TYPE t_my_list AS TABLE OF VARCHAR2(100);
CREATE OR REPLACE PROCEDURE get_tables(p_owner IN VARCHAR2)
AS
  v_res t_my_list;
  v_qry varchar2(4000);
BEGIN
  v_qry := 'SELECT table_name FROM all_tables WHERE owner = ''' || p_owner || '''';
  dbms_output.put_line(v_qry);

  -- all rows at once into the collection
  EXECUTE IMMEDIATE v_qry BULK COLLECT INTO v_res;

  FOR i IN 1 .. v_res.count LOOP
    dbms_output.put_line(v_res(i));
  END LOOP;
EXCEPTION
  WHEN OTHERS THEN
    RAISE;
END get_tables;
/
begin
get_tables('E') ;
end;
/
I have a table A with a column A that holds table names as values.
All these tables have a common column C. I need the maximum value of this column for each table.
I tried this using dynamic SQL but I'm getting errors. Please suggest.
DECLARE
  query1  VARCHAR2(100);
  c_table VARCHAR2(40);
  c_obj   VARCHAR2(20);
  CURSOR cursor_a IS
    SELECT a FROM A;
BEGIN
  OPEN cursor_a;
  LOOP
    FETCH cursor_a INTO c_table2;
    EXIT WHEN cursor_a%notfound;
    query1 := 'SELECT max(object_ref) AS "c_obj" FROM c_table';
    EXECUTE IMMEDIATE query1;
    dbms_output.put_line('Maximum value: ' || c_table || c_obj);
  END LOOP;
  CLOSE cursor_a;
END;
Dynamic SQL can't see your PL/SQL variable: you need to pass it a string which can be executed in the scope of the SQL engine. So you need to concatenate the table name with the statement's boilerplate text:
query1 := 'SELECT max(c) FROM ' || variable_name;
You also need to return the result of the query into a variable.
Here is how it works (I've stripped out some of the unnecessary code from your example):
DECLARE
  c_table VARCHAR2(40);
  c_obj   VARCHAR2(20);
BEGIN
  FOR lrec IN ( SELECT a AS tab_name FROM A )
  LOOP
    EXECUTE IMMEDIATE 'SELECT max(object_ref) FROM ' || lrec.tab_name
      INTO c_obj;
    dbms_output.put_line('Maximum value: ' || lrec.tab_name || '=' || c_obj);
  END LOOP;
END;
There are some mismatches in the variables you have used, i.e.:
you declared "c_table" but are fetching into "c_table2";
each table's common column is named "C", but you are selecting "object_ref";
in the dynamic query, use the INTO clause of EXECUTE IMMEDIATE to store the value into your variable.
Suggestions
Use concatenation to prepare the query dynamically, i.e. something like:
query1 := 'SELECT max(c) FROM ' || c_table;
The steps for implementing the dynamic query are:
query1 := <your dynamic query>;
EXECUTE IMMEDIATE query1 INTO c_obj;
Sample code:
DECLARE
  query1  VARCHAR2(100);
  c_table VARCHAR2(40);
  c_obj   VARCHAR2(20);
  CURSOR cursor_a IS
    SELECT a FROM A;
BEGIN
  OPEN cursor_a;
  LOOP
    FETCH cursor_a INTO c_table;
    EXIT WHEN cursor_a%notfound;
    query1 := 'SELECT max(object_ref) FROM ' || c_table;
    EXECUTE IMMEDIATE query1 INTO c_obj;
    dbms_output.put_line('Maximum value: ' || c_table || c_obj);
  END LOOP;
  CLOSE cursor_a;
END;
I have one table called event, and I created another global temporary table tmp_event with the same columns and definition as event. Is it possible to insert the records in event into tmp_event using this?
DECLARE
v_record event%rowtype;
BEGIN
Insert into tmp_event values v_record;
END;
There are too many columns in the event table; I want to try this because I don't want to list all the columns.
I forgot to mention: I will use this in a trigger. Can this v_record be the :new object in an AFTER INSERT trigger on the EVENT table?
To insert one row:
DECLARE
  v_record event%rowtype;
BEGIN
  SELECT * INTO v_record FROM event WHERE rownum = 1; -- or whatever where clause
  INSERT INTO tmp_event VALUES v_record;
END;
Or a more elaborate version to insert all rows from event:
DECLARE
  TYPE t_bulk_collect_test_tab IS TABLE OF event%ROWTYPE;
  l_tab t_bulk_collect_test_tab;
  CURSOR c_data IS
    SELECT * FROM event;
BEGIN
  OPEN c_data;
  LOOP
    FETCH c_data
      BULK COLLECT INTO l_tab LIMIT 10000;
    EXIT WHEN l_tab.count = 0;
    -- Process contents of collection here.
    FORALL i IN 1 .. l_tab.count
      INSERT INTO tmp_event VALUES l_tab(i);
  END LOOP;
  CLOSE c_data;
END;
/
In a trigger, yes, it is possible, but it's a chicken-and-egg situation. You have to initialize every field of the rowtype variable with the :new column values, like:
v_record.col1 := :new.col1;
v_record.col2 := :new.col2;
v_record.col3 := :new.col3;
...
Also, the PL/SQL examples above cannot be used inside a row trigger on event, since selecting from event there would throw a mutating-table error. And there is no other way to get the entire row in the trigger than accessing each column separately as above, so if you have to do all this anyway, why not use :new.col directly in the INSERT into tmp_event itself? It will save you a lot of work.
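So, if the goal is simply to copy each newly inserted row into tmp_event, the trigger can just list the :new values directly. A sketch with made-up column names (col1..col3 stand in for the real EVENT columns):

create or replace trigger event_copy_air
  after insert on event
  for each row
begin
  -- col1..col3 are placeholders for the actual EVENT columns
  insert into tmp_event (col1, col2, col3)
  values (:new.col1, :new.col2, :new.col3);
end;
/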
Also, since you say it's a lot of work to list all the columns, here's a quick way (in Oracle 11gR2) of generating the INSERT statement and executing it dynamically (although I have not tested it for performance).
CREATE OR REPLACE TRIGGER event_air  -- air stands for "after insert of row"
AFTER INSERT ON EVENT
FOR EACH ROW
DECLARE
  l_query varchar2(2000);  -- size it appropriately
BEGIN
  SELECT 'INSERT INTO tmp_event VALUES (' || listagg(':new.' || column_name, ',')
         WITHIN GROUP (ORDER BY column_name) || ')'
    INTO l_query
    FROM all_tab_columns
   WHERE table_name = 'EVENT';

  EXECUTE IMMEDIATE l_query;
EXCEPTION
  WHEN OTHERS THEN
    NULL;  -- meaningful exception handling here
END;
There is a way to insert multiple rows into a table with %ROWTYPE. Check out the example below.
DECLARE
  TYPE v_test IS TABLE OF TEST_TAB%rowtype;
  v_test_tab v_test;
BEGIN
  EXECUTE IMMEDIATE 'SELECT * FROM TEST_TAB' BULK COLLECT INTO v_test_tab;
  dbms_output.put_line('v_test_tab.count --> ' || v_test_tab.count);

  FOR i IN 1 .. v_test_tab.count LOOP
    INSERT INTO TEST_TAB_1 VALUES v_test_tab(i);
  END LOOP;
END;
To sum it up, here is a full working example:
DECLARE
  TYPE t_bulk_collect_test_tab IS TABLE OF event%ROWTYPE;
  l_tab t_bulk_collect_test_tab;
  CURSOR c_data IS SELECT * FROM event;
BEGIN
  OPEN c_data;
  LOOP
    FETCH c_data
      BULK COLLECT INTO l_tab LIMIT 10000;
    EXIT WHEN l_tab.count = 0;

    FORALL i IN 1 .. l_tab.count
      INSERT INTO tmp_event VALUES l_tab(i);
    COMMIT;
  END LOOP;
  CLOSE c_data;
END;
/