How do I work with a table column that contains SQL statement strings that need to be executed? - oracle

I need to build a PL/SQL procedure that takes data from a source table and inserts it into a target table. The source table has an ITEM1 column, an ITEM2 column, and a SRC_CODE column. The SRC_CODE column contains a string that is a SQL Select Statement, i.e. SELECT KEY FROM SOMETABLE WHERE DAY = V_DAY. So, I somehow need to execute the statements within the SRC_CODE column and populate that V_DAY variable within the select statement. The resulting KEY values, along with ITEM1 and ITEM2 from the source table will go into the TARGET table.
Approaching the process logically, I gather I need to take a row from the source table, execute the SRC_CODE into a collection, then take each KEY from the collection, tie it back to ITEM1 and ITEM2, and insert KEY, ITEM1, and ITEM2 into the target table. I have no clue how to go about this in terms of programming.
The following is my attempt to at least populate the target with key values, but to no avail, as I get an "invalid identifier" error. If someone could correct/expand on this to get what I need, it would be much appreciated:
CREATE OR REPLACE PROCEDURE POPULATETARGET IS
TYPE KEYS_T IS TABLE OF SOMETABLE.KEY%TYPE;
L_KEYS KEYS_T;
V_DAY NUMBER;
SRC_CODE_FETCH VARCHAR2(200);
V_SRC_CODE VARCHAR2 (4000);
RC SYS_REFCURSOR;
BEGIN
V_DAY := 20150826;
SRC_CODE_FETCH := 'SELECT SRC_CODE FROM SOURCE';
OPEN RC FOR SRC_CODE_FETCH;
LOOP
FETCH RC INTO V_SRC_CODE;
EXIT WHEN RC%NOTFOUND;
EXECUTE IMMEDIATE V_SRC_CODE BULK COLLECT INTO L_KEYS USING V_DAY;
FORALL x IN L_KEYS.FIRST..L_KEYS.LAST
INSERT INTO TARGET VALUES L_KEYS(x);
END LOOP;
CLOSE RC;
END;

The problem is that you are missing parentheses in the insert statement, so your insert line should be:
INSERT INTO TARGET VALUES (L_KEYS(x));
Also, I recommend adding a COMMIT after this statement.

This worked to perfection:
CREATE OR REPLACE PROCEDURE POPULATETARGET IS
TYPE KEYS_T IS TABLE OF SOMETABLE.KEY%TYPE;
L_KEYS KEYS_T;
V_DAY NUMBER;
V_SRC_CODE VARCHAR2 (4000);
RC SYS_REFCURSOR;
BEGIN
V_DAY := 20150826;
OPEN RC FOR SELECT SRC_CODE FROM SOURCE;
LOOP
FETCH RC INTO V_SRC_CODE;
EXIT WHEN RC%NOTFOUND;
EXECUTE IMMEDIATE V_SRC_CODE BULK COLLECT INTO L_KEYS USING V_DAY;
FORALL x IN L_KEYS.FIRST..L_KEYS.LAST
INSERT INTO TARGET (KEY, ITEM1, ITEM2)
VALUES (L_KEYS(x),
(SELECT ITEM1 FROM SOURCE WHERE SRC_CODE = V_SRC_CODE),
(SELECT ITEM2 FROM SOURCE WHERE SRC_CODE = V_SRC_CODE));
COMMIT;
END LOOP;
CLOSE RC;
END;
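For what it's worth, here is a variant sketch that avoids the scalar subqueries by fetching ITEM1 and ITEM2 along with SRC_CODE in the same cursor. It assumes the SOURCE columns described in the question, and that the statements stored in SRC_CODE contain a bind placeholder (e.g. WHERE DAY = :DAY) rather than a literal variable name, which is what EXECUTE IMMEDIATE ... USING expects:
CREATE OR REPLACE PROCEDURE POPULATETARGET IS
TYPE KEYS_T IS TABLE OF SOMETABLE.KEY%TYPE;
L_KEYS KEYS_T;
V_DAY NUMBER := 20150826;
V_SRC_CODE VARCHAR2(4000);
V_ITEM1 SOURCE.ITEM1%TYPE;
V_ITEM2 SOURCE.ITEM2%TYPE;
RC SYS_REFCURSOR;
BEGIN
-- carry ITEM1/ITEM2 along so the insert does not need to re-query SOURCE
OPEN RC FOR SELECT SRC_CODE, ITEM1, ITEM2 FROM SOURCE;
LOOP
FETCH RC INTO V_SRC_CODE, V_ITEM1, V_ITEM2;
EXIT WHEN RC%NOTFOUND;
-- run the stored statement; its :DAY placeholder is bound to V_DAY
EXECUTE IMMEDIATE V_SRC_CODE BULK COLLECT INTO L_KEYS USING V_DAY;
FORALL x IN 1..L_KEYS.COUNT
INSERT INTO TARGET (KEY, ITEM1, ITEM2) VALUES (L_KEYS(x), V_ITEM1, V_ITEM2);
END LOOP;
CLOSE RC;
COMMIT;
END;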

Related

how to get better performance stored procedure oracle

I wrote a stored procedure, but I don't think it performs well at all. How can I make it better? Thanks.
Table A has 800k records. Table B has 36k records. This is test data; there will be more records in the production environment.
Table B has an index defined on the customer_no column.
I ran it once and it took 22 minutes.
create or replace procedure SP_KVKK
is
TYPE json_data_type IS RECORD
(
del_data CLOB
);
customer_no number(10);
r_del_data C%ROWTYPE;
l_deleted_cur SYS_REFCURSOR;
l_deleted_rec json_data_type;
l_sel_sql VARCHAR2 (500);
cursor mbb_list is
select customer_no from A;
begin
open mbb_list;
loop
fetch mbb_list into customer_no;
exit when mbb_list%notfound;
l_sel_sql := 'SELECT JSON_OBJECT(* RETURNING CLOB) AS DEL_DATA
FROM B where customer_no=' || customer_no;
open l_deleted_cur for l_sel_sql;
loop
fetch l_deleted_cur into l_deleted_rec;
exit when l_deleted_cur%notfound;
r_del_data.DELETED_DOCUMENT_JSON := l_deleted_rec.del_data;
r_del_data.DELETE_DATE := SYSTIMESTAMP;
Insert into C
values r_del_data;
end loop;
close l_deleted_cur;
end loop;
close mbb_list;
end;
The best thing you can do is deal with this construct as a single SQL statement. Stop thinking in terms of row-by-row nested loops, which will always be slow, and look to SQL batch operations to handle this:
begin
-- insert your two columns into table C using the data
-- from table B returned as a json_object and systimestamp,
-- where only records from table B that have a customer_no
-- from table A will be selected
insert into C (deleted_document_json, delete_date)
select json_object(B.* returning clob), systimestamp
from B
inner join A using (customer_no);
commit;
end;
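If you still need this packaged as the SP_KVKK procedure from the question, a minimal sketch along the same lines could look like the following. It keeps a single-table FROM B so that JSON_OBJECT(* RETURNING CLOB) is used exactly as in the original query, and it assumes C has the two columns named above:
create or replace procedure SP_KVKK
is
begin
-- one set-based insert instead of nested cursor loops
insert into C (deleted_document_json, delete_date)
select json_object(* returning clob), systimestamp
from B
where customer_no in (select customer_no from A);
commit;
end;
When customer_no is unique in A this is equivalent to the join above; if A could contain duplicate customer_no values, the IN form avoids inserting duplicate rows.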

Insert into not working on plsql in oracle

declare
vquery long;
cursor c1 is
select * from temp_name;
begin
for i in c1
loop
vquery :='INSERT INTO ot.temp_new(id)
select '''||i.id||''' from ot.customers';
dbms_output.put_line(i.id);
end loop;
end;
/
Output of select * from temp_name is :
ID
--------------------------------------------------------------------------------
customer_id
1 row selected.
I have a customers table which has a customer_id column. I want to insert all the customer_id values into the temp_new table, but nothing is being inserted. The PL/SQL block executes successfully, but the temp_new table stays empty.
The output of dbms_output.put_line(i.id); is
customer_id
What is wrong there?
The main problem is that you generate a dynamic statement that you never execute; at some point you need to do:
execute immediate vquery;
But there are other problems. If you output the generated vquery string you'll see it contains:
INSERT INTO ot.temp_new(id)
select 'customer_id' from ot.customers
which means that for every row in customers you'll get one row in temp_new with ID set to the same fixed literal 'customer_id'. It's unlikely that's what you want; if customer_id is a column name from customers then it shouldn't be in single quotes.
As #mathguy suggested, long is not a sensible data type to use; you could use a CLOB but only really need a varchar2 here. So something more like this, where I've also switched to use an implicit cursor:
declare
l_stmt varchar2(4000);
begin
for i in (select id from temp_name)
loop
l_stmt := 'INSERT INTO temp_new(id) select '||i.id||' from customers';
dbms_output.put_line(i.id);
dbms_output.put_line(l_stmt);
execute immediate l_stmt;
end loop;
end;
/
db<>fiddle
The loop doesn't really make sense though; if your temp_name table had multiple rows with different column names, you'd try to insert the corresponding values from those columns in the customers table into multiple rows in temp_new, all in the same id column, as shown in this db<>fiddle.
I guess this is the starting point for something more complicated, but still seems a little odd.
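If the end goal really is just to copy the customer_id values into temp_new, you don't need dynamic SQL or a loop at all; a plain static statement (assuming the table and column names from the question) does it:
insert into ot.temp_new (id)
select customer_id from ot.customers;
commit;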

iterating thru cursor in Oracle

I've found a good question at https://dba.stackexchange.com/questions/3587/oracle-automate-export-unload-of-data. Is it valid to use such a construction:
FOR r IN (SELECT * FROM table) LOOP
UTL_FILE.PUT_LINE(lfFilelog, r.row);
END LOOP;
I'm trying to use something like this:
CREATE OR REPLACE PROCEDURE p_name(DESTFOLDER in varchar2, FILENAME in varchar2)
IS
V_FILEHANDLE UTL_FILE.FILE_TYPE;
CURSOR dataset IS
SELECT
field1,
field2,
fieldN
FROM
table1,
table2,
(SELECT field3 from table3);
-- WHERE CLAUSE ... and so on..
BEGIN
V_FILEHANDLE := UTL_FILE.FOPEN(DESTFOLDER, FILENAME, 'w');
FOR R IN dataset LOOP
UTL_FILE.PUT_LINE(V_FILEHANDLE, R.ROW);
END LOOP;
END;
/
and getting a PLS-00302 error, which states that the ROW component must be declared. So as far as I understand, this field should already exist in the query. Am I right?
Can I simply write a row from the cursor?
The answer mentioned is not complete; I think it was given as an example (pseudo-code) that lacks implementation details.
As it is:
your SELECT clause is invalid, you aren't selecting anything. What do you want to select?
the construct XX.row where xx is a cursor doesn't exist
furthermore, the UTL_FILE.PUT_LINE procedure accepts a VARCHAR2 as its second argument, not any kind of rowtype
you can't name a table table (although you could name it "table").
Given a table mytable(col1, col2, ... , colN) you could write:
CREATE OR REPLACE PROCEDURE p_name(DESTFOLDER IN VARCHAR2, FILENAME IN VARCHAR2)
IS
V_FILEHANDLE UTL_FILE.FILE_TYPE;
CURSOR dataset IS SELECT col1, col2, /*...*/ coln FROM mytable;
BEGIN
-- open the file for writing; DESTFOLDER must name an Oracle directory object
V_FILEHANDLE := UTL_FILE.FOPEN(DESTFOLDER, FILENAME, 'w');
FOR R IN dataset LOOP
UTL_FILE.PUT_LINE(V_FILEHANDLE, R.col1 ||';'|| r.col2 /*...*/ || r.coln);
END LOOP;
UTL_FILE.FCLOSE(V_FILEHANDLE);
END;
/
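A hypothetical call, assuming a directory object (MY_DIR is a made-up name) exists and the database user has write permission on it:
begin
p_name('MY_DIR', 'mytable.csv');
end;
/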

Oracle - select a specific column from a ref cursor

My situation:
I have a table named Table1. It has lots of columns, one of them is Column1. I don't know the other columns, they may even change sometimes.
There is a strongly typed ref cursor type which returns Table1%rowtype, named cur_Table1.
I have a stored procedure named SP1 which has an out parameter of type cur_Table1. I'm calling this SP1 stored procedure from another database that only sees this stored procedure, but not the table or the type itself.
How do I select only Column1 from the returned cursor?
I know I can fetch into a record or as many variables as the cursor has columns, but I only know of one column's existence so I can't declare the complete record or correct number of variables.
You can do this with DBMS_SQL, but it ain't pretty.
Table and sample data (COLUMN1 has the numbers 1 - 10):
create table table1(column1 number, column2 date, column3 varchar2(1000), column4 clob);
insert into table1
select level, sysdate, level, level from dual connect by level <= 10;
commit;
Package with a procedure that opens a ref cursor and selects everything:
create or replace package test_pkg is
type cur_Table1 is ref cursor return table1%rowtype;
procedure sp1(p_cursor in out cur_table1);
end;
/
create or replace package body test_pkg is
procedure sp1(p_cursor in out cur_table1) is
begin
open p_cursor for select column1, column2, column3, column4 from table1;
end;
end;
/
PL/SQL block that reads COLUMN1 data from the ref cursor:
--Basic steps are: call procedure, convert cursor, describe and find columns,
--then fetch rows and retrieve column values.
--
--Each possible data type for COLUMN1 needs to be added here.
--Currently only NUMBER is supported.
declare
v_cursor sys_refcursor;
v_cursor_number number;
v_columns number;
v_desc_tab dbms_sql.desc_tab;
v_position number;
v_typecode number;
v_number_value number;
begin
--Call procedure to open cursor
test_pkg.sp1(v_cursor);
--Convert cursor to DBMS_SQL cursor
v_cursor_number := dbms_sql.to_cursor_number(rc => v_cursor);
--Get information on the columns
dbms_sql.describe_columns(v_cursor_number, v_columns, v_desc_tab);
--Loop through all the columns, find COLUMN1 position and type
for i in 1 .. v_desc_tab.count loop
if v_desc_tab(i).col_name = 'COLUMN1' then
v_position := i;
v_typecode := v_desc_tab(i).col_type;
--Pick COLUMN1 to be selected.
if v_typecode = dbms_types.typecode_number then
dbms_sql.define_column(v_cursor_number, i, v_number_value);
--...repeat for every possible type.
end if;
end if;
end loop;
--Fetch all the rows, then get the relevant column value and print it
while dbms_sql.fetch_rows(v_cursor_number) > 0 loop
if v_typecode = dbms_types.typecode_number then
dbms_sql.column_value(v_cursor_number, v_position, v_number_value);
dbms_output.put_line('Value: '||v_number_value);
--...repeat for every possible type
end if;
end loop;
end;
/
Given the original question, jonearles's answer is still correct, so I'll leave it marked as such, but I ended up doing something completely different and much better.
The problem was/is that I have no control over SP1's database; I just have to call it from somewhere else as a third-party client. Now I have managed to get permission to see not only the SP but also the type of the cursor. I still don't see the table, but now there is a much cleaner solution:
In the other database I have been granted access to see this type now:
type cur_Table1 is ref cursor return Table1%rowtype;
So in my database I can do this now:
mycursor OtherDB.cur_Table1;
myrecord mycursor%rowtype;
...
OtherDB.SP1(mycursor);
fetch mycursor into myrecord;
dbms_output.put_line(myrecord.Column1);
See, I still don't need any access to the table; I see the cursor only. The key is that the magical %rowtype works for cursors as well, not just tables. It doesn't work on a sys_refcursor, but it does on a strongly typed one. Given this code, I don't have to care if anything changes on the other side; I don't have to define all the columns or records at all, I just specify the one column I'm interested in.
I really love this object-oriented side of Oracle.
Don't know if it's an option or not, but wouldn't a better solution be to create a function that returns the specific value you're looking for? That avoids the overhead of sending the extra data. Alternatively, you could define a cursor with a set of known fields in it that both parties know about.
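As a rough sketch of that suggestion (names are illustrative only, reusing the TABLE1/COLUMN1 sample from above): a function that opens a cursor over just the one column you care about, so the caller never needs the full rowtype:
create or replace function get_column1_cur return sys_refcursor is
rc sys_refcursor;
begin
-- expose only COLUMN1; callers fetch a single NUMBER per row
open rc for select column1 from table1;
return rc;
end;
/
The caller can then fetch just that value without knowing anything else about TABLE1.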

Using OLD and NEW object for dynamic operations inside trigger

I want to know whether I can use the OLD and NEW objects for dynamic operations inside a trigger.
What I am looking for is something like this:
ABC is a table for which I need to write a trigger.
Track_Table maintains the list of columns, per table, that need to be tracked (logged).
f_log is a function that inserts changes in data into a tracking (log) table.
CREATE OR REPLACE TRIGGER trg_TRACK
AFTER INSERT OR UPDATE OR DELETE ON ABC
FOR EACH ROW
declare
v_old_val varchar2(1000);
v_new_val varchar2(1000);
n_ret int;
n_id varchar(50);
cursor cur_col is
SELECT COLUMN_NAME,
TABLE_name
FROM track_TABLE
WHERE upper(TABLE_NAME) = upper('ABC')
AND exists (select cname
from col
where UPPER(tname) =upper('ABC')
and upper(cname)=upper(COLUMN_NAME))
AND upper(allow) = 'Y';
begin
n_id:= :old.id;
for i_get_col in c_get_col
loop
execute immediate
'begin
:v_old_val:= select '||i_get_col.column_name ||'
from '||:old ||'
where id = '||n_id ||';
end;' using out v_old_val;
execute immediate
'begin
:v_new_val:= select '||i_get_col.column_name ||'
from '||:new ||'
where id = '||n_id ||';
end;' using out v_new_val;
n_ret := f_log(n_id,i_get_col.column_name,v_old_val,v_new_val);
end loop;
end;
/
One option: push the logic that checks whether a column is being tracked into the f_log procedure, and then pass across all of the columns.
For example, if your track_table holds (table_name, column_name, allow) values for each column that you want to track, then something like this:
CREATE OR REPLACE PROCEDURE f_log( p_id varchar2
,p_table_name varchar2
,p_column_name varchar2
,p_old_val varchar2
,p_new_val varchar2)
as
l_exists number;
cursor chk_column_track IS
SELECT 1
FROM track_TABLE
WHERE upper(TABLE_NAME) = upper(p_table_name)
AND UPPER(column_name) = upper(p_column_name)
AND upper(allow) = 'Y';
begin
open chk_column_track;
fetch chk_column_track into l_exists;
if chk_column_track%found then
--do the insert here
end if;
close chk_column_track;
end;
/
CREATE OR REPLACE TRIGGER trg_TRACK
AFTER INSERT OR UPDATE OR DELETE ON ABC
FOR EACH ROW
DECLARE
n_id varchar(50);
BEGIN
n_id := NVL(:old.id, :new.id);
-- send all of the values to f_log and have it decide whether to save them
f_log(n_id,'ABC','COL1',:old.col1,:new.col1);
f_log(n_id,'ABC','COL2',:old.col2,:new.col2);
f_log(n_id,'ABC','COL3',:old.col3,:new.col3);
...
END;
And for goodness' sake, upper-case the values in your track_table on insert so that you don't have to UPPER() the stored values, thereby making any index on those values useless!
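One way to enforce that uppercase rule at insert time (a minimal sketch; the constraint name is made up):
alter table track_table
add constraint track_table_upper_ck
check (table_name = upper(table_name)
and column_name = upper(column_name)
and allow = upper(allow));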
Now, this will chew up some resources checking each column name on each operation, but if you are not running high-volumes then it might be manageable.
Otherwise you will need a more elegant solution. Like leveraging the power of collections and the TABLE() clause to do the track_table lookup in a bulk operation. Bear in mind that I am away from my database at the moment, so I have not test-compiled this code.
CREATE OR REPLACE TYPE t_audit_row AS OBJECT (
p_table_name varchar2(30)
,p_column_name varchar2(30)
,p_id varchar2(50)
,p_old_val varchar2(2000)
,p_new_val varchar2(2000)
);
CREATE OR REPLACE TYPE t_audit_row_table AS TABLE OF t_audit_row;
CREATE OR REPLACE PROCEDURE f_log (p_audit_row_table t_audit_Row_table)
AS
begin
-- see how we can match the contents of the collection to the values
-- in the table all in one query. the insert is just my way of showing
-- how this can be done in one bulk operation. Alternately you could make
-- the select a cursor and loop through the rows to process them individually.
insert into my_audit_log (table_name, column_name, id, old_val, new_val)
select p_table_name
,p_column_name
,p_id
,p_old_val
,p_new_val
FROM track_TABLE TT
,table(p_audit_row_table) art
WHERE tt.TABLE_NAME = art.p_table_name
AND tt.column_name = art.p_column_name
AND tt.allow = 'Y';
end;
/
CREATE OR REPLACE TRIGGER trg_TRACK
AFTER INSERT OR UPDATE OR DELETE ON ABC
FOR EACH ROW
DECLARE
l_id varchar(50);
l_audit_table t_audit_row_table;
BEGIN
l_id := NVL(:old.id, :new.id);
-- send all of the values to f_log and have it decide whether to save them
l_audit_table := t_audit_row_table (
t_audit_row ('ABC','COL1',l_id, :old.col1, :new.col1)
,t_audit_row ('ABC','COL2',l_id, :old.col2, :new.col2)
,t_audit_row ('ABC','COL3',l_id, :old.col3, :new.col3)
,...
,t_audit_row ('ABC','COLn',l_id, :old.coln, :new.coln)
);
f_log(l_audit_table);
end;
/
No, you cannot access the OLD and NEW pseudo-variables dynamically. What you can do is use your track_table data in a script or procedure to generate static triggers that look like:
CREATE OR REPLACE TRIGGER trg_TRACK
AFTER INSERT OR UPDATE OR DELETE ON ABC
FOR EACH ROW
DECLARE
n_id varchar(50);
BEGIN
n_id := NVL(:old.id, :new.id);
f_log(n_id,'COL1',:old.col1,:new.col1);
f_log(n_id,'COL3',:old.col3,:new.col3);
...
END;
So if the data in the TRACK_TABLE table changes, you just have to re-generate the triggers.
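As a rough sketch of that generation step (assuming the track_table columns used above; the output lines would be spooled or pasted into the trigger body):
select 'f_log(n_id, ''' || upper(column_name) || ''', :old.' || lower(column_name)
|| ', :new.' || lower(column_name) || ');' as trigger_line
from track_table
where upper(table_name) = 'ABC'
and upper(allow) = 'Y';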
