PL/SQL - Split the loop to load the bad data - Oracle

declare
    cursor c_data is
        select * from test_product_u;
    error_row  varchar2(4000);
    v_errormsg varchar2(200);
begin
    for i in c_data
    loop
        begin
            insert into test_product_u_final (PRODUCT_NO, CREATED_DATE_RAW, DATE_FORMAT)
            values (i.PRODUCT_NO, i.CREATED_DATE_RAW, i.DATE_FORMAT);
            commit;
        exception
            when others then
                error_row  := i.PRODUCT_NO ||';'|| i.CREATED_DATE_RAW ||';'|| i.DATE_FORMAT;
                v_errormsg := SUBSTR(SQLERRM, 1, 64);
                insert into test_product_error_new (error_no, error_row_msg, errormsg_sql)
                values (ERROR_NO.NEXTVAL, error_row, v_errormsg);
        end;
    end loop;
end;
1 - The above code inserts all the rows, bad or good, into the error table. I want to split the code so that only the good data goes into the destination table and only the bad data goes into the error table. Any help here?
When I split the loop into two, the FOR loop record is no longer in scope to reference.
Sample data:
Product_no  CREATED_DATE_RAW  CREATED_DATE  PRICE
1           01-JAN-16         01-JAN-16     55
2           03-JAN-16         03-JAN-16     null

No need for PL/SQL or a loop here. You can use Oracle's error logging feature for this:
First create a table where the errors should be stored:
execute dbms_errlog.create_error_log('TEST_PRODUCT_U_FINAL', 'TEST_PRODUCT_ERRORS');
Then run the insert:
insert into test_product_u_final (PRODUCT_NO, CREATED_DATE_RAW, DATE_FORMAT)
select i.PRODUCT_NO, i.CREATED_DATE_RAW,i.DATE_FORMAT
from test_product_u i
log errors into test_product_errors
reject limit unlimited;
Documentation for dbms_errlog
Documentation for log errors into
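After the insert, each rejected row and the Oracle error that caused it can be inspected in the log table. A minimal sketch of such a query (the ORA_ERR_* columns are created automatically by dbms_errlog.create_error_log; the remaining column names assume the table definition above):
-- Inspect the rejected rows; ORA_ERR_NUMBER$ and ORA_ERR_MESG$ are
-- standard columns that dbms_errlog adds to every error log table.
select ora_err_number$, ora_err_mesg$,
       product_no, created_date_raw, date_format
from   test_product_errors;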

Related

How to get better performance from a stored procedure in Oracle

I wrote a stored procedure, but I don't think it performs well at all. How can I make it work better? Thanks.
Table A has 800k records and table B has 36k records. This is test data; there will be more records in the production environment.
Table B has an index defined on the customer_no column.
I ran it once and it took 22 minutes.
create or replace procedure SP_KVKK
is
    TYPE json_data_type IS RECORD
    (
        del_data CLOB
    );
    customer_no   number(10);
    r_del_data    C%ROWTYPE;
    l_deleted_cur SYS_REFCURSOR;
    l_deleted_rec json_data_type;
    l_sel_sql     VARCHAR2(500);
    cursor mbb_list is
        select customer_no from A;
begin
    open mbb_list;
    loop
        fetch mbb_list into customer_no;
        exit when mbb_list%notfound;
        l_sel_sql := 'SELECT JSON_OBJECT(* RETURNING CLOB) AS DEL_DATA
                      FROM B where customer_no=' || customer_no;
        open l_deleted_cur for l_sel_sql;
        loop
            fetch l_deleted_cur into l_deleted_rec;
            exit when l_deleted_cur%notfound;
            r_del_data.DELETED_DOCUMENT_JSON := l_deleted_rec.del_data;
            r_del_data.DELETE_DATE := SYSTIMESTAMP;
            insert into C
            values r_del_data;
        end loop;
        close l_deleted_cur;
    end loop;
    close mbb_list;
end;
The best thing you can do is handle this as a single SQL statement. Stop thinking in terms of row-by-row nested loops, which will always be slow, and use set-based SQL operations instead:
begin
    -- insert your two columns into table C using the data
    -- from table B returned as a json_object and systimestamp,
    -- where only records from table B that have a customer_no
    -- from table A will be selected
    insert into C (deleted_document_json, delete_date)
    select json_object(B.* returning clob), systimestamp
    from B
    inner join A using (customer_no);
    commit;
end;
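For very large volumes the same statement can sometimes be pushed further with a direct-path insert. This is a sketch, not part of the original answer; whether APPEND actually helps depends on the indexes, triggers, and logging settings on table C:
insert /*+ append */ into C (deleted_document_json, delete_date)
select json_object(B.* returning clob), systimestamp
from B
inner join A using (customer_no);
-- a direct-path insert must be committed before the same session
-- can query table C again (otherwise ORA-12838 is raised)
commit;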

How to handle exceptions in a FOR loop when inserting rows using a stored procedure? Oracle PL/SQL

I'm coding a complex PL/SQL block (complex for me, hahaha) to insert rows using information from a FOR loop cursor, passing parameters to a stored procedure that does the insert. The problem is that there are around 200 rows to be inserted, but when a single row fails to insert, all the inserted rows are lost and Oracle executes a ROLLBACK (I think so). So... how could I handle exceptions so that all the rows that can be inserted succeed, and any rows that fail are shown on screen? Thanks
FOR i IN c_mig_saldos LOOP
    IF i.tipo_comprobante = 'P' THEN -- debit note (positive)
        v_cmp_p.prn_codigo         := 'VIV';
        v_cmp_p.tcm_codigo         := 'NRA';
        v_cmp_p.cmp_fecha_emision  := TRUNC(SYSDATE);
        v_cmp_p.cmp_fecha_contable := TRUNC(SYSDATE);
        v_cmp_p.cmp_observacion    := 'GENERACION AUTOMATICA DE SALDOS';
        v_cmp_p.cli_codigo         := i.cli_codigo;
        v_tab_dco_p(1).cnc_codigo          := 'VIA';
        v_tab_dco_p(1).dco_precio_unitario := i.total_final;
        v_tab_dco_p(1).dco_cantidad        := 1;
        v_tab_dco_p(1).dco_importe         := i.total_final;
        -- Insert a new row using the stored procedure; when one iteration
        -- fails, no rows at all end up inserted in the table
        PKG_COMPROBANTES.PRC_INSERTAR_COMPROBANTE(v_cmp_p, v_tab_dco_p, v_tab_pgc_p, v_tab_apl_p, v_tab_mar_p);
        COMMIT;
    END IF;
END LOOP;
-- Wrap the procedure call in its own block so that one failed
-- iteration does not prevent the other rows from being inserted
begin
    PKG_COMPROBANTES.PRC_INSERTAR_COMPROBANTE(v_cmp_p, v_tab_dco_p, v_tab_pgc_p, v_tab_apl_p, v_tab_mar_p);
exception
    when others then -- this could be made more specific, but you didn't say what type of error you were getting
        -- Log to a table so you can export failed inserts later.
        -- Make sure the log table columns are large enough to store everything that can possibly be inserted here...
        ErrorLogProc(the, cols, you, want, to, see, and, SQLERRM);
end;
In ErrorLogProc() I'd recommend a couple of things. Here is a snippet of some things I do in my error logging proc that you may find helpful (it's just a few snippets, not intended to fully work, but you should get the idea)...
oname varchar2(100);
pname varchar2(100);
lnumb varchar2(100);
callr varchar2(100);
g1B   CHAR(2) := chr(13) || chr(10);
PRAGMA AUTONOMOUS_TRANSACTION; -- important!
begin
    owa_util.who_called_me(oname, pname, lnumb, callr);
    -- trim and format for the error log
    lv_errLogText := 'Package: '||pname||' // Version: '||version_in||' // Line Number: '||lnumb||' // Error: ';
    lv_string1    := substr(errStr_in, 1, 4000 - length(lv_errLogText)); -- substr, not mid, in Oracle
    lv_errLogText := lv_errLogText || lv_string1;
    lv_errLogText := lv_errLogText || g1B || 'Error Backtrace: ' || replace(dbms_utility.format_error_backtrace, 'ORA-', g1B || 'ORA-');
    insertIntoYourErrorLogTable(lv_errLogText, and, whatever, else, you, need);
    commit;
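For reference, a minimal self-contained version of such an autonomous-transaction logger could look like the following. This is a sketch; the error_log table and its columns are assumptions, not part of the original answer:
-- assumed log table:
--   create table error_log (logged_at timestamp, err_text varchar2(4000));
create or replace procedure log_error (p_err_text in varchar2)
is
    pragma autonomous_transaction; -- the commit below won't touch the caller's transaction
begin
    insert into error_log (logged_at, err_text)
    values (systimestamp, substr(p_err_text, 1, 4000));
    commit; -- an autonomous transaction must commit or roll back before returning
end log_error;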
To keep this simple, since there's not enough information to know the what and why of the question, this will output some text about the failures, as desired.
SQL> set serveroutput on
Then here's an anonymous PL/SQL block:
BEGIN
FOR i IN c_mig_saldos
LOOP
-- not relevant
BEGIN
PKG_COMPROBANTES.PRC_INSERTAR_COMPROBANTE(v_cmp_p, v_tab_dco_p, v_tab_pgc_p, v_tab_apl_p, v_tab_mar_p);
EXCEPTION
-- The goal of this is to ignore but output information about your failures
WHEN OTHERS THEN DBMS_OUTPUT.PUT_LINE('whatever you want about v_cmp_p, v_tab_dco_p, v_tab_pgc_p, v_tab_apl_p, v_tab_mar_p or SQLERRM/SQLCODE or the error stack - not enough info in the question');
END;
END LOOP;
END;
/
Note: I have removed the commit from the execution.
Then if you like what you see...
SQL> commit;
Ideally, if I knew more about why the insert failures were expected to occur and what I wanted to do about them, I would not insert them in the first place.
Agree with the comment that more information is needed, but a couple of things to consider:
Does this need to be done as a loop? If you can write your query as a SELECT statement, then you can do a simple insert without the need for PL/SQL, which would be simpler and likely quicker (set-based SQL vs row-by-row PL/SQL); a bulk alternative is sketched below.
You say a ROLLBACK is occurring, but you have a COMMIT inside your IF statement, so any records which make it into your IF statement and successfully through your insert procedure will be committed, i.e. they will not be rolled back. Consider whether some records you think are being rolled back are actually never making it into the IF statement at all.
Can you provide example data, or an example of the error you are receiving?
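If the insert performed inside PKG_COMPROBANTES could be issued directly as DML, FORALL ... SAVE EXCEPTIONS is Oracle's idiomatic way to keep loading past individual failures and report them afterwards. A sketch under that assumption; the table and collection names here are hypothetical, not from the question:
declare
    type t_rows is table of comprobantes%rowtype; -- hypothetical target table
    l_rows      t_rows;
    bulk_errors exception;
    pragma exception_init(bulk_errors, -24381); -- ORA-24381: error(s) in array DML
begin
    -- hypothetical staging table with the same columns as the target
    select * bulk collect into l_rows from comprobantes_stage;
    begin
        forall i in 1 .. l_rows.count save exceptions
            insert into comprobantes values l_rows(i);
    exception
        when bulk_errors then
            -- every failed row is reported; all the other rows stay inserted
            for j in 1 .. sql%bulk_exceptions.count loop
                dbms_output.put_line('row ' || sql%bulk_exceptions(j).error_index ||
                                     ' failed: ' || sqlerrm(-sql%bulk_exceptions(j).error_code));
            end loop;
    end;
end;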

Bulk inserting in Oracle PL/SQL

I have around 5 million records which need to be copied from a table in one schema to a table in another schema (in the same database). I have prepared a script, but it gives me the error below.
ORA-06502: PL/SQL: numeric or value error: Bulk bind: Error in define
Following is my script
DECLARE
    TYPE tA IS TABLE OF varchar2(10) INDEX BY PLS_INTEGER;
    TYPE tB IS TABLE OF SchemaA.TableA.band%TYPE INDEX BY PLS_INTEGER;
    TYPE tD IS TABLE OF SchemaA.TableA.start_date%TYPE INDEX BY PLS_INTEGER;
    TYPE tE IS TABLE OF SchemaA.TableA.end_date%TYPE INDEX BY PLS_INTEGER;
    rA tA;
    rB tB;
    rD tD;
    rE tE;
    f number := 0;
BEGIN
    SELECT col1||col2||col3 as main_col, band, effective_start_date as start_date, effective_end_date as end_date
    BULK COLLECT INTO rA, rB, rD, rE
    FROM schemab.tableb;
    FORALL i IN rA.FIRST..rE.LAST
        insert into SchemaA.TableA(main_col, BAND, user_type, START_DATE, END_DATE, roll_no)
        values(rA(i), rB(i), 'C', rD(i), rE(i), 71);
    f := f + 1;
    if (f = 10000) then
        commit;
    end if;
end;
Could you please help me find where the error lies?
Why not a simple
insert into SchemaA.TableA (main_col, BAND, user_type, START_DATE, END_DATE, roll_no)
SELECT col1||col2||col3 as main_col, band, 'C', effective_start_date, effective_end_date, 71
FROM schemab.tableb;
This
f:=f+1;
if (f=10000) then
commit;
end if;
does not make any sense. f becomes 1, and that's it; f=10000 will never be true, so you never COMMIT.
The following script worked for me; I was able to load around 5 million records in about 15 minutes.
ALTER SESSION ENABLE PARALLEL DML
/
DECLARE
    cursor c_p1 is
        SELECT col1||col2||col3 as main_col, band, effective_start_date as start_date, effective_end_date as end_date
        FROM schemab.tableb;
    TYPE TY_P1_FULL is table of c_p1%rowtype
        index by pls_integer;
    v_P1_FULL TY_P1_FULL;
    v_seq_num number;
BEGIN
    open c_p1;
    loop
        fetch c_p1 BULK COLLECT INTO v_P1_FULL LIMIT 10000;
        exit when v_P1_FULL.count = 0;
        FOR i IN 1..v_P1_FULL.COUNT loop
            INSERT /*+ APPEND */ INTO schemaA.tableA VALUES (v_P1_FULL(i));
        end loop;
        commit;
    end loop;
    close c_p1;
    dbms_output.put_line('Load completed');
end;
/
-- Disable parallel mode for this session
ALTER SESSION DISABLE PARALLEL DML
/
ORA-06502: PL/SQL: numeric or value error: Bulk bind: Error in define
You get that error because you have a literal in the VALUES clause of the INSERT. The FORALL expects everything to be bound to an array.
Your program has a massive problem, literally. You have no LIMIT on the BULK COLLECT clause, so that's going to try to load all five million records from TableB into your collections. That will blow your session's memory limit.
The point of using BULK COLLECT and FORALL is to bite off chunks of a bigger data set and process it in batches. For that you need a loop. The loop has no FOR condition: instead test whether the fetch returned anything and exit when the array has zero records.
DECLARE
    TYPE recA IS RECORD (
        main_col   SchemaA.TableA.main_col%TYPE
      , band       SchemaA.TableA.band%TYPE
      , user_type  SchemaA.TableA.user_type%TYPE
      , start_date date
      , end_date   date
      , roll_no    number );
    TYPE recsA is table of recA;
    nt_a recsA;
    f number := 0;
    CURSOR cur_b is
        SELECT col1||col2||col3 as main_col,
               band,
               'C' as user_type,
               effective_start_date as start_date,
               effective_end_date as end_date,
               71 as roll_no
        FROM schemab.tableb;
BEGIN
    open cur_b;
    loop
        fetch cur_b bulk collect into nt_a limit 1000;
        exit when nt_a.count() = 0;
        FORALL i IN nt_a.first .. nt_a.last
            insert into SchemaA.TableA (main_col, BAND, user_type, START_DATE, END_DATE, roll_no)
            values nt_a(i);
        f := f + sql%rowcount;
        if (f >= 10000) then
            commit;
            f := 0;
        end if;
    end loop;
    commit;
    close cur_b;
end;
Please note that issuing commits inside a loop is contraindicated. You lay yourself open to runtime errors such as ORA-01002 and ORA-01555. If your program does crash half-way through you will have great difficulty in resuming it without problems. By all means persist if you have problems with UNDO tablespace, but the correct answer is to get the DBA to enlarge the UNDO tablespace not weaken your code.
"i am using bulk insert because it gives better performance"
It is true that BULK COLLECT and FORALL ... INSERT is more performative than a CURSOR FOR loop with row-by-row single inserts. It is not more efficient than a pure SQL INSERT INTO ... SELECT. The value of the construct is that it allows us to manipulate the contents of the array before inserting it. This is handle if we have complex business rules which can only be applied programmatically.
Please try after changing the first two lines of your code as below:
DECLARE
TYPE tA IS TABLE OF SchemaA.TableA.main_col%TYPE INDEX BY PLS_INTEGER;
...
...
This may be because of a data type/length mismatch. In the declaration section you missed declaring this one to inherit its type from the table.
Also, as mentioned, the f logic for the commit will not do the magic for you. You should instead use LIMIT with BULK COLLECT.

ORACLE PL/SQL: The lost last chunk

Please help me to solve this ORACLE PL/SQL problem.
I have the following code:
BEGIN
    DECLARE
        P_COMMIT_STEP NUMBER := 10000; -- Commit every 10000 records copied
        V_QUERY VARCHAR2(4000) := NULL;
        MY_CURSOR SYS_REFCURSOR;
        TYPE FETCH_ARRAY IS TABLE OF MY_TABLE_BACKUP%ROWTYPE;
        S_ARRAY FETCH_ARRAY;
    BEGIN
        V_QUERY := 'SELECT * FROM MY_TABLE_BACKUP';
        OPEN MY_CURSOR FOR V_QUERY;
        LOOP
            FETCH MY_CURSOR
                BULK COLLECT INTO S_ARRAY
                LIMIT P_COMMIT_STEP;
            FORALL I IN 1 .. S_ARRAY.COUNT
                INSERT INTO MY_TABLE_BIS /*+ APPEND */
                VALUES S_ARRAY (I);
            COMMIT;
            EXIT WHEN MY_CURSOR%NOTFOUND;
        END LOOP;
        CLOSE MY_CURSOR;
        COMMIT;
    END;
END;
Since the commit step is 10000, the copy works for a multiple of 10000 record.
So, if the original table has 1000010 records, only 1000000 records will be copied.
Where is the error?
The code seems correct, in my opinion.
Thank you very much for considering my request.
As noted in this article, you shouldn't rely on %NOTFOUND with bulk collect and forall. Check how many rows were fetched:
LOOP
    FETCH MY_CURSOR
        BULK COLLECT INTO S_ARRAY
        LIMIT P_COMMIT_STEP;
    FORALL I IN 1 .. S_ARRAY.COUNT
        INSERT INTO MY_TABLE_BIS /*+ APPEND */
        VALUES S_ARRAY (I);
    COMMIT;
    EXIT WHEN S_ARRAY.COUNT < P_COMMIT_STEP;
END LOOP;
Your first hundred iterations will fetch 10000 rows each, so the count will equal your limit for all of those, and the loop will continue. The next one will get only 10 rows, so it will exit after the FORALL.
It should still work with the %NOTFOUND check at the end of the loop - the article is really talking about it being an issue if you use it to exit before processing the partial batch - and the documentation shows that pattern; but in some circumstances it seems not to. Having said that, I can't reproduce the issue from your code in 11.2.0.3.
Incidentally, committing inside a loop is generally a bad idea unless you've made the block restartable.
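For illustration, one common way to make such a block restartable is to drive the copy from a persistent set of keys and delete them as each batch is committed. A minimal sketch, assuming the source table has a key column ID and a helper table PENDING_IDS holding the keys still to copy (both names are hypothetical):
declare
    type t_ids is table of my_table_backup.id%type; -- assumes an ID key column
    l_ids t_ids;
begin
    loop
        -- take the next batch of keys still to be copied
        select id bulk collect into l_ids
        from   pending_ids
        where  rownum <= 10000;
        exit when l_ids.count = 0;
        forall i in 1 .. l_ids.count
            insert into my_table_bis
            select * from my_table_backup where id = l_ids(i);
        forall i in 1 .. l_ids.count
            delete from pending_ids where id = l_ids(i);
        -- commit per batch: if the session dies, rerunning the block
        -- simply resumes from the keys still present in pending_ids
        commit;
    end loop;
end;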
Hi, a small modification to your FORALL loop:
FORALL I IN S_ARRAY.FIRST .. S_ARRAY.LAST
    INSERT INTO MY_TABLE_BIS /*+ APPEND */
    VALUES S_ARRAY (I);
I think it will work for you.

Reasonable SELECT ... INTO Oracle solution for case of multiple OR no rows

I just want to SELECT values into variables from inside a procedure.
SELECT blah1,blah2 INTO var1_,var2_
FROM ...
Sometimes a large, complex query will return no rows; sometimes it will return more than one -- both cases lead to exceptions. I would love to replace the exception behavior with implicit behavior similar to:
No rows = no value change, Multiple rows = use last
I can constrain the result set easily enough for the "multiple rows" case but "no rows" is much more difficult for situations where you can't use an aggregate function in the SELECT.
Are there any special workarounds or suggestions? I'm looking to avoid significantly rewriting queries or executing them twice to get a row count before the SELECT INTO.
What's wrong with using an exception block?
create or replace
procedure p(v_job VARCHAR2) IS
    v_ename VARCHAR2(255);
begin
    select ename into v_ename
    from (
        select ename
        from scott.emp
        where job = v_job
        order by ename desc ) -- order by the column, not the (null) local variable
    where rownum = 1;
    DBMS_OUTPUT.PUT_LINE('Found Rows Logic Here -> Found ' || v_ename);
EXCEPTION
    WHEN NO_DATA_FOUND THEN
        DBMS_OUTPUT.PUT_LINE('No Rows found logic here');
end;
SQL> begin
  2    p('FOO');
  3    p('CLERK');
  4  end;
  5  /
No Rows found logic here
Found Rows Logic Here -> Found SMITH
PL/SQL procedure successfully completed.
SQL>
You could use a FOR loop. A FOR loop would do nothing when no rows are returned, and its body would be applied to every row returned if there were multiples. You could adjust your SELECT so that it only returns the last row.
begin
    for ARow in (select *
                 from tableA ta
                 where ta.value = ???) loop
        -- do something to ARow
    end loop;
end;
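Applied to the original SELECT ... INTO pattern, the loop gives exactly the asked-for semantics: if no rows come back the variables keep their old values, and with multiple rows the last one fetched wins. A sketch; blah1, blah2, var1_, and var2_ come from the question, while the table and WHERE clause stand in for the real query:
begin
    -- var1_/var2_ keep their previous values when the query returns no rows;
    -- when it returns several rows, the last row fetched is the one kept
    for r in (select blah1, blah2 from some_table where some_condition = 'Y') loop
        var1_ := r.blah1;
        var2_ := r.blah2;
    end loop;
end;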
