Procedure taking too much time to update the database - Oracle

CREATE OR REPLACE PROCEDURE UPDATE_CRDT_JV IS
BEGIN
UPDATE GL_DISTRIBUTION
SET GL_DATE = (SELECT ADJ_DATE FROM ADJUSTMENTS WHERE
ADJ_NUMBER = TO_NUMBER(TR_NUMBER))
WHERE TR_TYPE = 'ADJST';
UPDATE GL_DISTRIBUTION
SET GL_DATE = (SELECT PARTY_ADJ_DATE FROM PARTY_ADJUSTMENT
WHERE PARTY_ADJ_NUMBER = TO_NUMBER(TR_NUMBER))
WHERE TR_TYPE = 'PRTAJ';
UPDATE GL_DISTRIBUTION
SET GL_DATE = (SELECT VEN_PAY_VOU_DATE FROM PAYMENTS_TO_VENDORS WHERE
VEN_PAY_VOU_NUMBER = TO_NUMBER(TR_NUMBER))
WHERE TR_TYPE = 'CRPAY';
UPDATE GL_DISTRIBUTION
SET GL_DATE = (SELECT CHEQUE_DATE FROM SYS_PAYMENTS_HEADER WHERE
REF_NUMBER = TO_NUMBER(TR_NUMBER))
WHERE TR_TYPE = 'SYSPY';
UPDATE GL_DISTRIBUTION
SET GL_DATE = (SELECT POSTED_DATE FROM PURCHASE_INVOICE_HEADER WHERE
POSTED_DATE IS NOT NULL AND PIV_NUMBER = TO_NUMBER(TR_NUMBER))
WHERE TR_TYPE = 'CRINV';
UPDATE GL_DISTRIBUTION
SET GL_DATE = (SELECT DOC_DATE FROM REVERSE_HISTORY
WHERE TR_NUMBER = TO_NUMBER(GL_DISTRIBUTION.TR_NUMBER)
AND DOC_DATE IS NOT NULL AND TR_TYPE IN ('SYSPY','CRPAY'))
WHERE TR_TYPE IN ('RSYSPY','RCRPAY');
commit;
UPDATE_INV_DET;
END;
This procedure takes more than 15 minutes to update the database.
I am currently running it with the following command in SQL*Plus:
EXECUTE UPDATE_CRDT_JV;
Please help if anybody knows a solution to this problem.

I agree with the advice already given to start by figuring out where your code is spending its time. However, your case is quite common and I think I recognize the situation: you have coded your update statements in such a way that the other tables are accessed once per row of GL_DISTRIBUTION of the matching type (a correlated subquery per row).
The solution is to rewrite your update statements and I see two possibilities to do that efficiently:
1) Update a select statement (UPDATE (SELECT ...) SET ... WHERE ...). This requires some unique key constraints to be in place, or the undocumented BYPASS_UJVC hint; see the sketch after this list.
2) Use a MERGE statement.
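As a minimal sketch of option 1, assuming ADJ_NUMBER is unique in ADJUSTMENTS (without such a constraint you would need the undocumented BYPASS_UJVC hint), the first UPDATE could become:
update ( select d.gl_date, a.adj_date
         from   gl_distribution d
                join adjustments a
                on   a.adj_number = to_number(d.tr_number)
         where  d.tr_type = 'ADJST'
       )
set gl_date = adj_date;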
Below you see an example of how to rewrite your code using a single merge statement. I'm expecting big performance gains because the access of the other tables is now done once using a single outer join for each of the tables, instead of for every row in the GL_DISTRIBUTION table.
The example. Test data:
SQL> create table gl_distribution (tr_number, tr_type, gl_date)
2 as
3 select '1', 'ADJST', date '2011-01-01' from dual union all
4 select '2', 'ADJST', null from dual union all
5 select '3', 'PRTAJ', date '2011-01-01' from dual union all
6 select '4', 'SYSPY', date '2011-01-01' from dual union all
7 select '5', 'RCRPAY', date '2011-01-01' from dual
8 /
Table created.
SQL> create table adjustments (adj_number, adj_date)
2 as
3 select 1, sysdate from dual union all
4 select 2, sysdate from dual
5 /
Table created.
SQL> create table party_adjustment (party_adj_number, party_adj_date)
2 as
3 select 3, sysdate from dual union all
4 select 33, sysdate from dual
5 /
Table created.
SQL> create table payments_to_vendors (ven_pay_vou_number, ven_pay_vou_date)
2 as
3 select 34, sysdate from dual
4 /
Table created.
SQL> create table sys_payments_header (ref_number,cheque_date)
2 as
3 select 4, sysdate from dual
4 /
Table created.
SQL> create table purchase_invoice_header (piv_number,posted_date)
2 as
3 select 35, sysdate from dual
4 /
Table created.
SQL> create table reverse_history (tr_number,doc_date,tr_type)
2 as
3 select 5, sysdate, 'CRPAY' from dual
4 /
Table created.
SQL>
Your procedure (for comparison):
SQL> CREATE OR REPLACE PROCEDURE UPDATE_CRDT_JV
2 IS
3 BEGIN
4 UPDATE GL_DISTRIBUTION
5 SET GL_DATE = (SELECT ADJ_DATE FROM ADJUSTMENTS WHERE ADJ_NUMBER = TO_NUMBER(TR_NUMBER))
6 WHERE TR_TYPE = 'ADJST'
7 ;
8 UPDATE GL_DISTRIBUTION
9 SET GL_DATE = (SELECT PARTY_ADJ_DATE FROM PARTY_ADJUSTMENT
10 WHERE PARTY_ADJ_NUMBER = TO_NUMBER(TR_NUMBER))
11 WHERE TR_TYPE = 'PRTAJ'
12 ;
13 UPDATE GL_DISTRIBUTION
14 SET GL_DATE = (SELECT VEN_PAY_VOU_DATE FROM PAYMENTS_TO_VENDORS
15 WHERE VEN_PAY_VOU_NUMBER = TO_NUMBER(TR_NUMBER))
16 WHERE TR_TYPE = 'CRPAY'
17 ;
18 UPDATE GL_DISTRIBUTION
19 SET GL_DATE = (SELECT CHEQUE_DATE FROM SYS_PAYMENTS_HEADER WHERE
20 REF_NUMBER = TO_NUMBER(TR_NUMBER))
21 WHERE TR_TYPE = 'SYSPY'
22 ;
23 UPDATE GL_DISTRIBUTION
24 SET GL_DATE = (SELECT POSTED_DATE FROM PURCHASE_INVOICE_HEADER WHERE
25 POSTED_DATE IS NOT NULL AND PIV_NUMBER = TO_NUMBER(TR_NUMBER))
26 WHERE TR_TYPE = 'CRINV'
27 ;
28 UPDATE GL_dISTRIBUTION
29 SET GL_dATE = (SELECT DOC_dATE FROM REVERSE_HISTORY
30 WHERE TR_NUMBER = TO_NUMBER(GL_DISTRIBUTION.TR_NUMBER)
31 AND DOC_dATE IS NOT NULL AND TR_TYPE IN ('SYSPY','CRPAY'))
32 WHERE TR_TYPE IN ('RSYSPY','RCRPAY')
33 ;
34 --commit;
35 --UPDATE_INV_DET;
36 END;
37 /
Procedure created.
SQL>
My suggestion:
SQL> create procedure new_update_crdt_jv
2 as
3 begin
4 merge into gl_distribution d
5 using ( select to_number(d.tr_number) tr_number
6 , coalesce
7 ( a.adj_date
8 , pa.party_adj_date
9 , pv.ven_pay_vou_date
10 , sph.cheque_date
11 , pih.posted_date
12 , rh.doc_date
13 ) new_date
14 from gl_distribution d
15 left outer join adjustments a
16 on to_number(d.tr_number) = a.adj_number
17 and d.tr_type = 'ADJST'
18 left outer join party_adjustment pa
19 on to_number(d.tr_number) = pa.party_adj_number
20 and d.tr_type = 'PRTAJ'
21 left outer join payments_to_vendors pv
22 on to_number(d.tr_number) = pv.ven_pay_vou_number
23 and d.tr_type = 'CRPAY'
24 left outer join sys_payments_header sph
25 on to_number(d.tr_number) = sph.ref_number
26 and d.tr_type = 'SYSPY'
27 left outer join purchase_invoice_header pih
28 on to_number(d.tr_number) = pih.piv_number
29 and d.tr_type = 'CRINV'
30 left outer join reverse_history rh
31 on to_number(d.tr_number) = rh.tr_number
32 and rh.tr_type in ('SYSPY','CRPAY')
33 and d.tr_type in ('RSYSPY','RCRPAY')
34 ) n
35 on ( d.tr_number = n.tr_number)
36 when matched then
37 update set d.gl_date = n.new_date
38 ;
39 end new_update_crdt_jv;
40 /
Procedure created.
SQL>
Let's run your procedure:
SQL> select * from gl_distribution
2 /
T TR_TYP GL_DATE
- ------ -------------------
1 ADJST 01-01-2011 00:00:00
2 ADJST
3 PRTAJ 01-01-2011 00:00:00
4 SYSPY 01-01-2011 00:00:00
5 RCRPAY 01-01-2011 00:00:00
5 rows selected.
SQL> exec update_crdt_jv
PL/SQL procedure successfully completed.
SQL> select * from gl_distribution
2 /
T TR_TYP GL_DATE
- ------ -------------------
1 ADJST 31-03-2011 14:41:19
2 ADJST 31-03-2011 14:41:19
3 PRTAJ 31-03-2011 14:41:19
4 SYSPY 31-03-2011 14:41:19
5 RCRPAY 31-03-2011 14:41:19
5 rows selected.
SQL> rollback
2 /
Rollback complete.
SQL>
My procedure returns the same results:
SQL> exec new_update_crdt_jv
PL/SQL procedure successfully completed.
SQL> select * from gl_distribution
2 /
T TR_TYP GL_DATE
- ------ -------------------
1 ADJST 31-03-2011 14:41:19
2 ADJST 31-03-2011 14:41:19
3 PRTAJ 31-03-2011 14:41:19
4 SYSPY 31-03-2011 14:41:19
5 RCRPAY 31-03-2011 14:41:19
5 rows selected.
Hope this helps.
Regards,
Rob.

The solution for this problem is:
1) Figure out where your code is spending its time (i.e. profile it)
2) Figure out how to speed up the slowest part
3) Repeat until performance is acceptable
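For the profiling step, a minimal approach (assuming you have the privileges to call DBMS_MONITOR and can read the server's trace directory) is to enable SQL trace around the call and look at the tkprof report, which lists elapsed time per statement:
exec dbms_monitor.session_trace_enable(waits => true, binds => false);
exec update_crdt_jv;
exec dbms_monitor.session_trace_disable;
-- then, on the database server:
-- tkprof <your_trace_file>.trc report.txt sort=exeela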
If you prefer guesswork, then you might want to try any of the following:
Combine multiple UPDATEs into a single UPDATE statement, e.g. using a CASE expression as shown by @Aklopper.
Use MERGE instead of UPDATE to avoid correlated subqueries. Might be better, might not.
Look into the UPDATE_INV_DET procedure which is called at the end of the procedure shown.

I come from a SQL Server environment, but wouldn't an UPDATE with a CASE expression like this also help you? An example of the pattern applied to your table (I don't know whether Oracle has an exactly equivalent method, but something along these lines):
UPDATE GL_DISTRIBUTION
SET GL_DATE =
  CASE
    WHEN TR_TYPE = 'ADJST' THEN (SELECT ADJ_DATE FROM ADJUSTMENTS
                                 WHERE ADJ_NUMBER = TO_NUMBER(TR_NUMBER))
    WHEN TR_TYPE = 'PRTAJ' THEN (SELECT PARTY_ADJ_DATE FROM PARTY_ADJUSTMENT
                                 WHERE PARTY_ADJ_NUMBER = TO_NUMBER(TR_NUMBER))
    ELSE GL_DATE
  END
The ELSE branch keeps GL_DATE unchanged for rows of other TR_TYPE values.

Related

Generate Random String in PL/SQL (oracle 12c)

I'm trying to generate a random string using PL/SQL from only 2 fixed words. Is this possible?
Is this what you're looking for?
SQL> with
2 -- two fixed words
3 test as
4 (select 'fixed words' col from dual),
5 -- split them to rows
6 inter as
7 (select level lvl, regexp_substr(col, '.', 1, level) let
8 from test
9 connect by level <= length(col)
10 )
11 -- aggregate them back, randomly
12 select listagg(let, '') within group (order by dbms_random.value(1, max_lvl)) result
13 from inter
14 join (select max(lvl) max_lvl from inter) on 1 = 1;
RESULT
--------------------------------------------------------------------------------
reiosdwxf d
SQL> /
RESULT
--------------------------------------------------------------------------------
fe ixoddrws
SQL> /
RESULT
--------------------------------------------------------------------------------
wdxeorsdfi
SQL>

How can I do this faster than cursor?

I have 2 tables:
the first is: debt
the second is: payment
I want to compare these tables and produce one result table.
The tables contain:
table debt:
customerid  order  amount
1           1      30
1           2      50
1           3      70
table payment:
customerid  recorddate  amount
1           20080101    10
1           20080102    20
1           20080103    180
And the result table I want is:
customerid  recorddate  amount  order
1           20080101    10      1
1           20080102    20      1
1           20080103    50      2
1           20080103    70      2
1           20080103    60      -
I produce this result with 2 cursors, and with 1 million records it takes too long. How can I make this faster?
Thanks in advance
EDIT
I did it with this code:
DECLARE
V_RECORDDATE DATE;
V_CUSTOMERID VARCHAR2(500 CHAR);
V_PAYMENT NUMBER;
CURSOR TAH_HES IS
SELECT /*+ PARALLEL(16) */ * FROM
payment_table
WHERE customerid='1'
ORDER BY 3,1;
BEGIN
EXECUTE IMMEDIATE 'alter session force parallel query parallel 16';
EXECUTE IMMEDIATE 'alter session force parallel dml parallel 16';
OPEN TAH_HES;
LOOP
FETCH TAH_HES INTO V_RECORDDATE, V_CUSTOMERID,V_PAYMENT;
EXIT WHEN TAH_HES%NOTFOUND;
FOR CUR_X IN (
SELECT /*+ PARALLEL(16) */
COMPENENT,
AMOUNT
FROM
DEBT_TABLE
WHERE 1=1
AND CUSTOMERID=V_CUSTOMERID
ORDER BY "ORDER"
)
LOOP
IF(CUR_X.AMOUNT<=V_PAYMENT)
THEN
INSERT INTO
RESULT_TABLE
SELECT /*+ PARALLEL(16) */
V_CUSTOMERID,V_RECORDDATE,CUR_X.COMPENENT,CUR_X.AMOUNT
FROM
DUAL;
COMMIT;
DELETE FROM
DEBT_TABLE
WHERE CUSTOMERID=V_CUSTOMERID
AND COMPENENT=CUR_X.COMPENENT
AND AMOUNT=CUR_X.AMOUNT;
COMMIT;
UPDATE
PAYMENT_TABLE
SET PAYMENT=PAYMENT-CUR_X.AMOUNT
WHERE CUSTOMERID=V_CUSTOMERID
AND HISLEMTARIH=V_RECORDDATE;
COMMIT;
ELSE
INSERT INTO
RESULT_TABLE
SELECT /*+ PARALLEL(16) */
V_CUSTOMERID,V_RECORDDATE,CUR_X.COMPENENT,V_PAYMENT
FROM
DUAL;
COMMIT;
UPDATE
DEBT_TABLE
SET AMOUNT=AMOUNT-V_PAYMENT
WHERE CUSTOMERID=V_CUSTOMERID
AND COMPENENT=CUR_X.COMPENENT ;
COMMIT;
DELETE FROM
PAYMENT_TABLE
WHERE CUSTOMERID=V_CUSTOMERID
AND PAYMENT=V_PAYMENT
AND RECORDDATE=V_RECORDDATE;
COMMIT;
EXIT;
END IF;
END LOOP;
END LOOP;
END;
INSERT INTO
RESULT_TABLE
SELECT /*+ PARALLEL(16) */
CUSTOMERID,
RECORDDATE,
'-',
PAYMENT
FROM
PAYMENT_TABLE;
COMMIT;
If payments are used to clear off the debt then your result is a bit unexpected.
More logically
payments 10 and 20 will clear off order 1: 30 = 10 + 20
payment 180 will clear off order 2: 50 (180 - 50 = 130 remaining)
payment 180 will clear off order 3: 70 (130 - 70 = 60 remaining)
Or in pure SQL
with debt(customerid, ord, amount) as
(
select 1, 1, 30 from dual
union all select 1, 2, 50 from dual
union all select 1, 3, 70 from dual
),
payment(customerid, recorddate, amount) as
(
select 1, 20080101, 10 from dual
union all select 1, 20080102, 20 from dual
union all select 1, 20080103, 180 from dual
),
allocation as
(
select *
from
(select d.customerid, d.ord, d.amount, p.recorddate, p.amount as pay_amount
from debt d
join payment p on d.customerid = p.customerid)
model ignore nav
partition by (customerid)
dimension by (recorddate, ord)
measures(amount, pay_amount, 0 allocated)
rules
(
allocated[any, any] order by ord, recorddate =
least(pay_amount[cv(recorddate), cv(ord)] -
sum(allocated)[cv(recorddate), ord <= cv(ord)]
,amount[cv(recorddate), cv(ord)] -
sum(allocated)[recorddate <= cv(recorddate), cv(ord)])
)
)
select a.*, pay_amount -
sum(allocated) over (partition by recorddate order by ord) remaining
from allocation a
where allocated > 0
order by ord, recorddate;
CUSTOMERID RECORDDATE ORD AMOUNT PAY_AMOUNT ALLOCATED REMAINING
---------- ---------- ---------- ---------- ---------- ---------- ----------
1 20080101 1 30 10 10 0
1 20080102 1 30 20 20 0
1 20080103 2 50 180 50 130
1 20080103 3 70 180 70 60
A PL/SQL solution would be more efficient for this task (though not your implementation).
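To give an idea of what such a PL/SQL version could look like, here is a sketch. The table and column layouts are assumed from the question and the answer above: debt(customerid, ord, amount), payment(customerid, recorddate, amount), and result_table with columns in the order customerid, recorddate, ord, amount; a single customer, with recorddate kept numeric to match the sample data. Both tables are read once with BULK COLLECT, the allocation runs in memory with two pointers, and the result is written with one FORALL and a single commit, instead of row-by-row DML with a commit after every statement:
declare
  cursor c_debt is
    select customerid, ord, amount from debt order by customerid, ord;
  cursor c_pay is
    select customerid, recorddate, amount from payment order by customerid, recorddate;
  type t_debts is table of c_debt%rowtype;
  type t_pays  is table of c_pay%rowtype;
  type t_res_rec is record
    (customerid number, recorddate number, ord varchar2(10), amount number);
  type t_res_tab is table of t_res_rec index by pls_integer;
  l_debts     t_debts;
  l_pays      t_pays;
  l_res       t_res_tab;
  i           pls_integer := 1;  -- current debt row
  j           pls_integer := 1;  -- current payment row
  k           pls_integer := 0;  -- result rows produced
  l_debt_left number;
  l_pay_left  number;
  l_alloc     number;
begin
  open c_debt; fetch c_debt bulk collect into l_debts; close c_debt;
  open c_pay;  fetch c_pay  bulk collect into l_pays;  close c_pay;
  if l_debts.count > 0 then l_debt_left := l_debts(1).amount; end if;
  if l_pays.count  > 0 then l_pay_left  := l_pays(1).amount;  end if;
  -- allocate payments to debts in order, min(remaining debt, remaining payment) at a time
  while i <= l_debts.count and j <= l_pays.count loop
    l_alloc := least(l_debt_left, l_pay_left);
    k := k + 1;
    l_res(k).customerid := l_pays(j).customerid;
    l_res(k).recorddate := l_pays(j).recorddate;
    l_res(k).ord        := to_char(l_debts(i).ord);
    l_res(k).amount     := l_alloc;
    l_debt_left := l_debt_left - l_alloc;
    l_pay_left  := l_pay_left  - l_alloc;
    if l_debt_left = 0 then  -- debt cleared, move to the next one
      i := i + 1;
      if i <= l_debts.count then l_debt_left := l_debts(i).amount; end if;
    end if;
    if l_pay_left = 0 then   -- payment used up, move to the next one
      j := j + 1;
      if j <= l_pays.count then l_pay_left := l_pays(j).amount; end if;
    end if;
  end loop;
  -- money left over after all debts are cleared gets ord '-'
  -- (any further untouched payment rows would be handled the same way; omitted here)
  if j <= l_pays.count and l_pay_left > 0 then
    k := k + 1;
    l_res(k).customerid := l_pays(j).customerid;
    l_res(k).recorddate := l_pays(j).recorddate;
    l_res(k).ord        := '-';
    l_res(k).amount     := l_pay_left;
  end if;
  forall x in 1 .. l_res.count
    insert into result_table values l_res(x);
  commit;  -- one commit at the end, not one per row
end;
/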

What is the Best way to Perform Bulk insert in oracle ?

By this I mean inserting millions of records into tables. I know how to insert data using loops, but for millions of rows that won't be a good approach.
I have two tables
I have two tables
CREATE TABLE test1
(
col1 NUMBER,
valu VARCHAR2(30),
created_Date DATE,
CONSTRAINT pk_test1 PRIMARY KEY (col1)
)
/
CREATE TABLE test2
(
col2 NUMBER,
fk_col1 NUMBER,
valu VARCHAR2(30),
modified_Date DATE,
CONSTRAINT pk_test2 PRIMARY KEY (col2),
FOREIGN KEY (fk_col1) REFERENCES test1(col1)
)
/
Please suggest a way to insert up to 1 million dummy records without loops.
As a fairly simplistic approach, which may be enough for you based on your comments, you can generate dummy data using a hierarchical query. Here I'm using bind variables to control how many are created, and to make some of the logic slightly clearer, but you could use literals instead.
First, parent rows:
var parent_rows number;
var avg_children_per_parent number;
exec :parent_rows := 5;
exec :avg_children_per_parent := 3;
-- create dummy parent rows
insert into test1 (col1, valu, created_date)
select level,
dbms_random.string('a', dbms_random.value(1, 30)),
trunc(sysdate) - dbms_random.value(1, 365)
from dual
connect by level <= :parent_rows;
which might generate rows like:
COL1 VALU CREATED_DA
---------- ------------------------------ ----------
1 rYzJBVI 2016-11-14
2 KmSWXfZJ 2017-01-20
3 dFSTvVsYrCqVm 2016-07-19
4 iaHNv 2016-11-08
5 AvAxDiWepPeONGNQYA 2017-01-20
Then child rows, with a random fk_col1 in the range generated for the parents:
-- create dummy child rows
insert into test2 (col2, fk_col1, valu, modified_date)
select level,
round(dbms_random.value(1, :parent_rows)),
dbms_random.string('a', dbms_random.value(1, 30)),
trunc(sysdate) - dbms_random.value(1, 365)
from dual
connect by level <= :parent_rows * :avg_children_per_parent;
which might generate:
select * from test2;
COL2 FK_COL1 VALU MODIFIED_D
---------- ---------- ------------------------------ ----------
1 2 AqRUtekaopFQdCWBSA 2016-06-30
2 4 QEczvejfTrwFw 2016-09-23
3 4 heWMjFshkPZNyNWVQG 2017-02-19
4 4 EYybXtlaFHkAYeknhCBTBMusGAkx 2016-03-18
5 4 ZNdJBQxKKARlnExluZWkHMgoKY 2016-06-21
6 3 meASktCpcuyi 2016-10-01
7 4 FKgmf 2016-09-13
8 3 JZhk 2016-06-01
9 2 VCcKdlLnchrjctJrMXNb 2016-05-01
10 5 ddL 2016-11-27
11 4 wbX 2016-04-20
12 1 bTfa 2016-06-11
13 4 QP 2016-08-25
14 3 RgmIahPL 2016-03-04
15 2 vhinLUmwLwZjczYdrPbQvJxU 2016-12-05
where the number of children varies for each parent:
select fk_col1, count(*) from test2 group by fk_col1 order by fk_col1;
FK_COL1 COUNT(*)
---------- ----------
1 1
2 3
3 3
4 7
5 1
To insert a million rows instead, just change the bind variables.
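For example (figures chosen purely for illustration), 10,000 parents with on average 100 children each gives a million child rows:
exec :parent_rows := 10000;
exec :avg_children_per_parent := 100;
before re-running the two inserts above.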
If you needed more of a relationship between the children and parents, e.g. so the modified date is always after the created date, you can modify the query; for example:
insert into test2 (col2, fk_col1, valu, modified_date)
select *
from (
select level,
round(dbms_random.value(1, :parent_rows)) as fk_col1,
dbms_random.string('a', dbms_random.value(1, 30)),
trunc(sysdate) - dbms_random.value(1, 365) as modified_date
from dual
connect by level <= :parent_rows * :avg_children_per_parent
) t2
where not exists (
select null from test1 t1
where t1.col1 = t2.fk_col1 and t1.created_date > t2.modified_date
);
You may also want non-midnight times (I set everything to midnight via the trunc() call, based on the column name being 'date' not 'datetime'), or some column values null; so this might just be a starting point for you.
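For instance, a variant of the first insert with both tweaks (the 10% null rate is an arbitrary choice for illustration):
insert into test1 (col1, valu, created_date)
select level,
       case when dbms_random.value < 0.1 then null
            else dbms_random.string('a', dbms_random.value(1, 30))
       end,
       sysdate - dbms_random.value(1, 365)   -- no trunc(): keeps a time component
from dual
connect by level <= :parent_rows;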

split records into buckets based on a sum of counts

I have a table that looks like the one below. I need to find a way to pick out phone numbers based on a sum of counts (the number will always be different, but let's use 130 for this example).
So one of the solutions would be rows 1 through 5 plus 11 (if you add up the CountOfPeople values from those rows you get 130), or rows 1-4, 6, 7, 9, 11 and 12. It doesn't matter which phone numbers are picked, as long as the total is 130.
Sometimes you might not be able to get exactly 130, so "as close as possible but not exceeding" would be the rule.
Is there a way to do this?
AutoID  Phone Number  Count Of People
1       5565787       57
2       2342343       30
3       2654456       17
4       3868556       12
5       9856756       12
6       9756456       4
7       4346365       4
8       2376743       3
9       9756343       3
10      2524349       3
11      2029393       2
12      9285656       1
I'm not sure this problem can be solved in pure SQL, but you can use table functions. Here is a small example for your problem.
First of all, we need to create a table type:
create type t_bucket_row as object(
phone_number varchar2(10),
count_of_people number,
bucket_no number);
/
create type t_bucket_table as table of t_bucket_row;
/
And table with test data:
create table test_data as
with t as (
select 1 AutoID, '5565787' Phone_Number, 57 Count_Of_People from dual union all
select 2, '2342343', 30 from dual union all
select 3, '2654456', 17 from dual union all
select 4, '3868556', 12 from dual union all
select 5, '9856756', 12 from dual union all
select 6, '9756456', 4 from dual union all
select 7, '4346365', 4 from dual union all
select 8, '2376743', 3 from dual union all
select 9, '9756343', 3 from dual union all
select 10, '2524349', 3 from dual union all
select 11, '2029393', 2 from dual union all
select 12, '9285656', 1 from dual)
select * from t;
Then we create a function that implements the algorithm for distributing the clients (the comments in the code describe how it works). Here we create a variable of the table type, fill it with phone numbers and bucket numbers, and return it from the function. After that, in a SQL query, we use the function's result as a table in the FROM clause. The parameter p_sum is your desired sum of client counts:
create or replace function get_buckets(p_sum number) return t_bucket_table is
  buckets t_bucket_table := t_bucket_table();
  -- running total of count_of_people per bucket number
  type bucket_sums is table of number index by binary_integer;
  sums bucket_sums;
  counter number := 0;
  found boolean;
begin
  sums(1) := 0;
  -- walk the rows in descending order of count_of_people (first-fit
  -- decreasing packing; the next line was edited to fix a bug in the
  -- result of the distribution):
  for i in (select t.*, rownum from test_data t order by t.count_of_people desc) loop
    buckets.extend;
    counter := counter + 1;
    -- bucket 0 by default; rows too big for any bucket stay there
    buckets(counter) := t_bucket_row(i.phone_number, i.count_of_people, 0);
    if i.count_of_people > p_sum then
      continue;
    end if;
    -- first fit: place the row in the first bucket that still has room
    found := false;
    for j in 1..sums.count loop
      if sums(j) + i.count_of_people <= p_sum then
        sums(j) := sums(j) + i.count_of_people;
        buckets(counter).bucket_no := j;
        found := true;
        exit;
      end if;
    end loop;
    -- no existing bucket has room: open a new one
    if not found then
      sums(sums.count + 1) := i.count_of_people;
      buckets(counter).bucket_no := sums.count;
    end if;
  end loop;
  return buckets;
end;
/
Now we can execute this function. Result is:
SQL> select * from table(get_buckets(130));
PHONE_NUMB COUNT_OF_PEOPLE BUCKET_NO
---------- --------------- ----------
5565787 57 1
2342343 30 1
2654456 17 1
3868556 12 1
9856756 12 1
9756456 4 2
4346365 4 2
2376743 3 2
9756343 3 2
2524349 3 2
2029393 2 1
9285656 1 2
12 rows selected.
Buckets distribution:
select bucket_no, sum(count_of_people) from table(get_buckets(130)) group by bucket_no;
BUCKET_NO SUM(COUNT_OF_PEOPLE)
---------- --------------------
1 130
2 18
If count_of_people is more than p_sum, it goes to bucket "0":
SQL> select * from table(get_buckets(35));
PHONE_NUMB COUNT_OF_PEOPLE BUCKET_NO
---------- --------------- ----------
5565787 57 0
2342343 30 1
2654456 17 2
3868556 12 2
9856756 12 3
9756456 4 1
4346365 4 2
2376743 3 3
9756343 3 3
2524349 3 3
2029393 2 2
9285656 1 1
12 rows selected.
SQL> select bucket_no, sum(count_of_people) from table(get_buckets(35)) group by bucket_no;
BUCKET_NO SUM(COUNT_OF_PEOPLE)
---------- --------------------
1 35
2 35
3 21
0 57
You can also try to use a user-defined aggregate function. I will try to show this in a small example.
First of all, we need to create a collection type:
create or replace type TTN as table of number;
/
Then we create the ODCIAggregate routines that need to be implemented to define a user-defined aggregate function.
create or replace type TO_BALANCED_BUCKET as object
(
summ TTN,
result int,
static function ODCIAggregateInitialize(sctx in out nocopy TO_BALANCED_BUCKET) return number,
member function ODCIAggregateIterate(self in out nocopy TO_BALANCED_BUCKET, value in number)
return number,
member function ODCIAggregateTerminate(self in TO_BALANCED_BUCKET,
returnValue out number,
flags in number) return number,
member function ODCIAggregateMerge(self in out nocopy TO_BALANCED_BUCKET, ctx2 in TO_BALANCED_BUCKET)
return number
)
/
create or replace type body TO_BALANCED_BUCKET is
static function ODCIAggregateInitialize(sctx in out nocopy TO_BALANCED_BUCKET) return number is
begin
sctx := TO_BALANCED_BUCKET(TTN(0), 1);
return ODCIConst.Success;
end;
member function ODCIAggregateIterate(self in out nocopy TO_BALANCED_BUCKET, value in number)
return number is
b_FoundGroup boolean := false;
begin
-- same first-fit packing as in the table function above; the limit is hard-coded to 130 here
if value > 130 then
result := 0;
else
for li in 1..summ.count loop
if summ(li) + value <= 130 then
b_FoundGroup := true;
summ(li) := summ(li) + value;
result := li;
exit;
end if;
end loop;
if not b_FoundGroup then
summ.extend;
summ(summ.count) := value;
result := summ.count;
end if;
end if;
return ODCIConst.Success;
end;
member function ODCIAggregateTerminate(self in TO_BALANCED_BUCKET,
returnValue out number,
flags in number) return number is
begin
returnValue := self.result;
return ODCIConst.Success;
end;
member function ODCIAggregateMerge(self in out nocopy TO_BALANCED_BUCKET, ctx2 in TO_BALANCED_BUCKET)
return number is
begin
return ODCIConst.Error;
end;
end;
/
Then we create the aggregate function itself.
create or replace function balanced_bucket(input number) return number
parallel_enable
aggregate using TO_BALANCED_BUCKET;
/
And finally, the query itself:
with test_data as (
select 1 as AutoID, '5565787' as Phone_Number, 12 as Count_Of_People from dual union all
select 2, '2342343', 3 from dual union all
select 3, '2654456', 1 from dual union all
select 4, '3868556', 12 from dual union all
select 5, '9856756', 4 from dual union all
select 6, '9756456', 4 from dual union all
select 7, '4346365', 57 from dual union all
select 8, '2376743', 3 from dual union all
select 9, '9756343', 3 from dual union all
select 10, '2524349', 30 from dual union all
select 11, '2029393', 2 from dual union all
select 12, '9285656', 17 from dual
)
select t.phone_number, t.count_of_people,
balanced_bucket(t.count_of_people) over(order by t.count_of_people desc) balanced_bucket
from test_data t
Hope this solution helps. The algorithm for distributing the clients is Dmitry's.
For the "first bucket" solution this is a nice exercise in recursive subquery factoring. The following query gives you such a bucket (although with phone numbers concatenated to a single string):
with source$ as (
select 1 as AutoID, '5565787' as Phone_Number, 12 as Count_Of_People from dual union all
select 2, '2342343', 3 from dual union all
select 3, '2654456', 1 from dual union all
select 4, '3868556', 12 from dual union all
select 5, '9856756', 4 from dual union all
select 6, '9756456', 4 from dual union all
select 7, '4346365', 57 from dual union all
select 8, '2376743', 3 from dual union all
select 9, '9756343', 3 from dual union all
select 10, '2524349', 30 from dual union all
select 11, '2029393', 2 from dual union all
select 12, '9285656', 17 from dual
),
permutator$ (autoid, phone_number, count_of_people, autoid_list, phone_number_list, count_of_people_sum, count_of_people_list) as (
select S.autoid, phone_number, count_of_people,
to_char(autoid), cast(phone_number as varchar2(4000)), count_of_people, to_char(count_of_people)
from source$ S
union all
select S.autoid, S.phone_number, S.count_of_people,
P.autoid_list||'|'||S.autoid, P.phone_number_list||'|'||S.phone_number, P.count_of_people_sum + S.count_of_people, P.count_of_people_list||'+'||S.count_of_people
from permutator$ P
join source$ S
on S.autoid > P.autoid
where P.count_of_people_sum + S.count_of_people <= 130
)
search depth first by autoid asc set siblings_order$,
priority_ordered$ as (
select P.*,
row_number() over (partition by null order by abs(count_of_people_sum-130), siblings_order$ asc) as your_best_call$
from permutator$ P
)
select autoid_list, phone_number_list, count_of_people_sum, count_of_people_list
from priority_ordered$
where your_best_call$ = 1
;
... and if you'd rather have a row-by-row list of the original items, then replace the last ...
select autoid_list, phone_number_list, count_of_people_sum, count_of_people_list
from priority_ordered$
where your_best_call$ = 1
;
... with ...
select autoid, count_of_people, phone_number
from priority_ordered$ PO
start with your_best_call$ = 1
connect by PO.autoid_list||'|'||prior PO.autoid = prior PO.autoid_list
;
With a little help from the object-relational features of Oracle, the phone number collection may be resolved in a very elegant way by a collector object: an object which collects data into its member collection attribute via a member method that returns a new instance of its class. A minimal sketch of such a type (an illustration only; phone_collector_t and phone_list_t are hypothetical names, not necessarily the types used by the spooled script below):
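create or replace type phone_list_t as table of varchar2(15);
/
create or replace type phone_collector_t as object (
  phones phone_list_t,
  -- returns a new collector with one more phone number appended,
  -- so calls can be chained inside a SQL expression
  member function add_phone(p_phone in varchar2) return phone_collector_t
);
/
create or replace type body phone_collector_t is
  member function add_phone(p_phone in varchar2) return phone_collector_t is
    l_new phone_collector_t := self;
  begin
    if l_new.phones is null then
      l_new.phones := phone_list_t();
    end if;
    l_new.phones.extend;
    l_new.phones(l_new.phones.count) := p_phone;
    return l_new;
  end;
end;
/
A small example of SQL*Plus spools for this solution: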
SQL> set verify off
SQL> define maxcountofpeoplesum = 130
SQL> @@23023283-split-records-into-buckets-based-on-a-sum-of-counts.sql
COUNT_OF_PEOPLE_SUM AUTOID PHONE_NUMBER COUNT_OF_PEOPLE
------------------- ---------- --------------- ---------------
130 1 5565787 12
130 2 2342343 3
130 3 2654456 1
130 5 9856756 4
130 6 9756456 4
130 7 4346365 57
130 10 2524349 30
130 11 2029393 2
130 12 9285656 17
9 rows selected.
SQL> define maxcountofpeoplesum = 15
SQL> @@23023283-split-records-into-buckets-based-on-a-sum-of-counts.sql
COUNT_OF_PEOPLE_SUM AUTOID PHONE_NUMBER COUNT_OF_PEOPLE
------------------- ---------- --------------- ---------------
15 1 5565787 12
15 2 2342343 3
SQL> define maxcountofpeoplesum = 200
SQL> @@23023283-split-records-into-buckets-based-on-a-sum-of-counts.sql
COUNT_OF_PEOPLE_SUM AUTOID PHONE_NUMBER COUNT_OF_PEOPLE
------------------- ---------- --------------- ---------------
148 1 5565787 12
148 2 2342343 3
148 3 2654456 1
148 4 3868556 12
148 5 9856756 4
148 6 9756456 4
148 7 4346365 57
148 8 2376743 3
148 9 9756343 3
148 10 2524349 30
148 11 2029393 2
148 12 9285656 17
12 rows selected.
SQL> define maxcountofpeoplesum = 147
SQL> @@23023283-split-records-into-buckets-based-on-a-sum-of-counts.sql
COUNT_OF_PEOPLE_SUM AUTOID PHONE_NUMBER COUNT_OF_PEOPLE
------------------- ---------- --------------- ---------------
147 1 5565787 12
147 2 2342343 3
147 4 3868556 12
147 5 9856756 4
147 6 9756456 4
147 7 4346365 57
147 8 2376743 3
147 9 9756343 3
147 10 2524349 30
147 11 2029393 2
147 12 9285656 17
11 rows selected.
I'm pretty sure the query could be enhanced to query all buckets, as Dmitry's solution does, but that would result in an even bigger and possibly badly performing query. Dmitry's solution looks much simpler and more straightforward for your problem.
Enjoy.

oracle update query [duplicate]

Possible Duplicate:
Oracle Multiple update Query
I have a query
Select item_code,comp_code from item;
which returns
item_code  comp_code
912001     01
912001     04
912002     01
912002     02
912002     03
912003     01
and I have three values of comp_code for each item, say comp_1, comp_2, comp_3.
Now I want to update the table so that each item code has these three values, i.e. there will be three entries for each item with comp_code values comp_1, comp_2, comp_3, like the output below:
item_code  comp_code
912001     comp_1
912001     comp_2
912001     comp_3
912002     comp_1
912002     comp_2
912002     comp_3
912003     comp_1
912003     comp_2
912003     comp_3
How can I write a single query which selects and updates these values?
I'm not sure I quite follow what you want, but to get from figure A to figure B you could do the following:
Create a holding table with the new values
Drop the old table
Rename the holding table.
This of course does not take into account indexes, permissions, etc...
SQL> @so_test
SQL> DROP
2 TABLE SO_TEST;
Table dropped.
SQL>
SQL> CREATE
2 TABLE SO_TEST
3 (
4 ITEM_CODE NUMBER
5 , COMP_CODE VARCHAR2 (10)
6 );
Table created.
SQL>
SQL> INSERT
2 INTO
3 SO_TEST VALUES
4 (
5 912001
6 ,'01'
7 );
1 row created.
SQL>
SQL> INSERT
2 INTO
3 SO_TEST VALUES
4 (
5 912001
6 ,'04'
7 );
1 row created.
SQL>
SQL> INSERT
2 INTO
3 SO_TEST VALUES
4 (
5 912002
6 ,'01'
7 );
1 row created.
SQL>
SQL> INSERT
2 INTO
3 SO_TEST VALUES
4 (
5 912002
6 ,'02'
7 );
1 row created.
SQL>
SQL> INSERT
2 INTO
3 SO_TEST VALUES
4 (
5 912002
6 ,'03'
7 );
1 row created.
SQL>
SQL> INSERT
2 INTO
3 SO_TEST VALUES
4 (
5 912003
6 ,'01'
7 );
1 row created.
SQL>
SQL> CREATE
2 TABLE SO_TEST_NEW AS
3 SELECT
4 A.ITEM_CODE AS ITEM_CODE
5 , 'comp_'
6 || B.N AS COMP_CODE
7 FROM
8 (
9 SELECT
10 ITEM_CODE
11 FROM
12 SO_TEST
13 GROUP BY
14 ITEM_CODE
15 )
16 A
17 , (
18 SELECT
19 ROWNUM AS N
20 FROM
21 (
22 SELECT
23 1 X
24 FROM
25 DUAL
26 CONNECT BY LEVEL <= 3
27 )
28 )
29 B
30 ORDER BY
31 A.ITEM_CODE;
Table created.
SQL>
SQL> DROP
2 TABLE SO_TEST;
Table dropped.
SQL>
SQL> ALTER TABLE SO_TEST_NEW RENAME TO SO_TEST;
Table altered.
SQL>
SQL> SELECT
2 *
3 FROM
4 SO_TEST;
ITEM_CODE COMP_CODE
---------- ---------------------------------------------
912001 comp_1
912001 comp_2
912001 comp_3
912002 comp_1
912002 comp_3
912002 comp_2
912003 comp_1
912003 comp_3
912003 comp_2
9 rows selected.
SQL>
