How to fetch the system-generated check constraint name of a table column in Oracle

I have created my TEST_TABLE table using the query below in Oracle:
CREATE TABLE "PK"."TEST_TABLE"
( "MYNAME" VARCHAR2(50),
"MYVAL1" NUMBER(12,0),
"MYVAL2" NUMBER(12,0),
"MYVAL3" NUMBER(12,0) NOT NULL,
CHECK ("MYVAL1" IS NOT NULL) DEFERRABLE ENABLE NOVALIDATE
) SEGMENT CREATION IMMEDIATE
PCTFREE 10 PCTUSED 40 INITRANS 1 MAXTRANS 255 NOCOMPRESS LOGGING
STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645
PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1 BUFFER_POOL DEFAULT FLASH_CACHE DEFAULT CELL_FLASH_CACHE DEFAULT)
TABLESPACE "SYSTEM" ;
After this, I want to drop the check constraint applied on column MYVAL1.
For this, I first need to fetch the check constraint name on column MYVAL1. Then I can run the ALTER command to drop that constraint.
So how can I fetch the exact system-generated check constraint name on column MYVAL1?
I tried to fetch the data using the query below, but since SEARCH_CONDITION is a LONG column, it threw this error:
select * from user_constraints
where TABLE_NAME = 'TEST_TABLE'
and TO_LOB(search_condition) LIKE '%"MYVAL1" IS NOT NULL%';
ERROR:
ORA-00932: inconsistent datatypes: expected - got LONG
00932. 00000 - "inconsistent datatypes: expected %s got %s"
*Cause:
*Action:
Error at Line: 23 Column: 6
Any clue?

There are two ways. First (recommended): give the constraint a name when creating it. Second: search in the ALL_CONS_COLUMNS (or USER_CONS_COLUMNS) system view.
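For the first approach, a minimal sketch of the same table with an explicitly named check constraint (the constraint name CHK_MYVAL1_NOT_NULL is just illustrative):
CREATE TABLE "PK"."TEST_TABLE"
( "MYNAME" VARCHAR2(50),
  "MYVAL1" NUMBER(12,0),
  "MYVAL2" NUMBER(12,0),
  "MYVAL3" NUMBER(12,0) NOT NULL,
  CONSTRAINT "CHK_MYVAL1_NOT_NULL" CHECK ("MYVAL1" IS NOT NULL) DEFERRABLE ENABLE NOVALIDATE
);
Dropping it later is then simply ALTER TABLE "PK"."TEST_TABLE" DROP CONSTRAINT "CHK_MYVAL1_NOT_NULL";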
For the second approach, you need something like this:
select constraint_name
from all_cons_columns
where table_name = 'TEST_TABLE'
and owner = 'PK'
and column_name = 'MYVAL1'
See documentation: https://docs.oracle.com/cloud/latest/db121/REFRN/refrn20045.htm#REFRN20045
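If you want to look up and drop the constraint in one go, here is a minimal sketch (assuming you are connected as PK and that only one check constraint references MYVAL1; the variable name v_name is illustrative):
declare
  v_name user_constraints.constraint_name%type;
begin
  select cc.constraint_name
    into v_name
    from user_cons_columns cc
    join user_constraints c
      on c.constraint_name = cc.constraint_name
     and c.table_name = cc.table_name
   where cc.table_name = 'TEST_TABLE'
     and cc.column_name = 'MYVAL1'
     and c.constraint_type = 'C';  -- check constraints only
  execute immediate 'alter table TEST_TABLE drop constraint "' || v_name || '"';
end;
/
Restricting on constraint_type = 'C' keeps the lookup to check constraints, which is what the system-generated CHECK ("MYVAL1" IS NOT NULL) is.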

Related

Oracle update throws timeout

I have a very simple Oracle update:
update inv_li_pck_inst set mig_li_pck_inst_id = 9377 where id = 9384
Both records exist in the table inv_li_pck_inst: id=9377 and id=9384.
The record with id=9377 is the migration record.
The problem is that this very simple update query takes ages to run and eventually throws a timeout exception. What could possibly be wrong here? IDs in the table inv_li_pck_inst are unique.
Table DDL:
CREATE TABLE "TESTING_INV"."INV_LI_PCK_INST"
( "ID" NUMBER NOT NULL ENABLE,
"LI_PCK_ID" NUMBER NOT NULL ENABLE,
"WORKFLOW_ID" NUMBER,
"INSERTED" DATE NOT NULL ENABLE,
"INSERTED_BY" NUMBER(9,0) NOT NULL ENABLE,
"UPDATED" DATE,
"UPDATED_BY" NUMBER(9,0),
"DELETED" DATE,
"DELETED_BY" NUMBER(9,0),
"MIG_LI_PCK_INST_ID" NUMBER,
"STATUS_ID" NUMBER,
"WFI_ID" NUMBER,
"TOF_WFI_ID" NUMBER,
CONSTRAINT "PK_INV_LI_PCK_INST" PRIMARY KEY ("ID")
USING INDEX PCTFREE 10 INITRANS 2 MAXTRANS 255 COMPUTE STATISTICS
STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645
PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1
BUFFER_POOL DEFAULT FLASH_CACHE DEFAULT CELL_FLASH_CACHE DEFAULT)
TABLESPACE "TESTING_INV_DATA" ENABLE,
CONSTRAINT "FK_LPI_WORKFLOW" FOREIGN KEY ("WORKFLOW_ID")
REFERENCES "TESTING_INV"."WORKFLOW" ("WORKFLOW_ID") ENABLE,
CONSTRAINT "FK_LIPI_STATUS" FOREIGN KEY ("STATUS_ID")
REFERENCES "TESTING_INV"."INV_LI_PCK_INST_STATUS" ("ID") ENABLE,
CONSTRAINT "FK_LIPI_MIG_PCK_INST" FOREIGN KEY ("MIG_LI_PCK_INST_ID")
REFERENCES "TESTING_INV"."INV_LI_PCK_INST" ("ID") ENABLE,
CONSTRAINT "FK_LI_PCK_INST_WFI" FOREIGN KEY ("WFI_ID")
REFERENCES "TESTING_INV"."WFI" ("WFI_ID") ENABLE,
CONSTRAINT "FK_INV_LI_PCK_INST5" FOREIGN KEY ("TOF_WFI_ID")
REFERENCES "TESTING_INV"."WFI" ("WFI_ID") ENABLE,
CONSTRAINT "FK_LPI_LI_PCK" FOREIGN KEY ("LI_PCK_ID")
REFERENCES "TESTING_INV"."INV_LI_PCK" ("ID") ENABLE
) SEGMENT CREATION IMMEDIATE
PCTFREE 10 PCTUSED 40 INITRANS 1 MAXTRANS 255
NOCOMPRESS LOGGING
STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645
PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1
BUFFER_POOL DEFAULT FLASH_CACHE DEFAULT CELL_FLASH_CACHE DEFAULT)
TABLESPACE "TESTING_INV_DATA" ;
CREATE INDEX "TESTING_INV"."IDX_LI_PCK_INST_WFI" ON "TESTING_INV"."INV_LI_PCK_INST" ("WFI_ID")
PCTFREE 10 INITRANS 2 MAXTRANS 255 COMPUTE STATISTICS
STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645
PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1
BUFFER_POOL DEFAULT FLASH_CACHE DEFAULT CELL_FLASH_CACHE DEFAULT)
TABLESPACE "TESTING_INV_DATA" ;
CREATE UNIQUE INDEX "TESTING_INV"."UN_LI_PCK_INST" ON "TESTING_INV"."INV_LI_PCK_INST" (NVL2("DELETED","ID",NULL), "WORKFLOW_ID")
PCTFREE 10 INITRANS 2 MAXTRANS 255 COMPUTE STATISTICS
STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645
PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1
BUFFER_POOL DEFAULT FLASH_CACHE DEFAULT CELL_FLASH_CACHE DEFAULT)
TABLESPACE "TESTING_INV_DATA" ;
CREATE OR REPLACE EDITIONABLE TRIGGER "TESTING_INV"."TRG_INV_LI_PCK_INST"
before insert on INV_LI_PCK_INST
for each row
begin
select SEQ_INV_LI_PCK_INST.nextval into :new.ID from dual;
end;
/
ALTER TRIGGER "TESTING_INV"."TRG_INV_LI_PCK_INST" ENABLE;
The most probable scenario is that there is another migration session (or more than one) and it is blocking your session.
Simple setup
create table testing
(ID NUMBER primary key,
MIG_ID NUMBER );
alter table testing add (
constraint mig foreign key(MIG_ID) references testing(id));
insert into testing (id, mig_id) values(9384, null);
insert into testing (id, mig_id) values(9377, null);
commit;
If you now perform the UPDATE, it goes perfectly smoothly:
update testing set mig_id = 9377 where id = 9384;
But if, before the update, you perform a delete of the migrated ID from another session and do not commit it, your update will "hang" forever.
-- perform from other session and do not commit
delete from testing where id = 9377;
Why? Because if the update were completed and the delete session were then committed, referential integrity would be violated. The UPDATE must wait until the delete is committed or rolled back to see whether the referenced ID is still there or not.
How to diagnose?
Simply check v$session (or gv$session on RAC) and look at the blocking session status and event:
select SID, SERIAL#,STATUS,SQL_ID, BLOCKING_SESSION_STATUS, BLOCKING_SESSION,EVENT
from v$session
where USERNAME = your_user
You will (most probably) see your session with BLOCKING_SESSION_STATUS = VALID and
EVENT = enq: TX - row lock contention. In BLOCKING_SESSION you will find the session ID of the session that is blocking you.
The SQL_ID will tell you what the other session is doing.
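As a follow-up, a minimal sketch for looking up what the blocking session is (or was last) executing; it assumes you have SELECT access to v$session and v$sqlarea, and the bind variable :blocking_sid is illustrative:
select s.sid, s.serial#, s.username, s.status, a.sql_text
  from v$session s
  left join v$sqlarea a
    on a.sql_id = nvl(s.sql_id, s.prev_sql_id)
 where s.sid = :blocking_sid;  -- the SID reported in BLOCKING_SESSION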

DECLARE A FOREIGN KEY CONSTRAINT causes ORA-00907

I have a create table statement, but it does not compile.
The fk_myFirstTable CONSTRAINT causes the problem.
Does anyone know what is wrong in the CONSTRAINT?
I get: ORA-00907: "missing right parenthesis"
CREATE TABLE "mySchema"."mySecondTable "
(
idNumber NUMBER(10,0) NOT NULL ENABLE,
SystemId NUMBER(10,0) NOT NULL ENABLE,
CONSTRAINT "mySecondTable _PK" PRIMARY KEY (idNumber ),
CONSTRAINT "fk_myFirstTable" FOREIGN KEY (SystemId) REFERENCES myFirstTable(SystemId)
USING INDEX PCTFREE 10 INITRANS 2 MAXTRANS 255
STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645
PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1
BUFFER_POOL DEFAULT FLASH_CACHE DEFAULT CELL_FLASH_CACHE DEFAULT)
TABLESPACE "MYTBS" ENABLE
) SEGMENT CREATION IMMEDIATE
PCTFREE 10 PCTUSED 0 INITRANS 1 MAXTRANS 255
NOCOMPRESS LOGGING
STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645
PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1
BUFFER_POOL DEFAULT FLASH_CACHE DEFAULT CELL_FLASH_CACHE DEFAULT)
TABLESPACE "MYTBS" ;
thanks for help!
The USING INDEX clause is part of the primary key constraint. I moved the foreign key constraint to after the ENABLE.
Try this:
CREATE TABLE "mySchema"."mySecondTable "
(
idNumber NUMBER(10,0) NOT NULL ENABLE,
SystemId NUMBER(10,0) NOT NULL ENABLE,
CONSTRAINT "mySecondTable _PK" PRIMARY KEY (idNumber )
USING INDEX PCTFREE 10 INITRANS 2 MAXTRANS 255
STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645
PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1
BUFFER_POOL DEFAULT FLASH_CACHE DEFAULT CELL_FLASH_CACHE DEFAULT)
TABLESPACE "MYTBS" ENABLE,
CONSTRAINT "fk_myFirstTable" FOREIGN KEY (SystemId) REFERENCES myFirstTable(SystemId)
) SEGMENT CREATION IMMEDIATE
PCTFREE 10 PCTUSED 0 INITRANS 1 MAXTRANS 255
NOCOMPRESS LOGGING
STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645
PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1
BUFFER_POOL DEFAULT FLASH_CACHE DEFAULT CELL_FLASH_CACHE DEFAULT)
TABLESPACE "MYTBS" ;
I had a stray space in the declaration line:
CREATE TABLE "mySchema"."mySecondTable "
instead of:
CREATE TABLE "mySchema"."mySecondTable"
Sorry for the lack of attention!
A few objections, if I may: you didn't write that code, did you? It suspiciously looks like a copy/paste from some GUI which - true - fetches all that information from the dictionary and creates a jungle, when all you need is a bush.
Don't enclose Oracle object names in double quotes and - even worse - use mixed case within those double quotes. Otherwise, you'll have to reference them using double quotes and the same mixed case, ALWAYS. A true nightmare. By default, Oracle will store names in uppercase, but you can reference them using any case (lower, mixed, upper - just don't store them as such!).
NO: "mySecondTable " - note a trailing space - horror!
YES (as their name will be stored in UPPERCASE anyway):
mysecondtable
MYSECONDTABLE
mySecondTable
There's no need to specify NOT NULL for the column(s) that make up the primary key constraint; they can't be NULL anyway.
All that storage mumbo-jumbo ... phew; if this is an ordinary table used for daily purposes, you really shouldn't worry about it - let Oracle handle that information. I agree, there's no problem in specifying all of that if you know what it means. Reading your question, I think you should rely on Oracle.
Shortened, your statement might/should look as follows (I'm creating my_first_table just to make the foreign key constraint work):
SQL> create table my_first_table
2 (system_id number constraint pk_mft primary key);
Table created.
SQL> create table my_second_table
2 (id_number number constraint pk_mst primary key,
3 system_id number constraint fk_my_first_table references my_first_table (system_id)
4 not null
5 );
Table created.
SQL>

Is it ever possible to design an Oracle trigger that modifies same table (guaranteed not same row)?

I need to write a TRIGGER that preserves the old value of a column before it is updated, by inserting or updating the old value into another row in the same table. (yes, I know).
The following MERGE/DUAL trickery has served me well,
but because in this case, I'm inserting into or updating the same table,
Oracle complains at runtime. Also, for some reason, I found it unusually difficult to write code that compiles without errors.
Two questions:
Is it ever possible to modify the same table that the trigger is on, even when I can guarantee that the trigger will never update the row that triggered the trigger? Or do I have to do something like (e.g.): insert pending changes into another table, so that a 2nd trigger can merge them back into the original table? (This table is a customer interface, so I can't re-architect this to use a second table for permanently storing old values.)
What's with the compiler errors that don't let me use :old.event_key, but do let me use :old.property_val in the MERGE statement? (declaring a variable old_event_key and assigning it to the value of :old.event_key seems to work) Is there some sort of hidden intermediate language that knows when a column is (part of) the primary key, and prevents you from referencing it via :old.?
Here is the offending code:
create or replace trigger remember_old_status
before update on event_properties
for each row
when (old.property_name = 'CURRENT-STATUS')
declare
old_event_key varchar2(20);
begin
old_event_key := :old.event_key;
merge into event_properties eprop
using (select 1 from dual) dummy
on ( eprop.event_key = old_event_key
AND eprop.property_name = 'PREVIOUS-STATUS')
when matched then
update set property_val = :old.property_val
when not matched then
insert (event_key, property_name, property_val)
values (old_event_key, 'PREVIOUS-STATUS', :old.property_val);
end;
And here's the table:
CREATE TABLE "CUST"."EVENT_PROPERTIES"
( "EVENT_KEY" VARCHAR2(20 BYTE) CONSTRAINT "NN_FLE_FLK" NOT NULL ENABLE,
"PROPERTY_NAME" VARCHAR2(20 BYTE) CONSTRAINT "NN_FLE_PN" NOT NULL ENABLE,
"PROPERTY_VAL" VARCHAR2(80 BYTE),
CONSTRAINT "PX_EVENT_PROPERTIES" PRIMARY KEY ("EVENT_KEY", "PROPERTY_NAME") DEFERRABLE
USING INDEX PCTFREE 10 INITRANS 2 MAXTRANS 255 COMPUTE STATISTICS
STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645
PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1
BUFFER_POOL DEFAULT FLASH_CACHE DEFAULT CELL_FLASH_CACHE DEFAULT)
TABLESPACE "CUST_TS" ENABLE
) SEGMENT CREATION IMMEDIATE
PCTFREE 10 PCTUSED 40 INITRANS 1 MAXTRANS 255
NOCOMPRESS LOGGING
STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645
PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1
BUFFER_POOL DEFAULT FLASH_CACHE DEFAULT CELL_FLASH_CACHE DEFAULT)
TABLESPACE "CUST_TS" ;
And here are the error messages:
ORA-04091: table CUST.EVENT_PROPERTIES is mutating, trigger/function may not see it
ORA-06512: at "CUST.REMEMBER_OLD_STATUS", line 5
You could use a compound trigger to do this, storing the old values in variables in a before each row section, and then merging in an after statement.
This assumes you'll only ever update one row at a time:
create or replace trigger remember_old_status
for update on event_properties
compound trigger
old_rec event_properties%rowtype;
before each row is
begin
if (:old.property_name = 'CURRENT-STATUS') then
old_rec.event_key := :old.event_key;
old_rec.property_name := :old.property_name;
old_rec.property_val := :old.property_val;
end if;
end before each row;
after statement is
begin
if (old_rec.property_name = 'CURRENT-STATUS') then
merge into event_properties eprop
using (
select old_rec.event_key as event_key,
'PREVIOUS-STATUS' as property_name,
old_rec.property_val as property_val
from dual
) dummy
on (eprop.event_key = dummy.event_key
and eprop.property_name = dummy.property_name)
when matched then
update set property_val = old_rec.property_val
when not matched then
insert (event_key, property_name, property_val)
values (dummy.event_key, dummy.property_name, dummy.property_val);
end if;
end after statement;
end remember_old_status;
/
Quick test:
insert into event_properties values('SOME_EVENT', 'CURRENT-STATUS', 'A');
1 row inserted.
update event_properties set property_val = 'B' where event_key = 'SOME_EVENT' and property_name = 'CURRENT-STATUS';
1 row updated.
select * from event_properties;
EVENT_KEY PROPERTY_NAME PROPERTY_VAL
-------------------- -------------------- --------------------------------------------------------------------------------
SOME_EVENT CURRENT-STATUS B
SOME_EVENT PREVIOUS-STATUS A
update event_properties set property_val = 'C' where event_key = 'SOME_EVENT' and property_name = 'CURRENT-STATUS';
1 row updated.
select * from event_properties;
EVENT_KEY PROPERTY_NAME PROPERTY_VAL
-------------------- -------------------- --------------------------------------------------------------------------------
SOME_EVENT CURRENT-STATUS C
SOME_EVENT PREVIOUS-STATUS B
If you want to deal with multiple rows updated by one statement, then the before each row section can populate a collection instead, and you can then use that in the after statement.
create or replace trigger remember_old_status
for update on event_properties
compound trigger
type t_type is table of event_properties%rowtype;
old_recs t_type := t_type();
before each row is
begin
if (:old.property_name = 'CURRENT-STATUS') then
old_recs.extend();
old_recs(old_recs.count).event_key := :old.event_key;
old_recs(old_recs.count).property_name := :old.property_name;
old_recs(old_recs.count).property_val := :old.property_val;
end if;
end before each row;
after statement is
begin
if old_recs.count > 0 then -- skip the merge when no CURRENT-STATUS rows were touched (FORALL cannot take null bounds)
forall i in 1..old_recs.count
merge into event_properties eprop
using (
select old_recs(i).event_key as event_key,
'PREVIOUS-STATUS' as property_name,
old_recs(i).property_val as property_val
from dual
) dummy
on (eprop.event_key = dummy.event_key
and eprop.property_name = dummy.property_name)
when matched then
update set property_val = old_recs(i).property_val
when not matched then
insert (event_key, property_name, property_val)
values (dummy.event_key, dummy.property_name, dummy.property_val);
end if;
end after statement;
end remember_old_status;
/
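As a quick hedged test of the collection-based version (same EVENT_PROPERTIES table as above; the event keys EVT1 and EVT2 are made up):
insert into event_properties values ('EVT1', 'CURRENT-STATUS', 'A');
insert into event_properties values ('EVT2', 'CURRENT-STATUS', 'X');
update event_properties set property_val = 'B' where property_name = 'CURRENT-STATUS';
select * from event_properties order by event_key, property_name;
After that multi-row update, each event should also have a PREVIOUS-STATUS row holding its old value ('A' for EVT1, 'X' for EVT2).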

Get complete ddl for index in oracle

I am using Oracle 11g/12c. I want to get the DDL of the indexes in my database. For this I used the query:
SELECT DBMS_METADATA.GET_DDL('INDEX','SYS_IL0000091971C00001$$','CCEEXPERTS') FROM dual
Here 'SYS_IL0000091971C00001$$' is my index name and 'CCEEXPERTS' is my owner name.
From this I get the DDL:
CREATE UNIQUE INDEX "CCEEXPERTS"."SYS_IL0000091971C00001$$" ON "CCEEXPERTS"."DATABLOB" (
And my actual ddl is -
CREATE UNIQUE INDEX "CCEEXPERTS"."SYS_IL0000091971C00001$$" ON "CCEEXPERTS"."DATABLOB" (
PCTFREE 10 INITRANS 2 MAXTRANS 255
STORAGE(INITIAL 65536 NEXT 1048576 MINEXTENTS 1 MAXEXTENTS 2147483645
PCTINCREASE 0 FREELISTS 1 FREELIST GROUPS 1
BUFFER_POOL DEFAULT FLASH_CACHE DEFAULT CELL_FLASH_CACHE DEFAULT)
TABLESPACE "USERS"
PARALLEL (DEGREE 0 INSTANCES 0) ;
In the actual DDL, after "CCEEXPERTS"."DATABLOB" ( there is a newline character, and from there on the DDL is truncated.
How can I get the complete ddl? Please help me...
Thanks in advance.
In SQL*Plus, set these before running the query:
set long 100000
set longchunksize 100000
Oracle's DBMS_METADATA package lets you retrieve and customize the DDL that is returned. With default settings, for all indexes:
select DBMS_METADATA.GET_DDL('INDEX', index_name, owner)
from all_indexes
where owner in (USER, 'USER_OTHER_THAN_LOGGED_IN_USER');
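For the specific index from the question, a minimal sketch combining the LONG settings with GET_DDL (assuming a SQL*Plus-style client):
set long 100000
set longchunksize 100000
select DBMS_METADATA.GET_DDL('INDEX', 'SYS_IL0000091971C00001$$', 'CCEEXPERTS') from dual;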
A pl/sql block example:
set serveroutput on
DECLARE
V_DDL CLOB;
BEGIN
DBMS_METADATA.SET_TRANSFORM_PARAM(DBMS_METADATA.SESSION_TRANSFORM,'STORAGE',false);
DBMS_METADATA.SET_TRANSFORM_PARAM(DBMS_METADATA.SESSION_TRANSFORM, 'PRETTY', true);
dbms_metadata.set_transform_param(dbms_metadata.session_transform,'TABLESPACE',false);
V_DDL := DBMS_METADATA.GET_DDL('VIEW', 'A_VIEW_NAME');
DBMS_OUTPUT.PUT_LINE(V_DDL);
END;
/

Toad for Oracle, Editing ID Column to be an auto-increment ID

I work with Toad for Oracle 12.1 for my database. I have a table called TBLEMPLOYEE which already contains some data and has a column called ID whose values increase from 1 to N.
ID Name Gender DateOfBirth Type
------------------------------------
1 Mark Male 10/10/1982 1
2 Mary Female 11/11/1981 2
3 Esther Female 12/12/1984 2
4 Matthew Male 9/9/1983 1
5 John Male 5/5/1985 1
6 Luke Male 6/6/1986 1
Now I want to change the column ID so that it auto-increments when I add new data to the table.
I know that in Toad we can do this when we create a new table with that behavior. For instance, using Create Table, in the newly created column we could set the Default / Virtual / Identity setting to Identity:
Toad will then show a UI with a bunch of settings for that:
And it will automatically be translated to something like:
(START WITH 1 INCREMENT BY 1 MINVALUE 1 MAXVALUE 9999999999999999999999999999 CACHE 20 NOCYCLE ORDER NOKEEP)
In the Default / Virtual / Identity settings.
But I can't seem to do the same when I do Alter Table instead of Create Table.
Why is that so?
And since I already have some data in TBLEMPLOYEE, I want to avoid creating a new table and re-inserting the data if possible.
How can I do that?
This is the current SQL script (in case it helps):
ALTER TABLE MYSCHEMA.TBLEMPLOYEE
DROP PRIMARY KEY CASCADE;
DROP TABLE MYSCHEMA.TBLEMPLOYEE CASCADE CONSTRAINTS;
CREATE TABLE MYSCHEMA.TBLEMPLOYEE
(
ID NUMBER NOT NULL,
NAME VARCHAR2(80 BYTE) NOT NULL,
GENDER VARCHAR2(6 BYTE),
DATEOFBIRTH DATE,
EMPLOYEETYPE INTEGER NOT NULL
)
TABLESPACE USERS
RESULT_CACHE (MODE DEFAULT)
PCTUSED 0
PCTFREE 10
INITRANS 1
MAXTRANS 255
STORAGE (
INITIAL 64K
NEXT 1M
MAXSIZE UNLIMITED
MINEXTENTS 1
MAXEXTENTS UNLIMITED
PCTINCREASE 0
BUFFER_POOL DEFAULT
FLASH_CACHE DEFAULT
CELL_FLASH_CACHE DEFAULT
)
LOGGING
NOCOMPRESS
NOCACHE
NOPARALLEL
MONITORING;
ALTER TABLE MYSCHEMA.TBLEMPLOYEE ADD (
PRIMARY KEY
(ID)
USING INDEX
TABLESPACE USERS
PCTFREE 10
INITRANS 2
MAXTRANS 255
STORAGE (
INITIAL 64K
NEXT 1M
MAXSIZE UNLIMITED
MINEXTENTS 1
MAXEXTENTS UNLIMITED
PCTINCREASE 0
BUFFER_POOL DEFAULT
FLASH_CACHE DEFAULT
CELL_FLASH_CACHE DEFAULT
)
ENABLE VALIDATE);
First of all, your sequence should start with the max value + 1 from the table, e.g.
(START WITH 7 INCREMENT BY 1 MINVALUE 1 MAXVALUE 9999999999999999999999999999 CACHE 20 NOCYCLE ORDER NOKEEP)
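If you don't want to hard-code that starting value, a minimal hedged sketch that derives it from the existing data (run as the table owner; the sequence name seq_tblemployee_id is illustrative):
declare
  v_start number;
begin
  select nvl(max(ID), 0) + 1 into v_start from TBLEMPLOYEE;
  execute immediate 'create sequence seq_tblemployee_id start with ' || v_start ||
                    ' increment by 1 nocache nocycle';
end;
/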
If you want to automatically populate the value for the ID and you're not running on Oracle 12c, I suggest you use a trigger:
drop sequence seq_mytest_id;
truncate table my_test_t;
drop table my_test_t;
create table my_test_t (id number, string varchar2(30));
-- prepopulate with fixed values for the id
insert into my_test_t(id, string) values (1,'test');
insert into my_test_t(id, string) values (2,'test');
insert into my_test_t(id, string) values (3,'test');
insert into my_test_t(id, string) values (4,'test');
insert into my_test_t(id, string) values (5,'test');
insert into my_test_t(id, string) values (6,'test');
commit;
--Now create the sequence and the trigger for automatically
--populating the ID column
create sequence seq_mytest_id start with 7 increment by 1 nocycle nocache;
create trigger t_mytest_bi before insert on my_test_t for each row
begin
select seq_mytest_id.nextval into :new.id from dual;
end;
/
-- Test the trigger
insert into my_test_t(string) values ('test');
insert into my_test_t(string) values ('test2');
commit;
select * from my_test_t;
If you're running on Oracle 12c, you can define your column as an identity column:
https://oracle-base.com/articles/12c/identity-columns-in-oracle-12cr1
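If you want to keep the existing column in place, one hedged 12c alternative to both the trigger and a full identity conversion is a sequence-backed column default (assuming a sequence such as seq_tblemployee_id created as sketched above; names are illustrative):
alter table MYSCHEMA.TBLEMPLOYEE modify (ID default seq_tblemployee_id.nextval);
-- new rows that omit ID now pick up the next sequence value automatically
insert into MYSCHEMA.TBLEMPLOYEE (NAME, GENDER, DATEOFBIRTH, EMPLOYEETYPE)
values ('Peter', 'Male', DATE '1987-07-07', 1);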
Hope it helps,
R
