Schlagwort-Archive: SQL

Materialized View with refresh fast Example

At first create a Table as basis for the Materialized view:


-- Base table for the materialized view: 1005 rows generated via a recursive CTE.
create table matviewtab as
with resNmb (iNmb) as (
    select 1 as iNmb
    from dual
    union all
    select iNmb + 1
    from resNmb
    where iNmb < 1005
)
select
    iNmb,
    'SomeText' as num_ber
from resNmb;

Add a primary Key.

alter table matviewtab add constraint matviewtab_pk primary key (iNmb);

And create a Materialized View Log for capturing the operations on the basis-table.

CREATE MATERIALIZED VIEW LOG ON matviewtab WITH PRIMARY KEY, ROWID;

Then let's create the materialized view with the fast-refresh-on-commit option:

-- Fast-refreshable materialized view, refreshed automatically on every COMMIT
-- against the base table (requires the MV log created above).
-- NOLOGGING/CACHE affect segment storage only, not refresh behavior.
-- Explicit column list instead of SELECT * so the MV definition cannot change
-- silently if columns are later added to the base table.
CREATE MATERIALIZED VIEW matviewview
NOLOGGING
CACHE
BUILD IMMEDIATE
REFRESH FAST ON COMMIT
AS SELECT iNmb, num_ber FROM matviewtab;

Check the content:

select * from matviewview order by inmb desc;

INMB NUM_BER
1005 SomeText
1004 SomeText
1003 SomeText
1002 SomeText
1001 SomeText
1000 SomeText
999 SomeText
998 SomeText
997 SomeText
996 SomeText

Insert a row and commit and check the content again:

-- Explicit column list guards against silent breakage on schema changes.
-- The commit triggers the ON COMMIT fast refresh of the materialized view.
insert into matviewtab (iNmb, num_ber) values (1006, 'OtherTxt');
commit;


select * from (select * from matviewview order by inmb desc) where rownum < 11 order by inmb desc;

INMB NUM_BER
1006 OtherTxt
1005 SomeText
1004 SomeText
1003 SomeText
1002 SomeText
1001 SomeText
1000 SomeText
999 SomeText
998 SomeText
997 SomeText

Insert another Row and check for changes in the DB-Objects user_tab_modifications and MLOG$_matviewtab (MLOG$_<>):

-- Insert WITHOUT commit: the change is captured in the MV log, but the
-- materialized view is not refreshed yet (refresh happens ON COMMIT).
-- Explicit column list added for robustness against schema changes.
insert into matviewtab (iNmb, num_ber) values (1007, 'OtherTxt');


-- Monitoring view showing pending DML counters per table.
select * from user_tab_modifications;

TABLE_NAME PARTITION_NAME SUBPARTITION_NAME INSERTS UPDATES DELETES TIMESTAMP TRUNCATED DROP_SEGMENTS
BIN$TPmrAsfFAN7gU6wZ4BSJ9w==$0     2 0 0 12.04.2017 16:00:54 NO 0


select * from MLOG$_matviewtab;

INMB M_ROW$$ SNAPTIME$$ DMLTYPE$$ OLD_NEW$$ CHANGE_VECTOR$$ XID$$
1007 AAFaYQAQAAAAAA9AAB 01.01.4000 00:00:00 I N FE 3096379362658352


-- The commit triggers the fast refresh of matviewview from the MV log.
commit;


-- Row 1007 is now visible in the materialized view.
select * from (select * from matviewview order by inmb desc) where rownum < 11 order by inmb desc;

INMB NUM_BER
1007 OtherTxt
1006 OtherTxt
1005 SomeText
1004 SomeText
1003 SomeText
1002 SomeText
1001 SomeText
1000 SomeText
999 SomeText
998 SomeText


-- Cleanup: drop the dependent objects first, then the base table.
-- PURGE added for consistency with the other drops in this file
-- (bypasses the recycle bin).
drop MATERIALIZED VIEW LOG on matviewtab;
drop MATERIALIZED VIEW matviewview;
drop table matviewtab purge;

Additional Infos:
Materialized Views: how can I find the number of updates, inserts, and deletes applied during refresh?

SQL: Densification or Condensing of Date Ranges or History

— Verdichten/Densification von Zeitranges mit Berücksichtigung der Reihenfolge

Condensing/Densification or Merging of Date Ranges in one table with considering the chronology.
This is necessary if the Table is a subset of columns of the original table
There can be changes in attributes which are not part of the table to be condensed, causing the generation of new versions.
We now need to condense or merge the date ranges so we get as a result for every change in our table only one row.

/*
Verdichtet eine Tabelle mit BK und einer History,
falls UNMITTELBAR AUFEINANDER FOLGEND gleiche Attributsauspraegungen vorhanden sind.
Diese fallen dann zusammen.
—-
This logic condenses a table concerning consecutively history-rows with the same attributes.
These Rows are combined.
History-holes are considered!
*/

-- Fixed: the original used typographic dashes (– / —) instead of the SQL
-- comment marker "--", which is a syntax error when pasted into a client.
--drop table CONDENSING_TEST purge;

CREATE TABLE CONDENSING_TEST
(
H_KONTO_S_CID NUMBER, -- surrogate primary key
H_KONTO_CID NUMBER, -- business key
C_START_DT DATE,
C_END_DT DATE,
TESTKONTO_FLG NUMBER,
SONDERKONTO_FLG NUMBER,
EXISTIERT_FLG NUMBER,
BEZUG_FLG NUMBER,
BARZAHLUNG_FLG NUMBER
)
TABLESPACE CORE_DTA;


SET DEFINE OFF;
-- Fixed: in the original the 5th value (1..10) was placed in TESTKONTO_FLG
-- while H_KONTO_S_CID was always '0'. That contradicts the result listing
-- shown below (TESTKONTO_FLG = 0 on every row) and would make condensing
-- impossible, since every row would then carry a unique attribute value.
-- The running number 1..10 is the surrogate key H_KONTO_S_CID.
-- Numeric literals are unquoted to avoid implicit VARCHAR->NUMBER conversion.
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values (1,43481,to_date('28.06.1984 00:00:00','DD.MM.RRRR HH24:MI:SS'),to_date('06.02.2003 11:49:47','DD.MM.RRRR HH24:MI:SS'),0,-1,1,1,1);
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values (2,43481,to_date('06.02.2003 11:49:48','DD.MM.RRRR HH24:MI:SS'),to_date('13.08.2004 14:23:09','DD.MM.RRRR HH24:MI:SS'),0,-1,1,1,0);
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values (3,43482,to_date('03.03.1994 00:00:00','DD.MM.RRRR HH24:MI:SS'),to_date('31.12.2009 23:59:59','DD.MM.RRRR HH24:MI:SS'),0,-1,1,0,0);
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values (4,43482,to_date('03.03.1994 00:00:00','DD.MM.RRRR HH24:MI:SS'),to_date('31.12.2009 23:59:59','DD.MM.RRRR HH24:MI:SS'),0,-1,1,0,0);
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values (5,43481,to_date('13.08.2004 14:23:10','DD.MM.RRRR HH24:MI:SS'),to_date('13.08.2004 14:23:24','DD.MM.RRRR HH24:MI:SS'),0,-1,1,0,0);
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values (6,43481,to_date('13.08.2004 14:23:25','DD.MM.RRRR HH24:MI:SS'),to_date('14.03.2005 09:57:42','DD.MM.RRRR HH24:MI:SS'),0,-1,1,1,0);
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values (7,43481,to_date('14.03.2005 09:57:43','DD.MM.RRRR HH24:MI:SS'),to_date('07.10.2005 09:59:31','DD.MM.RRRR HH24:MI:SS'),0,-1,1,1,0);
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values (8,43481,to_date('07.10.2005 09:59:32','DD.MM.RRRR HH24:MI:SS'),to_date('31.12.9999 23:59:59','DD.MM.RRRR HH24:MI:SS'),0,-1,1,0,0);
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values (9,43482,to_date('01.01.2010 00:00:00','DD.MM.RRRR HH24:MI:SS'),to_date('31.12.9999 23:59:59','DD.MM.RRRR HH24:MI:SS'),0,-1,1,0,0);
Insert into condensing_test (H_KONTO_S_CID,H_KONTO_CID,C_START_DT,C_END_DT,TESTKONTO_FLG,SONDERKONTO_FLG,EXISTIERT_FLG,BEZUG_FLG,BARZAHLUNG_FLG) values (10,43482,to_date('01.01.2010 00:00:00','DD.MM.RRRR HH24:MI:SS'),to_date('31.12.9999 23:59:59','DD.MM.RRRR HH24:MI:SS'),0,-1,1,0,0);
commit;

/*
Beruecksichtigte beschreibende Attribute:
Considered descriptive Attributes:
-TESTKONTO_FLG
-SONDERKONTO_FLG
-EXISTIERT_FLG
-BEZUG_FLG
-BARZAHLUNG_FLG
*/
-- Raw (uncondensed) history, ordered by business key and validity start.
SELECT
    h_konto_cid,
    c_start_dt,
    c_end_dt,
    testkonto_flg,
    sonderkonto_flg,
    existiert_flg,
    bezug_flg,
    barzahlung_flg
FROM condensing_test
ORDER BY h_konto_cid, c_start_dt;

H_KONTO_CID C_START_DT C_END_DT TESTKONTO _FLG SONDERKONTO _FLG EXISTIERT _FLG BEZUG _FLG BARZAHLUNG _FLG COMMENT
43481 28.06.1984 00:00:00 06.02.2003 11:49:47 0 -1 1 1 1
43481 06.02.2003 11:49:48 13.08.2004 14:23:09 0 -1 1 1 0
43481 13.08.2004 14:23:10 13.08.2004 14:23:24 0 -1 1 0 0
43481 13.08.2004 14:23:25 14.03.2005 09:57:42 0 -1 1 1 0 Row 4/5 have to condense
43481 14.03.2005 09:57:43 07.10.2005 09:59:31 0 -1 1 1 0 Row 4/5 have to condense
43481 07.10.2005 09:59:32 31.12.9999 23:59:59 0 -1 1 0 0
43482 03.03.1994 00:00:00 31.12.2009 23:59:59 0 -1 1 0 0 Row 7/8/9/10 have to condense
43482 03.03.1994 00:00:00 31.12.2009 23:59:59 0 -1 1 0 0 Row 7/8/9/10 have to condense
43482 01.01.2010 00:00:00 31.12.9999 23:59:59 0 -1 1 0 0 Row 7/8/9/10 have to condense
43482 01.01.2010 00:00:00 31.12.9999 23:59:59 0 -1 1 0 0 Row 7/8/9/10 have to condense

— H_KONTO_CID/HUB_CID entspricht dem Businesskey aus der Business-View
— H_KONTO_CID/HUB_CID correlates to the Businesskey, coming from the Business-View-Logic

-- Condense consecutive history rows per business key (HUB_CID) that carry
-- identical attribute values, while respecting gaps in the history:
--   res01: each row plus the attribute values of its predecessor (LAG) and
--          successor (LEAD) within the same business key, ordered by start date.
--   res02: collects "change points" - a row's start date when it differs from
--          its predecessor, a row's end date when it differs from its successor.
--   res03: rebuilds [start, end] ranges by pairing each change point with the
--          next one; only interval-start rows (TAKEROW = 1) survive.
-- NOTE(review): rows with identical (HUB_CID, C_START_DT) - duplicate versions -
-- are ordered nondeterministically by LAG/LEAD; here they collapse onto the
-- same change point, which is the desired condensing behavior.
with res01 as
(select H_KONTO_CID as hub_cid
,C_START_DT as C_START_DT
,C_END_DT as C_END_DT
,testkonto_flg as attrib1
,lag ( testkonto_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib1_LAG
,lead( testkonto_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib1_LEAD
,sonderkonto_flg as attrib2
,lag ( sonderkonto_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib2_LAG
,lead( sonderkonto_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib2_LEAD
,existiert_flg as attrib3
,lag ( existiert_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib3_LAG
,lead( existiert_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib3_LEAD
,bezug_flg as attrib4
,lag ( bezug_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib4_LAG
,lead( bezug_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib4_LEAD
,barzahlung_flg as attrib5
,lag ( barzahlung_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib5_LAG
,lead( barzahlung_flg, 1 ) over (partition by H_KONTO_CID order by C_START_DT) as attrib5_LEAD
from condensing_test
where h_konto_cid in (43481,43482)
)
,
res02 as
(-- take the row's START date if the current attribute values differ from the
-- PREVIOUS version (or there is no previous version): start of an interval
select HUB_CID
,C_START_DT as CHANGE
,attrib1
,attrib2
,attrib3
,attrib4
,attrib5
,1 as TAKEROW
from res01
where (attrib1_LAG != attrib1 or attrib1_LAG is null)
or (attrib2_LAG != attrib2 or attrib2_LAG is null)
or (attrib3_LAG != attrib3 or attrib3_LAG is null)
or (attrib4_LAG != attrib4 or attrib4_LAG is null)
or (attrib5_LAG != attrib5 or attrib5_LAG is null)
union all
-- take the row's END date if the current attribute values differ from the
-- NEXT version (or there is no next version): end of an interval.
-- These rows only serve as change points (TAKEROW = 0), never as result rows.
select HUB_CID
,C_END_DT as CHANGE
,attrib1
,attrib2
,attrib3
,attrib4
,attrib5
,0 as TAKEROW
from res01
where (attrib1_LEAD != attrib1 or attrib1_LEAD is null)
or (attrib2_LEAD != attrib2 or attrib2_LEAD is null)
or (attrib3_LEAD != attrib3 or attrib3_LEAD is null)
or (attrib4_LEAD != attrib4 or attrib4_LEAD is null)
or (attrib5_LEAD != attrib5 or attrib5_LEAD is null)
)
,
res03 as
(
-- pair each change point with the following one to rebuild the date ranges
select HUB_CID
,CHANGE as C_START_DT
,lead( CHANGE, 1 ) over (partition by HUB_CID order by CHANGE) as C_END_DT --HUB_CID instead of TABLE_CID
,attrib1
,attrib2
,attrib3
,attrib4
,attrib5
,TAKEROW
from res02)
select HUB_CID as H_KONTO_CID
,C_START_DT as C_START_DT
,C_END_DT as C_END_DT
,0 AS c_audit_cid -- NOTE(review): constant audit-id placeholder; confirm intended value
,attrib1 as testkonto_flg
,attrib2 as sonderkonto_flg
,attrib3 as existiert_flg
,attrib4 as bezug_flg
,attrib5 as barzahlung_flg
from res03
where TAKEROW = 1
order by HUB_CID, C_START_DT;

EXECUTE IMMEDIATE with USING and OUT Parameter

If you want to use an OUT parameter with an EXECUTE IMMEDIATE statement that runs an anonymous PL/SQL block, here is an example:

create table in_out_test ( num_ber number);


-- Demonstrates EXECUTE IMMEDIATE with an OUT bind variable (:1) on an
-- anonymous PL/SQL block: the inner block bulk-inserts 1005 generated rows
-- in batches of 100 and reports the running insert count via the OUT bind.
set serveroutput on;
DECLARE
vc_sql CLOB; -- text of the dynamic anonymous block (a string literal below)
p_insert_count NUMBER; -- receives the value assigned to :1 inside the block
BEGIN
vc_sql := '
DECLARE
cursor curData is
with resNmb(iNmb) as
(select 1 as iNmb from dual
union all
select iNmb + 1 from resNmb where iNmb < 1005
)
select iNmb as num_ber from resNmb;
type typeTblData is table of curData%ROWTYPE;
tblData typeTblData;
iterator NUMBER := 0;
insert_count NUMBER;
begin
open curData;
loop
fetch curData bulk collect into tblData limit 100;
exit when tblData.COUNT = 0;
forall ii in tblData.FIRST..tblData.LAST
INSERT INTO in_out_test
values tblData(ii) ;
insert_count := (iterator*100)+Sql%Rowcount;
:1 := insert_count;
commit;
iterator := iterator +1;
end loop;
close curData;
exception
when others then
rollback;
if curData%ISOPEN then close curData; end if;
raise;
end;
';
-- :1 inside the dynamic block maps positionally to p_insert_count.
-- NOTE(review): the inner block commits per batch, so the ROLLBACK in its
-- exception handler can only undo the current, uncommitted batch.
execute immediate vc_sql using out p_insert_count;
dbms_output.put_line('IN OUT PARAMETER: '||p_insert_count||' rows inserted');
END;
/

>> PL/SQL procedure successfully completed.
>> IN OUT PARAMETER: 1005 rows inserted

drop table in_out_test purge;

ORACLE Interval-Partitioning with NUMBER

ORACLE 11.2
We needed a separate partition for every load of a DWH, but of course we wanted the comfort of automatic partition generation.
This can be achieved with Interval-Partitioning not with Date but with Number.
It was not easy to find an example, so I want to show here a simple table for which every new load (more precisely: every insert with a new C_LADE_LAUF_CID) generates a new partition.
We used PCTFREE 0 (of course on partition-level) to avoid unused space, because there were no updates on this table.
We set the initial extent to 1M to avoid having a lot of empty partitions allocating unnecessary storage when the partition(s) are created.

To change the behavior of the big initial extent (8M instead of 64k) for partitions, adjustable on system level beginning with ORACLE 11.2.0.2, read this.

drop table INTERVAL_NUMBER purge;
-- Interval partitioning on a NUMBER column: every new C_LADE_LAUF_CID value
-- beyond the first range automatically creates its own partition (interval 1).
CREATE TABLE interval_number (
C_LADE_LAUF_CID   NUMBER(5)
,begdate           DATE
,enddate           DATE
,description       VARCHAR2(100)
)
PARTITION BY RANGE (C_LADE_LAUF_CID) INTERVAL (1)
-- PCTFREE/STORAGE after the partition list act as defaults inherited by each
-- (auto-created) partition: PCTFREE 0 because the table is insert-only,
-- INITIAL 1M to keep empty partitions from allocating large extents.
( PARTITION P1 VALUES LESS THAN (2) ) PCTFREE 0 STORAGE (INITIAL 1M)
;


-- Explicit column lists; each distinct C_LADE_LAUF_CID spawns a new partition.
insert into interval_number (C_LADE_LAUF_CID, begdate, enddate, description) values (1, sysdate, sysdate, 'partition 1');
insert into interval_number (C_LADE_LAUF_CID, begdate, enddate, description) values (2, sysdate, sysdate, 'partition 2');
insert into interval_number (C_LADE_LAUF_CID, begdate, enddate, description) values (3, sysdate, sysdate, 'partition 3');
commit;

After generation of statistics, we can check what happened:

exec DBMS_STATS.GATHER_TABLE_STATS (ownname => 'CORE_TEST',tabname => 'interval_number',estimate_percent => dbms_stats.auto_sample_size);


Select for the partitions
-- One segment per partition; the SYS_P% partitions were auto-created.
-- NOTE(review): dba_segments is not filtered by owner here - add an OWNER
-- predicate if the segment name could exist in more than one schema.
select segment_name, partition_name, blocks, bytes/1024 as KB
from dba_segments
where segment_name = 'INTERVAL_NUMBER';

SEGMENT_NAME    PARTITION_NAME BLOCKS KB

INTERVAL_NUMBER P1             128 1024
INTERVAL_NUMBER SYS_P12666     128 1024
INTERVAL_NUMBER SYS_P12667     128 1024


Select for the complete table
(pctfree is on partitionlevel, so here null)
-- Table-level view: PCT_FREE is NULL here because it is defined per partition.
select table_name, pct_free, num_rows, partitioned
from user_tables
where table_name = 'INTERVAL_NUMBER';

TABLE_NAME      PCT_FREE NUM_ROWS PARTITIONED

INTERVAL_NUMBER null     3        YES


Select for the pctfree-value on partition-level
-- Default PCTFREE inherited by every partition of the interval table.
select def_pct_free
from USER_PART_TABLES
where table_name = 'INTERVAL_NUMBER';
DEF_PCT_FREE
0

Select for the initial extent of the partitions
-- Initial extent per partition (1 MB, as configured in the STORAGE clause).
select partition_name, initial_extent/1024 as initial_extent_kb
from user_tab_partitions
where table_name = 'INTERVAL_NUMBER';
PARTITION_NAME INITIAL_EXTENT_KB
P1              1024
SYS_P13229      1024
SYS_P13230      1024

ORACLE – redo-log by using append-hint, nologging and archive-log-mode

Here a very good overview about the combinations when Redo-Log is generated or not for inserts with and without logging/append-hint/archive-log-mode. Found by asktom.

Check DB for archive-log-mode turned on or off:
select log_mode from v$database;

Check for logging-mode for table:
-- LOGGING attribute of a single table ('YES' / 'NO').
select logging from all_tables
where owner = '<USER>'
and table_name = '<TABLE_NAME>';

Example for Direct-Path-Insert with append-hint:
-- Fixed: a hint is only recognized when it immediately follows the INSERT
-- keyword; the original "insert into <table> /*+ append */" is parsed as an
-- ordinary comment and silently performs a conventional (non-direct-path)
-- insert. The table name must also not be written as a string literal.
insert /*+ append */ into <TABLE_NAME>
select * from all_objects;


TableMode  Insert Mode ArchiveLogMode  result
---------- ----------- --------------- ----------
LOGGING    APPEND      ARCHIVE LOG     redogenerated
NOLOGGING  APPEND      ARCHIVE LOG     no redo
LOGGING    no append   ""              redogenerated
NOLOGGING  no append   ""              redogenerated
LOGGING    APPEND      noarchive       no redo
NOLOGGING  APPEND      noarchive       no redo
LOGGING    no append   noarchive       redogenerated
NOLOGGING  no append   noarchive       redogenerated

If "FORCE LOGGING" is used, then of course redo is generated!

Optimizer-Modes ALL_ROWS vs FIRST_ROWS

FIRST_ROWS and ALL_ROWS are cost based optimizer features.

Overview:

  • Default: ALL_ROWS
  • Change: alter [system|session] OPTIMIZER_MODE = [ALL_ROWS|FIRST_ROWS|FIRST_ROWS_1|FIRST_ROWS_10|FIRST_ROWS_100|FIRST_ROWS_1000]
  • Check: SHOW PARAMETER OPTIMIZER_MODE (with dba-role)
  • SQL – Hint for retrieving a few rows: /*+ FIRST_ROWS*/

 

http://docs.oracle.com/cd/E18283_01/server.112/e17110/initparams167.htm

first_rows_n
The optimizer uses a cost-based approach and optimizes with a goal of best response time to return the first n rows (where n = 1, 10, 100, 1000).

first_rows
The optimizer uses a mix of costs and heuristics to find a best plan for fast delivery of the first few rows.

all_rows
The optimizer uses a cost-based approach for all SQL statements in the session and optimizes with a goal of best throughput (minimum resource use to complete the entire statement).


http://oracle-online-help.blogspot.de/2007/03/optimizermode-allrows-or-firstrows.html

Foreign Key Constraints in a ORACLE DWH

There are several possibilities to have Foreign Key Constraints in a ORACLE-DWH-Environment.

ENABLE VALIDATE
-- Enabled + validated: existing rows are checked once when the constraint is
-- created, and all subsequent DML is enforced.
ALTER TABLE sales ADD CONSTRAINT sales_time_fk
FOREIGN KEY (time_id) REFERENCES times (time_id)
ENABLE VALIDATE;

Enabled and validated means the existing data was checked once at creation, and every subsequent change is validated against the constraint.

ENABLE NOVALIDATE
-- Enabled but not validated: existing rows are NOT checked; future DML is enforced.
ALTER TABLE sales ADD CONSTRAINT sales_time_fk
FOREIGN KEY (time_id) REFERENCES times (time_id)
ENABLE NOVALIDATE;

You can use this option if the existing data does not conform to the constraint, but you need an enforced constraint for future inserts.
That means the current state of the existing data is not validated but you can’t insert data in the child-table without having a parent-key in the parent table.
That means that the constraint is checked on inserts/updates/deletes.

RELY DISABLE NOVALIDATE
-- RELY + disabled: never enforced; pure metadata the optimizer and query
-- rewrite (e.g. for materialized views) may trust.
ALTER TABLE sales ADD CONSTRAINT sales_time_fk
FOREIGN KEY (time_id) REFERENCES times (time_id)
RELY DISABLE NOVALIDATE;

This constraint is NOT used for data validation. If you know the data is correct because the ETLs guarantee it, then you can use this option.
DWH-Tools can use this info for retrieving information on this constraint. And advanced query-rewriting can be possible for materialized views.

  • No data validation -> no cpu-consumption.
  • No data validation -> inexpensive and no overhead on running dml.

More Infos can be found in the
ORACLE Database Data Warehousing Guide 11g or
ORACLE Database Data Warehousing Guide 12c.

PL/SQL – Native Compilation

Show configuration concerning native compilation of PL/SQL-Code:

SQL> show parameter PLSQL_CODE_TYPE;
NAME TYPE VALUE
--------------- ------ -----------
plsql_code_type string INTERPRETED

INTERPRETED : Default-Value, the Database will interpret the code at runtime and will not compile it into native code.
NATIVE : Native-Compilation before running the code

Change Configuration on session- or system-level:

SQL> alter system set PLSQL_CODE_TYPE=native scope=both;
System altered.

From now on, all plsql-code is compiled into native code.
scope=both : Affects MEMORY and SPFILE

Check compilation-method on PL/SQL-Object-Level:

-- Compilation mode (INTERPRETED / NATIVE) per stored PL/SQL object.
select OWNER,NAME,TYPE,PLSQL_CODE_TYPE from all_plsql_object_settings;
select NAME,TYPE,PLSQL_CODE_TYPE from user_plsql_object_settings;

change compilation-method for one procedure/function/package
PLSQL_CODE_TYPE= native or interpreted

SQL> alter <procedure|function|package> <objectname> compile PLSQL_CODE_TYPE=native;

SQL%ROWCOUNT for merge

-- Demonstrates that SQL%ROWCOUNT after a MERGE reports the number of rows
-- merged (inserted + updated) by the statement.
create table merge_sql_rowcount(
dummy VARCHAR2(1),
x number,
y number
);
truncate table merge_sql_rowcount;
select * from merge_sql_rowcount;

-- Explicit column list guards against silent breakage on schema changes.
insert into merge_sql_rowcount (dummy, x, y) values ('X', 1, 0);
commit;

set serveroutput on
begin
-- dual.dummy = 'X' matches the seeded row, so the MATCHED branch fires.
merge into merge_sql_rowcount using dual on (merge_sql_rowcount.dummy = dual.dummy)
when matched then update set y = y+1
-- Fixed: the original inserted dual.dummy ('X') into the NUMBER column x,
-- which would raise ORA-01722 whenever the NOT MATCHED branch fires.
when not matched then insert (dummy, y) values ( dual.dummy, 0 );

if sql%rowcount > 0 then
dbms_output.put_line( sql%rowcount || ' rows affected...' );
end if;
end;
/
select * from merge_sql_rowcount;

drop table merge_sql_rowcount;

>Table MERGE_SQL_ROWCOUNT created.
>Table MERGE_SQL_ROWCOUNT truncated.
>no rows selected
>1 row inserted.
>Commit complete.
>PL/SQL procedure successfully completed.
>1 rows affected…

D X Y
– ———- ———-
X 1 1

>Table MERGE_SQL_ROWCOUNT dropped.

Table column names in an Associative Array

Here is an example of using an associative array to handle the column names of a table automatically…

set serveroutput on

-- Sample table whose column names are collected into the associative array below.
create table column_list_test
( myid number
,mydesc varchar2(20)
,myname varchar2(100)
,mydate date
);

DECLARE
-- Collects the column names of a table into an associative array keyed by
-- COLUMN_ID and builds a comma-separated column list for dynamic SQL.
c_table_name CONSTANT varchar2(30) := 'column_list_test';
TYPE t_name_map IS TABLE OF varchar2(30) INDEX BY BINARY_INTEGER;
l_names t_name_map;
l_pos BINARY_INTEGER;
l_col_list varchar2(4000);
BEGIN
-- Fill the array: key = COLUMN_ID, value = COLUMN_NAME.
FOR rec IN (SELECT column_id, column_name
FROM user_tab_columns
WHERE table_name = upper(c_table_name)
ORDER BY table_name, column_id)
LOOP
l_names(rec.column_id) := rec.column_name;
END LOOP;

-- Walk the array in key order via FIRST/NEXT.
l_pos := l_names.first;
WHILE l_pos IS NOT NULL LOOP

dbms_output.put_line(l_pos || ' - ' || l_names(l_pos));
l_col_list := l_col_list || l_names(l_pos);

-- advance to the next key
l_pos := l_names.next(l_pos);

-- append a separator unless this was the last entry
IF l_pos IS NOT NULL THEN
l_col_list := l_col_list || ',';
END IF;
END LOOP;

-- complete comma-separated column list for further dynamic SQL
dbms_output.put_line(l_col_list);

EXCEPTION
WHEN OTHERS THEN DBMS_OUTPUT.PUT_LINE(DBMS_UTILITY.FORMAT_ERROR_STACK);
END;

Output:
table COLUMN_LIST_TEST created.
anonymous block completed
1 - MYID
2 - MYDESC
3 - MYNAME
4 - MYDATE
MYID,MYDESC,MYNAME,MYDATE