DB2 dynamic search - db2

I need to create a search (LIKE '%-99' or LIKE '%NON%') across all columns of all tables in DB2, driven by the system catalog. The code below is test code, but I get this DB2 error:
DB2 Database Error: ERROR [42703] [IBM][DB2/LINUXX8664] SQL0206N "WHERE" is not valid in the context where it is used.
DECLARE GLOBAL TEMPORARY TABLE SESSION.TEMP_DINAMIC_TEST_SEARCH
(
TABSET VARCHAR(128)
, TABSCHEMA VARCHAR(128)
, TABNAME VARCHAR(128)
, COLUMN_NAME VARCHAR(128)
, ROW_COUNT BIGINT
)
ON COMMIT PRESERVE ROWS NOT LOGGED
;
BEGIN
FOR C AS CUR CURSOR WITH HOLD FOR
SELECT 'INSERT INTO SESSION.TEMP_DINAMIC_TEST_SEARCH SELECT '''
||''' ,''' || TABLE_SCHEM || ''',''' || TABLE_NAME || ''',''' || COLUMN_NAME || ''', COUNT( DISTINCT(' || COLUMN_NAME || ')) FROM '
|| '"' || TABLE_SCHEM || '"."' || TABLE_NAME|| '"' ||WHERE|| '"' || COLUMN_NAME || '"' || LIKE || '"' ||'%-99'|| '"' || AS S
FROM SYSIBM.SQLCOLUMNS
WHERE TABLE_SCHEM = 'DWD' AND TABLE_NAME LIKE 'T_X_%'
WITH UR
DO
EXECUTE IMMEDIATE C.S;
COMMIT;
END FOR;
END

The expression is wrong: WHERE and LIKE are keywords that must live inside the string literal being built, not be concatenated as identifiers.
Try this:
SELECT
'INSERT INTO SESSION.TEMP_DINAMIC_TEST_SEARCH SELECT '''
|| ''' ,''' || TABLE_SCHEM || ''',''' || TABLE_NAME || ''',''' || COLUMN_NAME || ''', COUNT( DISTINCT(' || COLUMN_NAME || ')) FROM '
|| '"' || TABLE_SCHEM || '"."' || TABLE_NAME|| '" WHERE "' || COLUMN_NAME || '" LIKE ''%-99''' AS S
FROM SYSIBM.SQLCOLUMNS
WHERE TABLE_SCHEM = 'DWD' AND TABLE_NAME LIKE 'T_X_%'
/*
AND
(
TYPE_NAME LIKE '%CHAR'
OR TYPE_NAME LIKE '%GRAPHIC'
OR TYPE_NAME LIKE '%CLOB'
)
*/
WITH UR
BTW, it's worth generating the INSERT ... SELECT statements for string columns only, as shown in the commented-out lines.
Try not to use tables in the SYSIBM schema in Db2 for LUW: they are not documented, and their content may change with a new release or fix pack without notice. Use SYSCAT views instead, the SYSCAT.COLUMNS view in your case.
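For reference, a minimal sketch of the same generator against SYSCAT.COLUMNS (its column names differ from SYSIBM.SQLCOLUMNS: TABSCHEMA, TABNAME, COLNAME, TYPENAME), with the string-type filter applied:
SELECT
'INSERT INTO SESSION.TEMP_DINAMIC_TEST_SEARCH SELECT '''
|| ''' ,''' || TABSCHEMA || ''',''' || TABNAME || ''',''' || COLNAME || ''', COUNT( DISTINCT(' || COLNAME || ')) FROM '
|| '"' || TABSCHEMA || '"."' || TABNAME || '" WHERE "' || COLNAME || '" LIKE ''%-99''' AS S
FROM SYSCAT.COLUMNS
WHERE TABSCHEMA = 'DWD' AND TABNAME LIKE 'T_X_%'
AND (TYPENAME LIKE '%CHAR' OR TYPENAME LIKE '%GRAPHIC' OR TYPENAME LIKE '%CLOB')
WITH UR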

Related

How to move all timestamptz dates in the postgresql database?

I have a postgresql dump of a seed database. This dump was created a few months ago, so all of its data is in the past. It is not very convenient to develop on past data because I always have to scroll the UI back to that past date.
I was thinking of automatically shifting every timestamptz field in the database by a specific offset. It sounds doable via a script which would go through the database schema, find every timestamptz field, and then build a SQL update for each one.
So, are there any ready-made solutions for this?
I solved it using this SQL query:
--
-- This SQL query shifts all timestamptz fields in the database
--
BEGIN;
DO $$
declare
sql_query text;
table_row record;
column_row record;
trigger_row record;
BEGIN
FOR table_row IN (
SELECT table_schema, table_name
FROM information_schema.tables
WHERE table_type = 'BASE TABLE' AND table_schema = 'public'
) LOOP
sql_query := '';
RAISE NOTICE 'Checking %', table_row.table_name;
FOR column_row IN (
SELECT column_name
FROM information_schema.columns
WHERE
table_schema = table_row.table_schema
AND table_name = table_row.table_name
AND udt_name = 'timestamptz'
AND is_updatable = 'YES'
) LOOP
sql_query := sql_query ||
'"' || column_row.column_name || '" = "' || column_row.column_name || '" + interval ''100'' day,';
END LOOP;
IF sql_query != '' THEN
sql_query := substr(sql_query,1, length(sql_query)-1); -- Remove last ","
sql_query := 'UPDATE ' || table_row.table_schema || '.' || table_row.table_name || ' SET ' || sql_query || ';';
-- There may be UPDATE triggers on the table, so disable them before the update
FOR trigger_row IN (
SELECT trigger_name FROM information_schema.triggers WHERE
trigger_schema = table_row.table_schema
AND event_object_table = table_row.table_name
AND event_manipulation = 'UPDATE' and
(action_timing = 'BEFORE' or action_timing = 'AFTER')
) LOOP
sql_query := 'alter table ' || table_row.table_schema || '.' || table_row.table_name ||
' disable trigger ' || trigger_row.trigger_name || ';' ||
sql_query ||
'alter table ' || table_row.table_schema || '.' || table_row.table_name ||
' enable trigger ' || trigger_row.trigger_name || ';';
END LOOP;
-- Same for the row level security, disable it if it was enabled
IF (SELECT pg_class.oid FROM pg_class
LEFT JOIN pg_catalog.pg_namespace ON pg_catalog.pg_namespace.oid = pg_class.relnamespace
WHERE relname = table_row.table_name AND
pg_catalog.pg_namespace.nspname = table_row.table_schema AND relrowsecurity) IS NOT NULL THEN
sql_query := 'alter table ' || table_row.table_schema || '.' || table_row.table_name ||
' disable row level security;' ||
sql_query ||
'alter table ' || table_row.table_schema || '.' || table_row.table_name ||
' enable row level security;';
END IF;
RAISE NOTICE ' %', sql_query;
EXECUTE sql_query;
RAISE NOTICE '---------------------------';
END IF;
END LOOP;
END$$;
COMMIT;
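If you want to preview what the block will touch before running it, the column discovery it relies on can be run standalone (same filters as above):
SELECT table_schema, table_name, column_name
FROM information_schema.columns
WHERE table_schema = 'public'
AND udt_name = 'timestamptz'
AND is_updatable = 'YES';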
Just add things to the database and then update them with this query; change the column name, table name, and the number of days you want the values incremented by:
UPDATE table_name
SET timestamptz = timestamptz + interval '1' day
WHERE 1 = 1;

Hello fellow members, I'm new to postgresql. I want to drop a table partition based on the max date of the field reg_dt, in the function below:

CREATE OR REPLACE FUNCTION cbsadm.create_part_tbl_cpvmm(cpvmm_custi_m text)
RETURNS void
LANGUAGE plpgsql
AS $function$
DECLARE
BEGIN
PERFORM pg_terminate_backend(pid) FROM pg_stat_activity WHERE pid IN
(SELECT pid FROM pg_catalog.pg_locks pl WHERE pl.relation in
(SELECT oid FROM pg_catalog.pg_class pc WHERE pc.relname = 'cpvmm_custi_m' AND pc.relowner in
(SELECT usesysid FROM pg_catalog.pg_user WHERE usename = 'cbsadm')));
EXECUTE 'CREATE TABLE ' || cpvmm_custi_m || '_' || to_char(now()+'1day','YYYYMMDD') || ' PARTITION OF ' || cpvmm_custi_m || ' FOR VALUES FROM (''' || to_char(now()+'1day','YYYYMMDD') ||''') TO (''' || to_char(now()+'2day','YYYYMMDD') || ''')
tablespace tbs_cbs;';
EXECUTE 'ALTER TABLE '|| cpvmm_custi_m || '_' || to_char(now()+'1day','YYYYMMDD') || ' ADD CONSTRAINT ' || cpvmm_custi_m || '_' || to_char(now()+'1day','YYYYMMDD') || '_un' || ' UNIQUE (reg_dt, card_no, card_sttl_acno);';
EXECUTE 'CREATE UNIQUE INDEX ix_'|| cpvmm_custi_m || '_' || to_char(now()+'1day','YYYYMMDD') || ' ON ' || cpvmm_custi_m || '_' || to_char(now()+'1day','YYYYMMDD') || ' (reg_dt, reg_seqno, cstno) tablespace tbs_cbs;';
EXECUTE 'ANALYZE '|| cpvmm_custi_m || '_' || to_char(now()+'1day','YYYYMMDD') ||';';
--execute 'DROP TABLE cpvmm_custi_m_' || to_char(now()+'-10day','YYYYMMDD')||' ;';
execute 'DROP TABLE cpvmm_custi_m_' || SELECT max(reg_dt) FROM cpvmm_custi_m +'-10day','YYYYMMDD')||' ;';
END;
$function$
;
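A sketch of the dynamic DROP the last EXECUTE seems to be aiming for (assumptions: partition names follow the cpvmm_custi_m_YYYYMMDD pattern above, and reg_dt is a timestamp column):
DO $$
DECLARE
max_dt timestamp;
BEGIN
-- fetch the newest reg_dt dynamically, since the table name is dynamic too
EXECUTE 'SELECT max(reg_dt) FROM cpvmm_custi_m' INTO max_dt;
-- drop the partition dated 10 days before that maximum
EXECUTE 'DROP TABLE IF EXISTS cpvmm_custi_m_'
|| to_char(max_dt - interval '10 day', 'YYYYMMDD');
END $$;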

Postgresql - Generating where not Exists condition dynamically for re-runnable insert script

I need to generate an insert script in postgres for all the tables in a database, such that it can be run again without throwing any errors.
The problem is that only a few tables have a primary key, while the rest have unique indexes on various columns.
That is why I am not able to list out the columns on which a unique index has been created.
The reason behind this is that the schema is created automatically by Magnolia.
Can anyone help me write a query which produces insert statements including a 'WHERE NOT EXISTS (SELECT 1 FROM table WHERE column = value)' condition based on the primary key / unique columns?
You can use on conflict:
insert into t ( . . . )
values ( . . . )
on conflict do nothing;
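For example, with a hypothetical users table that has a unique constraint on email, re-running the same insert becomes a no-op instead of an error:
insert into users (email, name)
values ('a@example.com', 'Alice')
on conflict do nothing;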
This function returns an insert script for the data and works well with tables on which no primary key constraint is available.
I have modified code that I found in another thread by adding the condition to it.
CREATE OR REPLACE FUNCTION public.generate_inserts(varSchema text, varTable text) RETURNS TABLE(resultado text) AS $$
DECLARE CODE TEXT;
BEGIN
CODE :=
(
SELECT
'SELECT ''INSERT INTO '
|| table_schema || '.'
|| table_name ||' ('
|| replace(replace(array_agg(column_name::text)::text,'{',''),'}','') || ') SELECT ''||'
|| replace(replace(replace(array_agg( 'quote_nullable(' || column_name::text || ')')::text,'{',''),'}',''),',',' || '','' || ')
|| ' || '' Where Not Exists (Select 1 From ' || table_name ||' Where 1 = 1 '
|| ''''
|| replace(replace(replace(replace(array_agg(' || '' and (' || column_name::text || ' = '' || quote_nullable(' || column_name::text || '),' || ' || '' or ' || column_name::text || ' is null)''')::text,'{',''),'}',''),'"',''),',','')
|| '|| '');'''
|| ' FROM ' || table_schema || '.' || table_name || ';'
FROM information_schema.columns c
WHERE table_schema = varSchema
AND table_name = varTable
GROUP BY table_schema, table_name);
RETURN QUERY
EXECUTE CODE;
END;
$$ LANGUAGE plpgsql;
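A hypothetical invocation (the schema and table names are placeholders); each returned row is one re-runnable INSERT statement:
SELECT * FROM public.generate_inserts('public', 'my_table');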

How to use a WITH block with dynamic sql query

I've got a plpgsql function that needs to prepare data from 3 tables based on user input, and export the data using COPY TO. The data are road accidents, so the 3 tables are accident, casualty and vehicle; each accident links to zero or more records in the vehicle and casualty tables via an accidentid column that exists in all three tables. severity and local_authorities are input parameters (both text[]).
sql_query = 'SELECT COUNT(*) FROM accident WHERE severity = ANY(' || quote_literal(severity)
|| ') AND local_auth = ANY (' || quote_literal(local_authorities) || ')';
EXECUTE sql_query INTO result_count;
IF result_count > 0 THEN
-- replace Select Count(*) With Select *
sql_query = Overlay(sql_query placing '*' from 8 for 8);
-- copy the accident data first
EXECUTE 'COPY (' || sql_query || ') TO ' || quote_literal(file_path || file_name_a) ||
' CSV';
This first bit will get the relevant accidents, so I'm now looking for the most efficient way to use the accidentid values from the first query to download the related vehicle and casualty data.
I thought I'd be able to use a WITH block like this:
-- replace * with accidentid
sql_query = Overlay(sql_query placing 'accidentid' from 8 for 1);
WITH acc_ids AS (sql_query)
EXECUTE 'COPY (SELECT * FROM vehicle WHERE accidentid IN (SELECT accidentid FROM
acc_ids)) TO ' || out_path_and_vfilename || ' CSV';
EXECUTE 'COPY (SELECT * FROM casualty WHERE accidentid IN (SELECT accidentid FROM
acc_ids)) TO ' || out_path_and_cfilename || ' CSV';
but get an error:
ERROR: syntax error at or near "$1"
LINE 1: WITH acc_ids AS ( $1 ) EXECUTE 'COPY (SELECT * FROM accident....
I have tried the above in a non-dynamic test case e.g.
WITH acc_ids AS (
SELECT accidentid FROM accident
WHERE severity = ANY ('{3,2}')
AND local_auth = ANY ('{E09000001,E09000002}')
)
SELECT * FROM vehicle
WHERE accidentid IN (
SELECT accidentid FROM acc_ids);
which works. Unfortunately the server is still running Postgres 8.4 so I can't use format() for the time being.
Perhaps this isn't possible with a WITH block, but I hope it at least illustrates what I'm trying to achieve.
Edit/Update
The main goal is to get the relevant data from the 3 tables into 3 separate CSV files, ideally without having to run the selection on the accident table 3 times.
If you want to run a query (part) that is stored in a string variable, you need a dynamic query like
EXECUTE 'WITH acc_ids AS (' || sql_query || ') '
|| 'SELECT ... ';
Either the whole query is a string executed by EXECUTE, or the whole query is static SQL. You cannot mix them.
Do you need a CTE? If you can express the query as a join, the optimizer has more options.
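Given the edit/update above (3 CSV files without scanning the accident table 3 times), one version-safe sketch is to materialize the matching ids once in a temporary table and join against it in each COPY. This assumes it runs inside the existing function, reusing its variables:
EXECUTE 'CREATE TEMP TABLE acc_ids ON COMMIT DROP AS '
|| 'SELECT accidentid FROM accident '
|| 'WHERE severity = ANY (' || quote_literal(severity) || ') '
|| 'AND local_auth = ANY (' || quote_literal(local_authorities) || ')';
-- accidents
EXECUTE 'COPY (SELECT a.* FROM accident a JOIN acc_ids USING (accidentid)) TO '
|| quote_literal(file_path || file_name_a) || ' CSV';
-- vehicles
EXECUTE 'COPY (SELECT v.* FROM vehicle v JOIN acc_ids USING (accidentid)) TO '
|| quote_literal(file_path || file_name_v) || ' CSV';
-- casualties
EXECUTE 'COPY (SELECT c.* FROM casualty c JOIN acc_ids USING (accidentid)) TO '
|| quote_literal(file_path || file_name_c) || ' CSV';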
This does what I need without a CTE, but I can't see it being the most efficient solution, since I have to perform the same query on the accident table 3 times:
sql_query = sql_query || which_tab || ' WHERE severity = ANY ('||
quote_literal(severity) ||') AND ' || date_start || ' AND ' ||
date_end || ' AND local_auth = ANY (' ||
quote_literal(local_authorities) || ')';
-- replace * with COUNT(*)
sql_query = Overlay(sql_query placing 'COUNT(*)' from 8 for 1);
EXECUTE sql_query INTO result_count;
IF result_count > 0 THEN
-- replace COUNT(*) with *
sql_query = Overlay(sql_query placing '*' from 8 for 8);
-- copy the accident data first
EXECUTE 'COPY (' || sql_query || ') TO ' || quote_literal(file_path ||
file_name_a) || ' CSV';
sql_query = Overlay(sql_query placing 'accidentid' from 8 for 1);
-- vehicles
EXECUTE 'COPY (SELECT * FROM vehicle WHERE accidentid IN (
SELECT accidentid FROM accident
WHERE severity = ANY (' || quote_literal(severity) || ')
AND local_auth = ANY (' || quote_literal(local_authorities) ||')))
TO ' || quote_literal(file_path || file_name_v) || ' CSV';
-- casualties
EXECUTE 'COPY (SELECT * FROM casualty WHERE accidentid IN (
SELECT accidentid FROM accident
WHERE severity = ANY (' || quote_literal(severity) || ')
AND local_auth = ANY (' || quote_literal(local_authorities) ||')))
TO ' || quote_literal(file_path || file_name_c) || ' CSV';
END IF;

db2 issue selecting dynamically from an unknown table

I need a stored procedure that loops over a table which returns the name of a table in the db2; depending on that name, I need to run a SELECT statement against the named table. I have tried doing it with EXECUTE IMMEDIATE in so many ways that I've lost count. Here is an example of the EXECUTE IMMEDIATE:
set insertstring = 'INSERT INTO pribpm.TEMP_T_TOQUE_CICLO (idSemana,tiempo_ciclo,tiempo_toque)
SELECT to_number(to_char( '''|| ' time_stamp ' ||''' ,' || ' IW ' || ')) ,SUM(KPITOTALTIMECLOCK),SUM(s.KPIEXECUTIONTIMECLOCK) FROM ' || TABLA || ' where to_number(to_char( '''|| ' time_stamp ' ||''' ,' || ' IW ' || ')) between ' || (to_number(to_char(FECHA,'IW'))-3) || ' and ' || to_number(to_char(FECHA,'IW')) || ' GROUP BY to_number(to_char('''|| ' time_stamp ' ||''' ,' || ' IW ' || '))';
PREPARE stmt FROM insertstring;
EXECUTE IMMEDIATE insertstring;
where tabla is a string that contains the name of the table, and fecha is a date of timestamp type.
Besides that, I've tried it with cursors, like this:
set select_ = 'SELECT time_stamp, KPITOTALTIMECLOCK, KPIEXECUTIONTIMECLOCK FROM ' || tabla;
PREPARE stmt FROM select_;
FOR v2 AS
c2 cursor for
execute select_
do
if to_number(to_char(time_stamp,'IW')) between
(to_number(to_char(fecha,'IW'))-3) and to_number(to_char(fecha,'IW')) then
--something here
end if;
END FOR;
but with no success.
Could someone please help me find my error, or suggest some other idea for what I'm trying to do?
All of this is in a db2 environment.
Write a procedure that loops over SYSCAT.TABLES to get the table names, then fires a SELECT query for each table.
I am not 100% sure, as it has been a long time since I worked on db2.
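For what it's worth, a minimal SQL PL sketch of that idea: it counts the rows of every table in a schema into a session table (the schema name PRIBPM and the SESSION.ROW_COUNTS table are illustrative assumptions):
DECLARE GLOBAL TEMPORARY TABLE SESSION.ROW_COUNTS
(TABSCHEMA VARCHAR(128), TABNAME VARCHAR(128), N BIGINT)
ON COMMIT PRESERVE ROWS NOT LOGGED
;
BEGIN
DECLARE stmt VARCHAR(2000);
FOR t AS
SELECT TABSCHEMA, TABNAME
FROM SYSCAT.TABLES
WHERE TYPE = 'T' AND TABSCHEMA = 'PRIBPM'
DO
-- build and run one INSERT per table found in the catalog
SET stmt = 'INSERT INTO SESSION.ROW_COUNTS SELECT '''
|| RTRIM(t.TABSCHEMA) || ''',''' || t.TABNAME
|| ''', COUNT(*) FROM "' || RTRIM(t.TABSCHEMA) || '"."' || t.TABNAME || '"';
EXECUTE IMMEDIATE stmt;
END FOR;
END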