How to shift all timestamptz dates in a PostgreSQL database?

I have a PostgreSQL dump of a seed database. The dump was created a few months ago, so all the data is in the past. It is not very convenient to develop against past data because I always have to scroll the UI back to those past dates.
I was thinking of automatically shifting every timestamptz field in the database by a specific offset. It sounds doable with a script that walks the database schema, finds every timestamptz field, and then builds a SQL UPDATE for each one.
So, are there any ready-made solutions for this?

I solved it using this SQL query:
--
-- This query shifts every updatable timestamptz column in the public schema
-- forward by 100 days (adjust the interval as needed).
--
BEGIN;
DO $$
declare
    sql_query text;
    table_row record;
    column_row record;
    trigger_row record;
BEGIN
    FOR table_row IN (
        SELECT table_schema, table_name
        FROM information_schema.tables
        WHERE table_type = 'BASE TABLE' AND table_schema = 'public'
    ) LOOP
        sql_query := '';
        RAISE NOTICE 'Checking %', table_row.table_name;
        FOR column_row IN (
            SELECT column_name
            FROM information_schema.columns
            WHERE table_schema = table_row.table_schema
              AND table_name = table_row.table_name
              AND udt_name = 'timestamptz'
              AND is_updatable = 'YES'
        ) LOOP
            sql_query := sql_query ||
                '"' || column_row.column_name || '" = "' || column_row.column_name || '" + interval ''100'' day,';
        END LOOP;
        IF sql_query != '' THEN
            sql_query := substr(sql_query, 1, length(sql_query) - 1); -- Remove the trailing ","
            sql_query := 'UPDATE ' || table_row.table_schema || '.' || table_row.table_name || ' SET ' || sql_query || ';';
            -- There might be triggers on the table, so disable them around the update
            FOR trigger_row IN (
                SELECT trigger_name
                FROM information_schema.triggers
                WHERE trigger_schema = table_row.table_schema
                  AND event_object_table = table_row.table_name
                  AND event_manipulation = 'UPDATE'
                  AND (action_timing = 'BEFORE' OR action_timing = 'AFTER')
            ) LOOP
                sql_query := 'alter table ' || table_row.table_schema || '.' || table_row.table_name ||
                    ' disable trigger ' || trigger_row.trigger_name || ';' ||
                    sql_query ||
                    'alter table ' || table_row.table_schema || '.' || table_row.table_name ||
                    ' enable trigger ' || trigger_row.trigger_name || ';';
            END LOOP;
            -- Same for row level security: disable it around the update if it was enabled
            IF (SELECT pg_class.oid
                FROM pg_class
                LEFT JOIN pg_catalog.pg_namespace ON pg_catalog.pg_namespace.oid = pg_class.relnamespace
                WHERE relname = table_row.table_name
                  AND pg_catalog.pg_namespace.nspname = table_row.table_schema
                  AND relrowsecurity) IS NOT NULL THEN
                sql_query := 'alter table ' || table_row.table_schema || '.' || table_row.table_name ||
                    ' disable row level security;' ||
                    sql_query ||
                    'alter table ' || table_row.table_schema || '.' || table_row.table_name ||
                    ' enable row level security;';
            END IF;
            RAISE NOTICE ' %', sql_query;
            EXECUTE sql_query;
            RAISE NOTICE '---------------------------';
        END IF;
    END LOOP;
END$$;
COMMIT;
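Before running it, you can preview which columns the script will update by running its inner catalog query on its own; a minimal check against the standard information_schema views:
SELECT table_name, column_name
FROM information_schema.columns
WHERE table_schema = 'public'
  AND udt_name = 'timestamptz'
  AND is_updatable = 'YES'
ORDER BY table_name, column_name;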

Or just update each timestamptz column directly with a query like this; change the table name, the column name, and the number of days you want it incremented by:
UPDATE table_name
SET timestamptz = timestamptz + interval '1' day
WHERE 1 = 1;

Related

DB2 dynamic search

I need to create a search (LIKE '%-99' or LIKE '%NON%') across all tables and all columns in a DB2 schema. Below is my test code, but it fails with: DB2 Database Error: ERROR [42703] [IBM][DB2/LINUXX8664] SQL0206N "WHERE" is not valid in the context where it is used.
DECLARE GLOBAL TEMPORARY TABLE SESSION.TEMP_DINAMIC_TEST_SEARCH
(
TABSET VARCHAR(128)
, TABSCHEMA VARCHAR(128)
, TABNAME VARCHAR(128)
, COLUMN_NAME VARCHAR(128)
, ROW_COUNT BIGINT
)
ON COMMIT PRESERVE ROWS NOT LOGGED
;
BEGIN
FOR C AS CUR CURSOR WITH HOLD FOR
SELECT 'INSERT INTO SESSION.TEMP_DINAMIC_TEST_SEARCH SELECT '''
||''' ,''' || TABLE_SCHEM || ''',''' || TABLE_NAME || ''',''' || COLUMN_NAME || ''', COUNT( DISTINCT(' || COLUMN_NAME || ')) FROM '
|| '"' || TABLE_SCHEM || '"."' || TABLE_NAME|| '"' ||WHERE|| '"' || COLUMN_NAME || '"' || LIKE || '"' ||'%-99'|| '"' || AS S
FROM SYSIBM.SQLCOLUMNS
WHERE TABLE_SCHEM = 'DWD' AND TABLE_NAME LIKE 'T_X_%'
WITH UR
DO
EXECUTE IMMEDIATE C.S;
COMMIT;
END FOR;
END
The wrong expression is used: WHERE and LIKE are SQL keywords and must be part of the generated string literal, not bare identifiers in the concatenation.
Try this:
SELECT
'INSERT INTO SESSION.TEMP_DINAMIC_TEST_SEARCH SELECT '''
|| ''' ,''' || TABLE_SCHEM || ''',''' || TABLE_NAME || ''',''' || COLUMN_NAME || ''', COUNT( DISTINCT(' || COLUMN_NAME || ')) FROM '
|| '"' || TABLE_SCHEM || '"."' || TABLE_NAME|| '" WHERE "' || COLUMN_NAME || '" LIKE ''%-99''' AS S
FROM SYSIBM.SQLCOLUMNS
WHERE TABLE_SCHEM = 'DWD' AND TABLE_NAME LIKE 'T_X_%'
/*
AND
(
TYPE_NAME LIKE '%CHAR'
OR TYPE_NAME LIKE '%GRAPHIC'
OR TYPE_NAME LIKE '%CLOB'
)
*/
WITH UR
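For a hypothetical column C1 in a table DWD.T_X_SALES (names made up for illustration), the generated statement looks like:
INSERT INTO SESSION.TEMP_DINAMIC_TEST_SEARCH
SELECT '' ,'DWD','T_X_SALES','C1', COUNT( DISTINCT(C1))
FROM "DWD"."T_X_SALES" WHERE "C1" LIKE '%-99'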
BTW, it's worth generating the INSERT ... SELECT statements for string columns only, as shown in the commented-out lines.
Also, try not to use tables in the SYSIBM schema in Db2 for LUW: they are not documented, and their content may change with a new release or fix pack without notice. Use the SYSCAT views instead, the SYSCAT.COLUMNS view in your case.
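For example, the same column lookup against the documented catalog, restricted to string types per the note above, might look like this (a sketch; the DWD schema and T_X_% pattern come from the question):
SELECT TABSCHEMA, TABNAME, COLNAME, TYPENAME
FROM SYSCAT.COLUMNS
WHERE TABSCHEMA = 'DWD'
  AND TABNAME LIKE 'T_X_%'
  AND (TYPENAME LIKE '%CHAR' OR TYPENAME LIKE '%GRAPHIC' OR TYPENAME LIKE '%CLOB')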

Update Null columns to Zero dynamically in Redshift

Here is the code in SAS; it finds the numeric columns with missing values and replaces them with 0:
DATA dummy_table;
SET dummy_table;
ARRAY DUMMY _NUMERIC_;
DO OVER DUMMY;
IF DUMMY=. THEN DUMMY=0;
END;
RUN;
I am trying to replicate this in Redshift. Here is what I tried:
create or replace procedure sp_replace_null_to_zero(IN tbl_nm varchar) as $$
Begin
    Execute 'declare ' ||
        'tot_cnt int := (select count(*) from information_schema.columns where table_name = ' || tbl_nm || ');' ||
        'init_loop int := 0; ' ||
        'cn_nm varchar; '
    Begin
        While init_loop <= tot_cnt
        Loop
            Raise info 'init_loop = %', Init_loop;
            Raise info 'tot_cnt = %', tot_cnt;
            Execute 'Select column_name into cn_nm from information_schema.columns ' ||
                'where table_name ='|| tbl_nm || ' and ordinal_position = init_loop ' ||
                'and data_type not in (''character varying'',''date'',''text''); '
            Raise info 'cn_nm = %', cn_nm;
            if cn_nm is not null then
                Execute 'Update ' || tbl_nm ||
                    'Set ' || cn_nm = 0 ||
                    'Where ' || cn_nm is null or cn_nm =' ';
            end if;
            init_loop = init_loop + 1;
        end loop;
    End;
End;
$$ language plpgsql;
Issues I am facing:
When I pass the input parameter here, I get a count of 0:
tot_cnt int := (select count(*) from information_schema.columns where table_name = ' || tbl_nm || ');'
For testing purposes I tried hardcoding the table name inside the procedure, but then I get the error: amazon invalid operation: value for domain information_schema.cardinal_number violates check constraint "cardinal_number_domain_check"
Is this even possible in Redshift? How can I implement this logic, or is there another workaround? Expert advice would be appreciated!
You can simply run an UPDATE over the table(s) using the NVL(cn_nm, 0) function:
UPDATE tbl_raw
SET col2 = NVL(col2,0);
However, UPDATE is a fairly expensive operation. Consider instead creating a view over your table that wraps the nullable columns in NVL(cn_nm, 0):
CREATE VIEW tbl_clean
AS
SELECT col1
, NVL(col2,0) col2
FROM tbl_raw;
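If you want to build such a view dynamically for every numeric column (mirroring what the SAS ARRAY does), one option is to generate the select list from the catalog first. A sketch, assuming the table is named tbl_raw as above:
SELECT CASE
         WHEN data_type IN ('smallint', 'integer', 'bigint', 'numeric', 'real', 'double precision')
         THEN 'NVL(' || column_name || ', 0) AS ' || column_name
         ELSE column_name
       END AS select_list_entry
FROM information_schema.columns
WHERE table_name = 'tbl_raw'
ORDER BY ordinal_position;
Paste the generated list into the CREATE VIEW statement, or assemble the full DDL the same way.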

PostgreSQL - Generating a WHERE NOT EXISTS condition dynamically for a re-runnable insert script

I need to generate an INSERT script in Postgres for all the tables in a database, such that it can be run repeatedly without throwing any errors.
The problem is that only a few tables have a primary key, while the rest have unique indexes on various columns, so I cannot simply list out the columns a unique index was created on. (The schema is generated automatically by Magnolia.)
Can anyone help me write a query that produces INSERT statements including a 'WHERE NOT EXISTS (SELECT 1 FROM table WHERE column = value)' condition based on the primary key/unique columns?
You can use on conflict:
insert into t ( . . . )
values ( . . . )
on conflict do nothing;
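For example, with a hypothetical table t that has a primary key on id, re-running the insert is harmless:
create table t (id int primary key, name text);
insert into t (id, name) values (1, 'a') on conflict do nothing;
insert into t (id, name) values (1, 'a') on conflict do nothing; -- second run affects 0 rows, no error
Without an explicit conflict target, do nothing applies to a violation of any unique constraint or unique index, which also covers the tables that only have unique indexes.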
This function returns an INSERT script for the data and works well with tables that have no primary key constraint. I modified code I found in another thread by adding the WHERE NOT EXISTS condition to it.
CREATE OR REPLACE FUNCTION public.generate_inserts(varSchema text, varTable text) RETURNS TABLE(resultado text) AS $$
DECLARE CODE TEXT;
BEGIN
    CODE := (
        SELECT
            'SELECT ''INSERT INTO '
            || table_schema || '.'
            || table_name || ' ('
            || replace(replace(array_agg(column_name::text)::text,'{',''),'}','') || ') SELECT ''||'
            || replace(replace(replace(array_agg( 'quote_nullable(' || column_name::text || ')')::text,'{',''),'}',''),',',' || '','' || ')
            || ' || '' Where Not Exists (Select 1 From ' || table_name ||' Where 1 = 1 '
            || ''''
            || replace(replace(replace(replace(array_agg(' || '' and (' || column_name::text || ' = '' || quote_nullable(' || column_name::text || '),' || ' || '' or ' || column_name::text || ' is null)''')::text,'{',''),'}',''),'"',''),',','')
            || '|| '');'''
            || ' FROM ' || table_schema || '.' || table_name || ';'
        FROM information_schema.columns c
        WHERE table_schema = varSchema
          AND table_name = varTable
        GROUP BY table_schema, table_name
    );
    RETURN QUERY EXECUTE CODE;
END;
$$ LANGUAGE plpgsql;
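Usage is along these lines (schema and table name are placeholders); each returned row is one re-runnable INSERT ... WHERE NOT EXISTS statement for a row of the source table:
select * from public.generate_inserts('public', 'my_table');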

Why can 'create table' in Postgres take several seconds?

In my project we sometimes have to copy all the data from one schema into another. I automated this with a simple truncate / insert into select * script, but soon realized that this approach is not tolerant of changes in the source schema (adding/deleting tables required modifying the script). So today I decided to change it to a PL/pgSQL script that creates the tables and copies the data using dynamic queries. My first implementation was something like this:
do
$$
declare
    source_schema text := 'source_schema';
    dest_schema text := 'dest_schema';
    obj_name text;
    source_table text;
    dest_table text;
    alter_columns text;
begin
    for dest_table in
        select table_schema || '.' || table_name
        from information_schema.tables
        where table_schema = dest_schema
        order by table_name
    loop
        execute 'drop table ' || dest_table;
    end loop;
    raise notice 'Data cleared';
    for obj_name in
        select table_name
        from information_schema.tables
        where table_schema = source_schema
        order by table_name
    loop
        source_table := source_schema || '.' || obj_name;
        dest_table := dest_schema || '.' || obj_name;
        execute 'create unlogged table ' || dest_table
            || ' (like ' || source_table || ' including comments)';
        alter_columns := (
            select string_agg('alter column ' || column_name || ' drop not null', ', ')
            from information_schema.columns
            where table_schema = dest_schema and table_name = obj_name
              and is_nullable = 'NO');
        if alter_columns is not null then
            execute 'alter table ' || dest_table || ' ' || alter_columns;
        end if;
        execute 'insert into ' || dest_table || ' select * from ' || source_table;
        raise notice '% done', obj_name;
    end loop;
end;
$$
language plpgsql;
As the destination schema is read-only, I create it without constraints for maximum performance. I don't think the NOT NULL constraints are a big deal, but I decided to leave that part of the script as it was.
This solution worked perfectly, but I noticed it took longer to copy the data than the static script did. Not dramatically longer, but steadily 20-30 seconds more.
I decided to investigate. My first step was to comment out the insert into select * statement to find out how long everything else takes. It turned out that clearing and recreating all the tables takes only half a second. My working theory was that INSERT statements somehow run longer in a procedural context.
Then I measured the execution time:
ts := clock_timestamp();
execute 'insert into ...';
raise notice 'obj_name: %', clock_timestamp() - ts;
I also ran the old static script with \timing in psql. This showed that my assumption was wrong: all the insert statements took more or less the same time, often even faster in the dynamic script (I suppose due to autocommit and the network round trip after each statement in psql). Yet the overall time of the dynamic script was again longer than that of the static script.
Mysticism?
Then I added very verbose logging with timestamps like this:
raise notice '%: %', clock_timestamp()::timestamp(3), 'label';
I discovered that create table sometimes executes immediately, but sometimes takes several seconds to finish. OK, but then how come all these statements for all the tables took just milliseconds to complete in my first experiment?
Then I basically split the one loop into two: the first creates all the tables (and we now know that takes just milliseconds), and the second only inserts the data:
do
$$
declare
    source_schema text := 'onto_oper';
    dest_schema text := 'onto';
    obj_name text;
    source_table text;
    dest_table text;
    alter_columns text;
begin
    raise notice 'Clearing data...';
    for dest_table in
        select table_schema || '.' || table_name
        from information_schema.tables
        where table_schema = dest_schema
        order by table_name
    loop
        execute 'drop table ' || dest_table;
    end loop;
    raise notice 'Data cleared';
    for obj_name in
        select table_name
        from information_schema.tables
        where table_schema = source_schema
        order by table_name
    loop
        source_table := source_schema || '.' || obj_name;
        dest_table := dest_schema || '.' || obj_name;
        execute 'create unlogged table ' || dest_table
            || ' (like ' || source_table || ' including comments)';
        alter_columns := (
            select string_agg('alter column ' || column_name || ' drop not null', ', ')
            from information_schema.columns
            where table_schema = dest_schema and table_name = obj_name
              and is_nullable = 'NO');
        if alter_columns is not null then
            execute 'alter table ' || dest_table || ' ' || alter_columns;
        end if;
    end loop;
    raise notice 'All tables created';
    for obj_name in
        select table_name
        from information_schema.tables
        where table_schema = source_schema
        order by table_name
    loop
        source_table := source_schema || '.' || obj_name;
        dest_table := dest_schema || '.' || obj_name;
        execute 'insert into ' || dest_table || ' select * from ' || source_table;
        raise notice '% done', obj_name;
    end loop;
end;
$$
language plpgsql;
Surprisingly, this fixed everything! This version works faster than the old static script!
So we arrive at a very weird conclusion: a create table that runs after inserts may sometimes take a long time. This is very frustrating. Although I solved my problem, I don't understand why it happens. Does anybody have any idea?

How do I delete the data from all my tables in Oracle 10g?

I have an Oracle schema containing hundreds of tables. I would like to delete the data from all the tables (but I don't want to DROP the tables).
Is there an easy way to do this, or do I have to write a SQL script that retrieves all the table names and runs the TRUNCATE command on each?
I would like to delete the data using commands in a SQL*Plus session.
If you have any referential integrity constraints (foreign keys), then TRUNCATE won't work: you cannot truncate the parent table while any child tables exist, even if the children are empty.
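You can see this with a minimal, hypothetical parent/child pair:
create table parent (id number primary key);
create table child (id number references parent (id));
truncate table parent;
-- fails with ORA-02266: unique/primary keys in table referenced by enabled foreign keys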
The following PL/SQL should work (it's untested, but I've run similar code in the past): it iterates over the tables, disables all the foreign keys, truncates the tables, then re-enables all the foreign keys. If a table in another schema has an RI constraint against one of your tables, this script will fail.
set serveroutput on size unlimited
declare
    l_sql varchar2(2000);
    l_debug number := 1; -- will output the statements if non-zero,
                         -- will execute them if 0
    l_drop_user varchar2(30) := ''; -- set the user whose tables you're truncating
begin
    for i in (select table_name, constraint_name from dba_constraints
              where owner = l_drop_user
                and constraint_type = 'R'
                and status = 'ENABLED')
    loop
        l_sql := 'alter table ' || l_drop_user || '.' || i.table_name ||
                 ' disable constraint ' || i.constraint_name;
        if l_debug = 0 then
            execute immediate l_sql;
        else
            dbms_output.put_line(l_sql);
        end if;
    end loop;
    for i in (select table_name from dba_tables
              where owner = l_drop_user
              minus
              select view_name from dba_views
              where owner = l_drop_user)
    loop
        l_sql := 'truncate table ' || l_drop_user || '.' || i.table_name;
        if l_debug = 0 then
            execute immediate l_sql;
        else
            dbms_output.put_line(l_sql);
        end if;
    end loop;
    for i in (select table_name, constraint_name from dba_constraints
              where owner = l_drop_user
                and constraint_type = 'R'
                and status = 'DISABLED')
    loop
        l_sql := 'alter table ' || l_drop_user || '.' || i.table_name ||
                 ' enable constraint ' || i.constraint_name;
        if l_debug = 0 then
            execute immediate l_sql;
        else
            dbms_output.put_line(l_sql);
        end if;
    end loop;
end;
/
Probably the easiest way is to export the schema without data, then drop and re-import it.
I was looking at this too.
Seems like you do need to go through all the table names.
Have you seen this? Seems to do the trick.
I had to do this recently and wrote a stored procedure which you can run via exec sp_truncate;. Most of the code is based on this answer on disabling constraints:
CREATE OR REPLACE PROCEDURE sp_truncate AS
BEGIN
    -- Disable all constraints
    FOR c IN
        (SELECT c.owner, c.table_name, c.constraint_name
         FROM user_constraints c, user_tables t
         WHERE c.table_name = t.table_name
           AND c.status = 'ENABLED'
         ORDER BY c.constraint_type DESC)
    LOOP
        DBMS_UTILITY.EXEC_DDL_STATEMENT('ALTER TABLE ' || c.owner || '.' || c.table_name || ' disable constraint ' || c.constraint_name);
        DBMS_OUTPUT.PUT_LINE('Disabled constraints for table ' || c.table_name);
    END LOOP;
    -- Truncate data in all tables
    FOR i IN (SELECT table_name FROM user_tables)
    LOOP
        EXECUTE IMMEDIATE 'TRUNCATE TABLE ' || i.table_name;
        DBMS_OUTPUT.PUT_LINE('Truncated table ' || i.table_name);
    END LOOP;
    -- Enable all constraints
    FOR c IN
        (SELECT c.owner, c.table_name, c.constraint_name
         FROM user_constraints c, user_tables t
         WHERE c.table_name = t.table_name
           AND c.status = 'DISABLED'
         ORDER BY c.constraint_type)
    LOOP
        DBMS_UTILITY.EXEC_DDL_STATEMENT('ALTER TABLE ' || c.owner || '.' || c.table_name || ' enable constraint ' || c.constraint_name);
        DBMS_OUTPUT.PUT_LINE('Enabled constraints for table ' || c.table_name);
    END LOOP;
    COMMIT;
END sp_truncate;
/
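To run it with the progress messages visible in SQL*Plus:
SET SERVEROUTPUT ON
EXEC sp_truncate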
Putting the details from the OTN Discussion Forums thread "truncating multiple tables with a single query" into one SQL script gives the following, which can be run in a SQL*Plus session:
SET SERVEROUTPUT ON
BEGIN
    -- Disable constraints
    DBMS_OUTPUT.PUT_LINE ('Disabling constraints');
    FOR reg IN (SELECT uc.table_name, uc.constraint_name FROM user_constraints uc) LOOP
        EXECUTE IMMEDIATE 'ALTER TABLE ' || reg.table_name || ' ' || 'DISABLE' ||
            ' CONSTRAINT ' || reg.constraint_name || ' CASCADE';
    END LOOP;
    -- Truncate tables
    DBMS_OUTPUT.PUT_LINE ('Truncating tables');
    FOR reg IN (SELECT table_name FROM user_tables) LOOP
        EXECUTE IMMEDIATE 'TRUNCATE TABLE ' || reg.table_name;
    END LOOP;
    -- Enable constraints
    DBMS_OUTPUT.PUT_LINE ('Enabling constraints');
    FOR reg IN (SELECT uc.table_name, uc.constraint_name FROM user_constraints uc) LOOP
        EXECUTE IMMEDIATE 'ALTER TABLE ' || reg.table_name || ' ' || 'ENABLE' ||
            ' CONSTRAINT ' || reg.constraint_name;
    END LOOP;
END;
/