PostgreSQL: combining EXECUTE sql_cmd with CREATE TEMP TABLE temp_tbl AS SELECT

So, here is the thing: in my DB functions I use two approaches.
1.) Compose an SQL query from various strings, depending on what I need to filter out:
sql_cmd := 'SELECT count(*) FROM art_short_term_finished WHERE (entry_time <= ''' || timestamp_up || ''' AND exit_time >= ''' || timestamp_down || ''') AND ' || time_filter || ' AND entry_zone = ' || zone_parameter || ' AND park_uuid = ' || park_id_p || '';
EXECUTE sql_cmd INTO shortterm_counter;
2.) Copy part of the big table into a smaller temp table and work with it:
-- Get the data from FPL into smaller table for processing
DROP TABLE IF EXISTS temp_fpl_filtered;
CREATE TEMP TABLE temp_fpl_filtered AS SELECT car_id FROM flexcore_passing_log fpl WHERE fpl.zone_leaved = '0' AND fpl.status IN (SELECT status_id FROM fpl_ok_statuses) AND fpl.park_uuid = park_id_p AND (fpl.datetime BETWEEN row_i.start_d AND row_i.end_d);
But what if I want to mix those two?
I want the SELECT after CREATE TEMP TABLE temp_fpl_filtered AS to have different WHERE clauses depending on the input parameters of the stored procedure, without having to write the same statement xy times in one stored procedure.
But my approach:
-- art class is shortterm, check shortterm history
IF art_class_p = 1 OR article_p = 0 THEN
    -- create temporary table derived from shortterm history
    IF article_p = 0 THEN
        article_p_filter := '';
    ELSE
        article_p_filter := ' AND article_id = ' || article_p;
    END IF;
    short_cmd := 'SELECT car_id, article_id, entry_time, exit_time FROM art_short_term_finished WHERE zone_leaved = ''0'' AND status IN (SELECT status_id FROM fpl_ok_statuses) ''' || article_p_filter || ''' AND park_uuid = ''' || park_id_p || ''' AND (entry_time <= ''' || timestamp_up || ''' AND exit_time >= ''' || timestamp_down || ''')';
    DROP TABLE IF EXISTS temp_short_full;
    CREATE TEMP TABLE temp_short_full AS short_cmd;
    --EXECUTE sql_cmd INTO shortterm_counter;
END IF;
throws an error when I try to create the stored procedure:
psql:report_parking_average.sql:107: ERROR: syntax error at or near "short_cmd"
LINE 50: CREATE TEMP TABLE temp_fpl_filtered AS short_cmd;
^
Also, another attempt:
EXECUTE short_cmd INTO TEMP TABLE temp_short_full;
does not work either.

You need to include the CREATE TABLE part in the SQL you generate (note that the stray quotes around article_p_filter also have to go; they would otherwise inject a bare string literal into the statement):
short_cmd := 'CREATE TEMP TABLE temp_short_full AS SELECT car_id, article_id, entry_time, exit_time FROM art_short_term_finished WHERE zone_leaved = ''0'' AND status IN (SELECT status_id FROM fpl_ok_statuses)' || article_p_filter || ' AND park_uuid = ''' || park_id_p || ''' AND (entry_time <= ''' || timestamp_up || ''' AND exit_time >= ''' || timestamp_down || ''')';
DROP TABLE IF EXISTS temp_short_full;
EXECUTE short_cmd;
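On PostgreSQL 9.1 or later, format() with %L placeholders takes care of the literal quoting for you. A minimal sketch of the same statement, reusing the question's variable names (illustrative, not the exact production code):
-- build the optional filter separately; %s is fine here because article_p is numeric
article_p_filter := CASE WHEN article_p = 0 THEN ''
                         ELSE format(' AND article_id = %s', article_p) END;
short_cmd := format(
    'CREATE TEMP TABLE temp_short_full AS '
    'SELECT car_id, article_id, entry_time, exit_time '
    'FROM art_short_term_finished '
    'WHERE zone_leaved = ''0'' '
    'AND status IN (SELECT status_id FROM fpl_ok_statuses)'
    || article_p_filter ||
    ' AND park_uuid = %L AND entry_time <= %L AND exit_time >= %L',
    park_id_p, timestamp_up, timestamp_down);
DROP TABLE IF EXISTS temp_short_full;
EXECUTE short_cmd;
On older servers, the plain || version above is the way to go.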


How to move all timestamptz dates in a PostgreSQL database?

I have a PostgreSQL dump of a seed database. This dump was created a few months ago, so all the data in it is from the past. It is not very convenient to develop on past data because I always have to scroll in the UI to that past date.
I was thinking of automatically shifting every timestamptz field in the database by a specific offset. It sounds doable via a script that walks the database schema, finds every timestamptz field, and then builds a SQL update for each field.
So, are there any ready-made solutions for this?
I solved it using this SQL query:
--
-- This SQL query shifts all timestamptz fields in the database
--
BEGIN;
DO $$
DECLARE
    sql_query text;
    table_row record;
    column_row record;
    trigger_row record;
BEGIN
    FOR table_row IN (
        SELECT table_schema, table_name
        FROM information_schema.tables
        WHERE table_type = 'BASE TABLE' AND table_schema = 'public'
    ) LOOP
        sql_query := '';
        RAISE NOTICE 'Checking %', table_row.table_name;
        FOR column_row IN (
            SELECT column_name
            FROM information_schema.columns
            WHERE table_schema = table_row.table_schema
              AND table_name = table_row.table_name
              AND udt_name = 'timestamptz'
              AND is_updatable = 'YES'
        ) LOOP
            sql_query := sql_query ||
                '"' || column_row.column_name || '" = "' || column_row.column_name || '" + interval ''100'' day,';
        END LOOP;
        IF sql_query != '' THEN
            sql_query := substr(sql_query, 1, length(sql_query) - 1); -- remove the trailing ","
            sql_query := 'UPDATE ' || table_row.table_schema || '.' || table_row.table_name || ' SET ' || sql_query || ';';
            -- There might be triggers on the table, so disable them before the update
            FOR trigger_row IN (
                SELECT trigger_name FROM information_schema.triggers
                WHERE trigger_schema = table_row.table_schema
                  AND event_object_table = table_row.table_name
                  AND event_manipulation = 'UPDATE'
                  AND (action_timing = 'BEFORE' OR action_timing = 'AFTER')
            ) LOOP
                sql_query := 'ALTER TABLE ' || table_row.table_schema || '.' || table_row.table_name ||
                             ' DISABLE TRIGGER ' || trigger_row.trigger_name || ';' ||
                             sql_query ||
                             'ALTER TABLE ' || table_row.table_schema || '.' || table_row.table_name ||
                             ' ENABLE TRIGGER ' || trigger_row.trigger_name || ';';
            END LOOP;
            -- Same for row level security: disable it if it was enabled
            IF (SELECT pg_class.oid FROM pg_class
                LEFT JOIN pg_catalog.pg_namespace ON pg_catalog.pg_namespace.oid = pg_class.relnamespace
                WHERE relname = table_row.table_name
                  AND pg_catalog.pg_namespace.nspname = table_row.table_schema
                  AND relrowsecurity) IS NOT NULL THEN
                sql_query := 'ALTER TABLE ' || table_row.table_schema || '.' || table_row.table_name ||
                             ' DISABLE ROW LEVEL SECURITY;' ||
                             sql_query ||
                             'ALTER TABLE ' || table_row.table_schema || '.' || table_row.table_name ||
                             ' ENABLE ROW LEVEL SECURITY;';
            END IF;
            RAISE NOTICE '%', sql_query;
            EXECUTE sql_query;
            RAISE NOTICE '---------------------------';
        END IF;
    END LOOP;
END$$;
COMMIT;
Alternatively, just add things to the database and then update each table with a query like this one; change the table name, the column name, and the number of days you want it shifted by:
UPDATE table_name
SET timestamptz = timestamptz + interval '1' day
WHERE 1 = 1;
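To pick the offset in the first place, you can measure how far the newest row lags behind today (a sketch; some_table and created_at are placeholder names):
-- days between today and the most recent seed row (placeholder names)
SELECT current_date - max(created_at)::date AS days_to_shift FROM some_table;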

Getting A "Could Not Open Relation" Error On Simple Query

I have a function that creates a set of INSERT INTO ... VALUES scripts. If I uncomment the dvp.content line, the function fails with an "ERROR: could not open relation with OID ###", which refers to the temp table. The content column is of type jsonb. I'm not sure where to begin.
CREATE OR REPLACE FUNCTION export_docs_as_sql(doc_list uuid[], to_org_id uuid)
RETURNS table(id integer, sql text)
AS $$
BEGIN
    ...
    -- use a temp table to gather all INSERT statements
    CREATE TEMP TABLE IF NOT EXISTS doc_data_export(
        id serial PRIMARY KEY,
        sql text
    );
    ...
    -- get doc_version_pages
    INSERT INTO doc_data_export(sql)
    SELECT 'INSERT INTO doc_version_pages(id, doc_version_id, persona_id, care_category_id, patient_group_id, title, content, created_at, updated_at, is_guide, is_root) VALUES (' ||
        quote_literal(dvp.id::TEXT) || ', ' ||
        quote_literal(dvp.doc_version_id::TEXT) || ', ' ||
        CASE WHEN p.name IS NOT NULL THEN '(SELECT px.id FROM personas px WHERE px.org_id = ' || quote_literal(dv.id::TEXT) || ' AND px.name = ' || quote_literal(p.name) || '), ' ELSE 'NULL, ' END ||
        CASE WHEN c.name IS NOT NULL THEN '(SELECT cx.id FROM care_categories cx WHERE cx.org_id = ' || quote_literal(to_org_id) || ' AND cx.name = ' || quote_literal(c.name) || '), ' ELSE 'NULL, ' END ||
        CASE WHEN g.name IS NOT NULL THEN '(SELECT gx.id FROM patient_groups gx WHERE gx.org_id = ' || quote_literal(to_org_id) || ' AND gx.name = ' || quote_literal(g.name) || '), ' ELSE 'NULL, ' END ||
        quote_literal(dvp.title::TEXT) || ', ' ||
        --dvp.content || ', ' ||
        quote_literal(dvp.created_at::TEXT) || ', ' ||
        quote_literal(now()::timestamp) || ', ' ||
        quote_literal(dvp.is_guide::TEXT) || ', ' ||
        quote_literal(dvp.is_root::TEXT) || ');'
    FROM unnest(doc_list) l
    INNER JOIN doc_versions dv ON l = dv.doc_id
    INNER JOIN doc_version_pages dvp ON dv.id = dvp.doc_version_id
    LEFT JOIN personas p ON dvp.persona_id = p.id
    LEFT JOIN care_categories c ON dvp.care_category_id = c.id
    LEFT JOIN patient_groups g ON dvp.patient_group_id = g.id;
    ...
    -- output all inserts
    RETURN QUERY SELECT * FROM doc_data_export;
    -- drop temp table
    DROP TABLE doc_data_export;
END;
$$ LANGUAGE plpgsql;
The "Could Not Open Relation" problem is occurring due to the bug described here, which remains an issue as of Postgres 14.0:
What seems to be happening is that if the strings are large enough to be
toasted, then the data returned out of the function with RETURN QUERY
contains toast pointers referencing the temp table's toast table.
If you drop the temp table then those pointers will fail upon use.
To explain further: when a column value is larger than the TOAST_TUPLE_THRESHOLD (normally 2 kB) and cannot be compressed below it, or when the column is configured with a storage setting of EXTERNAL, the value is broken into chunks and stored in a special secondary table called a TOAST table. That table lives in the pg_toast schema and is named like pg_toast.pg_toast_<table OID>.
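A quick way to check whether a table has a TOAST table, and what it is called, is a catalog lookup like this (using the temp table from the question):
-- returns pg_toast.pg_toast_<oid>, or '-' if the table has no TOAST table
SELECT reltoastrelid::regclass AS toast_table
FROM pg_class
WHERE oid = 'doc_data_export'::regclass;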
So when you include dvp.content in the sql string you insert into doc_data_export, some of those values exceed the threshold and are therefore TOASTed. Your RETURN QUERY only sends back pointers to the values in the TOAST table. After the return is done, the temporary table and its corresponding TOAST table are dropped. So when the outer query attempts to materialize the results, it can't find the TOAST table those pointers reference, hence the cryptic error message you see.
You can avoid returning TOAST pointers for the temporary table (and thus safely DROP it after the RETURN QUERY) by performing an operation on the sql column that returns the same value:
RETURN QUERY SELECT id, sql || '' FROM doc_data_export;
The simple function below will reproduce a minimal example of the TOAST bug when you set fail to true and demonstrate the successful workaround when you set fail to false.
DROP FUNCTION IF EXISTS buttered_toast(boolean);
CREATE OR REPLACE FUNCTION buttered_toast(fail boolean)
RETURNS table(id integer, enormous_data text)
AS $$
BEGIN
    CREATE TEMPORARY TABLE tbl_with_toasts (
        id integer PRIMARY KEY,
        enormous_data text
    ) ON COMMIT DROP;
    -- generate a giant string that is sure to produce a TOAST table
    INSERT INTO tbl_with_toasts(id, enormous_data)
    SELECT 1, string_agg(gen_random_uuid()::text, '-')
    FROM generate_series(1, 10000) AS ints(int);
    IF buttered_toast.fail THEN
        -- returns pointers into tbl_with_toasts' TOAST table for the "enormous_data" column
        RETURN QUERY SELECT tbl_with_toasts.id, tbl_with_toasts.enormous_data FROM tbl_with_toasts;
    ELSE
        -- generates and returns new, detoasted values for the "enormous_data" column
        RETURN QUERY SELECT tbl_with_toasts.id, tbl_with_toasts.enormous_data || '' FROM tbl_with_toasts;
    END IF;
    DROP TABLE tbl_with_toasts;
END;
$$ LANGUAGE plpgsql;
-- fails with "Could Not Open Relation"
SELECT * FROM buttered_toast(true);
-- succeeds
SELECT * FROM buttered_toast(false);

PostgreSQL - Generating a WHERE NOT EXISTS condition dynamically for a re-runnable insert script

I need to generate an INSERT script in Postgres for all the tables in a database, such that it can be run again without throwing any errors.
The problem is that only a few tables have a primary key, while the rest have unique indexes on various columns.
That is why I am not able to simply list out the columns on which a unique index has been created.
The reason behind this is that the schema is automatically created through Magnolia.
Can anyone help me write a query that produces INSERT statements including a 'WHERE NOT EXISTS (SELECT 1 FROM table WHERE column = value)' condition based on the primary key/unique columns?
You can use on conflict:
insert into t ( . . . )
values ( . . . )
on conflict do nothing;
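Without a conflict target, do nothing skips a row on any unique violation; you can also name the columns of one specific unique index or constraint if only that conflict should be ignored (a sketch with a hypothetical table t and unique column a):
insert into t (a, b)
values (1, 'x')
on conflict (a) do nothing;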
This function returns an INSERT script for the data and works well with tables on which no primary key constraint is available.
I modified code that I found in another thread by adding the WHERE NOT EXISTS condition to it.
CREATE OR REPLACE FUNCTION public.generate_inserts(varSchema text, varTable text) RETURNS TABLE(resultado text) AS $$
DECLARE
    CODE TEXT;
BEGIN
    CODE := (
        SELECT 'SELECT ''INSERT INTO '
            || table_schema || '.'
            || table_name || ' ('
            || replace(replace(array_agg(column_name::text)::text,'{',''),'}','') || ') SELECT ''||'
            || replace(replace(replace(array_agg( 'quote_nullable(' || column_name::text || ')')::text,'{',''),'}',''),',',' || '','' || ')
            || ' || '' Where Not Exists (Select 1 From ' || table_name ||' Where 1 = 1 '
            || ''''
            || replace(replace(replace(replace(array_agg(' || '' and (' || column_name::text || ' = '' || quote_nullable(' || column_name::text || '),' || ' || '' or ' || column_name::text || ' is null)''')::text,'{',''),'}',''),'"',''),',','')
            || '|| '');'''
            || ' FROM ' || table_schema || '.' || table_name || ';'
        FROM information_schema.columns c
        WHERE table_schema = varSchema
          AND table_name = varTable
        GROUP BY table_schema, table_name
    );
    RETURN QUERY EXECUTE CODE;
END;
$$ LANGUAGE plpgsql;
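Usage is then a single call per table, for example (schema and table name are whatever you need):
-- emits one re-runnable INSERT ... WHERE NOT EXISTS statement per row
SELECT * FROM public.generate_inserts('public', 'my_table');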

How to use a WITH block with dynamic sql query

I've got a plpgsql function that needs to prepare data from 3 tables based on user input, and export the data using COPY TO. The data are road accidents, so the 3 tables are accident, casualty and vehicle; each accident links to zero or more records in the vehicle and casualty tables via an accidentid column that exists in all three tables. severity and local_authorities are input parameters (both text[]).
sql_query = 'SELECT COUNT(*) FROM accident WHERE severity = ANY(' || quote_literal(severity)
|| ') AND local_auth = ANY (' || quote_literal(local_authorities) || ')';
EXECUTE sql_query INTO result_count;
IF result_count > 0 THEN
-- replace Select Count(*) With Select *
sql_query = Overlay(sql_query placing '*' from 8 for 8);
-- copy the accident data first
EXECUTE 'COPY (' || sql_query || ') TO ' || quote_literal(file_path || file_name_a) ||
' CSV';
This first bit will get the relevant accidents, so I'm now looking for the most efficient way to use the accidentid's from the first query to download the related vehicle and casualty data.
I thought I'd be able to use a WITH block like this:
-- replace * with accidentid
sql_query = Overlay(sql_query placing 'accidentid' from 8 for 1);
WITH acc_ids AS (sql_query)
EXECUTE 'COPY (SELECT * FROM vehicle WHERE accidentid IN (SELECT accidentid FROM
acc_ids)) TO ' || out_path_and_vfilename || ' CSV';
EXECUTE 'COPY (SELECT * FROM casualty WHERE accidentid IN (SELECT accidentid FROM
acc_ids)) TO ' || out_path_and_cfilename || ' CSV';
but get an error:
ERROR: syntax error at or near "$1"
LINE 1: WITH acc_ids AS ( $1 ) EXECUTE 'COPY (SELECT * FROM accident....
I have tried the above in a non-dynamic test case e.g.
WITH acc_ids AS (
SELECT accidentid FROM accident
WHERE severity = ANY ('{3,2}')
AND local_auth = ANY ('{E09000001,E09000002}')
)
SELECT * FROM vehicle
WHERE accidentid IN (
SELECT accidentid FROM acc_ids);
which works. Unfortunately the server is still running Postgres 8.4 so I can't use format() for the time being.
Perhaps this isn't possible with a WITH block, but I hope it at least illustrates what I'm trying to achieve.
Edit/Update
The main goal is to get the relevant data from the 3 tables in 3 separate csv files, ideally without having to run the selection on the accident table 3 times
If you want to run a query (part) that is stored in a string variable, you need a dynamic query like
EXECUTE 'WITH acc_ids AS (' || sql_query || ') ' ||
        'SELECT ... ';
Either the whole query is a string executed by EXECUTE, or the whole query is static SQL. You cannot mix them.
Do you need a CTE? If you can express the query as a join, the optimizer has more options.
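Putting that together for the vehicle export, the whole statement, CTE included, has to go into one string (a sketch reusing the question's variables; built with || since 8.4 lacks format(), and assuming your version accepts a WITH inside COPY (...)):
EXECUTE 'COPY (WITH acc_ids AS (' || sql_query || ') ' ||
        'SELECT v.* FROM vehicle v ' ||
        'WHERE v.accidentid IN (SELECT accidentid FROM acc_ids)) ' ||
        'TO ' || quote_literal(file_path || file_name_v) || ' CSV';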
This does what I need without a CTE, but I can't see it being the most efficient solution, since I have to perform the same query on the accident table 3 times:
sql_query = sql_query || which_tab || ' WHERE severity = ANY ('||
quote_literal(severity) ||') AND ' || date_start || ' AND ' ||
date_end || ' AND local_auth = ANY (' ||
quote_literal(local_authorities) || ')';
-- replace * with COUNT(*)
sql_query = Overlay(sql_query placing 'COUNT(*)' from 8 for 1);
EXECUTE sql_query INTO result_count;
IF result_count > 0 THEN
-- replace COUNT(*) with *
sql_query = Overlay(sql_query placing '*' from 8 for 8);
-- copy the accident data first
EXECUTE 'COPY (' || sql_query || ') TO ' || quote_literal(file_path ||
file_name_a) || ' CSV';
sql_query = Overlay(sql_query placing 'accidentid' from 8 for 1);
-- vehicles
EXECUTE 'COPY (SELECT * FROM vehicle WHERE accidentid IN (
SELECT accidentid FROM accident
WHERE severity = ANY (' || quote_literal(severity) || ')
AND local_auth = ANY (' || quote_literal(local_authorities) ||')))
TO ' || quote_literal(file_path || file_name_v) || ' CSV';
-- casualties
EXECUTE 'COPY (SELECT * FROM casualty WHERE accidentid IN (
SELECT accidentid FROM accident
WHERE severity = ANY (' || quote_literal(severity) || ')
AND local_auth = ANY (' || quote_literal(local_authorities) ||')))
TO ' || quote_literal(file_path || file_name_c) || ' CSV';
END IF;
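An alternative that avoids scanning accident three times is to materialize the matching ids once into a temp table and point all three COPY statements at it (a sketch with the same variables; untested, but every feature used here exists in 8.4):
EXECUTE 'DROP TABLE IF EXISTS acc_ids';
EXECUTE 'CREATE TEMP TABLE acc_ids AS ' ||
        'SELECT accidentid FROM accident ' ||
        'WHERE severity = ANY (' || quote_literal(severity) || ') ' ||
        'AND local_auth = ANY (' || quote_literal(local_authorities) || ')';
-- now export each related table against the small id table
EXECUTE 'COPY (SELECT * FROM vehicle WHERE accidentid IN (SELECT accidentid FROM acc_ids)) TO '
        || quote_literal(file_path || file_name_v) || ' CSV';
EXECUTE 'COPY (SELECT * FROM casualty WHERE accidentid IN (SELECT accidentid FROM acc_ids)) TO '
        || quote_literal(file_path || file_name_c) || ' CSV';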

How do I delete the data from all my tables in ORACLE 10g

I have an ORACLE schema containing hundreds of tables. I would like to delete the data from all the tables (but don't want to DROP the tables).
Is there an easy way to do this or do I have to write an SQL script that retrieves all the table names and runs the TRUNCATE command on each ?
I would like to delete the data using commands in a SQL*Plus session.
If you have any referential integrity constraints (foreign keys) then truncate won't work; you cannot truncate the parent table if any child tables exist, even if the children are empty.
The following PL/SQL should (it's untested, but I've run similar code in the past) iterate over the tables, disabling all the foreign keys, truncating them, then re-enabling all the foreign keys. If a table in another schema has an RI constraint against your table, this script will fail.
set serveroutput on size unlimited
declare
    l_sql       varchar2(2000);
    l_debug     number := 1;        -- will output the SQL if non-zero, execute it if 0
    l_drop_user varchar2(30) := ''; -- set the user whose tables you're truncating
begin
    for i in (select table_name, constraint_name from dba_constraints
              where owner = l_drop_user
                and constraint_type = 'R'
                and status = 'ENABLED')
    loop
        l_sql := 'alter table ' || l_drop_user || '.' || i.table_name ||
                 ' disable constraint ' || i.constraint_name;
        if l_debug = 0 then
            execute immediate l_sql;
        else
            dbms_output.put_line(l_sql);
        end if;
    end loop;
    for i in (select table_name from dba_tables
              where owner = l_drop_user
              minus
              select view_name from dba_views
              where owner = l_drop_user)
    loop
        l_sql := 'truncate table ' || l_drop_user || '.' || i.table_name;
        if l_debug = 0 then
            execute immediate l_sql;
        else
            dbms_output.put_line(l_sql);
        end if;
    end loop;
    for i in (select table_name, constraint_name from dba_constraints
              where owner = l_drop_user
                and constraint_type = 'R'
                and status = 'DISABLED')
    loop
        l_sql := 'alter table ' || l_drop_user || '.' || i.table_name ||
                 ' enable constraint ' || i.constraint_name;
        if l_debug = 0 then
            execute immediate l_sql;
        else
            dbms_output.put_line(l_sql);
        end if;
    end loop;
end;
/
Probably the easiest way is to export the schema without data, then drop and re-import it.
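For instance, with the classic export/import utilities available in 10g, something along these lines dumps the structure only and reloads it after you have dropped the objects (flags, user and file names are a sketch to adapt, not a recipe):
# metadata-only export: ROWS=N skips the table data
exp scott/tiger OWNER=scott ROWS=N FILE=scott_schema.dmp
# after dropping the schema objects, re-import the empty structure
imp scott/tiger FROMUSER=scott TOUSER=scott FILE=scott_schema.dmp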
I was looking at this too.
Seems like you do need to go through all the table names.
Have you seen this? Seems to do the trick.
I had to do this recently and wrote a stored procedure which you can run via exec sp_truncate;. Most of the code is based on this answer on disabling constraints:
CREATE OR REPLACE PROCEDURE sp_truncate AS
BEGIN
    -- Disable all constraints
    FOR c IN (SELECT c.owner, c.table_name, c.constraint_name
              FROM user_constraints c, user_tables t
              WHERE c.table_name = t.table_name
                AND c.status = 'ENABLED'
              ORDER BY c.constraint_type DESC)
    LOOP
        DBMS_UTILITY.EXEC_DDL_STATEMENT('ALTER TABLE ' || c.owner || '.' || c.table_name || ' disable constraint ' || c.constraint_name);
        DBMS_OUTPUT.PUT_LINE('Disabled constraints for table ' || c.table_name);
    END LOOP;
    -- Truncate data in all tables
    FOR i IN (SELECT table_name FROM user_tables)
    LOOP
        EXECUTE IMMEDIATE 'TRUNCATE TABLE ' || i.table_name;
        DBMS_OUTPUT.PUT_LINE('Truncated table ' || i.table_name);
    END LOOP;
    -- Enable all constraints
    FOR c IN (SELECT c.owner, c.table_name, c.constraint_name
              FROM user_constraints c, user_tables t
              WHERE c.table_name = t.table_name
                AND c.status = 'DISABLED'
              ORDER BY c.constraint_type)
    LOOP
        DBMS_UTILITY.EXEC_DDL_STATEMENT('ALTER TABLE ' || c.owner || '.' || c.table_name || ' enable constraint ' || c.constraint_name);
        DBMS_OUTPUT.PUT_LINE('Enabled constraints for table ' || c.table_name);
    END LOOP;
    COMMIT;
END sp_truncate;
/
Putting the details from the OTN Discussion Forums thread "truncating multiple tables with single query" into one SQL script gives the following, which can be run in a SQL*Plus session:
SET SERVEROUTPUT ON
BEGIN
    -- Disable constraints
    DBMS_OUTPUT.PUT_LINE('Disabling constraints');
    FOR reg IN (SELECT uc.table_name, uc.constraint_name FROM user_constraints uc) LOOP
        EXECUTE IMMEDIATE 'ALTER TABLE ' || reg.table_name || ' DISABLE CONSTRAINT ' || reg.constraint_name || ' CASCADE';
    END LOOP;
    -- Truncate tables
    DBMS_OUTPUT.PUT_LINE('Truncating tables');
    FOR reg IN (SELECT table_name FROM user_tables) LOOP
        EXECUTE IMMEDIATE 'TRUNCATE TABLE ' || reg.table_name;
    END LOOP;
    -- Enable constraints
    DBMS_OUTPUT.PUT_LINE('Enabling constraints');
    FOR reg IN (SELECT uc.table_name, uc.constraint_name FROM user_constraints uc) LOOP
        EXECUTE IMMEDIATE 'ALTER TABLE ' || reg.table_name || ' ENABLE CONSTRAINT ' || reg.constraint_name;
    END LOOP;
END;
/