In postgres how can I delete all columns that share the same prefix - postgresql

I have been using the following code for dropping all tables that share the same prefix (in this case, deleting all tables whose names start with 'supenh_'):
-- Drop every table whose name starts with the literal prefix 'supenh_'.
-- FIX: in a LIKE pattern, '_' is a single-character wildcard, so the
-- original 'supenh_%' also matched names like 'supenhX...'; the underscore
-- (and the one in 'pg_%') must be escaped with '\' to be matched literally.
DO
$do$
DECLARE
    _tbl text;
BEGIN
    FOR _tbl IN
        SELECT quote_ident(table_schema) || '.'
            || quote_ident(table_name)      -- escape identifier and schema-qualify!
        FROM information_schema.tables
        WHERE table_name LIKE 'supenh\_%'   -- table-name prefix; '\_' = literal underscore
          AND table_schema NOT LIKE 'pg\_%' -- exclude system schemas
    LOOP
        -- RAISE NOTICE 'DROP TABLE %', _tbl;  -- uncomment to preview instead of dropping
        EXECUTE 'DROP TABLE ' || _tbl;
    END LOOP;
END
$do$;
Is there a way to amend this code, or to use a different script, in order to delete from one specific table all the columns that start with the same prefix (for example, 'patient1_')?

You could write it as PL/pgSQL function:
-- Drop every column of tbl_name whose name starts with column_prefix.
--
-- Parameters:
--   tbl_name      - unqualified table name, matched against
--                   information_schema.columns.table_name
--   column_prefix - literal prefix of the columns to drop
--
-- Fixes over the naive version:
--   * tbl_name is escaped with quote_ident() in the executed DDL; bare
--     concatenation breaks on mixed-case/special names and is an
--     SQL-injection vector;
--   * '\', '_' and '%' in the prefix are escaped so the prefix is matched
--     literally ('_' and '%' are wildcards in LIKE).
CREATE OR REPLACE FUNCTION drop_columns_with_prefix(tbl_name TEXT, column_prefix TEXT) RETURNS VOID AS
$BODY$
DECLARE
    _column TEXT;
BEGIN
    FOR _column IN
        SELECT quote_ident(column_name)
        FROM information_schema.columns
        WHERE table_name = tbl_name
          AND column_name LIKE
              replace(replace(replace(column_prefix, '\', '\\'),
                              '_', '\_'),
                      '%', '\%') || '%'
          AND table_schema NOT LIKE 'pg\_%'  -- exclude system schemas
    LOOP
        -- RAISE NOTICE 'ALTER TABLE % DROP COLUMN %', tbl_name, _column;
        EXECUTE 'ALTER TABLE ' || quote_ident(tbl_name) || ' DROP COLUMN ' || _column;
    END LOOP;
END
$BODY$
LANGUAGE plpgsql VOLATILE;
Call it using:
SELECT drop_columns_with_prefix('tbl_name', 'prefix_');
Or if you don't want to use it as a function:
-- Same column-dropping logic as the function above, inlined as an
-- anonymous DO block; replace tbl_name / prefix_ with real values.
-- FIXES: the closing $do$ was missing its terminating semicolon (the
-- statement never completes without it), and the '_' in 'prefix_%' is a
-- LIKE wildcard, so it is escaped to match a literal underscore.
DO
$do$
DECLARE
    _column TEXT;
BEGIN
    FOR _column IN
        SELECT quote_ident(column_name)
        FROM information_schema.columns
        WHERE table_name = 'tbl_name'
          AND column_name LIKE 'prefix\_%'   -- '\_' = literal underscore
          AND table_schema NOT LIKE 'pg\_%'  -- exclude system schemas
    LOOP
        -- RAISE NOTICE 'ALTER TABLE tbl_name DROP COLUMN %', _column;
        EXECUTE 'ALTER TABLE tbl_name DROP COLUMN ' || _column;
    END LOOP;
END
$do$;

Related

Function for changing a column value in all tables in one schema

I would appreciate it if you could let me know why my code doesn't do anything? I use PostgreSQL 12.
I want to create a function to change a specific column's value in all tables in a schema.
-- Set column _col from _old to _new in every table of schema _sch that
-- has that column. Returns the (quoted) name of the last table updated,
-- or NULL if no table matched.
--
-- Bugs fixed relative to the question's version:
--   * RAISE NOTICE cannot take an EXECUTE statement as its argument —
--     that was a syntax error, which is why the function "did nothing";
--   * '_col', 'new_id' and 'old_id' were embedded literally inside the
--     SQL string instead of being substituted;
--   * identifiers are now injected safely via format() %I and the values
--     are bound as parameters with USING.
CREATE OR REPLACE FUNCTION update_cols(_sch text, _col text, _old int, _new int)
RETURNS text AS
$func$
DECLARE
    _tbl text;  -- table name, already quote_ident()-escaped by the query
BEGIN
    -- Loop over every table in the schema that contains the column
    FOR _tbl IN
        SELECT quote_ident(table_name)
        FROM information_schema.columns
        WHERE table_schema = _sch  -- name of schema
          AND column_name = _col   -- name of column
    LOOP
        RAISE NOTICE 'updating %.%', _sch, _tbl;
        -- %I escapes identifiers; _tbl is pre-quoted, so use %s for it.
        EXECUTE format('UPDATE %I.%s SET %I = $1 WHERE %I = $2',
                       _sch, _tbl, _col, _col)
        USING _new, _old;
    END LOOP;
    RETURN _tbl;
END
$func$ LANGUAGE plpgsql;
-- Call:
SELECT update_cols('','column_name', 10, 13);
It's working now, thanks to the help of @Adrian Klaver's comment.
-- Set the hard-coded column col_name from _old to _new in every table of
-- schema _sch that has that column. Returns the (quoted) name of the last
-- table updated, or NULL if none matched.
--
-- Improvements over the accepted rework:
--   * the UPDATE target is schema-qualified (%I.%s); the unqualified form
--     only worked when _sch happened to be on the search_path;
--   * format() now actually uses format specifiers instead of wrapping a
--     plain string concatenation.
CREATE OR REPLACE FUNCTION update_cols(_sch text, _old int, _new int)
RETURNS text AS
$func$
DECLARE
    _tbl text;  -- table name, already quote_ident()-escaped by the query
BEGIN
    -- Loop over tables that contain the column
    FOR _tbl IN
        SELECT quote_ident(table_name)
        FROM information_schema.columns
        WHERE column_name = 'col_name'  -- name of column
          AND table_schema = _sch
    LOOP
        -- _tbl is pre-quoted, so %s; _sch is raw, so %I.
        EXECUTE format('UPDATE %I.%s SET col_name = $1 WHERE col_name = $2', _sch, _tbl)
        USING _new, _old;
    END LOOP;
    RETURN _tbl;
END
$func$ LANGUAGE plpgsql;

How to execute query statements generated in pgAdmin4?

I have the following query which generates a list of ALTER TABLE statements in the 'Data Output' field in pgAdmin4. I can copy & paste them back into the query pane and execute them there.
select 'ALTER TABLE ' || table_name || ' OWNER TO myuser;' from information_schema.tables where table_schema = 'public';
How can I execute the generated statements without having to copy & paste them?
You can use a function for that.
-- Change the owner of every table in the public schema to role "maz".
-- FIXES: the generated statement now quotes the table name with %I and
-- schema-qualifies it — the original bare concatenation broke on
-- mixed-case or special-character table names and depended on the
-- search_path; the invocation also gained its missing semicolon.
CREATE OR REPLACE FUNCTION change_permission_table()
RETURNS VOID AS $$
DECLARE
    rec text;  -- one fully-formed ALTER TABLE statement per iteration
BEGIN
    FOR rec IN
        SELECT format('ALTER TABLE public.%I OWNER TO maz;', table_name)
        FROM information_schema.tables
        WHERE table_schema = 'public'
    LOOP
        EXECUTE rec;
    END LOOP;
END;
$$ LANGUAGE plpgsql;
-- Run the function to change the ownership
SELECT change_permission_table();

Why can 'create table' in Postgres take several seconds?

In my project we have to sometimes copy all the data from one schema into another. I automated this with a simple truncate / insert into select * script, but soon realized that this approach is not tolerant to changes in the source schema (adding/deleting tables required modifying the script). So today I decided to change it to a PL/pgSQL script which creates tables and copies data using dynamic queries. My first implementation was something like this:
-- One-shot schema copy: drop everything in dest_schema, then for each
-- table of source_schema recreate it as an UNLOGGED table and copy its
-- rows — create and insert interleaved in a single loop (the interleaving
-- is what the surrounding discussion identifies as the slow variant).
do
$$
declare
source_schema text := 'source_schema';
dest_schema text := 'dest_schema';
obj_name text;
source_table text;
dest_table text;
alter_columns text;
begin
-- Pass 1: drop every table currently in the destination schema
for dest_table in
select table_schema || '.' || table_name
from information_schema.tables
where table_schema = dest_schema
order by table_name
loop
execute 'drop table ' || dest_table;
end loop;
raise notice 'Data cleared';
-- Pass 2: recreate each source table in the destination, relax its
-- NOT NULL constraints, and immediately copy the rows over
for obj_name in
select table_name
from information_schema.tables
where table_schema = source_schema
order by table_name
loop
source_table := source_schema || '.' || obj_name;
dest_table := dest_schema || '.' || obj_name;
-- unlogged: the destination is read-only, so WAL is skipped for speed
execute 'create unlogged table ' || dest_table
|| ' (like ' || source_table || ' including comments)';
-- LIKE copies NOT NULL constraints; build one ALTER TABLE dropping them all
alter_columns := (
select string_agg('alter column ' || column_name || ' drop not null', ', ')
from information_schema.columns
where table_schema = dest_schema and table_name = obj_name
and is_nullable = 'NO');
if alter_columns is not null then
execute 'alter table ' || dest_table || ' ' || alter_columns;
end if;
-- copy the data right after creating the table (same loop iteration)
execute 'insert into ' || dest_table || ' select * from ' || source_table;
raise notice '% done', obj_name;
end loop;
end;
$$
language plpgsql;
As destination schema is read only, I create it without constrants to reach maximum performance. I don't think that NOT NULL constraints is a big deal, but I decided to leave everything here as it was.
This solution worked perfectly but I noticed that it was taking longer time to copy data in comparison to static script. Not dramatically, but steadily it took 20-30 seconds longer than static script.
I decided to investigate it. My first step was to comment out the insert into select * statement to find out how much time everything else takes. It showed that it takes only half a second to clear and recreate all tables. My clue was that INSERT statements somehow run longer in a procedural context.
Then I added measuring of the execution time:
ts := clock_timestamp();
execute 'insert into ...';
raise notice 'obj_name: %', clock_timestamp() - ts;
Also I performed the old static script with \timing in psql. But this showed that my assumption was wrong. All insert statements took more or less the same time, predominantly even faster in the dynamic script (I suppose that was due to autocommit and network roundtrips after each statement in psql). However, the overall time of the dynamic script was again longer than the time of the static script.
Mysticism?
Then I added very verbose logging with timestamps like this:
raise notice '%: %', clock_timestamp()::timestamp(3), 'label';
I discovered that sometimes create table executes immediately, but sometimes it takes several seconds to finish. OK, but how come all these statements for all tables took just milliseconds to complete in my first experiment?
Then I basically split one loop into two: first one creates all the tables (and we now know it takes just milliseconds) and the second one only inserts data:
-- Two-pass variant of the schema copy above: the first loop creates ALL
-- destination tables, the second loop does ALL the inserts. Splitting the
-- loops is what eliminated the mysterious multi-second CREATE TABLE
-- stalls described in the surrounding text.
do
$$
declare
source_schema text := 'onto_oper';
dest_schema text := 'onto';
obj_name text;
source_table text;
dest_table text;
alter_columns text;
begin
raise notice 'Clearing data...';
-- Pass 1: drop every table currently in the destination schema
for dest_table in
select table_schema || '.' || table_name
from information_schema.tables
where table_schema = dest_schema
order by table_name
loop
execute 'drop table ' || dest_table;
end loop;
raise notice 'Data cleared';
-- Pass 2: create all destination tables (no data yet)
for obj_name in
select table_name
from information_schema.tables
where table_schema = source_schema
order by table_name
loop
source_table := source_schema || '.' || obj_name;
dest_table := dest_schema || '.' || obj_name;
-- unlogged: destination is read-only, so WAL is skipped for speed
execute 'create unlogged table ' || dest_table
|| ' (like ' || source_table || ' including comments)';
-- LIKE copies NOT NULL constraints; build one ALTER TABLE dropping them all
alter_columns := (
select string_agg('alter column ' || column_name || ' drop not null', ', ')
from information_schema.columns
where table_schema = dest_schema and table_name = obj_name
and is_nullable = 'NO');
if alter_columns is not null then
execute 'alter table ' || dest_table || ' ' || alter_columns;
end if;
end loop;
raise notice 'All tables created';
-- Pass 3: copy the data, table by table
for obj_name in
select table_name
from information_schema.tables
where table_schema = source_schema
order by table_name
loop
source_table := source_schema || '.' || obj_name;
dest_table := dest_schema || '.' || obj_name;
execute 'insert into ' || dest_table || ' select * from ' || source_table;
raise notice '% done', obj_name;
end loop;
end;
$$
language plpgsql;
Surprisingly it fixed everything! This version works faster than the old static script!
We are coming to a very weird conclusion: CREATE TABLE after inserts may sometimes take a long time. This is very frustrating. Despite the fact that I solved my problem, I don't understand why it happens. Does anybody have any idea?

Simple PostgreSQL plpgsql to create a new table using existing table

I'm new to plpgsql. I'm sure there is some really simple way to do this, but for some reason I'm having a lot of trouble trying to figure out how to do this.
I'm simply trying to loop through the list of existing tables and execute
CREATE TABLE z_existing_table_name AS SELECT * FROM existing_table_name WITH DATA
So far, I have this:
-- Create a z_-prefixed backup copy of every table in public (my_db).
--
-- Bugs fixed relative to the question's draft:
--   * the loop variable is "row", but the body referenced "t.table_name";
--   * the CREATE TABLE was missing "SELECT * FROM" and its semicolon;
--   * a RETURNS RECORD function must execute a RETURN — added RETURN NULL;
--   * identifiers are now escaped via format() %I;
--   * re-runnable, as requested: existing z_ copies are dropped first and
--     tables already starting with 'z_' are skipped (no z_z_ copies).
CREATE OR REPLACE FUNCTION create_backup_row()
RETURNS RECORD
AS $$
DECLARE
    row RECORD;
BEGIN
    FOR row IN
        SELECT table_name
        FROM information_schema.tables
        WHERE table_catalog = 'my_db'
          AND table_schema = 'public'
          AND table_name NOT LIKE 'z\_%'  -- skip existing backups
    LOOP
        EXECUTE format('DROP TABLE IF EXISTS %I', 'z_' || row.table_name);
        EXECUTE format('CREATE TABLE %I AS SELECT * FROM %I',
                       'z_' || row.table_name, row.table_name);
    END LOOP;
    RETURN NULL;
END;
$$ LANGUAGE plpgsql;
It would be an added bonus if I can make this function re-runnable. Something like drop table if exist then create table ...
@Steven, use the procedure below:
-- Function: create_backup_row()
-- DROP FUNCTION create_backup_row();
-- Function: create_backup_row()
-- DROP FUNCTION create_backup_row();
-- Create a z_-prefixed backup copy of every table in public (my_db);
-- safe to re-run: existing z_ copies are dropped and recreated.
--
-- FIXES over the posted answer:
--   * "NOT ILIKE '%z_%'" skipped any table containing 'z' + any character
--     ANYWHERE in its name ('_' is a LIKE wildcard), e.g. 'size_data';
--     the intent is to skip only the 'z_'-prefixed backups, which
--     "NOT LIKE 'z\_%'" expresses exactly;
--   * table names are escaped via format() %I instead of raw concatenation.
CREATE OR REPLACE FUNCTION create_backup_row()
RETURNS integer AS
$BODY$
DECLARE
    v_table text;
BEGIN
    FOR v_table IN
        SELECT table_name
        FROM information_schema.tables
        WHERE table_catalog = 'my_db'
          AND table_schema = 'public'
          AND table_name NOT LIKE 'z\_%'  -- skip backup tables on re-runs
    LOOP
        EXECUTE format('DROP TABLE IF EXISTS %I', 'z_' || v_table);
        EXECUTE format('CREATE TABLE %I AS SELECT * FROM %I',
                       'z_' || v_table, v_table);
    END LOOP;
    RETURN 1;
END;
$BODY$
LANGUAGE plpgsql VOLATILE
COST 100;
ALTER FUNCTION create_backup_row()
OWNER TO postgres;

How to change schema of multiple PostgreSQL tables in one operation?

I have a PostgreSQL 9.1 database with 100 or so tables that were loaded into the 'public' schema. I would like to move those tables (but not all of the functions in 'public') to a 'data' schema.
I know that I can use the following to move 1 table at a time.
ALTER TABLE [tablename] SET SCHEMA [new_schema]
Is it possible to move all of the tables to the new schema in one operation? If so, what would be the most efficient way to accomplish this task?
DO will do the trick:
-- Template: move every table in 'public' to another schema.
-- NOTE(review): [new_schema] is a fill-in placeholder — replace it with
-- the real target schema name before running; as written the generated
-- statement is not valid SQL.
DO
$$
DECLARE
row record;
BEGIN
FOR row IN SELECT tablename FROM pg_tables WHERE schemaname = 'public' -- and other conditions, if needed
LOOP
-- %I safely quotes the table name
EXECUTE format('ALTER TABLE public.%I SET SCHEMA [new_schema];', row.tablename);
END LOOP;
END;
$$;
-- ####### USING DBEAVER WHICH SUPPORT VARIABLES ########
-- ### ANSWER_1 -- USING DO ###--------
-- Step1: Set variables one by one
#set _SCHEMA = 'public'
#set _COLUMN = 'dml_status'
#set _DATA_TYPE = 'integer'
#set _DEFAULT = '1'
-- Step2: Call the below procedure
-- Add the column described by the DBeaver #set variables to every table.
-- The ${...} tokens are substituted client-side by DBeaver before the
-- statement reaches the server; this block is not runnable in plain psql.
-- NOTE(review): the loop filters tables by ${_SCHEMA}, but the ALTER
-- hard-codes 'public.' — presumably ${_SCHEMA} is always 'public';
-- confirm before pointing the variable at another schema.
DO
$$
DECLARE
row record;
query varchar;
BEGIN
FOR ROW IN SELECT table_name FROM information_schema.tables WHERE table_schema = ${_SCHEMA}
LOOP
-- quote_ident() protects the table name; the column name, type and
-- default are spliced in unquoted from the client-side variables.
query :='ALTER TABLE public.' || quote_ident(row.table_name) ||' ADD COLUMN IF NOT EXISTS '||${_COLUMN} || ' ' || ${_DATA_TYPE} ||' not null default ' || ${_DEFAULT} || ';' ;
execute query;
END LOOP;
END;
$$;
-- ### ANSWER_2 -- STORE PROCEDURE FN ###--------
-- Stored-function variant: adds the column described by the DBeaver
-- ${...} variables (substituted client-side before execution) to every
-- table of ${_SCHEMA}.
-- FIX: DROP FUNCTION ... IF EXISTS — the bare DROP FUNCTION errored out
-- on the very first run, when the function did not exist yet.
DROP FUNCTION IF EXISTS addColumnToMultipleTables cascade;
create or replace function addColumnToMultipleTables()
returns void
LANGUAGE 'plpgsql'
as $$
DECLARE
row record;
query varchar;
BEGIN
-- NOTE(review): the loop filters tables by ${_SCHEMA}, but the ALTER
-- hard-codes 'public.' — confirm both agree before using another schema.
FOR ROW IN SELECT table_name FROM information_schema.tables WHERE table_schema = ${_SCHEMA}
LOOP
-- quote_ident() protects the table name; column/type/default come from
-- the client-side variables unquoted.
query :='ALTER TABLE public.' || quote_ident(row.table_name) ||' ADD COLUMN IF NOT EXISTS '||${_COLUMN} || ' ' || ${_DATA_TYPE} ||' not null default ' || ${_DEFAULT} || ';' ;
raise info 'query : % ', query;
execute query;
END LOOP;
END;
$$;
select addColumnToMultipleTables();