Get table options from query - postgresql

Is there a way to query the options (specifically the autovacuum_vacuum_scale_factor) set for a specific table?
Right now I'm doing \d+ <table_name> in the CLI to get them, but I'd like to do it from a query.
Thanks :)

-- Demo table with a per-table autovacuum storage parameter set via WITH (...).
CREATE TABLE a_a (
a int
)
WITH (
-- NOTE(review): autovacuum_vacuum_scale_factor is a fraction of the table size;
-- 20 means "vacuum once dead tuples exceed 20x the row count". Typical values
-- are fractions such as 0.2 -- confirm 20 is intentional and not a typo.
autovacuum_vacuum_scale_factor = 20
);
-- Per-table storage options (what \d+ shows) live in pg_class.reloptions.
SELECT c.relname,
       c.relnamespace,
       c.reloptions
FROM pg_class AS c
WHERE c.relname = 'a_a';
you will get:
+---------+--------------+-------------------------------------+
| relname | relnamespace | reloptions |
+---------+--------------+-------------------------------------+
| a_a | 16466 | {autovacuum_vacuum_scale_factor=20} |
+---------+--------------+-------------------------------------+

Related

How to verify that column data_type are the same as foreign key data_type in Postgres?

Is there a way to check if all foreign key columns data_type are the same as the column they point to?
This code is valid and works until a user has an ID bigger than what int4 can handle.
-- Reproduction: the FK column (int4) is narrower than the referenced PK (bigserial = int8).
-- PostgreSQL accepts this FK; it only fails at runtime when an id exceeds int4 range.
CREATE SCHEMA test;
CREATE TABLE test.users (
id bigserial NOT NULL,
name varchar NULL,
CONSTRAINT user_pk PRIMARY KEY (id)
);
CREATE TABLE test.othertable (
blabla varchar NULL,
-- int4 overflows once users.id grows past 2^31 - 1
userid int4 NULL
);
ALTER TABLE test.othertable ADD CONSTRAINT newtable_fk FOREIGN KEY (userid) REFERENCES test.users(id);
An (incomplete) version, using the bare pg_catalog tables instead of the information_schema wrapper:
SELECT version();

-- IF EXISTS makes the script runnable on a fresh database where the schema
-- does not exist yet (the bare DROP would abort the script with an error).
DROP SCHEMA IF EXISTS test CASCADE;
CREATE SCHEMA test;
SET search_path = test;

-- Referenced table: bigint (int8) primary key.
CREATE TABLE users (
id bigserial NOT NULL CONSTRAINT user_pk PRIMARY KEY
, name varchar NULL
);

-- Referencing table: one FK column deliberately narrower than the PK it
-- points to (bad_fk, int4 -> int8) and one that matches (good_fk, int8 -> int8).
CREATE TABLE othertable (
blabla varchar NULL
, userid int4 NULL CONSTRAINT bad_fk REFERENCES users(id)
, goodid bigint NULL CONSTRAINT good_fk REFERENCES users(id)
);
-- Insert one user and one othertable row in a single statement, writing the
-- same id into both the mismatched (userid) and the matching (goodid) FK column.
PREPARE insert_two(bigint, text, text) AS
WITH one AS (
INSERT INTO users (id, name)
VALUES ( $1, $2)
RETURNING id
)
INSERT INTO othertable (userid, goodid, blabla)
SELECT id, id, $3
FROM one
;
EXECUTE insert_two(1, 'one', 'bla1' );
EXECUTE insert_two(2, 'two', 'bla2' );
-- Deliberately exceeds int4 range: fails with "integer out of range" on userid.
EXECUTE insert_two(10000000000::bigint, 'toobig', 'bigbla' );
SELECT * FROM users;
SELECT * FROM othertable;
SET search_path = pg_catalog;
-- EXPLAIN ANALYZE
-- cat: one row per (table, column), carrying the column's type oid and name,
-- so the main query can join it once for the FK source and once for the target.
WITH cat AS ( -- Class Attribute Type
SELECT cl.oid AS coid, cl.relname
, at.attnum AS cnum, at.attname
, ty.oid AS toid, ty.typname
FROM pg_class cl
JOIN pg_attribute at ON at.attrelid = cl.oid AND at.attnum > 0 -- suppress system columns
JOIN pg_type ty ON ty.oid = at.atttypid
)
SELECT ns.nspname
, co.*
, source.relname AS source_table, source.attname AS source_column, source.typname AS source_type
, target.relname AS target_table, target.attname AS target_column, target.typname AS target_type
FROM pg_constraint co
JOIN pg_namespace ns ON co.connamespace = ns.oid
-- NOTE: this only covers single-column FKs (conkey[1]/confkey[1] read the first column only)
JOIN cat source ON source.coid = co.conrelid AND co.conkey[1] = source.cnum
JOIN cat target ON target.coid = co.confrelid AND co.confkey[1] = target.cnum
WHERE 1=1
AND co.contype = 'f' -- foreign-key constraints only
AND ns.nspname = 'test'
-- commented out the line below, to show the differences between "good" and "bad" FK constraints.
-- AND source.toid <> target.toid
;
Results (look at the operators, it is a feature, not a bug!)
version
----------------------------------------------------------------------------------------------------------
PostgreSQL 11.6 on armv7l-unknown-linux-gnueabihf, compiled by gcc (Raspbian 8.3.0-6+rpi1) 8.3.0, 32-bit
(1 row)
NOTICE: drop cascades to 2 other objects
DETAIL: drop cascades to table test.users
drop cascades to table test.othertable
DROP SCHEMA
CREATE SCHEMA
SET
CREATE TABLE
CREATE TABLE
PREPARE
INSERT 0 1
INSERT 0 1
ERROR: integer out of range
id | name
----+------
1 | one
2 | two
(2 rows)
blabla | userid | goodid
--------+--------+--------
bla1 | 1 | 1
bla2 | 2 | 2
(2 rows)
SET
nspname | conname | connamespace | contype | condeferrable | condeferred | convalidated | conrelid | contypid | conindid | conparentid | confrelid | confupdtype | confdeltype | confmatchtype | conislocal | coninhcount | connoinherit | conkey | confkey | conpfeqop | conppeqop | conffeqop | conexclop | conbin | consrc | source_table | source_column | source_type | target_table | target_column | target_type
---------+---------+--------------+---------+---------------+-------------+--------------+----------+----------+----------+-------------+-----------+-------------+-------------+---------------+------------+-------------+--------------+--------+---------+-----------+-----------+-----------+-----------+--------+--------+--------------+---------------+-------------+--------------+---------------+-------------
test | good_fk | 211305 | f | f | f | t | 211317 | 0 | 211315 | 0 | 211308 | a | a | s | t | 0 | t | {3} | {1} | {410} | {410} | {410} | | | | othertable | goodid | int8 | users | id | int8
test | bad_fk | 211305 | f | f | f | t | 211317 | 0 | 211315 | 0 | 211308 | a | a | s | t | 0 | t | {2} | {1} | {416} | {410} | {96} | | | | othertable | userid | int4 | users | id | int8
(2 rows)
I made this query that checks this:
-- FKs whose referencing column type differs from the referenced column type,
-- resolved through the information_schema views.
SELECT
    tc.table_schema,
    tc.constraint_name,
    tc.table_name,
    kcu.column_name,
    ccu.table_schema AS foreign_table_schema,
    ccu.table_name AS foreign_table_name,
    ccu.column_name AS foreign_column_name,
    sc.data_type AS data_type,
    dc.data_type AS foreign_data_type
FROM information_schema.table_constraints AS tc
INNER JOIN information_schema.key_column_usage AS kcu
    ON kcu.constraint_name = tc.constraint_name
    AND kcu.table_schema = tc.table_schema
-- sc: catalog entry of the referencing (source) column
INNER JOIN information_schema.columns AS sc
    ON sc.table_schema = kcu.table_schema
    AND sc.table_name = kcu.table_name
    AND sc.column_name = kcu.column_name
INNER JOIN information_schema.constraint_column_usage AS ccu
    ON ccu.constraint_name = tc.constraint_name
    AND ccu.table_schema = tc.table_schema
-- dc: catalog entry of the referenced (target) column
INNER JOIN information_schema.columns AS dc
    ON dc.table_schema = ccu.table_schema
    AND dc.table_name = ccu.table_name
    AND dc.column_name = ccu.column_name
WHERE tc.constraint_type = 'FOREIGN KEY'
    AND sc.data_type <> dc.data_type;
It is quite slow; any tips for optimisation are welcome.

Check a materialized view exists?

How do I check if a materialized view exists?
I have created one and checked in information_schema.tables and information_schema.views, but I cannot see it.
Where should I be looking?
Use the system catalog pg_class, e.g.:
-- Create a sample materialized view, then confirm it exists via pg_class.
create materialized view my_view as select 1;

select relname,
       relkind
from pg_class
where relname = 'my_view'
  and relkind = 'm'; -- relkind 'm' marks materialized views
relname | relkind
---------+---------
my_view | m
(1 row)
or the system view pg_matviews:
-- Alternative: the dedicated pg_matviews system view.
select *
from pg_matviews
where matviewname = 'my_view'; -- one row per existing materialized view
schemaname | matviewname | matviewowner | tablespace | hasindexes | ispopulated | definition
------------+-------------+--------------+------------+------------+-------------+------------
public | my_view | postgres | | f | t | SELECT 1;
(1 row)

Use column values to build up query

I have a table log containing columns schema_name & table_name & object_id & data and the table can contain records with different table_names and schema_names:
| schema_name | table_name | object_id | data |
| ------------- |-------------|-------------|-------------|
| bio | sample |5 |jsonb |
| bio | location |8 |jsonb |
| ... | ... |... |jsonb |
I want to execute a query as followed:
-- NOTE(review): pseudocode -- plain SQL cannot take table/schema names from
-- column values; this requires dynamic SQL (e.g. a PL/pgSQL function using
-- EXECUTE with format('%I.%I', schema_name, table_name)).
select schema_name,
table_name,
object_id,
(select some_column from schema_name.table_name where id = object_id)
from log
PS: id is a column that exists in every table (sample, location, ...)
Is there a way in PostgreSQL to use the values in columns to build up a query (so that schema_name and table_name are filled in based on the values of the columns)?

Size of temp tables created in a particular session

I created a temp table using below query
-- Rebuild the demo temp table from scratch; temp tables live only for the session.
DROP TABLE IF EXISTS tmp_a;
CREATE TEMP TABLE tmp_a
(
id int
);
-- Explicit column list: the bare-VALUES form breaks silently if columns
-- are ever added to tmp_a.
INSERT INTO tmp_a (id) SELECT generate_series(1, 10000);
When I queried pg_stat_activity, it is showing as "IDLE" in current_query column for the above session.
I can get the size of all temp tables from the pg_class table using this query.
But I want the list of temp tables created for a particular session and the size of those temp tables i.e if I created two temp tables from two different sessions then the result should be like below
procpid | temp table name | size | username
12345 | tmp_a | 20 | gpadmin
12346 | tmp_b | 30 | gpadmin
Please share the query if anyone has it
It's actually simpler than you think --
The temporary schema namespace is the same as the session id --
So...
-- List temp tables per session: the temp schema is named 'pg_temp_<sess_id>',
-- so pg_namespace rows can be matched back to pg_stat_activity.
SELECT
a.procpid as ProcessID,
a.sess_id as SessionID,
n.nspname as SchemaName,
c.relname as RelationName,
CASE c.relkind
WHEN 'r' THEN 'table'
WHEN 'v' THEN 'view'
WHEN 'i' THEN 'index'
WHEN 'S' THEN 'sequence'
WHEN 's' THEN 'special'
END as RelationType,
pg_catalog.pg_get_userbyid(c.relowner) as RelationOwner,
-- pg_relation_size(oid) is robust against mixed-case or special characters
-- in names, which the nspname||'.'||relname text form mishandles (unquoted).
pg_size_pretty(pg_relation_size(c.oid)) as RelationSize
FROM
pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
LEFT JOIN pg_catalog.pg_stat_activity a ON 'pg_temp_' || a.sess_id::varchar = n.nspname
WHERE c.relkind IN ('r','s')
-- filtering on n.nspname effectively turns the pg_namespace join into an inner join
AND (n.nspname !~ '^pg_toast' and n.nspname like 'pg_temp%')
ORDER BY pg_relation_size(c.oid) DESC;
And you get --
processid | sessionid | schemaname | relationname | relationtype | relationowner | relationsize
-----------+-----------+------------+--------------+--------------+---------------+--------------
5006 | 9 | pg_temp_9 | tmp_a | table | gpadmin | 384 kB
5006 | 9 | pg_temp_9 | tmp_b | table | gpadmin | 384 kB
(2 rows)
Let's put that process to sleep -- and startup another....
gpadmin=#
[1]+ Stopped psql
[gpadmin#gpdb-sandbox ~]$ psql
psql (8.2.15)
Type "help" for help.
gpadmin=# SELECT nspname
FROM pg_namespace
WHERE oid = pg_my_temp_schema();
nspname
---------
(0 rows)
gpadmin=# Create temp table tmp_a( id int );
NOTICE: Table doesn't have 'DISTRIBUTED BY' clause -- Using column named 'id' as the Greenplum Database data distribution key for this table.
HINT: The 'DISTRIBUTED BY' clause determines the distribution of data. Make sure column(s) chosen are the optimal data distribution key to minimize skew.
CREATE TABLE
gpadmin=# SELECT nspname
FROM pg_namespace
WHERE oid = pg_my_temp_schema();
nspname
---------
pg_temp_10
(1 row)
... run the same query ...
processid | sessionid | schemaname | relationname | relationtype | relationowner | relationsize
-----------+-----------+------------+--------------+--------------+---------------+--------------
5006 | 9 | pg_temp_9 | tmp_a | table | gpadmin | 384 kB
5006 | 9 | pg_temp_9 | tmp_b | table | gpadmin | 384 kB
27365 | 10 | pg_temp_10 | tmp_a | table | gpadmin | 384 kB
(3 rows)

Amazon Redshift get list of identity columns for a table

How can I get a list of identity columns for a table in Amazon Redshift? (using system's tables)
Thank you.
For those who might be interested to know about how to get all identity columns in a Redshift DB. The following query was posted by Neil#AWS to AWS Redshift Forum:
-- Identity columns carry a generated default whose source text (pg_attrdef.adsrc)
-- contains 'identity'. Explicit ANSI joins replace the error-prone comma-join form,
-- and ORDER BY names the column instead of using a positional reference.
select
c.relname,
a.attname
from pg_class c
inner join pg_attribute a on a.attrelid = c.oid
inner join pg_attrdef d on d.adrelid = a.attrelid
                       and d.adnum = a.attnum
where c.relkind = 'r' -- ordinary tables only
  and d.adsrc like '%identity%'
order by c.relname;
The PG_TABLE_DEF table contains information about tables and columns:
-- Column-level metadata (type, encoding, distkey, sortkey) for one table.
select *
from pg_table_def
where tablename = 't2';
schemaname|tablename|column| type | encoding | distkey |sortkey| notnull
----------+---------+------+---------+----------+---------+-------+---------
public | t2 | c1 | bigint | none | t | 0 | f
public | t2 | c2 | integer | mostly16 | f | 0 | f
public | t2 | c3 | integer | none | f | 1 | t
public | t2 | c4 | integer | none | f | 2 | f
(4 rows)
Also, the standard PostgreSQL catalog tables are accessible to Amazon Redshift users. For more information about PostgreSQL system catalogs, see PostgreSQL System Tables.