PostgreSQL does not drop partition cascade - postgresql

I have a DB with 2 partitioned tables like
create table ppp (
id serial not null,
name varchar(255),
primary key (id)
);
insert into ppp (name) values ('ppp_first');
insert into ppp (name) values ('ppp_second');
create table rrr (
id serial not null,
ppp_id integer not null,
name varchar(255),
primary key (id, ppp_id),
foreign key (ppp_id) references ppp (id) on delete cascade
) partition by list(ppp_id);
create table rrr1 partition of rrr for values in (1);
create table rrr2 partition of rrr for values in (2);
create table sss (
id bigserial not null,
ppp_id integer not null,
rrr_id integer not null,
name varchar(255),
primary key (id, ppp_id),
foreign key (ppp_id, rrr_id) references rrr (ppp_id, id) on delete cascade
) partition by list(ppp_id);
create table sss1 partition of sss for values in (1);
create table sss2 partition of sss for values in (2);
insert into rrr (ppp_id, name) values (1, 'rrr_first');
insert into rrr (ppp_id, name) values (2, 'rrr_second');
insert into sss (ppp_id, rrr_id, name) values (1, 1, 'sss_first');
insert into sss (ppp_id, rrr_id, name) values (2, 2, 'sss_second');
I want to drop rrr1 with drop table rrr1 cascade;. It is dropped without errors, but the partition sss1 is not dropped. So
=> select * from rrr;
id | ppp_id | name
----+--------+------------
2 | 2 | rrr_second
(1 row)
=> select * from sss;
id | ppp_id | rrr_id | name
----+--------+--------+------------
1 | 1 | 1 | sss_first
2 | 2 | 2 | sss_second
Why is sss1 not dropped by the cascade? A request to drop it without cascade leads to an error.

Related

Idiomatic way to atomically create a table record that is associated with records in other tables

I am coming from graph databases and postgres is still super foreign to me.
I have the following tables
CREATE TYPE runnerenum AS ENUM ('runner');
CREATE TABLE IF NOT EXISTS collections (
collectionid UUID PRIMARY KEY,
name VARCHAR(256) UNIQUE NOT NULL,
runner runnerenum NOT NULL,
runconfig JSONB
);
CREATE TABLE IF NOT EXISTS namedexprs(
namedexprid UUID PRIMARY KEY,
name VARCHAR(256) UNIQUE NOT NULL,
-- exprid UUID NOT NULL REFERENCES expressions(exprid),
collectionid UUID NOT NULL REFERENCES collections(collectionid) ON DELETE CASCADE
);
CREATE TABLE IF NOT EXISTS expressions(
exprid UUID PRIMARY KEY,
ast JSONB NOT NULL,
namedexprid UUID NOT NULL REFERENCES namedexprs(namedexprid) ON DELETE CASCADE
);
My question is: what is the idiomatic way to create a collection atomically (while also creating its associated expressions and namedexprs)? Currently I am executing three separate queries and getting errors because of a foreign key violation.
Example of using DEFERRABLE:
CREATE TABLE parent_tbl (
parent_id integer PRIMARY KEY,
parent_val varchar UNIQUE
);
CREATE TABLE child_tbl (
child_id integer PRIMARY KEY,
parent_fk varchar REFERENCES parent_tbl (parent_val)
ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED,
child_val varchar
);
\d child_tbl
Table "public.child_tbl"
Column | Type | Collation | Nullable | Default
-----------+-------------------+-----------+----------+---------
child_id | integer | | not null |
parent_fk | character varying | | |
child_val | character varying | | |
Indexes:
"child_tbl_pkey" PRIMARY KEY, btree (child_id)
Foreign-key constraints:
"child_tbl_parent_fk_fkey" FOREIGN KEY (parent_fk) REFERENCES parent_tbl(parent_val) ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED
BEGIN;
INSERT INTO child_tbl VALUES (1, 'dog', 'cat');
SELECT * FROM child_tbl ;
child_id | parent_fk | child_val
----------+-----------+-----------
1 | dog | cat
(1 row)
SELECT * FROM parent_tbl ;
parent_id | parent_val
-----------+------------
(0 rows)
INSERT INTO parent_tbl VALUES (1, 'dog');
SELECT * FROM parent_tbl ;
parent_id | parent_val
-----------+------------
1 | dog
COMMIT;
The key to using DEFERRABLE is that the individual data entry statements need to be bundled into the same transaction, the BEGIN;/COMMIT;. This allows DEFERRABLE INITIALLY DEFERRED to work as the constraint check is deferred until the end of the transaction. For more ways you can manipulate this see SET CONSTRAINTS.

PostgreSQL querying through schemas

I want a query that lists all Customers whose status is "active". This query would return a list of customers who are marked as active. My problem is that I am lost on querying tables that reference other tables. Here is my schema.
CREATE TABLE Customer (
ID BIGSERIAL PRIMARY KEY NOT NULL,
fNAME TEXT NOT NULL,
lNAME TEXT NOT NULL,
create_date DATE NOT NULL DEFAULT NOW()
);
CREATE TABLE CustomerStatus (
recordID BIGSERIAL NOT NULL,
ID BIGSERIAL REFERENCES Customer NOT NULL,
status TEXT NOT NULL,
create_date DATE NOT NULL DEFAULT NOW()
);
INSERT INTO Customer (fNAME, lNAME) VALUES ('MARK', 'JOHNSON'), ('ERICK', 'DAWN'), ('MAY', 'ERICKSON'), ('JESS', 'MARTIN');
INSERT INTO CustomerStatus (ID, status) VALUES (1, 'pending'), (1, 'active');
INSERT INTO CustomerStatus (ID, status) VALUES (2, 'pending'), (2, 'active'), (2, 'cancelled');
INSERT INTO CustomerStatus (ID, status) VALUES (3, 'pending'), (3, 'active');
INSERT INTO CustomerStatus (ID, status) VALUES (4, 'pending');
I ventured to assume that, since recordid is serial, the latest status for a customer has the highest recordid, and produced this query:
t=# with a as (
select *, max(recordid) over (partition by cs.id)
from Customer c
join CustomerStatus cs on cs.id = c.id
)
select *
from a
where recordid=max and status = 'active';
id | fname | lname | create_date | recordid | id | status | create_date | max
----+-------+----------+-------------+----------+----+--------+-------------+-----
1 | MARK | JOHNSON | 2017-04-27 | 2 | 1 | active | 2017-04-27 | 2
3 | MAY | ERICKSON | 2017-04-27 | 7 | 3 | active | 2017-04-27 | 7
(2 rows)
Time: 0.450 ms

Postgresql constraint where child table has date and foreign key to table that has date range such that parent's date range is valid for child's date

I'm using PostgreSQL 9.4. Let's say I have 2 tables:
CREATE TABLE parents(
id serial not null,
parent_daterange daterange not null,
constraint "parent_id" primary key ("id")
);
CREATE TABLE children(
id serial not null,
child_date date not null,
parent_id int4 not null references parents(id),
constraint "child_id" primary key ("id")
);
I want to make sure children.child_date is always inside of its respective parents.parent_daterange.
So, this would be valid:
select * from children;
id | child_date | parent_id
---+------------+-----------
1 | 2016-01-01 | 1
select * from parents;
id | parent_daterange
---+------------------
1 | [2015-12-31,2016-01-10]
This wouldn't be valid and in this case, I'd want the INSERT statement on the children table to fail:
select * from children;
id | child_date | parent_id
---+------------+-----------
1 | 2016-01-11 | 1
select * from parents;
id | parent_daterange
---+------------------
1 | [2015-12-31,2016-01-10]

Insert into table, return id and then insert into another table with stored id

I have the following three tables:
Please note that the DDL below came from models generated by Django and was then extracted from PostgreSQL after the tables were created, so modifying the tables is not an option.
CREATE TABLE "parentTeacherCon_grade"
(
id INTEGER PRIMARY KEY NOT NULL,
"currentGrade" VARCHAR(2) NOT NULL
);
CREATE TABLE "parentTeacherCon_parent"
(
id INTEGER PRIMARY KEY NOT NULL,
name VARCHAR(50) NOT NULL,
grade_id INTEGER NOT NULL
);
CREATE TABLE "parentTeacherCon_teacher"
(
id INTEGER PRIMARY KEY NOT NULL,
name VARCHAR(50) NOT NULL
);
CREATE TABLE "parentTeacherCon_teacher_grade"
(
id INTEGER PRIMARY KEY NOT NULL,
teacher_id INTEGER NOT NULL,
grade_id INTEGER NOT NULL
);
ALTER TABLE "parentTeacherCon_parent" ADD FOREIGN KEY (grade_id) REFERENCES "parentTeacherCon_grade" (id);
CREATE INDEX "parentTeacherCon_parent_5c853be8" ON "parentTeacherCon_parent" (grade_id);
CREATE INDEX "parentTeacherCon_teacher_5c853be8" ON "parentTeacherCon_teacher" (grade_id);
ALTER TABLE "parentTeacherCon_teacher_grade" ADD FOREIGN KEY (teacher_id) REFERENCES "parentTeacherCon_teacher" (id);
ALTER TABLE "parentTeacherCon_teacher_grade" ADD FOREIGN KEY (grade_id) REFERENCES "parentTeacherCon_grade" (id);
CREATE UNIQUE INDEX "parentTeacherCon_teacher_grade_teacher_id_20e07c38_uniq" ON "parentTeacherCon_teacher_grade" (teacher_id, grade_id);
CREATE INDEX "parentTeacherCon_teacher_grade_d9614d40" ON "parentTeacherCon_teacher_grade" (teacher_id);
CREATE INDEX "parentTeacherCon_teacher_grade_5c853be8" ON "parentTeacherCon_teacher_grade" (grade_id);
My question is: how do I write an insert statement (or statements) where I do not have to keep track of the IDs? More specifically, I have a teacher table where a teacher can relate to more than one grade, and I am attempting to write my insert statements to start populating my DB such that I only declare a teacher's name and the grades they relate to.
For example, if I have a teacher that belongs to only one grade, then the insert statement looks like this.
INSERT INTO "parentTeacherCon_teacher" (name, grade_id) VALUES ('foo bar', 1 );
where grades K–12 are enumerated 0–12.
But Need to do something like (I realize this does not work)
INSERT INTO "parentTeacherCon_teacher" (name, grade_id) VALUES ('foo bar', (0,1,3) );
To indicate that this teacher relates to K, 1, and 3 grades
leaving me with this table for the parentTeacherCon_teacher_grade
+----+------------+----------+
| id | teacher_id | grade_id |
+----+------------+----------+
| 1 | 3 | 0 |
| 2 | 3 | 1 |
| 3 | 3 | 3 |
+----+------------+----------+
This is how I can currently (successfully) insert into the Teacher Table.
INSERT INTO public."parentTeacherCon_teacher" (id, name) VALUES (3, 'Foo Bar');
Then into the grade table
INSERT INTO public.parentTeacherCon_teacher_grade (id, teacher_id, grade_id) VALUES (1, 3, 0);
INSERT INTO public.parentTeacherCon_teacher_grade (id, teacher_id, grade_id) VALUES (2, 3, 1);
INSERT INTO public.parentTeacherCon_teacher_grade (id, teacher_id, grade_id) VALUES (3, 3, 3);
A bit more information.
Here is a diagram of the database
Other things I have tried.
WITH i1 AS (INSERT INTO "parentTeacherCon_teacher" (name) VALUES ('foo bar')
RETURNING id) INSERT INTO "parentTeacherCon_teacher_grade"
SELECT
i1.id
, v.val
FROM i1, (VALUES (1), (2), (3)) v(val);
Then I get this error.
[2016-08-10 16:07:46] [23502] ERROR: null value in column "grade_id" violates not-null constraint
Detail: Failing row contains (6, 1, null).
If you want to insert all three rows in one statement, you can use:
INSERT INTO "parentTeacherCon_teacher" (name, grade_id)
SELECT 'foo bar', g.grade_id
FROM (SELECT 0 as grade_id UNION ALL SELECT 1 UNION ALL SELECT 3) g;
Or, if you prefer:
INSERT INTO "parentTeacherCon_teacher" (name, grade_id)
SELECT 'foo bar', g.grade_id
FROM (VALUES (0), (2), (3)) g(grade_id);
EDIT:
In Postgres, you can have data modification statements as a CTE:
WITH i as (
INSERT INTO public."parentTeacherCon_teacher" (id, name)
VALUES (3, 'Foo Bar')
RETURNING *
)
INSERT INTO "parentTeacherCon_teacher" (name, teacher_id, grade_id)
SELECT 'foo bar', i.id, g.grade_id
FROM (VALUES (0), (2), (3)) g(grade_id) CROSS JOIN
i

PostgreSQL: dynamically create result columns

I want to "dynamically" create the result columns in a PostgreSQL query. I have these tables:
CREATE SEQUENCE users_id;
CREATE TABLE users (
id INT PRIMARY KEY NOT NULL DEFAULT NEXTVAL('users_id'),
name VARCHAR(128) NOT NULL
);
CREATE SEQUENCE quota_rules_id;
CREATE TABLE quota_rules (
id INT PRIMARY KEY NOT NULL DEFAULT NEXTVAL('quota_rules_id'),
user_id INT REFERENCES users(id),
rule VARCHAR(255) NOT NULL
);
CREATE INDEX user_id_index ON quota_rules(user_id);
INSERT INTO users (name) VALUES ('myname'); -- id=1
INSERT INTO quota_rules (user_id, rule) VALUES (1, 'a');
INSERT INTO quota_rules (user_id, rule) VALUES (1, 'b');
INSERT INTO quota_rules (user_id, rule) VALUES (1, 'c');
And want a query that returns this (1 row):
SELECT ............ user_id = 1;
name | quota_rule | quota_rule2 | quota_rule3
myname | a | b | c
Check out the crosstab function of the tablefunc module