I'm having an issue with this code when I try to insert values into the transactions table - PostgreSQL

So I'm setting up a schema in which I can insert the transactions of a journal entry independently of each other, while still having them depend on each other (mainly to ensure that debits = credits). I set up the tables, function, and trigger. Then, when I try to insert values into the transactions table, I get the error below. I'm doing all of this in pgAdmin 4.
CREATE TABLE transactions (
    transactions_id UUID PRIMARY KEY DEFAULT uuid_generate_v1(),
    entry_id INTEGER NOT NULL,
    post_date DATE NOT NULL,
    account_id INTEGER NOT NULL,
    contact_id INTEGER NULL,
    description TEXT NOT NULL,
    reference_id UUID NULL,
    document_id UUID NULL,
    amount NUMERIC(12,2) NOT NULL
);
CREATE TABLE entries (
    id UUID PRIMARY KEY,
    test_date DATE NOT NULL,
    balance NUMERIC(12,2)
        CHECK (balance = 0.00)
);
CREATE OR REPLACE FUNCTION transactions_biut()
RETURNS TRIGGER
LANGUAGE plpgsql
AS $$
BEGIN
    EXECUTE 'INSERT INTO entries (id,test_date,balance)
        SELECT
            entry_id,
            post_date,
            SUM(amount) AS ''balance''
        FROM
            transactions
        GROUP BY
            entry_id;';
END;
$$;
CREATE TRIGGER transactions_biut
BEFORE INSERT OR UPDATE ON transactions
FOR EACH ROW EXECUTE PROCEDURE transactions_biut();
INSERT INTO transactions (
entry_id,
post_date,
account_id,
description,
amount
)
VALUES
(
'1',
'2019-10-01',
'101',
'MISC DEBIT: PAID FOR FACEBOOK ADS',
-200.00
),
(
'1',
'2019-10-01',
'505',
'MISC DEBIT: PAID FOR FACEBOOK ADS',
200.00
);
After I execute this input, I get the following error:
ERROR: column "id" of relation "entries" does not exist
LINE 1: INSERT INTO entries (id,test_date,balance)
^
QUERY: INSERT INTO entries (id,test_date,balance)
SELECT
entry_id,
post_date,
SUM(amount) AS "balance"
FROM
transactions
GROUP BY
entry_id;
CONTEXT: PL/pgSQL function transactions_biut() line 2 at EXECUTE
SQL state: 42703

There are a few problems here:
You're not returning anything from the trigger function => it should probably be RETURN NEW or RETURN OLD, since you're not modifying anything
Since you're executing the trigger before each row, it's bound to fail for any transaction that isn't 0 => maybe you want a deferred constraint trigger? (see the sketch after this list)
You're not grouping by post_date, so your select should fail
You've defined entry_id as INTEGER, but entries.id is of type UUID
Also note that this isn't really going to scale (you're summing up all transactions of all days, so this will get slower and slower...)
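Regarding the deferred constraint trigger idea, here is a minimal sketch of what that could look like for this schema (function and trigger names are my own, not from the question):
CREATE OR REPLACE FUNCTION check_entry_balance()
RETURNS TRIGGER
LANGUAGE plpgsql
AS $$
BEGIN
    -- at the deferred firing time, the entry touched by this row must net to zero
    IF (SELECT COALESCE(SUM(amount), 0)
        FROM transactions
        WHERE entry_id = NEW.entry_id) <> 0 THEN
        RAISE EXCEPTION 'entry % does not balance to zero', NEW.entry_id;
    END IF;
    RETURN NULL;  -- the return value of an AFTER trigger is ignored
END;
$$;
CREATE CONSTRAINT TRIGGER transactions_balance_check
AFTER INSERT OR UPDATE ON transactions
DEFERRABLE INITIALLY DEFERRED
FOR EACH ROW EXECUTE PROCEDURE check_entry_balance();
Because the trigger is INITIALLY DEFERRED, the balance check only runs at commit time (or at SET CONSTRAINTS ... IMMEDIATE), so the individual legs of a journal entry can be inserted by separate statements within one transaction.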

#chirs I was able to figure out how to create a functioning solution using statement-level triggers:
CREATE TABLE transactions (
transactions_id UUID PRIMARY KEY DEFAULT uuid_generate_v1(),
entry_id INTEGER NOT NULL,
post_date DATE NOT NULL,
account_id INTEGER NOT NULL,
contact_id INTEGER NULL,
description TEXT NOT NULL,
reference_id UUID NULL,
document_id UUID NULL,
amount NUMERIC(12,2) NOT NULL
);
CREATE TABLE entries (
entry_id INTEGER PRIMARY KEY,
post_date DATE NOT NULL,
balance NUMERIC(12,2),
CHECK (balance = 0.00)
);
CREATE OR REPLACE FUNCTION transactions_entries() RETURNS TRIGGER AS $$
BEGIN
IF (TG_OP = 'DELETE') THEN
INSERT INTO entries
SELECT o.entry_id, o.post_date, SUM(o.amount) FROM old_table o GROUP BY o.entry_id, o.post_date;
ELSIF (TG_OP = 'UPDATE') THEN
INSERT INTO entries
SELECT o.entry_id, n.post_date, SUM(n.amount) FROM new_table n, old_table o GROUP BY o.entry_id, n.post_date;
ELSIF (TG_OP = 'INSERT') THEN
INSERT INTO entries
SELECT n.entry_id,n.post_date, SUM(n.amount) FROM new_table n GROUP BY n.entry_id, n.post_date;
END IF;
RETURN NULL; -- result is ignored since this is an AFTER trigger
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER transactions_ins
AFTER INSERT ON transactions
REFERENCING NEW TABLE AS new_table
FOR EACH STATEMENT EXECUTE PROCEDURE transactions_entries();
CREATE TRIGGER transactions_upd
AFTER UPDATE ON transactions
REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table
FOR EACH STATEMENT EXECUTE PROCEDURE transactions_entries();
CREATE TRIGGER transactions_del
AFTER DELETE ON transactions
REFERENCING OLD TABLE AS old_table
FOR EACH STATEMENT EXECUTE PROCEDURE transactions_entries();
Any thoughts on optimization?
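One idea, offered only as a hedged sketch on top of the statement-level solution above: since entries.entry_id is the primary key, a later statement that touches an already-posted entry_id will hit a duplicate-key error. Replacing the INSERT branch with an upsert would accumulate the balance instead (column names as in the tables above):
INSERT INTO entries (entry_id, post_date, balance)
SELECT n.entry_id, n.post_date, SUM(n.amount)
FROM new_table n
GROUP BY n.entry_id, n.post_date
ON CONFLICT (entry_id)
DO UPDATE SET balance = entries.balance + EXCLUDED.balance;
The CHECK (balance = 0.00) on entries would then require the running total for an entry to be zero after every statement.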

Related

Trying to automatically insert into a table using triggers in PostgreSQL

I am trying to make a trigger and function that inserts into the table purchases the values which have been inserted into the table customers.
Columns of table customers:
1. customer_id serial PK, references customer_id in purchases
2. c_name VARCHAR
3. amount DOUBLE PRECISION
Columns of table purchases:
1. customer_id serial PK
2. amount DOUBLE PRECISION
The code for the trigger and the function:
CREATE OR REPLACE FUNCTION auto_insert_purchases()
RETURNS TRIGGER
LANGUAGE PLPGSQL
AS
$body$
BEGIN
insert into purchases(customer_id,purchase) values
(NEW.customer_id,NEW.purchase);
END
$body$
CREATE TRIGGER tr_auto_insert_purchases
AFTER INSERT ON customers
EXECUTE PROCEDURE auto_insert_purchases()
As you can see, it's supposed to take the new row data and insert it into the table, but after doing an insert into customers like this:
insert into customers values(2,'Stewie Griffin',4.99);
I get this error message:
ERROR: null value in column "customer_id" of relation "purchases" violates not-null
constraint
DETAIL: Failing row contains (null, null).
CONTEXT: SQL statement "insert into purchases(customer_id,purchase) values
(NEW.customer_id,NEW.purchase)"
PL/pgSQL function auto_insert_purchases() line 3 at SQL statement
SQL state: 23502
Why does the failing row contain null? Am I using the NEW keyword incorrectly?
CREATE TABLE customers (
customer_id int4 NULL,
c_name varchar NULL,
amount float8 NULL
);
CREATE TABLE purchases (
customer_id int4 NULL,
amount float8 NULL
);
CREATE OR REPLACE FUNCTION auto_insert_purchases()
RETURNS trigger
LANGUAGE plpgsql
AS $function$
BEGIN
insert into purchases(customer_id, amount) values
(NEW.customer_id, NEW.amount);
return new;
END;
$function$
;
create trigger tr_auto_insert_purchases
after insert ON customers
for each row
execute procedure auto_insert_purchases();
insert into customers(customer_id, c_name, amount) values (2,'Stewie Griffin', 4.99);
select * from purchases;
-- Result:
customer_id|amount|
-----------+------+
2| 4.99|
Maybe you just forgot to write the FOR EACH ROW clause after CREATE TRIGGER tr_auto_insert_purchases AFTER INSERT ON customers. Without it the trigger fires once per statement, and in a statement-level trigger NEW is NULL, which is why the failing row contains (null, null).

Trigger to set date automatically after update

Some background info: I have a table named defects which has a column named status_id and another column named date_closed. I want to set date_closed after status_id has been updated.
I already tried to do this using an AFTER UPDATE trigger with the following code:
after update on eba_bt_sw_defects
for each row
declare
l_status number(20) := null;
begin
select status_id into l_status from eba_bt_sw_defects D,eba_bt_status S where D.status_id = S.id;
if l_status in ( select id from eba_bt_status where is_open = 'N' and NVL(is_enhancement,'N')='N') then
:NEW.DATE_CLOSED := LOCALTIMESTAMP ;
end if;
end;
but an error occurred (subquery not allowed in this context; compilation failed).
I would appreciate some help.
A couple of things that need fixing in your code:
In a trigger, do not select from the table the trigger is defined on. This will probably raise an ORA-04091: table name is mutating, trigger/function may not see it error.
IF l_variable IN (SELECT ...) is not valid Oracle syntax. It raises PLS-00405: subquery not allowed in this context
I don't have your data so here is a similar example:
drop table todos;
drop table statuses;
-- create tables
create table statuses (
id number generated by default on null as identity
constraint statuses_id_pk primary key,
status varchar2(60 char),
is_open varchar2(1 char) constraint statuses_is_open_ck
check (is_open in ('Y','N'))
)
;
create table todos (
id number generated by default on null as identity
constraint todos_id_pk primary key,
name varchar2(255 char) not null,
close_date timestamp with local time zone,
status_id number
constraint todos_status_id_fk
references statuses on delete cascade
)
;
-- load data
insert into statuses (id, status, is_open ) values (1, 'OPEN', 'Y' );
insert into statuses (id, status, is_open ) values (2, 'COMPLETE', 'N' );
insert into statuses (id, status, is_open ) values (3, 'ON HOLD', 'Y' );
insert into statuses (id, status, is_open ) values (4, 'CANCELLED', 'N' );
commit;
insert into todos (name, close_date, status_id ) values ( 'Y2 Security Review', NULL, 1 );
-- triggers
CREATE OR REPLACE TRIGGER todos_biu BEFORE
INSERT OR UPDATE ON todos
FOR EACH ROW
DECLARE
l_dummy NUMBER;
BEGIN
SELECT
1
INTO l_dummy
FROM
statuses
WHERE
is_open = 'N' AND
id = :new.status_id;
:new.close_date := localtimestamp;
EXCEPTION
WHEN no_data_found THEN
-- I'm assuming you want close_date to NULL if todo is re-opened.
:new.close_date := NULL;
END todos_biu;
/
update todos set status_id = 2;
select * from todos;
id name close_date status_id
1 Y2 Security Review 11-MAY-22 05.27.04.987117000 PM 2

PostgreSQL -- JOIN UNNEST output with CTE INSERT ID -- INSERT many to many

In a PostgreSQL function, is it possible to join the result of UNNEST, which is an integer array from function input, with an ID returned from a CTE INSERT?
I have PostgreSQL tables like:
CREATE TABLE public.message (
id SERIAL PRIMARY KEY,
content TEXT
);
CREATE TABLE public.message_tag (
id SERIAL PRIMARY KEY,
message_id INTEGER NOT NULL CONSTRAINT message_tag_message_id_fkey REFERENCES public.message(id) ON DELETE CASCADE,
tag_id INTEGER NOT NULL CONSTRAINT message_tag_tag_id_fkey REFERENCES public.tag(id) ON DELETE CASCADE
);
I want to create a PostgreSQL function which takes content and an array of tag_id as input. This is for Graphile. I want to do it all in one function, so I get a mutation.
Here's what I got so far. I don't know how to join an UNNEST across an id returned from a CTE.
CREATE FUNCTION public.create_message(content text, tags Int[])
RETURNS public.message
AS $$
-- insert to get primary key of message, for many to many message_id
WITH moved_rows AS (
INSERT INTO public.message (content)
RETURNING *;
)
-- many to many relation
INSERT INTO public.message_tag
SELECT moved_rows.id as message_id, tagInput.tag_id FROM moved_rows, UNNEST(tags) as tagInput;
RETURNING *
$$ LANGUAGE sql VOLATILE STRICT;
You're not that far from your goal:
the semicolon placement in the CTE is wrong
the first INSERT statement lacks a SELECT or VALUES clause to specify what should be inserted
the INSERT into message_tag should specify the columns in which to insert (especially if you have that unnecessary serial id)
you specified a relation alias for the UNNEST call already, but none for the column tag_id
your function was RETURNING a set of message_tag rows but was specified to return a single message row
To fix these:
CREATE FUNCTION public.create_message(content text, tags Int[])
RETURNS public.message
AS $$
-- insert to get primary key of message, for many to many message_id
WITH moved_rows AS (
INSERT INTO public.message (content)
VALUES ($1)
RETURNING *
),
-- many to many relation
_ AS (
INSERT INTO public.message_tag (message_id, tag_id)
SELECT moved_rows.id, tagInput.tag_id
FROM moved_rows, UNNEST($2) as tagInput(tag_id)
)
TABLE moved_rows;
$$ LANGUAGE sql VOLATILE STRICT;
(Online demo)
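A quick usage sketch (assuming tag rows with id 1 and 2 already exist in public.tag):
SELECT * FROM public.create_message('hello world', ARRAY[1, 2]);
-- returns the new message row; the two message_tag rows are written by the second CTE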

Postgres exclude using gist across different tables

I have 2 tables like this
drop table if exists public.table_1;
drop table if exists public.table_2;
CREATE TABLE public.table_1 (
id serial NOT NULL,
user_id bigint not null,
status varchar(255) not null,
date_start date NOT NULL,
date_end date NULL
);
CREATE TABLE public.table_2 (
id serial NOT NULL,
user_id bigint not null,
status varchar(255) not null,
date_start date NOT NULL,
date_end date NULL
);
alter table public.table_1
add constraint my_constraint_1
EXCLUDE USING gist (user_id with =, daterange(date_start, date_end, '[]') WITH &&)
where (status != 'deleted');
alter table public.table_2
add constraint my_constraint_2
EXCLUDE USING gist (user_id with =, daterange(date_start, date_end, '[]') WITH &&)
where (status != 'deleted');
Each table contains rows related to a user, and the rows for the same user cannot have overlapping date ranges. In addition, some rows may be logically deleted, so I added a where condition.
So far it's working without problems, but the two constraints operate separately on each table.
I need a constraint that covers both tables, so that a given daterange (for the same user, and not deleted) may appear only once across the two tables.
Can the EXCLUDE constraint be extended to work across different tables, or do I need to check this with a trigger? If a trigger is the answer, what is the simplest way to do it? Create a temporary table with the union of the two, add the constraint on it, and check whether it fails?
Starting from @Laurenz Albe's suggestion, this is what I made:
-- #################### SETUP SAMPLE TABLES ####################
drop table if exists public.table_1;
drop table if exists public.table_2;
CREATE TABLE public.table_1 (
id serial NOT NULL,
user_id bigint not null,
status varchar(255) not null,
date_start date NOT NULL,
date_end date NULL
);
CREATE TABLE public.table_2 (
id serial NOT NULL,
user_id bigint not null,
status varchar(255) not null,
date_start date NOT NULL,
date_end date NULL
);
alter table public.table_1
add constraint my_constraint_1
EXCLUDE USING gist (user_id with =, daterange(date_start, date_end, '[]') WITH &&)
where (status != 'deleted');
alter table public.table_2
add constraint my_constraint_2
EXCLUDE USING gist (user_id with =, daterange(date_start, date_end, '[]') WITH &&)
where (status != 'deleted');
-- #################### SETUP TRIGGER ####################
create or REPLACE FUNCTION check_date_overlap_trigger_hook()
RETURNS trigger as
$body$
DECLARE
l_table text;
l_sql text;
l_row record;
begin
l_table := TG_ARGV[0];
l_sql := format('
select *
from public.%s as t
where
t.user_id = %s -- Include only records of the same user
and t.status != ''deleted'' -- Include only records that are active
', l_table, new.user_id);
for l_row in execute l_sql
loop
IF daterange(l_row.date_start, COALESCE(l_row.date_end, 'infinity'::date)) && daterange(new.date_start, COALESCE(new.date_end, 'infinity'::date))
THEN
RAISE EXCEPTION 'Date interval is overlapping with another one in table %', l_table
USING HINT = 'You can''t have the same interval across table1 AND table2';
END IF;
end loop;
RETURN NEW;
end
$body$
LANGUAGE plpgsql;
-- #################### INSTALL TRIGGER ####################
create trigger check_date_overlap
BEFORE insert or update
ON public.table_1
FOR EACH row
EXECUTE PROCEDURE check_date_overlap_trigger_hook('table_2');
create trigger check_date_overlap
BEFORE insert or update
ON public.table_2
FOR EACH row
EXECUTE PROCEDURE check_date_overlap_trigger_hook('table_1');
-- #################### INSERT DEMO ROWS ####################
insert into public.table_1 (user_id, status, date_start, date_end) values (1, 'active', '2020-12-10', '2020-12-20');
insert into public.table_1 (user_id, status, date_start, date_end) values (1, 'deleted', '2020-12-15', '2020-12-25');
insert into public.table_1 (user_id, status, date_start, date_end) values (2, 'active', '2020-12-10', '2020-12-20');
insert into public.table_1 (user_id, status, date_start, date_end) values (2, 'deleted', '2020-12-15', '2020-12-25');
-- This will fail for overlap on the same table
-- insert into public.table_1 (user_id, status, date_start, date_end) values (1, 'active', '2020-12-15', '2020-12-25');
-- This will fail as the user 1 already has an overlapping period on table 1
-- insert into public.table_2 (user_id, status, date_start, date_end) values (1, 'active', '2020-12-15', '2020-12-25');
-- This insert succeeds because the row is 'deleted'...
insert into public.table_2 (user_id, status, date_start, date_end) values (1, 'deleted', '2020-12-15', '2020-12-25');
-- ...but this update will fail, as user 1 already has an overlapping active period on table_1
update public.table_2 set status = 'active' where id = 1;
select 'table_1' as src_table, * from public.table_1
union
select 'table_2', * from public.table_2
You can probably use a trigger, but triggers are always vulnerable to race conditions (unless you are using SERIALIZABLE isolation).
If your tables really have the same columns, why don't you use a single table (and perhaps add a type column to disambiguate)?
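A minimal sketch of that single-table idea (my own reading of the suggestion; table and column names are illustrative, and it assumes the btree_gist extension just like the original constraints):
CREATE TABLE public.periods (
    id serial NOT NULL,
    src text NOT NULL,  -- the disambiguating "type" column, e.g. 'table_1' or 'table_2'
    user_id bigint NOT NULL,
    status varchar(255) NOT NULL,
    date_start date NOT NULL,
    date_end date NULL,
    EXCLUDE USING gist (user_id WITH =, daterange(date_start, date_end, '[]') WITH &&)
        WHERE (status != 'deleted')
);
A single exclusion constraint then covers what the two per-table constraints covered, with no trigger and no race conditions.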

Need foreign key as array

CREATE TABLE test ( id int PRIMARY KEY , name );
CREATE TABLE test1 ( id integer[] REFERENCES test , rollid int );
ERROR: foreign key constraint "test3_id_fkey" cannot be implemented
DETAIL: Key columns "id" and "id" are of incompatible types: integer[] and integer.
After that, I tried another way:
CREATE TABLE test1 ( id integer[] , rollid int);
ALTER TABLE test1 ADD CONSTRAINT foreignkeyarray FOREIGN KEY (id) REFERENCES test;
ERROR: foreign key constraint "fkarray" cannot be implemented
DETAIL: Key columns "id" and "id" are of incompatible types: integer[] and integer.
So when I try to create a foreign key on an array column, I get this error. Can anyone tell me how to do this?
PostgreSQL version is 9.1.
What you're trying to do simply can't be done. At all. No ifs, no buts.
Create a new table, test1_test, containing two fields, test1_id, test_id. Put the foreign keys as needed on that one, and make test1's id an integer.
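A minimal sketch of that design (column types are my assumption; the question's test table would also need a type on name):
CREATE TABLE test ( id int PRIMARY KEY, name text );
CREATE TABLE test1 ( id int PRIMARY KEY, rollid int );
CREATE TABLE test1_test (
    test1_id int REFERENCES test1 (id),
    test_id int REFERENCES test (id),
    PRIMARY KEY (test1_id, test_id)
);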
Using arrays of foreign keys is usually a sign of incorrect design; you need a separate table with a one-to-many relationship.
But technically it is possible. Here is an example of checking array values without triggers: one reusable function with parameters and dynamic SQL. Tested on PostgreSQL 10.5.
create schema if not exists test;
CREATE OR REPLACE FUNCTION test.check_foreign_key_array(data anyarray, ref_schema text, ref_table text, ref_column text)
RETURNS BOOL
RETURNS NULL ON NULL INPUT
LANGUAGE plpgsql
AS
$body$
DECLARE
fake_id text;
sql text default format($$
select id::text
from unnest($1) as x(id)
where id is not null
and id not in (select %3$I
from %1$I.%2$I
where %3$I = any($1))
limit 1;
$$, ref_schema, ref_table, ref_column);
BEGIN
EXECUTE sql USING data INTO fake_id;
IF (fake_id IS NOT NULL) THEN
RAISE NOTICE 'Array element value % does not exist in column %.%.%', fake_id, ref_schema, ref_table, ref_column;
RETURN false;
END IF;
RETURN true;
END
$body$;
drop table if exists test.t1, test.t2;
create table test.t1 (
id integer generated by default as identity primary key
);
create table test.t2 (
id integer generated by default as identity primary key,
t1_ids integer[] not null check (test.check_foreign_key_array(t1_ids, 'test', 't1', 'id'))
);
insert into test.t1 (id) values (default), (default), (default); --ok
insert into test.t2 (id, t1_ids) values (default, array[1,2,3]); --ok
insert into test.t2 (id, t1_ids) values (default, array[1,2,3,555]); --error
If you can guarantee that only values from test.id ever go into the array, then you can enforce the other direction (blocking deletes and updates of referenced ids in test) with a trigger like this:
CREATE OR REPLACE FUNCTION test_trigger() RETURNS trigger
LANGUAGE plpgsql AS $BODY$
DECLARE
val integer;
BEGIN
SELECT id INTO val
FROM (
SELECT UNNEST(id) AS id
FROM test1
) AS q
WHERE id = OLD.id;
IF val IS NULL THEN RETURN OLD;
ELSE
RAISE 'Integrity Constraint Violation: ID "%" in Test1', val USING ERRCODE = '23000';
RETURN NULL;
END IF;
END; $BODY$;
-- DROP TRIGGER test_delete_trigger ON test;
CREATE TRIGGER test_delete_trigger BEFORE DELETE OR UPDATE OF id ON test
FOR EACH ROW EXECUTE PROCEDURE test_trigger();
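A quick usage sketch of how this behaves (it assumes test was created with a typed name column, e.g. name text, and test1 as in the question):
INSERT INTO test (id, name) VALUES (1, 'a'), (2, 'b'), (3, 'c');
INSERT INTO test1 (id, rollid) VALUES (ARRAY[1, 2], 10);
DELETE FROM test WHERE id = 3;  -- ok, id 3 is not referenced by any array
DELETE FROM test WHERE id = 2;  -- raises: Integrity Constraint Violation: ID "2" in Test1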