PostgreSQL Insert with failed unique constraint

CREATE TABLE s_etpta.sfphierg (
hierar VARCHAR(10) NOT NULL,
libelle VARCHAR(40),
typfct VARCHAR(1),
utilcre VARCHAR(10),
datcre DATE,
utilmod VARCHAR(10),
datmod DATE,
CONSTRAINT i_sfphierg PRIMARY KEY(hierar)
);
CREATE TABLE s_etpta.hopsech (
horsect VARCHAR(40) NOT NULL,
libelle VARCHAR(40),
libcourt VARCHAR(20),
horcode VARCHAR(10),
CONSTRAINT i_hopsech PRIMARY KEY(horsect)
);
BEGIN
delete from SFPHIERG;
INSERT INTO SFPHIERG ("hierar", "libelle", "typfct", "utilcre", "datcre",
"utilmod", "datmod")
select '01'||horcode, E'Hircuit standard'||horcode, E'1', E'HQS', E'2007-01-29', E' ', E'1900-01-01'
FROM HOPSECH where HOPSECH.IJIGHSUPPM like '1'
and not exists (select hierar from SFPHIERG where hierar like '01'||horcode);
INSERT INTO SFPHIERG ("hierar", "libelle", "typfct", "utilcre", "datcre",
"utilmod", "datmod")
select '00'||horcode, E'Circuit cascade'||horcode, E'1', E'HQS', E'2007-01-29', E' ', E'1900-01-01'
FROM HOPSECH where HOPSECH.IJIGHSUPPR like '1'
and not exists (select hierar from SFPHIERG where hierar like '00'||horcode);
END;
In my function's body I have two INSERT queries executed after a DELETE query.
The rows are inserted correctly when the value for my first column doesn't exist yet; when it already does, I get a unique constraint violation, and when that error occurs nothing at all is added to my table.
Is there a way to stop this error from blocking all the inserts, and only skip the inserts where the value for my first column already exists?
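If you are on PostgreSQL 9.5 or later, one option (a sketch only, not tested against your schema) is to let each INSERT skip the conflicting rows itself with ON CONFLICT DO NOTHING instead of relying on the NOT EXISTS check, e.g. for the first insert:
INSERT INTO SFPHIERG ("hierar", "libelle", "typfct", "utilcre", "datcre",
"utilmod", "datmod")
select '01'||horcode, E'Hircuit standard'||horcode, E'1', E'HQS', E'2007-01-29', E' ', E'1900-01-01'
FROM HOPSECH where HOPSECH.IJIGHSUPPM like '1'
ON CONFLICT (hierar) DO NOTHING; -- rows whose hierar already exists are skipped, the rest are still inserted
The same clause on the second INSERT lets a duplicate key skip only the offending row instead of aborting the whole statement.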

Related

When I run this code a syntax error keeps appearing but I am unable to find where it would be

When I run this code, I receive a very vague syntax error: database: syntax error at or near "(". I am unable to find where this syntax error would be.
I have also been told that animal_adoption_history is not an associative entity when it was designed as one.
What have I done wrong when writing it?
The code:
DROP TABLE IF EXISTS customer;
DROP TABLE IF EXISTS animal;
DROP TABLE IF EXISTS animal_adoption_history;
create table customer (
customer_id CHAR(9) NOT NULL,
c_first_name VARCHAR(25),
c_last_name VARCHAR(50),
c_gender CHAR(1),
c_phone_number VARCHAR(20),
c_email_address VARCHAR(45),
c_date_of_birth DATE,
c_address_number VARCHAR(10),
c_street_name VARCHAR(30),
c_city VARCHAR(50),
c_state CHAR(3),
c_postcode CHAR(4),
c_has_adopted_before CHAR(1),
constraint customer_PK PRIMARY KEY (customer_id)
);
create table animal (
animal_id CHAR(9) NOT NULL,
a_animal_type VARCHAR(20) ,
a_breed VARCHAR(50),
a_colour VARCHAR(30),
a_size VARCHAR(20),
a_weight_kg VARCHAR(10),
a_description VARCHAR(75),
a_name VARCHAR(30),
a_date_of_birth DATE,
a_sex CHAR(1),
a_animal_cost INT(10),
a_microchip_status CHAR(1),
a_vaccination_status CHAR(1),
constraint animal_PK PRIMARY KEY (animal_id)
);
create table animal_adoption_history (
health_conditions VARCHAR(100),
is_available_to_adopt CHAR(1),
has_been_adopted_previously CHAR(1),
reason_for_entry VARCHAR(75),
date_entered DATE,
animal_id CHAR(9) NOT NULL,
customer_id CHAR(9) NOT NULL,
constraint animal_adoption_history_PK PRIMARY KEY (animal_id, customer_id),
constraint animal_adoption_history_FK1 FOREIGN KEY (animal) references animal(animal_id)
constraint animal_adoption_history_FK2 FOREIGN KEY (customer_id) references customer(customer_id)
);
INSERT INTO customer (customer_id,c_first_name,c_last_name,c_gender,c_phone_number,c_email_address,c_date_of_birth,c_address_number,c_street_name,c_city,c_state,c_postcode,c_has_adopted_before)
VALUES ('C00000001','Olivia','Smith','F','0422425392','olivia.smith#gmail.com','1980-06-22','2','Henderson Street','Bondi','NSW','2092','Yes');
INSERT INTO customer (customer_id,c_first_name,c_last_name,c_gender,c_phone_number,c_email_address,c_date_of_birth,c_address_number,c_street_name,c_city,c_state,c_postcode,c_has_adopted_before)
VALUES ('C00000002','Taylor','Brown','F','0422435394','taylor.brown#gmail.com','1999-02-24','62','Ultimo Avenue','Bondi','NSW','2092','No');
INSERT INTO customer (customer_id,c_first_name,c_last_name,c_gender,c_phone_number,c_email_address,c_date_of_birth,c_address_number,c_street_name,c_city,c_state,c_postcode,c_has_adopted_before)
VALUES ('C00000003','Sarah','Li','F','0422425342','sarah.li#gmail.com','1997-02-22','27','Winchester Street','Epping','NSW','2092','Yes');
INSERT INTO customer (customer_id,c_first_name,c_last_name,c_gender,c_phone_number,c_email_address,c_date_of_birth,c_address_number,c_street_name,c_city,c_state,c_postcode,c_has_adopted_before)
VALUES ('C00000004','Charlie','Swift','M','0432425392','charlie.swift#gmail.com','1998-02-22','22','Henderson Lane','Lindfield','NSW','2092','No');
INSERT INTO customer (customer_id,c_first_name,c_last_name,c_gender,c_phone_number,c_email_address,c_date_of_birth,c_address_number,c_street_name,c_city,c_state,c_postcode,c_has_adopted_before)
VALUES ('C00000005','Heath','Davidson','M','0422425911','heath.davidson#gmail.com','2003-01-22','22','Station Street','Manly','NSW','2092','Yes');
INSERT INTO animal (animal_id,a_animal_type,a_breed,a_colour,a_size,a_weight_kg,a_description,a_name,a_date_of_birth,a_sex,a_animal_cost,a_microchip_status,a_vaccination_status)
VALUES ('A00000001','Dog','Pug','Light brown','Small','5','Playful yet enjoys cuddles','Mia','2020-02-22','F','3100','Y','Y');
INSERT INTO animal (animal_id,a_animal_type,a_breed,a_colour,a_size,a_weight_kg,a_description,a_name,a_date_of_birth,a_sex,a_animal_cost,a_microchip_status,a_vaccination_status)
VALUES ('A00000002','Cat','Tabby','Orange','Small','4','Quiet and loves the sun','Garfield','2010-04-28','M','1400','Y','N');
INSERT INTO animal (animal_id,a_animal_type,a_breed,a_colour,a_size,a_weight_kg,a_description,a_name,a_date_of_birth,a_sex,a_animal_cost,a_microchip_status,a_vaccination_status)
VALUES ('A00000003','Bird','Budgie','Green and yellow','Extra Small','0.035','Very loud when hungry','Roody','11-14','F','1200','N','Y');
INSERT INTO animal (animal_id,a_animal_type,a_breed,a_colour,a_size,a_weight_kg,a_description,a_name,a_date_of_birth,a_sex,a_animal_cost,a_microchip_status,a_vaccination_status)
VALUES ('A00000004','Rabbit','Holland Lop','Light brown and white','Small','5','Fluffy and enjoys lettuce snacks','Thumper','2018-19-04','F','900','N','N');
INSERT INTO animal (animal_id,a_animal_type,a_breed,a_colour,a_size,a_weight_kg,a_description,a_name,a_date_of_birth,a_sex,a_animal_cost,a_microchip_status,a_vaccination_status)
VALUES ('A00000005','Dog','Golden Retriever','Dark blonde','Large','32','Loves going for long walks','Milo','2014-05-30','M','2500','Y','Y');
INSERT INTO animal_adoption_history (health_conditions,is_available_to_adopt,has_been_adopted_previously,reason_for_entry,date_entered,animal_id,customer_id)
VALUES ('None','Y','Y','Owner moved away','2021-08-18','A00000001','C00000001');
INSERT INTO animal_adoption_history (health_conditions,is_available_to_adopt,has_been_adopted_previously,reason_for_entry,date_entered,animal_id,customer_id)
VALUES ('None','N','N','Newborn looking for home','2022-07-13','A00000003','C00000005');
INSERT INTO animal_adoption_history (health_conditions,is_available_to_adopt,has_been_adopted_previously,reason_for_entry,date_entered,animal_id,customer_id)
VALUES ('Diabetes','Y','Y','Owner passed away','2019-11-01','A00000004','C00000001');
INSERT INTO animal_adoption_history (health_conditions,is_available_to_adopt,has_been_adopted_previously,reason_for_entry,date_entered,animal_id,customer_id)
VALUES ('None','Y','N','Previous household abuse','2014-09-19','A00000002','C00000004');
INSERT INTO animal_adoption_history (health_conditions,is_available_to_adopt,has_been_adopted_previously,reason_for_entry,date_entered,animal_id,customer_id)
VALUES ('Arthritis','Y','Y','Newborn looking for home','2016-04-26','A00000005','C00000002');
Thanks!
INT(10) is not a valid datatype:
a_animal_cost INT(10)
There are other issues as well that you will have to fix.
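For that particular column, a possible fix (a sketch of the two obvious options, depending on whether fractional amounts are needed) is:
a_animal_cost INT,            -- integers take no length specifier in PostgreSQL
a_animal_cost NUMERIC(10,2),  -- or numeric, if a precision/scale is wanted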
You have a number of errors in your code. When you run it, PostgreSQL only reports the first one, which in this case is the line a_animal_cost INT(10),. Integers have a fixed size, so it is wrong to try to specify a length for one.
Also, do not use CHAR(1) for Yes ('Y') / No ('N') fields. PostgreSQL has a native boolean data type for this purpose, so use it.
When dropping tables, you need to drop the dependent (child) tables first, because of the foreign key references.
Finally, you can combine multiple rows into a single INSERT ... VALUES statement.
Putting all this together, I would recommend that you use something like this:
DROP TABLE IF EXISTS animal_adoption_history;
DROP TABLE IF EXISTS animal;
DROP TABLE IF EXISTS customer;
create table customer (
customer_id CHAR(9) NOT NULL,
c_first_name VARCHAR(25),
c_last_name VARCHAR(50),
c_gender CHAR(1),
c_phone_number VARCHAR(20),
c_email_address VARCHAR(45),
c_date_of_birth DATE,
c_address_number VARCHAR(10),
c_street_name VARCHAR(30),
c_city VARCHAR(50),
c_state CHAR(3),
c_postcode CHAR(4),
c_has_adopted_before boolean,
constraint customer_PK PRIMARY KEY (customer_id)
);
create table animal (
animal_id CHAR(9) NOT NULL,
a_animal_type VARCHAR(20) ,
a_breed VARCHAR(50),
a_colour VARCHAR(30),
a_size VARCHAR(20),
a_weight_kg VARCHAR(10),
a_description VARCHAR(75),
a_name VARCHAR(30),
a_date_of_birth DATE,
a_sex CHAR(1),
a_animal_cost INT,
a_microchip_status boolean,
a_vaccination_status boolean,
constraint animal_PK PRIMARY KEY (animal_id)
);
create table animal_adoption_history (
health_conditions VARCHAR(100),
is_available_to_adopt boolean,
has_been_adopted_previously boolean,
reason_for_entry VARCHAR(75),
date_entered DATE,
animal_id CHAR(9) NOT NULL,
customer_id CHAR(9) NOT NULL,
constraint animal_adoption_history_PK PRIMARY KEY (animal_id, customer_id),
constraint animal_adoption_history_FK1 FOREIGN KEY (animal_id) references animal(animal_id),
constraint animal_adoption_history_FK2 FOREIGN KEY (customer_id) references customer(customer_id)
);
INSERT INTO customer (customer_id,c_first_name,c_last_name,c_gender,c_phone_number,c_email_address,c_date_of_birth,c_address_number,
c_street_name,c_city,c_state,c_postcode,c_has_adopted_before) VALUES
('C00000001','Olivia','Smith','F','0422425392','olivia.smith#gmail.com','1980-06-22','2',
'Henderson Street','Bondi','NSW','2092',true),
('C00000002','Taylor','Brown','F','0422435394','taylor.brown#gmail.com','1999-02-24','62',
'Ultimo Avenue','Bondi','NSW','2092',false),
('C00000003','Sarah','Li','F','0422425342','sarah.li#gmail.com','1997-02-22','27',
'Winchester Street','Epping','NSW','2092',true),
('C00000004','Charlie','Swift','M','0432425392','charlie.swift#gmail.com','1998-02-22','22',
'Henderson Lane','Lindfield','NSW','2092',false),
('C00000005','Heath','Davidson','M','0422425911','heath.davidson#gmail.com','2003-01-22','22',
'Station Street','Manly','NSW','2092',true);
INSERT INTO animal (animal_id,a_animal_type,a_breed,a_colour,a_size,a_weight_kg,a_description,a_name,a_date_of_birth,
a_sex,a_animal_cost,a_microchip_status,a_vaccination_status) VALUES
('A00000001','Dog','Pug','Light brown','Small','5','Playful yet enjoys cuddles','Mia','2020-02-22',
'F','3100',true,true),
('A00000002','Cat','Tabby','Orange','Small','4','Quiet and loves the sun','Garfield','2010-04-28',
'M','1400',true,false),
('A00000003','Bird','Budgie','Green and yellow','Extra Small','0.035','Very loud when hungry','Roody','2020-11-14',
'F','1200',false,true),
('A00000004','Rabbit','Holland Lop','Light brown and white','Small','5','Fluffy and enjoys lettuce snacks','Thumper','2018-04-19',
'F','900',false,false),
('A00000005','Dog','Golden Retriever','Dark blonde','Large','32','Loves going for long walks','Milo','2014-05-30',
'M','2500',true,true);
INSERT INTO animal_adoption_history (health_conditions,is_available_to_adopt,has_been_adopted_previously,reason_for_entry,
date_entered,animal_id,customer_id) VALUES
('None',true,true,'Owner moved away','2021-08-18','A00000001','C00000001'),
('None',false,false,'Newborn looking for home','2022-07-13','A00000003','C00000005'),
('Diabetes',true,true,'Owner passed away','2019-11-01','A00000004','C00000001'),
('None',true,false,'Previous household abuse','2014-09-19','A00000002','C00000004'),
('Arthritis',true,true,'Newborn looking for home','2016-04-26','A00000005','C00000002');

Postgres exclude using gist across different tables

I have 2 tables like this
drop table if exists public.table_1;
drop table if exists public.table_2;
CREATE TABLE public.table_1 (
id serial NOT NULL,
user_id bigint not null,
status varchar(255) not null,
date_start date NOT NULL,
date_end date NULL
);
CREATE TABLE public.table_2 (
id serial NOT NULL,
user_id bigint not null,
status varchar(255) not null,
date_start date NOT NULL,
date_end date NULL
);
alter table public.table_1
add constraint my_constraint_1
EXCLUDE USING gist (user_id with =, daterange(date_start, date_end, '[]') WITH &&)
where (status != 'deleted');
alter table public.table_2
add constraint my_constraint_2
EXCLUDE USING gist (user_id with =, daterange(date_start, date_end, '[]') WITH &&)
where (status != 'deleted');
Each table contains rows related to a user, and the rows of the same user cannot have overlapping date ranges. In addition, some rows may be logically deleted, which is why I added the WHERE condition.
So far this works without problems, but the two constraints operate separately on each table.
I need a constraint that covers both tables, so that a given date range (for the same user, and not deleted) may appear only once across the two tables.
Can the EXCLUDE constraint be extended to work across different tables, or do I need to check this with a trigger? If a trigger is the answer, what is the simplest way to do it? Create a temporary table with the union of the two, add the constraint to it and check whether it fails?
Starting from @Laurenz Albe's suggestion, this is what I came up with:
-- #################### SETUP SAMPLE TABLES ####################
drop table if exists public.table_1;
drop table if exists public.table_2;
CREATE TABLE public.table_1 (
id serial NOT NULL,
user_id bigint not null,
status varchar(255) not null,
date_start date NOT NULL,
date_end date NULL
);
CREATE TABLE public.table_2 (
id serial NOT NULL,
user_id bigint not null,
status varchar(255) not null,
date_start date NOT NULL,
date_end date NULL
);
alter table public.table_1
add constraint my_constraint_1
EXCLUDE USING gist (user_id with =, daterange(date_start, date_end, '[]') WITH &&)
where (status != 'deleted');
alter table public.table_2
add constraint my_constraint_2
EXCLUDE USING gist (user_id with =, daterange(date_start, date_end, '[]') WITH &&)
where (status != 'deleted');
-- #################### SETUP TRIGGER ####################
create or REPLACE FUNCTION check_date_overlap_trigger_hook()
RETURNS trigger as
$body$
DECLARE
l_table text;
l_sql text;
l_row record;
begin
l_table := TG_ARGV[0];
l_sql := format('
select *
from public.%s as t
where
t.user_id = %s -- Include only records of the same user
and t.status != ''deleted'' -- Include only records that are active
', l_table, new.user_id);
for l_row in execute l_sql
loop
IF daterange(l_row.date_start, COALESCE(l_row.date_end, 'infinity'::date)) && daterange(new.date_start, COALESCE(new.date_end, 'infinity'::date))
THEN
RAISE EXCEPTION 'Date interval is overlapping with another one in table %', l_table
USING HINT = 'You can''t have the same interval across table1 AND table2';
END IF;
end loop;
RETURN NEW;
end
$body$
LANGUAGE plpgsql;
-- #################### INSTALL TRIGGER ####################
create trigger check_date_overlap
BEFORE insert or update
ON public.table_1
FOR EACH row
EXECUTE PROCEDURE check_date_overlap_trigger_hook('table_2');
create trigger check_date_overlap
BEFORE insert or update
ON public.table_2
FOR EACH row
EXECUTE PROCEDURE check_date_overlap_trigger_hook('table_1');
-- #################### INSERT DEMO ROWS ####################
insert into public.table_1 (user_id, status, date_start, date_end) values (1, 'active', '2020-12-10', '2020-12-20');
insert into public.table_1 (user_id, status, date_start, date_end) values (1, 'deleted', '2020-12-15', '2020-12-25');
insert into public.table_1 (user_id, status, date_start, date_end) values (2, 'active', '2020-12-10', '2020-12-20');
insert into public.table_1 (user_id, status, date_start, date_end) values (2, 'deleted', '2020-12-15', '2020-12-25');
-- This will fail for overlap on the same table
-- insert into public.table_1 (user_id, status, date_start, date_end) values (1, 'active', '2020-12-15', '2020-12-25');
-- This will fail as the user 1 already has an overlapping period on table 1
-- insert into public.table_2 (user_id, status, date_start, date_end) values (1, 'active', '2020-12-15', '2020-12-25');
-- This will fail as the user 1 already has an overlapping period on table 1
insert into public.table_2 (user_id, status, date_start, date_end) values (1, 'deleted', '2020-12-15', '2020-12-25');
update public.table_2 set status = 'active' where id = 1;
select 'table_1' as src_table, * from public.table_1
union
select 'table_2', * from public.table_2
You can probably use a trigger, but triggers are always vulnerable to race conditions (unless you are using SERIALIZABLE isolation).
If your tables really have the same columns, why don't you use a single table (and perhaps add a type column to disambiguate)?
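For illustration, a minimal sketch of that single-table idea (table and column names are invented here; the btree_gist extension is needed for the user_id WITH = part of the constraint):
CREATE EXTENSION IF NOT EXISTS btree_gist;
CREATE TABLE public.user_periods (
id serial NOT NULL,
source_table varchar(20) not null,  -- discriminator, e.g. 'table_1' or 'table_2'
user_id bigint not null,
status varchar(255) not null,
date_start date NOT NULL,
date_end date NULL,
CONSTRAINT user_periods_no_overlap
EXCLUDE USING gist (user_id with =, daterange(date_start, date_end, '[]') WITH &&)
where (status != 'deleted')
);
One exclusion constraint then guards every row regardless of which "logical" table it belongs to, and the two original shapes can still be exposed as views filtered on source_table.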

I'm having an issue with this code when I try to input values into the transactions table

So I'm setting up a schema in which I can enter the transactions of a journal entry independently of each other while still having them depend on each other (mainly to ensure that debits = credits). I set up the tables, function, and trigger. Then, when I try to insert values into the transactions table, I get the error below. I'm doing all of this in pgAdmin 4.
CREATE TABLE transactions (
transactions_id UUID PRIMARY KEY DEFAULT uuid_generate_v1(),
entry_id INTEGER NOT NULL,
post_date DATE NOT NULL,
account_id INTEGER NOT NULL,
contact_id INTEGER NULL,
description TEXT NOT NULL,
reference_id UUID NULL,
document_id UUID NULL,
amount NUMERIC(12,2) NOT NULL
);
CREATE TABLE entries (
id UUID PRIMARY KEY,
test_date DATE NOT NULL,
balance NUMERIC(12,2)
CHECK (balance = 0.00)
);
CREATE OR REPLACE FUNCTION transactions_biut()
RETURNS TRIGGER
LANGUAGE plpgsql
AS $$
BEGIN
EXECUTE 'INSERT INTO entries (id,test_date,balance)
SELECT
entry_id,
post_date,
SUM(amount) AS ''balance''
FROM
transactions
GROUP BY
entry_id;';
END;
$$;
CREATE TRIGGER transactions_biut
BEFORE INSERT OR UPDATE ON transactions
FOR EACH ROW EXECUTE PROCEDURE transactions_biut();
INSERT INTO transactions (
entry_id,
post_date,
account_id,
description,
amount
)
VALUES
(
'1',
'2019-10-01',
'101',
'MISC DEBIT: PAID FOR FACEBOOK ADS',
-200.00
),
(
'1',
'2019-10-01',
'505',
'MISC DEBIT: PAID FOR FACEBOOK ADS',
200.00
);
After I execute this input, I get the following error:
ERROR: column "id" of relation "entries" does not exist
LINE 1: INSERT INTO entries (id,test_date,balance)
^
QUERY: INSERT INTO entries (id,test_date,balance)
SELECT
entry_id,
post_date,
SUM(amount) AS "balance"
FROM
transactions
GROUP BY
entry_id;
CONTEXT: PL/pgSQL function transactions_biut() line 2 at EXECUTE
SQL state: 42703
There are a few problems here:
You're not returning anything from the trigger function => it should probably be RETURN NEW or RETURN OLD, since you're not modifying anything
Since you're executing the trigger before each row, it's bound to fail for any transaction that isn't 0 => maybe you want a deferred constraint trigger (see the sketch after this list)?
You're not grouping by post_date, so your select should fail
You've defined entry_id as INTEGER, but entries.id is of type UUID
Also note that this isn't really going to scale (you're summing up all transactions of all days, so this will get slower and slower...)
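To illustrate the deferred constraint trigger idea (the names below are invented, and this variant only validates the balance instead of writing into entries; it is a sketch, not a drop-in solution):
CREATE OR REPLACE FUNCTION check_entry_balanced()
RETURNS trigger
LANGUAGE plpgsql
AS $$
DECLARE
l_balance numeric(12,2);
BEGIN
-- Deferred to COMMIT, so all rows of the entry inserted in the transaction are visible here.
SELECT COALESCE(SUM(amount), 0) INTO l_balance
FROM transactions
WHERE entry_id = NEW.entry_id;
IF l_balance <> 0 THEN
RAISE EXCEPTION 'entry % does not balance (sum = %)', NEW.entry_id, l_balance;
END IF;
RETURN NULL;
END;
$$;
CREATE CONSTRAINT TRIGGER transactions_balance_check
AFTER INSERT OR UPDATE ON transactions
DEFERRABLE INITIALLY DEFERRED
FOR EACH ROW
EXECUTE PROCEDURE check_entry_balanced();
With the check deferred to commit time, the debit and credit rows of an entry can be inserted in separate statements inside one transaction without failing halfway through.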
@chirs I was able to figure out how to create a functioning solution using statement-level triggers:
CREATE TABLE transactions (
transactions_id UUID PRIMARY KEY DEFAULT uuid_generate_v1(),
entry_id INTEGER NOT NULL,
post_date DATE NOT NULL,
account_id INTEGER NOT NULL,
contact_id INTEGER NULL,
description TEXT NOT NULL,
reference_id UUID NULL,
document_id UUID NULL,
amount NUMERIC(12,2) NOT NULL
);
CREATE TABLE entries (
entry_id INTEGER PRIMARY KEY,
post_date DATE NOT NULL,
balance NUMERIC(12,2),
CHECK (balance = 0.00)
);
CREATE OR REPLACE FUNCTION transactions_entries() RETURNS TRIGGER AS $$
BEGIN
IF (TG_OP = 'DELETE') THEN
INSERT INTO entries
SELECT o.entry_id, o.post_date, SUM(o.amount) FROM old_table o GROUP BY o.entry_id, o.post_date;
ELSIF (TG_OP = 'UPDATE') THEN
INSERT INTO entries
SELECT o.entry_id, n.post_date, SUM(n.amount) FROM new_table n, old_table o GROUP BY o.entry_id, n.post_date;
ELSIF (TG_OP = 'INSERT') THEN
INSERT INTO entries
SELECT n.entry_id,n.post_date, SUM(n.amount) FROM new_table n GROUP BY n.entry_id, n.post_date;
END IF;
RETURN NULL; -- result is ignored since this is an AFTER trigger
END;
$$ LANGUAGE plpgsql;
CREATE TRIGGER transactions_ins
AFTER INSERT ON transactions
REFERENCING NEW TABLE AS new_table
FOR EACH STATEMENT EXECUTE PROCEDURE transactions_entries();
CREATE TRIGGER transactions_upd
AFTER UPDATE ON transactions
REFERENCING OLD TABLE AS old_table NEW TABLE AS new_table
FOR EACH STATEMENT EXECUTE PROCEDURE transactions_entries();
CREATE TRIGGER transactions_del
AFTER DELETE ON transactions
REFERENCING OLD TABLE AS old_table
FOR EACH STATEMENT EXECUTE PROCEDURE transactions_entries();
Any thoughts on optimization?

I want to insert rows into history tables from actual tables (parent and child) on insertion of row into actual tables using postgresql trigger

For Example:
Consider my original tables are:
CREATE TABLE employees(
id serial primary key,
first_name varchar(40) NOT NULL,
last_name varchar(40) NOT NULL
);
CREATE TABLE employees_detail
(
eid integer NOT NULL,
id integer,
first_name character varying(40),
last_name character varying(40),
CONSTRAINT employees_detail_pkey PRIMARY KEY (eid),
CONSTRAINT employees_detail_id_fkey FOREIGN KEY (id)
REFERENCES employees (id) MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE NO ACTION
)
The above tables have a foreign key relation. So when I insert a row into both tables, I want to insert the rows into the history tables at the same time.
Consider these history tables:
CREATE TABLE employee_audits (
id serial primary key,
employee_id int4 NOT NULL,
last_name varchar(40) NOT NULL,
changed_on timestamp(6) NOT NULL
);
CREATE TABLE employee_audits_detail
(
eid integer NOT NULL,
id integer,
last_name character varying(40),
changed_on timestamp(6) NOT NULL,
CONSTRAINT employee_audits_detail_pkey PRIMARY KEY (eid),
CONSTRAINT employee_audits_detail_fkey FOREIGN KEY (id)
REFERENCES employee_audits (id) MATCH SIMPLE
ON UPDATE NO ACTION ON DELETE NO ACTION
)
I have created the trigger below to copy changes from the original parent table (employees) into the history parent table (employee_audits). My trigger is as follows:
CREATE OR REPLACE FUNCTION log_last_name_changes()
RETURNS trigger AS
$BODY$
BEGIN
IF NEW.last_name <> OLD.last_name THEN
INSERT INTO employee_audits(employee_id,last_name,changed_on)
VALUES(OLD.id,OLD.last_name,now());
END IF;
RETURN NEW;
END;
$BODY$
LANGUAGE plpgsql;
CREATE TRIGGER last_name_changes
BEFORE UPDATE
ON employees
FOR EACH ROW
EXECUTE PROCEDURE log_last_name_changes();
Same way I want to insert original child table rows into history child table.
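A sketch following the same pattern for the child table might look like this (the function and trigger names are invented, and the id column is left NULL because this sketch does not decide which employee_audits row the detail history should link to):
CREATE OR REPLACE FUNCTION log_detail_last_name_changes()
RETURNS trigger AS
$BODY$
BEGIN
IF NEW.last_name <> OLD.last_name THEN
-- Copy the old child row into the child history table.
INSERT INTO employee_audits_detail(eid, id, last_name, changed_on)
VALUES (OLD.eid, NULL, OLD.last_name, now());
END IF;
RETURN NEW;
END;
$BODY$
LANGUAGE plpgsql;
CREATE TRIGGER detail_last_name_changes
BEFORE UPDATE
ON employees_detail
FOR EACH ROW
EXECUTE PROCEDURE log_detail_last_name_changes();
Note that with employee_audits_detail.eid as the primary key only one history row per detail row can ever be stored, so in practice the history table would need its own surrogate key before this can record more than one change.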

Access database, SQL query, error "Syntax error in DROP TABLE or DROP INDEX."

This is the query, which I am running from C#, and it produces the above error:
"DROP TABLE IF EXISTS `NATIONAL_ID_ISSUANCE_CENTER`;
CREATE TABLE `NATIONAL_ID_ISSUANCE_CENTER` (
`ID` INTEGER NOT NULL AUTO_INCREMENT,
`NAME` VARCHAR(100),
`APPLICATION_ID` INTEGER,
`STATUS` INTEGER,
`CREATED_BY` INTEGER,
`UPDATED_BY` INTEGER,
`CREATED_DATE` DATETIME,
`UPDATED_DATE` DATETIME,
`THIRD_PARTY_ID` INTEGER,
`PROVINCE_ID` INTEGER,
INDEX (`APPLICATION_ID`),
PRIMARY KEY (`ID`),
INDEX (`PROVINCE_ID`),
INDEX (`THIRD_PARTY_ID`)
)"
You can't put an IF statement inside DROP and CREATE statements. Any time you want to drop a table that you're not sure exists, use the following:
IF(OBJECT_ID('[Database].[Schema].[TableName]') is not null)
BEGIN
DROP TABLE [Database].[Schema].[TableName];
END;
Please note you should replace [Database], [Schema], and [TableName] with the appropriate database, schema, and table names, respectively.