invalid input syntax for integer: "0A000" CONTEXT: PL/pgSQL function trigger update - postgresql

I have update and insert trigger functions, bvtrigger_fct_tr_people_upd and bvtrigger_fct_tr_people_ins.
When I update a row in the main table I encounter this error:
ERROR: invalid input syntax for integer: "0A000"
CONTEXT: PL/pgSQL function bvtrigger_fct_tr_people_upd() line 75 at assignment
SQL state: 22P02
Here are my tables:
CREATE TABLE pkihtts.bv_tb_people
(
peopleid integer NOT NULL DEFAULT nextval('pkihtts.bvsq_peopleid'),
tokenid integer,
auid character varying(25) COLLATE pg_catalog."default" NOT NULL,
upn character varying(256) COLLATE pg_catalog."default" NOT NULL,
firstname character varying(64) COLLATE pg_catalog."default",
middlename character varying(64) COLLATE pg_catalog."default",
lastname character varying(64) COLLATE pg_catalog."default" NOT NULL,
genqual character varying(4) COLLATE pg_catalog."default",
serialnumber character varying(25) COLLATE pg_catalog."default",
agency character varying(50) COLLATE pg_catalog."default",
subagency character varying(64) COLLATE pg_catalog."default",
affiliation character varying(10) COLLATE pg_catalog."default",
subscribertype character varying(64) COLLATE pg_catalog."default",
countryofcitizenship character varying(2) COLLATE pg_catalog."default",
emailaddress character varying(256) COLLATE pg_catalog."default" NOT NULL,
dateregistered timestamp without time zone NOT NULL,
comments character varying(255) COLLATE pg_catalog."default",
isdeleted character varying(1) COLLATE pg_catalog."default" NOT NULL DEFAULT 'n'::character varying,
modifiedby double precision NOT NULL,
modifieddate timestamp(0) without time zone,
profileid integer,
last_logon_date timestamp without time zone,
status character varying(25) COLLATE pg_catalog."default",
role character varying(15) COLLATE pg_catalog."default",
componentid integer,
CONSTRAINT bv_pk_peopleid PRIMARY KEY (peopleid)
USING INDEX TABLESPACE pkihtts_data,
CONSTRAINT tokenid_uniq UNIQUE (tokenid)
USING INDEX TABLESPACE pkihtts_data,
CONSTRAINT fk_tokenid FOREIGN KEY (tokenid)
REFERENCES pkihtts.bv_tb_token (tokenid) MATCH SIMPLE
ON UPDATE NO ACTION
ON DELETE NO ACTION
NOT VALID
)
TABLESPACE pkihtts_data;
ALTER TABLE pkihtts.bv_tb_people
OWNER to pkihtts;
bv_tb_people_his:
CREATE TABLE pkihtts.bv_tb_people_his
(
javaid integer NOT NULL DEFAULT nextval('pkihtts.bvsq_peoplehis_javaid'),
peopleid integer NOT NULL,
tokenid integer,
auid character varying(25) COLLATE pg_catalog."default" NOT NULL,
upn character varying(256) COLLATE pg_catalog."default" NOT NULL,
firstname character varying(64) COLLATE pg_catalog."default",
middlename character varying(64) COLLATE pg_catalog."default",
lastname character varying(64) COLLATE pg_catalog."default" NOT NULL,
genqual character varying(4) COLLATE pg_catalog."default",
agency character varying(50) COLLATE pg_catalog."default",
subagency character varying(64) COLLATE pg_catalog."default",
affiliation character varying(10) COLLATE pg_catalog."default",
subscribertype character varying(64) COLLATE pg_catalog."default",
countryofcitizenship character varying(2) COLLATE pg_catalog."default",
emailaddress character varying(256) COLLATE pg_catalog."default" NOT NULL,
dateregistered timestamp(0) without time zone NOT NULL,
comments character varying(255) COLLATE pg_catalog."default",
isdeleted character varying(1) COLLATE pg_catalog."default" NOT NULL DEFAULT 'n'::character varying,
modifiedby integer NOT NULL,
modifieddate timestamp(0) without time zone,
profileid integer,
status character varying(25) COLLATE pg_catalog."default",
component character varying(100) COLLATE pg_catalog."default",
role character varying(15) COLLATE pg_catalog."default",
componentid integer,
CONSTRAINT bv_pk_peoplehis_javaid PRIMARY KEY (javaid)
USING INDEX TABLESPACE pkihtts_data
)
TABLESPACE pkihtts_data;
ALTER TABLE pkihtts.bv_tb_people_his
OWNER to pkihtts;
bv_tb_token:
CREATE TABLE pkihtts.bv_tb_token
(
tokenid integer NOT NULL DEFAULT nextval('pkihtts.bvsq_tokenid'::regclass),
internalshipmentid integer,
tokenassignmentid integer,
tokenserialno character varying(25) COLLATE pg_catalog."default" NOT NULL,
alphakey character varying(10) COLLATE pg_catalog."default",
statusdate timestamp without time zone NOT NULL,
comments character varying(255) COLLATE pg_catalog."default",
isdeleted character varying(1) COLLATE pg_catalog."default" NOT NULL DEFAULT 'n'::character varying,
modifiedby integer NOT NULL,
modifieddate timestamp without time zone,
identity_verified character varying(1) COLLATE pg_catalog."default",
new_token_required character varying(1) COLLATE pg_catalog."default",
isreassigned smallint NOT NULL DEFAULT 0,
identityverified smallint NOT NULL DEFAULT 0,
newtokenrequired smallint NOT NULL DEFAULT 0,
status character varying(25) COLLATE pg_catalog."default",
prevstatus character varying(25) COLLATE pg_catalog."default",
reason character varying(25) COLLATE pg_catalog."default",
CONSTRAINT bv_pk_tokenid PRIMARY KEY (tokenid)
USING INDEX TABLESPACE pkihtts_data
)
TABLESPACE pkihtts_data;
ALTER TABLE pkihtts.bv_tb_token
OWNER to pkihtts;
sequences:
peopleid sequence:
CREATE SEQUENCE pkihtts.bvsq_peopleid
INCREMENT 1
START 50
MINVALUE 1
MAXVALUE 9223372036854775807
CACHE 1;
ALTER SEQUENCE pkihtts.bvsq_peopleid
OWNER TO "pkihtts";
javaid sequence:
CREATE SEQUENCE pkihtts.bvsq_peoplehis_javaid
INCREMENT 1
START 50
MINVALUE 1
MAXVALUE 9223372036854775807
CACHE 1;
ALTER SEQUENCE pkihtts.bvsq_peoplehis_javaid
OWNER TO "pkihtts";
Here are my functions/triggers:
function tb_people_upd:
CREATE FUNCTION pkihtts.bvtrigger_fct_tr_people_upd()
RETURNS trigger
LANGUAGE 'plpgsql'
COST 100
VOLATILE NOT LEAKPROOF
AS $BODY$
DECLARE
v_ErrorCode int;
v_ErrorMsg varchar(512);
v_Module varchar(31) = 'BVTR_PEOPLE_UPD';
v_Os_User varchar(30);
v_Host varchar(40);
BEGIN
---
-- Copy the old record from bv_tb_people into bv_tb_people_his
---
INSERT INTO pkihtts.bv_tb_people_his (
peopleid,
role,
status,
tokenid,
auid,
upn,
firstname,
middlename,
lastname,
genqual,
agency,
subagency,
affiliation,
subscribertype,
countryofcitizenship,
emailaddress,
dateregistered,
comments,
isdeleted,
modifiedby,
modifieddate,
profileid,
componentid
)
VALUES (
old.peopleid,
old.role,
old.status,
old.tokenid,
old.auid,
old.upn,
old.firstname,
old.middlename,
old.lastname,
old.genqual,
old.agency,
old.subagency,
old.affiliation,
old.subscribertype,
old.countryofcitizenship,
old.emailaddress,
old.dateregistered,
old.comments,
old.isdeleted,
old.modifiedby,
old.modifieddate.
old.profileid,
old.componentid
)
;
RETURN NEW;
---
-- Exception error handler
---
exception
when others then
v_ErrorCode := SQLSTATE;
v_ErrorMsg := SQLERRM;
v_Os_User := CURRENT_USER;
v_Host := inet_server_addr();
INSERT INTO pkihtts.bv_tb_errorlog( tstamp, os_user, host, module, errorcode, errormsg )
VALUES ( current_timestamp, v_Os_User, v_Host, v_Module, v_ErrorCode, v_ErrorMsg );
RETURN NEW;
END;
$BODY$;
CREATE TRIGGER bvtr_people_upd
BEFORE UPDATE
ON pkihtts.bv_tb_people
FOR EACH ROW
EXECUTE PROCEDURE pkihtts.bvtrigger_fct_tr_people_upd();
tb_people_ins:
CREATE FUNCTION pkihtts.bvtrigger_fct_tr_people_ins()
RETURNS trigger
LANGUAGE 'plpgsql'
COST 100
VOLATILE NOT LEAKPROOF
AS $BODY$
DECLARE
v_peopleid int = 0;
v_ErrorCode int;
v_ErrorMsg varchar(512);
v_Module varchar(31) = 'TR_PEOPLE_INS';
v_Os_User varchar(30);
v_Host varchar(40);
BEGIN
if TG_OP = 'INSERT' then
IF new.peopleid IS null THEN
SELECT nextval('pkihtts."bvsq_peopleid"') INTO v_peopleid;
new.peopleid := v_peopleid;
END IF;
IF new.modifieddate is null then
new.modifieddate := CURRENT_TIMESTAMP;
END IF;
RETURN NEW;
END IF;
---
-- Exception error handler
---
exception
when others then
v_ErrorCode := SQLSTATE;
v_ErrorMsg := SQLERRM;
v_Os_User := CURRENT_USER;
v_Host := inet_server_addr();
INSERT INTO pkihtts.bv_tb_errorlog( tstamp, os_user, host, module, errorcode, errormsg )
VALUES ( current_timestamp, v_Os_User, v_Host, v_Module, v_ErrorCode, v_ErrorMsg );
RETURN NEW;
END;
$BODY$;
CREATE TRIGGER bvtr_people_ins
BEFORE INSERT
ON pkihtts.bv_tb_people
FOR EACH ROW
EXECUTE PROCEDURE pkihtts.bvtrigger_fct_tr_people_ins();
trigger tb_people_his_ins:
CREATE FUNCTION pkihtts.bvtrigger_fct_tr_peoplehis_ins()
RETURNS trigger
LANGUAGE 'plpgsql'
COST 100
VOLATILE NOT LEAKPROOF
AS $BODY$
DECLARE
v_javaid int = 0;
v_ErrorCode int;
v_ErrorMsg varchar(512);
v_Module varchar(31) = 'BVTR_PEOPLEHIS_INS';
v_Os_User varchar(30);
v_Host varchar(40);
BEGIN
if TG_OP = 'INSERT' then
IF new.javaid IS null THEN
SELECT nextval('pkihtts."bvsq_peoplehis_javaid"') INTO v_javaid;
new.javaid := v_javaid;
END IF;
RETURN NEW;
END IF;
---
-- Exception error handler
---
exception
when others then
v_ErrorCode := SQLSTATE;
v_ErrorMsg := SQLERRM;
v_Os_User := CURRENT_USER;
v_Host := inet_server_addr();
INSERT INTO pkihtts.bv_tb_errorlog( tstamp, os_user, host, module, errorcode, errormsg )
VALUES ( current_timestamp, v_Os_User, v_Host, v_Module, v_ErrorCode, v_ErrorMsg );
RETURN NEW;
END;
$BODY$;
CREATE TRIGGER bvtr_peoplehis_ins
BEFORE INSERT
ON pkihtts.bv_tb_people_his
FOR EACH ROW
EXECUTE PROCEDURE pkihtts.bvtrigger_fct_tr_peoplehis_ins();
The max of both the peopleid and javaid columns is 50, and I altered each sequence's current value to 51.
select max(peopleid) from pkihtts.bv_tb_people; -- 50
select max(javaid) from pkihtts.bv_tb_people_his; -- 50
When I update a bv_tb_people record, I keep getting the 22P02 error, but only in AWS:
UPDATE pkihtts.bv_tb_people
SET lastname='TEST'
WHERE tokenid='49';
ERROR: invalid input syntax for integer: "0A000"
CONTEXT: PL/pgSQL function bvtrigger_fct_tr_people_upd() line 75 at assignment
SQL state: 22P02
but it updates fine on the RHEL staging server, and even there it doesn't insert into the history table (bv_tb_people_his) when I update a bv_tb_people record.
Why does the error pop up in AWS and not on Red Hat? How can I resolve the problem?

SQLSTATE is a five-character text code, not a number. On AWS the history INSERT fails with the non-numeric code "0A000" (feature_not_supported); the exception handler catches it, and line 75, v_ErrorCode := SQLSTATE;, then fails with 22P02 while trying to cast that text into the int variable v_ErrorCode. On your Red Hat server the codes being caught apparently happen to be all digits, so the cast succeeds, the handler logs the error and returns NEW, and the UPDATE goes through without ever writing a history row. Declare v_ErrorCode as text or varchar(5). Separately, note the period after old.modifieddate in the VALUES list of bvtrigger_fct_tr_people_upd; it should be a comma, and as posted that INSERT can never succeed, which is why the history table stays empty on both servers.
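A self-contained sketch of the type fix (an illustrative DO block, not your function; the same one-line change applies to all three trigger functions, and if pkihtts.bv_tb_errorlog.errorcode is an integer column it needs to become text as well):
DO $$
DECLARE
    v_ErrorCode varchar(5);       -- SQLSTATE is a 5-character text code such as '0A000'
    v_ErrorMsg  varchar(512);
BEGIN
    PERFORM 1/0;                  -- force an error (division_by_zero, SQLSTATE '22012')
EXCEPTION
    WHEN OTHERS THEN
        v_ErrorCode := SQLSTATE;  -- legal now that the variable holds text
        v_ErrorMsg  := SQLERRM;
        RAISE NOTICE 'caught %: %', v_ErrorCode, v_ErrorMsg;
END $$;
Also be aware that returning NEW from the handler makes the triggering UPDATE succeed while silently discarding the original error; that is exactly why bv_tb_people kept updating while bv_tb_people_his stayed empty.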

Related

Is there any solution for the error I experienced when uploading a CSV file, like the one below?

So I have already written code like this:
-- SCHEMA: Portofolio2
-- DROP SCHEMA IF EXISTS "Portofolio2" ;
CREATE SCHEMA IF NOT EXISTS "Portofolio2"
AUTHORIZATION postgres;
-- Table: Portofolio2.vidgames
-- DROP TABLE IF EXISTS "Portofolio2"."vidgames";
CREATE TABLE IF NOT EXISTS "Portofolio2"."vidgames"
(
index character varying COLLATE pg_catalog."default",
Rank character varying COLLATE pg_catalog."default",
Game_Title character varying COLLATE pg_catalog."default",
Platform character varying COLLATE pg_catalog."default",
Year character varying COLLATE pg_catalog."default",
Genre character varying COLLATE pg_catalog."default",
Publisher character varying COLLATE pg_catalog."default",
North_America character varying COLLATE pg_catalog."default",
Europe character varying COLLATE pg_catalog."default",
Japan character varying COLLATE pg_catalog."default",
Rest_of_World character varying COLLATE pg_catalog."default",
Global character varying COLLATE pg_catalog."default",
Review character varying COLLATE pg_catalog."default"
)
WITH (
OIDS = FALSE
)
TABLESPACE pg_default;
ALTER TABLE IF EXISTS "Portofolio2"."vidgames"
OWNER to postgres;
Copy "Portofolio2"."vidgames" ("index","rank","game_title","platform","year","genre","publisher","north_america","europe","japan","rest_of_world","global","review") from 'C:\Users\Admin\Downloads\Portofolio\Video Games Sales\Video Games Sales.csv' WITH DELIMITER ',' CSV HEADER QUOTE '''' ;
But this error appears:
NOTICE: schema "Portofolio2" already exists, skipping
NOTICE: relation "vidgames" already exists, skipping
ERROR: extra data after last expected column
CONTEXT: COPY vidgames, line 1261: ""1259,1260,""WarioWare, Inc.: Mega MicroGame$"",GBA,2003.0,Puzzle,Nintendo,0.4,0.11,0.7,0.02,1.23,76..."
SQL state: 22P04
Can anyone explain why?
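The likely cause: QUOTE '''' tells COPY that the quote character is a single quote, so the double quotes around WarioWare, Inc.: Mega MicroGame$ are treated as ordinary data and the embedded comma is parsed as a field separator, yielding 14 fields for 13 columns, hence "extra data after last expected column". A sketch of the command with the standard double-quote character (same path and column list as above):
COPY "Portofolio2"."vidgames" ("index","rank","game_title","platform","year","genre","publisher","north_america","europe","japan","rest_of_world","global","review")
FROM 'C:\Users\Admin\Downloads\Portofolio\Video Games Sales\Video Games Sales.csv'
WITH (FORMAT csv, HEADER true, DELIMITER ',', QUOTE '"');
Since '"' is already the CSV default quote, simply dropping the QUOTE option works too.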

type "hstore" is only a shell

I am trying to set up automatic audit logging in Postgres using triggers and trigger functions. For this I want to create the table logged_actions in the audit schema. When I run the following query:
CREATE TABLE IF NOT EXISTS audit.logged_actions
(
event_id bigint NOT NULL DEFAULT nextval('audit.logged_actions_event_id_seq'::regclass),
schema_name text COLLATE pg_catalog."default" NOT NULL,
table_name text COLLATE pg_catalog."default" NOT NULL,
relid oid NOT NULL,
session_user_name text COLLATE pg_catalog."default",
action_tstamp_tx timestamp with time zone NOT NULL,
action_tstamp_stm timestamp with time zone NOT NULL,
action_tstamp_clk timestamp with time zone NOT NULL,
transaction_id bigint,
application_name text COLLATE pg_catalog."default",
client_addr inet,
client_port integer,
client_query text COLLATE pg_catalog."default",
action text COLLATE pg_catalog."default" NOT NULL,
row_data hstore,
changed_fields hstore,
statement_only boolean NOT NULL,
CONSTRAINT logged_actions_pkey PRIMARY KEY (event_id),
CONSTRAINT logged_actions_action_check CHECK (action = ANY (ARRAY['I'::text, 'D'::text, 'U'::text, 'T'::text]))
)
I have already created the extension "hstore", but the query is not executed and an error message appears stating:
ERROR: type "hstore" is only a shell
LINE 17: row_data hstore
That's a cryptic way of saying the hstore extension isn't installed in the database you're running this in. You need to run CREATE EXTENSION hstore in that same database before you can use the type.
Note that jsonb more or less makes hstore obsolete.
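A minimal sketch, assuming you have the privilege to create extensions; the key point is that extensions are per-database, so this must run in the database that holds the audit schema:
-- Run in the database where audit.logged_actions will live;
-- creating the extension in another database does not help here.
CREATE EXTENSION IF NOT EXISTS hstore;
-- If an interrupted install left a bare shell type behind, drop it first
-- (only if the extension itself is not listed as installed):
-- DROP TYPE hstore;
-- CREATE EXTENSION hstore;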

ANALYZE fails on postgres 12 with COLLATE issue

PostgreSQL 12.2 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39), 64-bit
I have a table with definition like this:
CREATE TABLE public.e13010
(
id integer NOT NULL DEFAULT nextval('seq_e13010'::regclass),
directive integer NOT NULL DEFAULT 3,
guid uuid NOT NULL,
objectid uuid NOT NULL DEFAULT '00000000-0000-0000-0000-000000000000'::uuid,
patchid integer NOT NULL DEFAULT 0,
usercode character varying(1024) COLLATE pg_catalog."default",
documentbegindate timestamp(0) without time zone NOT NULL,
documentenddate timestamp(0) without time zone NOT NULL,
actualbegindate timestamp(0) without time zone,
actualenddate timestamp(0) without time zone,
state integer,
state_tmp integer,
statedate timestamp(0) without time zone,
lastchangedate timestamp(0) without time zone,
automaticallyadded smallint NOT NULL DEFAULT 0,
removeleft smallint NOT NULL DEFAULT 0,
removeright smallint NOT NULL DEFAULT 0,
objectdel integer NOT NULL DEFAULT 0,
recordtype integer NOT NULL DEFAULT 0,
c1783376938461729511 character varying(370) COLLATE pg_catalog."default",
c8592504141572910447 character varying(50) COLLATE pg_catalog."default",
c3151508878674189688 character varying(4000) COLLATE pg_catalog."default",
c5466475963604233069 uuid,
c2451458188274224198 character varying(14) COLLATE pg_catalog."default",
c1334152289464091201 character varying(12) COLLATE pg_catalog."default",
c8454855336468783092 character varying(50) COLLATE pg_catalog."default",
c6935852433552846825 character varying(50) COLLATE pg_catalog."default",
c4829935149242607681 character varying(10) COLLATE pg_catalog."default",
c7130228457076369754 timestamp(0) without time zone,
c7401857868825271194 character varying(100) COLLATE pg_catalog."default",
c4321258757347250429 character varying(50) COLLATE pg_catalog."default",
c7827471130056322929 character varying(100) COLLATE pg_catalog."default",
c2981260259601811850 uuid,
c3564911216734280263 uuid,
c6652429068516103637 uuid,
c6755579201904122041 uuid,
c4812253808410943281 integer,
c8041209163526932724 uuid,
c2437585203524691466 character varying(4000) COLLATE pg_catalog."default",
c4137474161116073468 character varying(4000) COLLATE pg_catalog."default",
c8768589751025308865 character varying(4000) COLLATE pg_catalog."default",
c6556487635275527483 character varying(4000) COLLATE pg_catalog."default",
CONSTRAINT pk13010 PRIMARY KEY (id)
)
TABLESPACE pg_default;
ALTER TABLE public.e13010
OWNER to postgres;
-- Index: idx_e13010_code
-- DROP INDEX public.idx_e13010_code;
CREATE INDEX idx_e13010_code
ON public.e13010 USING btree
(c1783376938461729511 COLLATE pg_catalog."default" ASC NULLS LAST)
TABLESPACE pg_default;
-- Index: idx_e13010_doc
-- DROP INDEX public.idx_e13010_doc;
CREATE INDEX idx_e13010_doc
ON public.e13010 USING btree
(c8592504141572910447 COLLATE pg_catalog."default" ASC NULLS LAST, c8454855336468783092 COLLATE pg_catalog."default" ASC NULLS LAST, state ASC NULLS LAST)
INCLUDE(usercode, documentbegindate, documentenddate, c8041209163526932724)
TABLESPACE pg_default;
-- Index: idx_e13010_esiaid
-- DROP INDEX public.idx_e13010_esiaid;
CREATE INDEX idx_e13010_esiaid
ON public.e13010 USING btree
(c4321258757347250429 COLLATE pg_catalog."default" ASC NULLS LAST, state ASC NULLS LAST)
INCLUDE(usercode, documentbegindate, documentenddate)
TABLESPACE pg_default;
-- Index: idx_e13010_guid
-- DROP INDEX public.idx_e13010_guid;
CREATE UNIQUE INDEX idx_e13010_guid
ON public.e13010 USING btree
(guid ASC NULLS LAST)
TABLESPACE pg_default;
-- Index: idx_e13010_inn
-- DROP INDEX public.idx_e13010_inn;
CREATE INDEX idx_e13010_inn
ON public.e13010 USING btree
(c1334152289464091201 COLLATE pg_catalog."default" ASC NULLS LAST)
INCLUDE(usercode, documentbegindate, documentenddate, state)
TABLESPACE pg_default;
-- Index: idx_e13010_key_state
-- DROP INDEX public.idx_e13010_key_state;
CREATE INDEX idx_e13010_key_state
ON public.e13010 USING btree
(usercode COLLATE pg_catalog."default" ASC NULLS LAST, state ASC NULLS LAST)
INCLUDE(objectid, guid, patchid, documentbegindate, documentenddate, state_tmp)
TABLESPACE pg_default;
-- Index: idx_e13010_objectid
-- DROP INDEX public.idx_e13010_objectid;
CREATE INDEX idx_e13010_objectid
ON public.e13010 USING btree
(objectid ASC NULLS LAST, state ASC NULLS LAST)
INCLUDE(id, guid, patchid, documentbegindate, documentenddate, removeleft, removeright, objectdel, state_tmp, usercode)
TABLESPACE pg_default;
-- Index: idx_e13010_pid_key_id
-- DROP INDEX public.idx_e13010_pid_key_id;
CREATE INDEX idx_e13010_pid_key_id
ON public.e13010 USING btree
(patchid ASC NULLS LAST, usercode COLLATE pg_catalog."default" ASC NULLS LAST, id ASC NULLS LAST)
INCLUDE(guid, objectid, documentbegindate, documentenddate, state, state_tmp, automaticallyadded, directive, recordtype, objectdel)
TABLESPACE pg_default;
-- Index: idx_e13010_pid_oid_id
-- DROP INDEX public.idx_e13010_pid_oid_id;
CREATE INDEX idx_e13010_pid_oid_id
ON public.e13010 USING btree
(patchid ASC NULLS LAST, objectid ASC NULLS LAST, id ASC NULLS LAST)
INCLUDE(documentbegindate, documentenddate, removeleft, removeright, objectdel)
TABLESPACE pg_default;
-- Index: idx_e13010_select_patch_pos
-- DROP INDEX public.idx_e13010_select_patch_pos;
CREATE INDEX idx_e13010_select_patch_pos
ON public.e13010 USING btree
(patchid ASC NULLS LAST, id ASC NULLS LAST)
INCLUDE(documentbegindate, documentenddate, automaticallyadded, objectid)
TABLESPACE pg_default;
-- Index: idx_e13010_snils
-- DROP INDEX public.idx_e13010_snils;
CREATE INDEX idx_e13010_snils
ON public.e13010 USING btree
(c2451458188274224198 COLLATE pg_catalog."default" ASC NULLS LAST, state ASC NULLS LAST)
INCLUDE(usercode, documentbegindate, documentenddate)
TABLESPACE pg_default;
The table holds about 300k rows and, of course, has a TOAST table. When I try to execute
ANALYZE e13010
I get an error:
ERROR: could not determine which collation to use for string comparison
HINT: Use the COLLATE clause to set the collation explicitly.
SQL state: 42P22
What could cause ANALYZE to fail this way? I think it somehow relates to the TOAST functionality, but I have no clue how.
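Not an explanation of the root cause, but a way to narrow it down: ANALYZE accepts a per-column list, so the collatable columns can be checked one at a time to find which one (or which index containing it) triggers the comparison error. A diagnostic sketch using the columns defined above:
-- Analyze individual columns to isolate which statistics computation fails;
-- start with the varchar columns used in the indexes.
ANALYZE VERBOSE public.e13010 (usercode);
ANALYZE VERBOSE public.e13010 (c1783376938461729511);
ANALYZE VERBOSE public.e13010 (c8592504141572910447, c8454855336468783092);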

postgresql unique constraint allows duplicate

I have users table like below
CREATE TABLE public.users
(
id integer NOT NULL DEFAULT nextval('users_id_seq'::regclass),
uid uuid DEFAULT (md5(((random())::text || (clock_timestamp())::text)))::uuid,
createdon timestamp without time zone DEFAULT now(),
createdby integer,
modifiedon timestamp without time zone,
modifiedby integer,
comments boolean DEFAULT false,
verified boolean DEFAULT false,
active boolean DEFAULT true,
deleted boolean DEFAULT false,
tags text[] COLLATE pg_catalog."default",
user_type user_types NOT NULL,
fullname character varying(100) COLLATE pg_catalog."default" NOT NULL,
email character varying(84) COLLATE pg_catalog."default" NOT NULL,
pword character varying(32) COLLATE pg_catalog."default",
salt character varying(32) COLLATE pg_catalog."default",
hash text COLLATE pg_catalog."default",
source character varying(100) COLLATE pg_catalog."default",
reference character varying(100) COLLATE pg_catalog."default",
CONSTRAINT users_pkey PRIMARY KEY (id),
CONSTRAINT email_unique UNIQUE (email)
,
CONSTRAINT users_createdby_fkey FOREIGN KEY (createdby)
REFERENCES public.users (id) MATCH SIMPLE
ON UPDATE NO ACTION
ON DELETE NO ACTION,
CONSTRAINT users_modifiedby_fkey FOREIGN KEY (modifiedby)
REFERENCES public.users (id) MATCH SIMPLE
ON UPDATE NO ACTION
ON DELETE NO ACTION
)
The email field is set to unique.
When I try to insert a record twice in pgAdmin, I get the error.
However, when the same query is run from my Node.js app via the pg library, the records are inserted.
What is the reason for this misbehavior?
The query object used in the app:
{ text: 'INSERT INTO public.players ( createdby, user_type, fullname, email, pword, reference, source, salt, hash ) \n VALUES ( $1, $2, $3, $4, $5, $6, $7, $8, $9 ) RETURNING id',
values:
[ null,
'player',
'James De Souza',
'james#desouza.com',
'4297f44b13955235245b2497399d7a93',
'organic',
'on-site',
'07ecab28a4bab8f1bf63208ac8961053',
'25571c0618c701087495069cb4e45bf4fb07197e5ff301d963b670a9e033d2044557eb46537cee27a51da8b1fd0c8987b68ad7e8e47f48a06bcb1f44e6d3678d2c875d3dd4311a506a75eabf6fff23b65deba6a202606cc6d6b42abe4b25d136faffed8bd0046620f4e10ef0b974b108b27511a42c150983e268e1f7522ad678f0699848747a9e2f4a0cafc66704915a38966fbc76647678d907ca960533a5dc4de983167fafb7807e583dd5affcc2e14900295c6f396e768a32f106a4c636be78a6df96268216bc9410373fcc2528eb7984e2cb91ae62c3c65660dc477db3c3bfeadfacb214a055a48a1e9ed0c169ee54fcc6e7b24435cb53c3596e19bedbfef2c289ffb784f6fce18b9623253260e17aca5b3d810248ece6c51d810f3b44b1eb95225d5170cde0f3c9fda8ceefd9a287016c785576264f95ee961254bc371fed8671a7497456ce439d7318f21e539ce5940bd2fd73a350fc5d139cbe06bda568663a35488ceb7c62dadf3ee6d5810b5248abe447472b9c294a13c30144271a06e10b6a7f070df5bd7e804b13b1ab541c65de65dc5b85cf3199d7b13431095aff83de6939afc2d72d187597bf8214bf45f356591f7e513e7026322a20beed430966fbd3cbe4ec2c95b54d081c032f5e2ba930019857bb63e7c631668e3f607559b4ffffc1de6c957f687930f2900fb27123aaaf5f55a06844586cee94d10757' ] }
NOTE: public.players inherits from public.users
CREATE TABLE public.players (
"username" character varying(100) UNIQUE DEFAULT concat('player', (random() * 100000000)::int::text),
"location" int REFERENCES public.list_locations ON DELETE RESTRICT,
"address" text,
"bio" text
) INHERITS (public.users);
I just realized that the unique constraint is not enforced on the inherited table.
Is there any solution or workaround for this problem?
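For context: this is a documented limitation of PostgreSQL table inheritance; indexes and UNIQUE constraints apply only to the table they are defined on, so email_unique on public.users is never checked for rows inserted into public.players. A partial workaround sketch (the constraint name is illustrative):
-- Give the child table its own constraint; this prevents duplicate emails
-- among players, but a players row and a users row can still collide.
ALTER TABLE public.players
    ADD CONSTRAINT players_email_unique UNIQUE (email);
True uniqueness across parent and child needs another mechanism, such as a BEFORE INSERT trigger on both tables that checks the other one, or restructuring away from inheritance.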

.net identityUser datetimeoffset mismatch postgresql

I'm using .NET Core with PostgreSQL, and all of a sudden (I guess there was an update to something) it all stopped working.
This is the entity my user entity inherits from.
Here LockoutEnd is DateTimeOffset?, and in my Postgres table:
CREATE TABLE public.users
(
id integer NOT NULL DEFAULT nextval('users_id_seq'::regclass),
accessfailedcount integer NOT NULL,
concurrencystamp character varying(255) COLLATE pg_catalog."default",
email character varying(128) COLLATE pg_catalog."default",
emailconfirmed boolean NOT NULL,
lockoutenabled boolean NOT NULL,
lockoutend timestamp with time zone,
name character varying(128) COLLATE pg_catalog."default",
normalizedemail character varying(128) COLLATE pg_catalog."default",
normalizedusername character varying(128) COLLATE pg_catalog."default",
passwordhash character varying(512) COLLATE pg_catalog."default",
phonenumber character varying(50) COLLATE pg_catalog."default",
phonenumberconfirmed boolean NOT NULL,
securitystamp character varying(255) COLLATE pg_catalog."default",
twofactorenabled boolean NOT NULL,
username character varying(50) COLLATE pg_catalog."default",
locale integer NOT NULL DEFAULT 1,
CONSTRAINT users_pkey PRIMARY KEY (id)
)
WITH (
OIDS = FALSE
)
TABLESPACE pg_default;
ALTER TABLE public.users
OWNER to notifiedlocal;
CREATE INDEX emailindex
ON public.users USING btree
(normalizedemail COLLATE pg_catalog."default")
TABLESPACE pg_default;
CREATE UNIQUE INDEX usernameindex
ON public.users USING btree
(normalizedusername COLLATE pg_catalog."default")
TABLESPACE pg_default;
This is the error I get when I try to do a simple get from the user table:
System.InvalidOperationException: "An exception occurred while reading a database value. The expected type was 'System.Nullable`1[System.DateTimeOffset]' but the actual value was of type 'System.DateTime'."
So have I accidentally updated Postgres or .NET Identity so that one of the sides changed?
This used to work, and I haven't changed anything on purpose.
Is it possible to change the Identity entity to use a normal DateTime instead?