PostgreSQL 12.2 on x86_64-pc-linux-gnu, compiled by gcc (GCC) 4.8.5 20150623 (Red Hat 4.8.5-39), 64-bit
I have a table with a definition like this:
CREATE TABLE public.e13010
(
id integer NOT NULL DEFAULT nextval('seq_e13010'::regclass),
directive integer NOT NULL DEFAULT 3,
guid uuid NOT NULL,
objectid uuid NOT NULL DEFAULT '00000000-0000-0000-0000-000000000000'::uuid,
patchid integer NOT NULL DEFAULT 0,
usercode character varying(1024) COLLATE pg_catalog."default",
documentbegindate timestamp(0) without time zone NOT NULL,
documentenddate timestamp(0) without time zone NOT NULL,
actualbegindate timestamp(0) without time zone,
actualenddate timestamp(0) without time zone,
state integer,
state_tmp integer,
statedate timestamp(0) without time zone,
lastchangedate timestamp(0) without time zone,
automaticallyadded smallint NOT NULL DEFAULT 0,
removeleft smallint NOT NULL DEFAULT 0,
removeright smallint NOT NULL DEFAULT 0,
objectdel integer NOT NULL DEFAULT 0,
recordtype integer NOT NULL DEFAULT 0,
c1783376938461729511 character varying(370) COLLATE pg_catalog."default",
c8592504141572910447 character varying(50) COLLATE pg_catalog."default",
c3151508878674189688 character varying(4000) COLLATE pg_catalog."default",
c5466475963604233069 uuid,
c2451458188274224198 character varying(14) COLLATE pg_catalog."default",
c1334152289464091201 character varying(12) COLLATE pg_catalog."default",
c8454855336468783092 character varying(50) COLLATE pg_catalog."default",
c6935852433552846825 character varying(50) COLLATE pg_catalog."default",
c4829935149242607681 character varying(10) COLLATE pg_catalog."default",
c7130228457076369754 timestamp(0) without time zone,
c7401857868825271194 character varying(100) COLLATE pg_catalog."default",
c4321258757347250429 character varying(50) COLLATE pg_catalog."default",
c7827471130056322929 character varying(100) COLLATE pg_catalog."default",
c2981260259601811850 uuid,
c3564911216734280263 uuid,
c6652429068516103637 uuid,
c6755579201904122041 uuid,
c4812253808410943281 integer,
c8041209163526932724 uuid,
c2437585203524691466 character varying(4000) COLLATE pg_catalog."default",
c4137474161116073468 character varying(4000) COLLATE pg_catalog."default",
c8768589751025308865 character varying(4000) COLLATE pg_catalog."default",
c6556487635275527483 character varying(4000) COLLATE pg_catalog."default",
CONSTRAINT pk13010 PRIMARY KEY (id)
)
TABLESPACE pg_default;
ALTER TABLE public.e13010
OWNER to postgres;
-- Index: idx_e13010_code
-- DROP INDEX public.idx_e13010_code;
CREATE INDEX idx_e13010_code
ON public.e13010 USING btree
(c1783376938461729511 COLLATE pg_catalog."default" ASC NULLS LAST)
TABLESPACE pg_default;
-- Index: idx_e13010_doc
-- DROP INDEX public.idx_e13010_doc;
CREATE INDEX idx_e13010_doc
ON public.e13010 USING btree
(c8592504141572910447 COLLATE pg_catalog."default" ASC NULLS LAST, c8454855336468783092 COLLATE pg_catalog."default" ASC NULLS LAST, state ASC NULLS LAST)
INCLUDE(usercode, documentbegindate, documentenddate, c8041209163526932724)
TABLESPACE pg_default;
-- Index: idx_e13010_esiaid
-- DROP INDEX public.idx_e13010_esiaid;
CREATE INDEX idx_e13010_esiaid
ON public.e13010 USING btree
(c4321258757347250429 COLLATE pg_catalog."default" ASC NULLS LAST, state ASC NULLS LAST)
INCLUDE(usercode, documentbegindate, documentenddate)
TABLESPACE pg_default;
-- Index: idx_e13010_guid
-- DROP INDEX public.idx_e13010_guid;
CREATE UNIQUE INDEX idx_e13010_guid
ON public.e13010 USING btree
(guid ASC NULLS LAST)
TABLESPACE pg_default;
-- Index: idx_e13010_inn
-- DROP INDEX public.idx_e13010_inn;
CREATE INDEX idx_e13010_inn
ON public.e13010 USING btree
(c1334152289464091201 COLLATE pg_catalog."default" ASC NULLS LAST)
INCLUDE(usercode, documentbegindate, documentenddate, state)
TABLESPACE pg_default;
-- Index: idx_e13010_key_state
-- DROP INDEX public.idx_e13010_key_state;
CREATE INDEX idx_e13010_key_state
ON public.e13010 USING btree
(usercode COLLATE pg_catalog."default" ASC NULLS LAST, state ASC NULLS LAST)
INCLUDE(objectid, guid, patchid, documentbegindate, documentenddate, state_tmp)
TABLESPACE pg_default;
-- Index: idx_e13010_objectid
-- DROP INDEX public.idx_e13010_objectid;
CREATE INDEX idx_e13010_objectid
ON public.e13010 USING btree
(objectid ASC NULLS LAST, state ASC NULLS LAST)
INCLUDE(id, guid, patchid, documentbegindate, documentenddate, removeleft, removeright, objectdel, state_tmp, usercode)
TABLESPACE pg_default;
-- Index: idx_e13010_pid_key_id
-- DROP INDEX public.idx_e13010_pid_key_id;
CREATE INDEX idx_e13010_pid_key_id
ON public.e13010 USING btree
(patchid ASC NULLS LAST, usercode COLLATE pg_catalog."default" ASC NULLS LAST, id ASC NULLS LAST)
INCLUDE(guid, objectid, documentbegindate, documentenddate, state, state_tmp, automaticallyadded, directive, recordtype, objectdel)
TABLESPACE pg_default;
-- Index: idx_e13010_pid_oid_id
-- DROP INDEX public.idx_e13010_pid_oid_id;
CREATE INDEX idx_e13010_pid_oid_id
ON public.e13010 USING btree
(patchid ASC NULLS LAST, objectid ASC NULLS LAST, id ASC NULLS LAST)
INCLUDE(documentbegindate, documentenddate, removeleft, removeright, objectdel)
TABLESPACE pg_default;
-- Index: idx_e13010_select_patch_pos
-- DROP INDEX public.idx_e13010_select_patch_pos;
CREATE INDEX idx_e13010_select_patch_pos
ON public.e13010 USING btree
(patchid ASC NULLS LAST, id ASC NULLS LAST)
INCLUDE(documentbegindate, documentenddate, automaticallyadded, objectid)
TABLESPACE pg_default;
-- Index: idx_e13010_snils
-- DROP INDEX public.idx_e13010_snils;
CREATE INDEX idx_e13010_snils
ON public.e13010 USING btree
(c2451458188274224198 COLLATE pg_catalog."default" ASC NULLS LAST, state ASC NULLS LAST)
INCLUDE(usercode, documentbegindate, documentenddate)
TABLESPACE pg_default;
It holds about 300k rows and, of course, has a TOAST table. When I try to execute
ANALYZE e13010
I get an error:
ERROR: could not determine which collation to use for string comparison
HINT: Use the COLLATE clause to set the collation explicitly.
SQL state: 42P22
What could cause ANALYZE to fail this way? I think it is somehow related to the TOAST functionality, but I have no clue how.
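One way to narrow down which column trips the comparison (a diagnostic sketch, not a fix) is to analyze the collatable columns one at a time and see which call reproduces the error:
-- Analyze the string columns individually; column names taken from the DDL above.
ANALYZE public.e13010 (usercode);
ANALYZE public.e13010 (c1783376938461729511);
ANALYZE public.e13010 (c3151508878674189688);
-- ...and so on for the remaining character varying columns.
Whichever single-column ANALYZE fails points at the column whose collation PostgreSQL cannot resolve.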
Related
I have two different databases, each containing a table called feed_items.
Let's call them source and destination.
I want to copy some 250k rows from the source feed_items table to the destination feed_items table.
Structure of the source feed_items table:
CREATE TABLE IF NOT EXISTS public.feed_items
(
feed_item_id uuid NOT NULL,
pubdate timestamp with time zone NOT NULL DEFAULT CURRENT_TIMESTAMP,
link character varying COLLATE pg_catalog."default" NOT NULL,
guid character varying COLLATE pg_catalog."default",
title text COLLATE pg_catalog."default" NOT NULL,
summary text COLLATE pg_catalog."default",
content text COLLATE pg_catalog."default",
author character varying(63) COLLATE pg_catalog."default",
feed_id integer NOT NULL,
tags character varying(127)[] COLLATE pg_catalog."default" DEFAULT '{}'::character varying[],
title_vector tsvector,
summary_vector tsvector,
CONSTRAINT feed_items_pkey PRIMARY KEY (feed_item_id),
CONSTRAINT fkey_feed_item_feed FOREIGN KEY (feed_id)
REFERENCES public.feeds (feed_id) MATCH SIMPLE
ON UPDATE CASCADE
ON DELETE CASCADE
)
TABLESPACE pg_default;
ALTER TABLE IF EXISTS public.feed_items
OWNER to ch_v3_root;
CREATE INDEX IF NOT EXISTS idx_feed_items_pubdate_feed_item_id_desc
ON public.feed_items USING btree
(pubdate DESC NULLS FIRST, feed_item_id DESC NULLS FIRST)
TABLESPACE pg_default;
CREATE INDEX IF NOT EXISTS idx_summary_vector
ON public.feed_items USING gin
(summary_vector)
TABLESPACE pg_default;
CREATE INDEX IF NOT EXISTS idx_tags_array
ON public.feed_items USING gin
(tags COLLATE pg_catalog."default")
TABLESPACE pg_default;
CREATE INDEX IF NOT EXISTS idx_title_vector
ON public.feed_items USING gin
(title_vector)
TABLESPACE pg_default;
CREATE TRIGGER on_insert_feed_items
AFTER INSERT
ON public.feed_items
FOR EACH ROW
EXECUTE FUNCTION public.notify_change_feed_items();
CREATE TRIGGER on_update_feed_items
AFTER UPDATE
ON public.feed_items
FOR EACH ROW
WHEN (new.title <> old.title OR new.summary <> old.summary OR new.content <> old.content)
EXECUTE FUNCTION public.notify_change_feed_items();
CREATE TRIGGER trigger_update_summary_vector
BEFORE INSERT OR UPDATE
ON public.feed_items
FOR EACH ROW
EXECUTE FUNCTION tsvector_update_trigger('summary_vector', 'pg_catalog.english', 'summary');
CREATE TRIGGER trigger_update_title_vector
BEFORE INSERT OR UPDATE
ON public.feed_items
FOR EACH ROW
EXECUTE FUNCTION tsvector_update_trigger('title_vector', 'pg_catalog.english', 'title');
Structure of the destination feed_items table:
CREATE TABLE IF NOT EXISTS public.feed_items
(
id uuid NOT NULL,
author character varying(255) COLLATE pg_catalog."default",
content text COLLATE pg_catalog."default",
guid character varying(2047) COLLATE pg_catalog."default",
link character varying(2047) COLLATE pg_catalog."default" NOT NULL,
pubdate timestamp with time zone NOT NULL DEFAULT now(),
searchable tsvector GENERATED ALWAYS AS (((setweight(to_tsvector('english'::regconfig, COALESCE(title, (''::character varying)::text)), 'A'::"char") || setweight(to_tsvector('english'::regconfig, COALESCE(summary, ''::text)), 'B'::"char")) || setweight(to_tsvector('english'::regconfig, COALESCE(content, (''::character varying)::text)), 'C'::"char"))) STORED,
summary text COLLATE pg_catalog."default",
tags character varying(255)[] COLLATE pg_catalog."default" NOT NULL DEFAULT (ARRAY[]::character varying[])::character varying(255)[],
title text COLLATE pg_catalog."default" NOT NULL,
feed integer NOT NULL,
CONSTRAINT feed_items_pkey PRIMARY KEY (id),
CONSTRAINT feed_items_link_key UNIQUE (link),
CONSTRAINT feed_items_feed_fkey FOREIGN KEY (feed)
REFERENCES public.feeds (id) MATCH SIMPLE
ON UPDATE CASCADE
ON DELETE CASCADE
)
TABLESPACE pg_default;
ALTER TABLE IF EXISTS public.feed_items
OWNER to ch_api_user;
CREATE INDEX IF NOT EXISTS feed_items_pubdate_id
ON public.feed_items USING btree
(pubdate DESC NULLS FIRST, id DESC NULLS FIRST)
TABLESPACE pg_default;
CREATE INDEX IF NOT EXISTS feed_items_searchable
ON public.feed_items USING gin
(searchable)
TABLESPACE pg_default;
CREATE INDEX IF NOT EXISTS feed_items_tags
ON public.feed_items USING gin
(tags COLLATE pg_catalog."default")
TABLESPACE pg_default;
A few column names have changed, the column order has changed, and the table is owned by a different database user. How do I pg_restore from source to destination?
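pg_restore by itself cannot rename columns, so one workaround (a sketch based on the two definitions above; double-check the column mapping and the tighter length limits such as guid and author before running it) is to export only the needed columns from the source and load them into the destination with an explicit column list:
-- On the source database: export the source columns in the destination's order.
\copy (SELECT feed_item_id, author, content, guid, link, pubdate, summary, tags, title, feed_id FROM public.feed_items) TO 'feed_items.csv' CSV
-- On the destination database: list the destination columns explicitly; the generated
-- column searchable must be omitted because it is computed automatically.
\copy public.feed_items (id, author, content, guid, link, pubdate, summary, tags, title, feed) FROM 'feed_items.csv' CSV
This assumes the feed IDs are the same in both databases; if they differ, the SELECT needs a join against the source feeds table to remap feed_id. The destination's unique constraint on link will also reject any duplicate links.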
I have a table that stores logs from an Electronic Invoicing System web service; this is my SQL structure:
CREATE TABLE public.eis_transactions
(
id bigint NOT NULL DEFAULT nextval('eis_transactions_id_seq'::regclass),
operation_type character varying COLLATE pg_catalog."default",
sale_id integer,
delivery_note_id integer,
sale_credit_note_id integer,
debit_note_id integer,
cdc text COLLATE pg_catalog."default",
transaction_id text COLLATE pg_catalog."default",
response_code character varying COLLATE pg_catalog."default",
response_description text COLLATE pg_catalog."default",
xml text COLLATE pg_catalog."default",
response_xml text COLLATE pg_catalog."default",
response_datetime timestamp without time zone,
created timestamp without time zone,
modified timestamp without time zone,
user_id integer,
async boolean DEFAULT false,
url character varying COLLATE pg_catalog."default",
final_xml text COLLATE pg_catalog."default",
CONSTRAINT eis_transactions_pkey PRIMARY KEY (id),
CONSTRAINT eis_transactions_debit_note_id_fkey FOREIGN KEY (debit_note_id)
REFERENCES public.debit_notes (id) MATCH SIMPLE
ON UPDATE RESTRICT
ON DELETE RESTRICT,
CONSTRAINT eis_transactions_delivery_note_id_fkey FOREIGN KEY (delivery_note_id)
REFERENCES public.delivery_notes (id) MATCH SIMPLE
ON UPDATE RESTRICT
ON DELETE RESTRICT,
CONSTRAINT eis_transactions_sale_credit_note_id_fkey FOREIGN KEY (sale_credit_note_id)
REFERENCES public.sale_credit_notes (id) MATCH SIMPLE
ON UPDATE RESTRICT
ON DELETE RESTRICT,
CONSTRAINT eis_transactions_sale_id_fkey FOREIGN KEY (sale_id)
REFERENCES public.sales (id) MATCH SIMPLE
ON UPDATE RESTRICT
ON DELETE RESTRICT,
CONSTRAINT eis_transactions_user_id_fkey FOREIGN KEY (user_id)
REFERENCES public.users (id) MATCH SIMPLE
ON UPDATE RESTRICT
ON DELETE RESTRICT
)
WITH (
OIDS = FALSE
)
TABLESPACE pg_default;
ALTER TABLE public.eis_transactions
OWNER to postgres;
-- Index: eis_transactions_id_idx
-- DROP INDEX public.eis_transactions_id_idx;
CREATE INDEX eis_transactions_id_idx
ON public.eis_transactions USING btree
(id ASC NULLS LAST)
TABLESPACE pg_default;
-- Index: eis_transactions_id_idx1
-- DROP INDEX public.eis_transactions_id_idx1;
CREATE INDEX eis_transactions_id_idx1
ON public.eis_transactions USING btree
(id ASC NULLS FIRST)
TABLESPACE pg_default;
-- Index: eis_transactions_id_idx2
-- DROP INDEX public.eis_transactions_id_idx2;
CREATE INDEX eis_transactions_id_idx2
ON public.eis_transactions USING btree
(id DESC NULLS FIRST)
TABLESPACE pg_default;
-- Index: eis_transactions_sale_id_delivery_note_id_sale_credit_note__idx
-- DROP INDEX public.eis_transactions_sale_id_delivery_note_id_sale_credit_note__idx;
CREATE INDEX eis_transactions_sale_id_delivery_note_id_sale_credit_note__idx
ON public.eis_transactions USING btree
(sale_id ASC NULLS LAST, delivery_note_id ASC NULLS LAST, sale_credit_note_id ASC NULLS LAST, debit_note_id ASC NULLS LAST, user_id ASC NULLS LAST)
TABLESPACE pg_default;
It contains ~800 rows, and this is the query:
SELECT * FROM eis_transactions LIMIT 1000;
The query takes more than 60 seconds to complete.
This is the EXPLAIN ANALYZE result I got:
EXPLAIN (ANALYZE, BUFFERS) SELECT * FROM eis_transactions LIMIT 100;
Limit (cost=0.00..15.94 rows=100 width=1108) (actual time=0.013..0.121 rows=100 loops=1)
Buffers: shared read=15
-> Seq Scan on eis_transactions (cost=0.00..128.03 rows=803 width=1108) (actual time=0.012..0.106 rows=100 loops=1)
Buffers: shared read=15
Total runtime: 0.180 ms
But running SELECT * FROM eis_transactions (with or without LIMIT) takes more than 60 seconds, while other tables with more than 1,000 rows don't take anywhere near as long as this particular one.
What could be wrong?
Thank you!
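For what it's worth, the EXPLAIN output above only measures the server-side scan; it does not include detoasting the large xml/text columns and shipping them to the client, which is often where time like this goes. A rough diagnostic sketch to see how much data SELECT * actually has to transfer:
-- Compare the table's total on-disk size (heap + TOAST) with the payload held
-- in the large text columns.
SELECT pg_size_pretty(pg_total_relation_size('public.eis_transactions')) AS total_size,
       pg_size_pretty(sum(coalesce(octet_length(xml), 0)
                        + coalesce(octet_length(response_xml), 0)
                        + coalesce(octet_length(final_xml), 0))) AS xml_payload
FROM public.eis_transactions;
If those numbers are large, the 60 seconds are likely spent transferring and rendering the XML on the client side, not scanning the 800 rows.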
I have a users table like the one below:
CREATE TABLE public.users
(
id integer NOT NULL DEFAULT nextval('users_id_seq'::regclass),
uid uuid DEFAULT (md5(((random())::text || (clock_timestamp())::text)))::uuid,
createdon timestamp without time zone DEFAULT now(),
createdby integer,
modifiedon timestamp without time zone,
modifiedby integer,
comments boolean DEFAULT false,
verified boolean DEFAULT false,
active boolean DEFAULT true,
deleted boolean DEFAULT false,
tags text[] COLLATE pg_catalog."default",
user_type user_types NOT NULL,
fullname character varying(100) COLLATE pg_catalog."default" NOT NULL,
email character varying(84) COLLATE pg_catalog."default" NOT NULL,
pword character varying(32) COLLATE pg_catalog."default",
salt character varying(32) COLLATE pg_catalog."default",
hash text COLLATE pg_catalog."default",
source character varying(100) COLLATE pg_catalog."default",
reference character varying(100) COLLATE pg_catalog."default",
CONSTRAINT users_pkey PRIMARY KEY (id),
CONSTRAINT email_unique UNIQUE (email)
,
CONSTRAINT users_createdby_fkey FOREIGN KEY (createdby)
REFERENCES public.users (id) MATCH SIMPLE
ON UPDATE NO ACTION
ON DELETE NO ACTION,
CONSTRAINT users_modifiedby_fkey FOREIGN KEY (modifiedby)
REFERENCES public.users (id) MATCH SIMPLE
ON UPDATE NO ACTION
ON DELETE NO ACTION
)
The email field is set to unique.
When I try to insert the same record twice in pgAdmin, I get the expected error.
However, when the same query is run from my Node.js app via the pg library, the duplicate records are inserted.
What is the reason for this behavior?
This is the query object used in the app:
{ text: 'INSERT INTO public.players ( createdby, user_type, fullname, email, pword, reference, source, salt, hash ) \n VALUES ( $1, $2, $3, $4, $5, $6, $7, $8, $9 ) RETURNING id',
values:
[ null,
'player',
'James De Souza',
'james#desouza.com',
'4297f44b13955235245b2497399d7a93',
'organic',
'on-site',
'07ecab28a4bab8f1bf63208ac8961053',
'25571c0618c701087495069cb4e45bf4fb07197e5ff301d963b670a9e033d2044557eb46537cee27a51da8b1fd0c8987b68ad7e8e47f48a06bcb1f44e6d3678d2c875d3dd4311a506a75eabf6fff23b65deba6a202606cc6d6b42abe4b25d136faffed8bd0046620f4e10ef0b974b108b27511a42c150983e268e1f7522ad678f0699848747a9e2f4a0cafc66704915a38966fbc76647678d907ca960533a5dc4de983167fafb7807e583dd5affcc2e14900295c6f396e768a32f106a4c636be78a6df96268216bc9410373fcc2528eb7984e2cb91ae62c3c65660dc477db3c3bfeadfacb214a055a48a1e9ed0c169ee54fcc6e7b24435cb53c3596e19bedbfef2c289ffb784f6fce18b9623253260e17aca5b3d810248ece6c51d810f3b44b1eb95225d5170cde0f3c9fda8ceefd9a287016c785576264f95ee961254bc371fed8671a7497456ce439d7318f21e539ce5940bd2fd73a350fc5d139cbe06bda568663a35488ceb7c62dadf3ee6d5810b5248abe447472b9c294a13c30144271a06e10b6a7f070df5bd7e804b13b1ab541c65de65dc5b85cf3199d7b13431095aff83de6939afc2d72d187597bf8214bf45f356591f7e513e7026322a20beed430966fbd3cbe4ec2c95b54d081c032f5e2ba930019857bb63e7c631668e3f607559b4ffffc1de6c957f687930f2900fb27123aaaf5f55a06844586cee94d10757' ] }
NOTE: public.players inherits from public.users:
CREATE TABLE public.players (
"username" character varying(100) UNIQUE DEFAULT concat('player', (random() * 100000000)::int::text),
"location" int REFERENCES public.list_locations ON DELETE RESTRICT,
"address" text,
"bio" text
) INHERITS (public.users);
I just realized that the unique constraint is not enforced on the inherited table.
Is there any solution or workaround for this problem?
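That is expected with inheritance: in PostgreSQL, indexes and unique constraints are not inherited, so email_unique on public.users does not constrain rows inserted into public.players (presumably the pgAdmin test inserted into public.users, while the app inserts into public.players). A minimal workaround sketch, assuming per-table uniqueness is enough:
-- Declare the same constraint on the child table; it is enforced only within players.
ALTER TABLE public.players
    ADD CONSTRAINT players_email_unique UNIQUE (email);
Uniqueness across the whole users/players hierarchy cannot be expressed with a single constraint; that needs a trigger-based check or a redesign without inheritance.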
I am dealing with a weird issue where a date-based query runs much slower with >= than with <=. The execution plans are here:
Slow
Fast
It looks like the slow one does three nested loops while the fast one does a join, but I don't get why. I've run VACUUM, ANALYZE, etc., with no result.
Here are the table definitions, too:
-- Table: public.hfj_spidx_date
-- DROP TABLE public.hfj_spidx_date;
CREATE TABLE public.hfj_spidx_date
(
sp_id bigint NOT NULL,
sp_missing boolean,
sp_name character varying(100) COLLATE pg_catalog."default" NOT NULL,
res_id bigint,
res_type character varying(255) COLLATE pg_catalog."default" NOT NULL,
sp_updated timestamp without time zone,
hash_identity bigint,
sp_value_high timestamp without time zone,
sp_value_low timestamp without time zone,
CONSTRAINT hfj_spidx_date_pkey PRIMARY KEY (sp_id),
CONSTRAINT fk17s70oa59rm9n61k9thjqrsqm FOREIGN KEY (res_id)
REFERENCES public.hfj_resource (res_id) MATCH SIMPLE
ON UPDATE NO ACTION
ON DELETE NO ACTION
)
WITH (
OIDS = FALSE
)
TABLESPACE pg_default;
ALTER TABLE public.hfj_spidx_date
OWNER to dbadmin;
-- Index: idx_sp_date_hash
-- DROP INDEX public.idx_sp_date_hash;
CREATE INDEX idx_sp_date_hash
ON public.hfj_spidx_date USING btree
(hash_identity, sp_value_low, sp_value_high)
TABLESPACE pg_default;
-- Index: idx_sp_date_resid
-- DROP INDEX public.idx_sp_date_resid;
CREATE INDEX idx_sp_date_resid
ON public.hfj_spidx_date USING btree
(res_id)
TABLESPACE pg_default;
-- Index: idx_sp_date_updated
-- DROP INDEX public.idx_sp_date_updated;
CREATE INDEX idx_sp_date_updated
ON public.hfj_spidx_date USING btree
(sp_updated)
TABLESPACE pg_default;
-------------------------------------
-- Table: public.hfj_res_link
-- DROP TABLE public.hfj_res_link;
CREATE TABLE public.hfj_res_link
(
pid bigint NOT NULL,
src_path character varying(200) COLLATE pg_catalog."default" NOT NULL,
src_resource_id bigint NOT NULL,
source_resource_type character varying(30) COLLATE pg_catalog."default" NOT NULL,
target_resource_id bigint,
target_resource_type character varying(30) COLLATE pg_catalog."default" NOT NULL,
target_resource_url character varying(200) COLLATE pg_catalog."default",
sp_updated timestamp without time zone,
CONSTRAINT hfj_res_link_pkey PRIMARY KEY (pid),
CONSTRAINT fk_reslink_source FOREIGN KEY (src_resource_id)
REFERENCES public.hfj_resource (res_id) MATCH SIMPLE
ON UPDATE NO ACTION
ON DELETE NO ACTION,
CONSTRAINT fk_reslink_target FOREIGN KEY (target_resource_id)
REFERENCES public.hfj_resource (res_id) MATCH SIMPLE
ON UPDATE NO ACTION
ON DELETE NO ACTION
)
WITH (
OIDS = FALSE
)
TABLESPACE pg_default;
ALTER TABLE public.hfj_res_link
OWNER to dbadmin;
-- Index: idx_rl_dest
-- DROP INDEX public.idx_rl_dest;
CREATE INDEX idx_rl_dest
ON public.hfj_res_link USING btree
(target_resource_id)
TABLESPACE pg_default;
-- Index: idx_rl_src
-- DROP INDEX public.idx_rl_src;
CREATE INDEX idx_rl_src
ON public.hfj_res_link USING btree
(src_resource_id)
TABLESPACE pg_default;
-- Index: idx_rl_tpathres
-- DROP INDEX public.idx_rl_tpathres;
CREATE INDEX idx_rl_tpathres
ON public.hfj_res_link USING btree
(src_path COLLATE pg_catalog."default", target_resource_id)
TABLESPACE pg_default;
As I said in my answer to what is pretty much the same question, the problem is the bad row-count estimate in the slow query.
In the fast query, PostgreSQL doesn't make the mistake of thinking that the condition is very selective, so it chooses a different and better plan.
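If the misestimate comes from too-coarse statistics on the date column, one knob worth trying (a sketch; the column and the target value are guesses to adapt) is a larger per-column statistics target followed by a fresh ANALYZE:
-- Collect a finer histogram for the filtered date column, then refresh statistics.
ALTER TABLE public.hfj_spidx_date ALTER COLUMN sp_value_low SET STATISTICS 1000;
ANALYZE public.hfj_spidx_date;
If the estimate does not improve, the planner may simply be unable to model the correlation between the predicates, and rewriting the query is the more reliable fix.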
I'm using .NET Core with PostgreSQL, and all of a sudden (I guess something got updated) it all stopped working.
This is the entity my user entity inherits from.
Here lockoutend is DateTimeOffset?, and in my Postgres table:
CREATE TABLE public.users
(
id integer NOT NULL DEFAULT nextval('users_id_seq'::regclass),
accessfailedcount integer NOT NULL,
concurrencystamp character varying(255) COLLATE pg_catalog."default",
email character varying(128) COLLATE pg_catalog."default",
emailconfirmed boolean NOT NULL,
lockoutenabled boolean NOT NULL,
lockoutend timestamp with time zone,
name character varying(128) COLLATE pg_catalog."default",
normalizedemail character varying(128) COLLATE pg_catalog."default",
normalizedusername character varying(128) COLLATE pg_catalog."default",
passwordhash character varying(512) COLLATE pg_catalog."default",
phonenumber character varying(50) COLLATE pg_catalog."default",
phonenumberconfirmed boolean NOT NULL,
securitystamp character varying(255) COLLATE pg_catalog."default",
twofactorenabled boolean NOT NULL,
username character varying(50) COLLATE pg_catalog."default",
locale integer NOT NULL DEFAULT 1,
CONSTRAINT users_pkey PRIMARY KEY (id)
)
WITH (
OIDS = FALSE
)
TABLESPACE pg_default;
ALTER TABLE public.users
OWNER to notifiedlocal;
CREATE INDEX emailindex
ON public.users USING btree
(normalizedemail COLLATE pg_catalog."default")
TABLESPACE pg_default;
CREATE UNIQUE INDEX usernameindex
ON public.users USING btree
(normalizedusername COLLATE pg_catalog."default")
TABLESPACE pg_default;
This is the error I get when I try to do a simple read from the users table:
System.InvalidOperationException: "An exception occurred while reading a database value. The expected type was 'System.Nullable`1[System.DateTimeOffset]' but the actual value was of type 'System.DateTime'."
So have I accidentally updated Postgres and .NET Identity such that one of the sides changed?
This used to work, and I haven't changed anything on purpose.
Is it possible to change the identity entity to use a plain DateTime instead?
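If you do want the database to hand back a plain timestamp, the column type can be changed (a sketch; note that the conversion uses the session's TimeZone setting and the stored offset information is lost):
ALTER TABLE public.users
    ALTER COLUMN lockoutend TYPE timestamp without time zone;
Whether that is preferable to keeping timestamp with time zone and adjusting the .NET mapping depends on the Npgsql and Identity versions in play, so test it against your provider first.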