Avoid conversion of compound types to text - postgresql

My compound types when returned from PLPGSQL function are converted to text:
dev=# select * from app.user_query_test(3);
user_record | udata_record
-----------------+-------------------
(3,875227490,t) | (3,3,"Bob Smith")
(1 row)
dev=#
I don't want this; I want to receive them on the client side as a nested object of data, like this:
{
"user_record": {
"user_id": 3,
"identity_id": 875227490,
"utype": t
},
"udata_record": {
"udata_id": 3,
"user_id": 3,
"full_name": "Bob Smith"
}
}
But, I also don't want JSON, because decoding/encoding to JSON format will require processing time and will affect the performance of my App. So how do I achieve this? I mean how do I put the data on the Client in the exact structure as it is returned by PLPGSQL function without any decoding/encoding process?
My source files are:
DROP TYPE IF EXISTS app.user_reply_t CASCADE;
CREATE TYPE app.user_reply_t AS (
user_id integer,
identity_id integer,
utype boolean
);
DROP TYPE IF EXISTS app.udata_reply_t CASCADE;
CREATE TYPE app.udata_reply_t AS (
udata_id integer,
user_id integer,
full_name varchar(64)
);
DROP TYPE IF EXISTS app.user_info_t CASCADE;
CREATE TYPE app.user_info_t AS (
user_record app.user_reply_t,
udata_record app.udata_reply_t
);
CREATE OR REPLACE FUNCTION app.user_query_test(p_user_id integer)
RETURNS app.user_info_t AS
$$
DECLARE
rec app.user_info_t;
BEGIN
SELECT user_id,identity_id,utype FROM "comp-158572724".users WHERE user_id=p_user_id INTO rec.user_record;
SELECT udata_id,user_id,full_name FROM "comp-158572724".udata WHERE user_id=p_user_id INTO rec.udata_record;
RETURN rec;
END;
$$ LANGUAGE plpgsql;
Tested with Node.js:
src $ node usertest.js
result={ command: 'SELECT',
rowCount: 1,
rows:
[ { user_record: '(3,875227490,t)',
udata_record: '(3,3,"Bob Smith")' } ],
fields:
[ { name: 'user_record', dataTypeID: 19862 },
{ name: 'udata_record', dataTypeID: 19865 } ] }
^C
src $
Source of Client code:
src $ cat usertest.js
const util = require('util');
pg = require('pg').native
var Pool = pg.Pool
var Client = pg.Client
var pool=new Pool({
user: 'dev_user',
password: 'dev',
host: 'localhost',
database: 'dev'
});
pool.query('select * from app.user_query_test(3)',function(err, result) {
console.log('result=' + util.inspect(result));
}
);
function wait() {
console.log('wating...');
setTimeout(wait,3000);
}
setTimeout(wait,3000);
src $

Related

How to insert 'NULL' values for 'int' column tupes in Aurora PostgreSQL db using Python boto3 client

I have a CSV file (MS SQL server table export) and I would like to import it to Aurora Serverless PostgreSQL database table. I did a basic preprocessing of the CSV file to replace all of the NULL values in it (i.e. '') to "NULL". The file looks like that:
CSV file:
ID,DRAW_WORKS
10000002,NULL
10000005,NULL
10000004,FLEXRIG3
10000003,FLEXRIG3
The PostgreSQL table has the following schema:
CREATE TABLE T_RIG_ACTIVITY_STATUS_DATE (
ID varchar(20) NOT NULL,
DRAW_WORKS_RATING int NULL
)
The code I am using to read and insert the CSV file is the following:
import boto3
import csv
rds_client = boto3.client('rds-data')
...
def batch_execute_statement(sql, sql_parameter_sets, transaction_id=None):
parameters = {
'secretArn': db_credentials_secrets_store_arn,
'database': database_name,
'resourceArn': db_cluster_arn,
'sql': sql,
'parameterSets': sql_parameter_sets
}
if transaction_id is not None:
parameters['transactionId'] = transaction_id
response = rds_client.batch_execute_statement(**parameters)
return response
transaction = rds_client.begin_transaction(
secretArn=db_credentials_secrets_store_arn,
resourceArn=db_cluster_arn,
database=database_name)
sql = 'INSERT INTO T_RIG_ACTIVITY_STATUS_DATE VALUES (:ID, :DRAW_WORKS);'
parameter_set = []
with open('test.csv', 'r') as file:
reader = csv.DictReader(file, delimiter=',')
for row in reader:
entry = [
{'name': 'ID','value': {'stringValue': row['RIG_ID']}},
{'name': 'DRAW_WORKS', 'value': {'longValue': row['DRAW_WORKS']}}
]
parameter_set.append(entry)
response = batch_execute_statement(
sql, parameter_set, transaction['transactionId'])
However, there is an error that gets returned suggests that there is a type mismatch:
Invalid type for parameter parameterSets[0][5].value.longValue,
value: NULL, type: <class 'str'>, valid types: <class 'int'>"
Is there a way to configure Aurora to accept NULL values for types such as int?
Reading the boto3 documentation more carefully I found that we can use an isNull value set to True in case a field is NULL. The code snippet below shows how to insert a null value into the database:
...
entry = [
{'name': 'ID','value': {'stringValue': row['ID']}}
]
if row['DRAW_WORKS'] == 'NULL':
entry.append({'name': 'DRAW_WORKS', 'value': {'isNull': True}})
else:
entry.append({'name': 'DRAW_WORKS', 'value': {'longValue': int(row['DRAW_WORKS'])}})
parameter_set.append(entry)

sequelize.findOrCreate did not save even though the table is empty

My app is saving socket-related information into a socketlist table. The idea is: if an entry with the socket_id does not exist, then create an entry in the postgres table; otherwise do nothing. Here is the code:
await SocketList.findOrCreate({ where: {socket_id : socket_id, active: true}, default: {event_id: event_id, user_id: user_id, server_id: server_id}});
sequelizejs 4.42.0 is used. The socketlist table is empty but the above code throws the error below:
Executing (325bc621-414d-4e3f-9f2b-230f66537631): START TRANSACTION;
Executing (325bc621-414d-4e3f-9f2b-230f66537631): SELECT "id", "user_id", "socket_id", "event_id", "server_id", "active" FROM "socketlists" AS "socketlist" WHERE "socketlist"."socket_id" = '2TPk6DpsxPwttaemAAAA' AND "socketlist"."active" = true LIMIT 1;
Executing (325bc621-414d-4e3f-9f2b-230f66537631): CREATE OR REPLACE FUNCTION pg_temp.testfunc(OUT response "socketlists", OUT sequelize_caught_exception text) RETURNS RECORD AS $func_4fe094a05f394fe8a0ec032506b86e21$ BEGIN INSERT INTO "socketlists" ("id","socket_id","active") VALUES (NULL,'2TPk6DpsxPwttaemAAAA',true) RETURNING * INTO response; EXCEPTION WHEN unique_violation THEN GET STACKED DIAGNOSTICS sequelize_caught_exception = PG_EXCEPTION_DETAIL; END $func_4fe094a05f394fe8a0ec032506b86e21$ LANGUAGE plpgsql; SELECT (testfunc.response).*, testfunc.sequelize_caught_exception FROM pg_temp.testfunc(); DROP FUNCTION IF EXISTS pg_temp.testfunc();
Executing (325bc621-414d-4e3f-9f2b-230f66537631): COMMIT;
Socket was not saved for userId: 1 { SequelizeDatabaseError: null value in column "id" violates not-null constraint
at Query.formatError (C:\d\code\js\emps_bbone\node_modules\sequelize\lib\dialects\postgres\query.js:363:16)
at query.catch.err (C:\d\code\js\emps_bbone\node_modules\sequelize\lib\dialects\postgres\query.js:86:18)
at tryCatcher (C:\d\code\js\emps_bbone\node_modules\bluebird\js\release\util.js:16:23)
at Promise._settlePromiseFromHandler (C:\d\code\js\emps_bbone\node_modules\bluebird\js\release\promise.js:512:31)
at Promise._settlePromise (C:\d\code\js\emps_bbone\node_modules\bluebird\js\release\promise.js:569:18)
at Promise._settlePromise0 (C:\d\code\js\emps_bbone\node_modules\bluebird\js\release\promise.js:614:10)
at Promise._settlePromises (C:\d\code\js\emps_bbone\node_modules\bluebird\js\release\promise.js:690:18)
at _drainQueueStep (C:\d\code\js\emps_bbone\node_modules\bluebird\js\release\async.js:138:12)
at _drainQueue (C:\d\code\js\emps_bbone\node_modules\bluebird\js\release\async.js:131:9)
at Async._drainQueues (C:\d\code\js\emps_bbone\node_modules\bluebird\js\release\async.js:147:5)
at Immediate.Async.drainQueues [as _onImmediate] (C:\d\code\js\emps_bbone\node_modules\bluebird\js\release\async.js:17:14)
at runCallback (timers.js:705:18)
at tryOnImmediate (timers.js:676:5)
at processImmediate (timers.js:658:5)
name: 'SequelizeDatabaseError',
parent:
{ error: null value in column "id" violates not-null constraint
at Connection.parseE (C:\d\code\js\emps_bbone\node_modules\pg\lib\connection.js:601:11)
at Connection.parseMessage (C:\d\code\js\emps_bbone\node_modules\pg\lib\connection.js:398:19)
at Socket.<anonymous> (C:\d\code\js\emps_bbone\node_modules\pg\lib\connection.js:120:22)
at Socket.emit (events.js:182:13)
at addChunk (_stream_readable.js:283:12)
at readableAddChunk (_stream_readable.js:264:11)
at Socket.Readable.push (_stream_readable.js:219:10)
at TCP.onStreamRead [as onread] (internal/stream_base_commons.js:94:17)
name: 'error',
length: 465,
severity: 'ERROR',
code: '23502',
detail:
'Failing row contains (null, null, 2TPk6DpsxPwttaemAAAA, null, null, t).',
hint: undefined,
position: undefined,
internalPosition: undefined,
internalQuery: undefined,
where:
'SQL statement "INSERT INTO "socketlists" ("id","socket_id","active") VALUES (NULL,\'2TPk6DpsxPwttaemAAAA\',true) RETURNING *"\nPL/pgSQL function pg_temp_3.testfunc() line 1 at SQL statement',
schema: 'public',
table: 'socketlists',
column: 'id',
dataType: undefined,
constraint: undefined,
file:
'd:\\pginstaller.auto\\postgres.windows-x64\\src\\backend\\executor\\execmain.c',
line: '2041',
routine: 'ExecConstraints',
sql:
'CREATE OR REPLACE FUNCTION pg_temp.testfunc(OUT response "socketlists", OUT sequelize_caught_exception text) RETURNS RECORD AS $func_4fe094a05f394fe8a0ec032506b86e21$ BEGIN INSERT INTO "socketlists" ("id","socket_id","active") VALUES (NULL,\'2TPk6DpsxPwttaemAAAA\',true) RETURNING * INTO response; EXCEPTION WHEN unique_violation THEN GET STACKED DIAGNOSTICS sequelize_caught_exception = PG_EXCEPTION_DETAIL; END $func_4fe094a05f394fe8a0ec032506b86e21$ LANGUAGE plpgsql; SELECT (testfunc.response).*, testfunc.sequelize_caught_exception FROM pg_temp.testfunc(); DROP FUNCTION IF EXISTS pg_temp.testfunc();' },
original:
{ error: null value in column "id" violates not-null constraint
at Connection.parseE (C:\d\code\js\emps_bbone\node_modules\pg\lib\connection.js:601:11)
at Connection.parseMessage (C:\d\code\js\emps_bbone\node_modules\pg\lib\connection.js:398:19)
at Socket.<anonymous> (C:\d\code\js\emps_bbone\node_modules\pg\lib\connection.js:120:22)
at Socket.emit (events.js:182:13)
at addChunk (_stream_readable.js:283:12)
at readableAddChunk (_stream_readable.js:264:11)
at Socket.Readable.push (_stream_readable.js:219:10)
at TCP.onStreamRead [as onread] (internal/stream_base_commons.js:94:17)
name: 'error',
length: 465,
severity: 'ERROR',
code: '23502',
detail:
'Failing row contains (null, null, 2TPk6DpsxPwttaemAAAA, null, null, t).',
hint: undefined,
position: undefined,
internalPosition: undefined,
internalQuery: undefined,
where:
'SQL statement "INSERT INTO "socketlists" ("id","socket_id","active") VALUES (NULL,\'2TPk6DpsxPwttaemAAAA\',true) RETURNING *"\nPL/pgSQL function pg_temp_3.testfunc() line 1 at SQL statement',
schema: 'public',
table: 'socketlists',
column: 'id',
dataType: undefined,
constraint: undefined,
file:
'd:\\pginstaller.auto\\postgres.windows-x64\\src\\backend\\executor\\execmain.c',
line: '2041',
routine: 'ExecConstraints',
sql:
'CREATE OR REPLACE FUNCTION pg_temp.testfunc(OUT response "socketlists", OUT sequelize_caught_exception text) RETURNS RECORD AS $func_4fe094a05f394fe8a0ec032506b86e21$ BEGIN INSERT INTO "socketlists" ("id","socket_id","active") VALUES (NULL,\'2TPk6DpsxPwttaemAAAA\',true) RETURNING * INTO response; EXCEPTION WHEN unique_violation THEN GET STACKED DIAGNOSTICS sequelize_caught_exception = PG_EXCEPTION_DETAIL; END $func_4fe094a05f394fe8a0ec032506b86e21$ LANGUAGE plpgsql; SELECT (testfunc.response).*, testfunc.sequelize_caught_exception FROM pg_temp.testfunc(); DROP FUNCTION IF EXISTS pg_temp.testfunc();' },
sql:
'CREATE OR REPLACE FUNCTION pg_temp.testfunc(OUT response "socketlists", OUT sequelize_caught_exception text) RETURNS RECORD AS $func_4fe094a05f394fe8a0ec032506b86e21$ BEGIN INSERT INTO "socketlists" ("id","socket_id","active") VALUES (NULL,\'2TPk6DpsxPwttaemAAAA\',true) RETURNING * INTO response; EXCEPTION WHEN unique_violation THEN GET STACKED DIAGNOSTICS sequelize_caught_exception = PG_EXCEPTION_DETAIL; END $func_4fe094a05f394fe8a0ec032506b86e21$ LANGUAGE plpgsql; SELECT (testfunc.response).*, testfunc.sequelize_caught_exception FROM pg_temp.testfunc(); DROP FUNCTION IF EXISTS pg_temp.testfunc();' }
Here is the model definition:
const SocketList = db.define('socketlist', {
id: {type: Sql.INTEGER,
primaryKey:true,
min: 1
},
user_id: { type: Sql.INTEGER
},
socket_id: {type: Sql.STRING,
unique: true,
min: 1
},
event_id: {type: Sql.INTEGER,
min: 1
},
server_id: {type: Sql.STRING
},
active: {type: Sql.BOOLEAN,
defaultValue: true,
},
}....
You have to create the data including the id.
What about trying this?
id: {
type: Sql.INTEGER,
primaryKey:true,
autoIncrement: true
}

Get Error: can't acquire lock when trying to migrate with golang-migrate to postgres database

I am trying to migrate to a PostgreSQL database with Go using the library
"github.com/golang-migrate/migrate/v4"
I got some error like code below.
base_test.go:79: Migrating postgres://db:db@localhost:65432/testdb?sslmode=disable from file:///home/user/migrations/test
base_test.go:88: can't acquire lock
Here is the PrepareDB function for migrations; this function is called in the test to run the migrations:
func PrepareDB() {
if dbConnPool != nil {
return
}
log.SetFlags(log.Lshortfile)
configFile := os.Getenv("CONFIG_FILE")
if configFile == "" {
configFile = "./configs/config.json"
}
viper.SetConfigFile(configFile)
if err := viper.ReadInConfig(); err != nil {
panic(err)
}
conf := viper.Sub("dev")
port := uint16(conf.GetInt("dbPort"))
host := conf.GetString("dbHost")
user := conf.GetString("dbUser")
pass := conf.GetString("dbPass")
dbName = conf.GetString("dbName")
redisHost := conf.GetString("redisHost")
redisPort := conf.GetString("redisPort")
hostOvr := os.Getenv("_DBHOST")
if hostOvr != "" {
host = hostOvr
}
portOvr := os.Getenv("_DBPORT")
if portOvr != "" {
portX, err := strconv.Atoi(portOvr)
if err != nil {
log.Fatal(err)
}
port = uint16(portX)
}
migrationPath := "file://" + os.Getenv("MIGRATION_PATH")
if migrationPath == "file://" {
log.Println("MIGRATION_PATH environment is not set, using default value")
cwd, _ := filepath.Abs(filepath.Dir(os.Args[0]))
migrationPath = "file://" + cwd + "/migrations/test"
}
url := fmt.Sprintf("postgres://%s:%s#%s:%d/%s?sslmode=disable", user, pass, host, port, dbName)
log.Println(fmt.Sprintf("Migrating %s from %s", url, migrationPath))
m, err := migrate.New(
migrationPath,
url)
if err != nil {
log.Fatal(err)
}
err = m.Drop()
if err != nil && err.Error() != "no change" {
log.Fatal(err)
}
err = m.Up()
if err != nil && err.Error() != "no change" {
log.Fatal(err)
}
pgxConf := &pgx.ConnConfig{
Port: port,
Host: host,
User: user,
Password: pass,
Database: dbName,
}
pgxPoolConf := pgx.ConnPoolConfig{
ConnConfig: *pgxConf,
MaxConnections: 5,
}
dbConnPool, err = pgx.NewConnPool(pgxPoolConf)
if err != nil {
panic(err)
}
}
And here is my database schema; I created it from the postgres CLI, then exported it with pg_dump to try migrating it into the database again:
--
-- PostgreSQL database dump
--
-- Dumped from database version 11.2
-- Dumped by pg_dump version 11.2
SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
SELECT pg_catalog.set_config('search_path', '', false);
SET check_function_bodies = false;
SET client_min_messages = warning;
SET row_security = off;
--
-- Name: pgcrypto; Type: EXTENSION; Schema: -; Owner:
--
CREATE EXTENSION IF NOT EXISTS pgcrypto WITH SCHEMA public;
--
-- Name: EXTENSION pgcrypto; Type: COMMENT; Schema: -; Owner:
--
COMMENT ON EXTENSION pgcrypto IS 'cryptographic functions';
SET default_tablespace = '';
SET default_with_oids = false;
--
-- Name: variant; Type: TABLE; Schema: public; Owner: clouds
--
CREATE TABLE public.variant (
id integer NOT NULL,
project_id character varying(255) NOT NULL,
variant_name character varying(255) NOT NULL,
variant_release_version character varying(255) NOT NULL,
repo_url character varying(255) NOT NULL,
state character varying(255) NOT NULL,
created_at timestamp without time zone DEFAULT CURRENT_TIMESTAMP
);
ALTER TABLE public.variant OWNER TO clouds;
--
-- Name: variant_id_seq; Type: SEQUENCE; Schema: public; Owner: clouds
--
CREATE SEQUENCE public.variant_id_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER TABLE public.variant_id_seq OWNER TO clouds;
--
-- Name: variant_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: clouds
--
ALTER SEQUENCE public.variant_id_seq OWNED BY public.variant.id;
--
-- Name: variant id; Type: DEFAULT; Schema: public; Owner: clouds
--
ALTER TABLE ONLY public.variant ALTER COLUMN id SET DEFAULT nextval('public.variant_id_seq'::regclass);
--
-- Data for Name: variant; Type: TABLE DATA; Schema: public; Owner: clouds
--
COPY public.variant (id, project_id, variant_name, variant_release_version, repo_url, state, created_at) FROM stdin;
1 123 auth stagging https://repo.dev.citra.digital test 2019-04-02 17:46:30.886344
2 345 release release https://repo.dev.citra.digital test 2019-04-02 17:47:03.258495
\.
--
-- Name: variant_id_seq; Type: SEQUENCE SET; Schema: public; Owner: clouds
--
SELECT pg_catalog.setval('public.variant_id_seq', 2, true);
--
-- Name: variant variant_pkey; Type: CONSTRAINT; Schema: public; Owner: clouds
--
ALTER TABLE ONLY public.variant
ADD CONSTRAINT variant_pkey PRIMARY KEY (id);
--
-- PostgreSQL database dump complete
--

Duplicate key value violates unique constraint with Postgres, Knex, and Promises

I'm having a very weird issue. When I insert five roles into my "repository" table with unique ids, the following error below comes up multiple times (same id being mentioned!). I'm not using autoincrement for PK.
Error saving repo { error: duplicate key value violates unique constraint "repository_pkey"
at Connection.parseE (/Users/macintosh/node-projects/risingstack/node_modules/pg/lib/connection.js:554:11)
at Connection.parseMessage (/Users/macintosh/node-projects/risingstack/node_modules/pg/lib/connection.js:379:19)
at Socket.<anonymous> (/Users/macintosh/node-projects/risingstack/node_modules/pg/lib/connection.js:119:22)
at emitOne (events.js:116:13)
at Socket.emit (events.js:211:7)
at addChunk (_stream_readable.js:263:12)
at readableAddChunk (_stream_readable.js:250:11)
at Socket.Readable.push (_stream_readable.js:208:10)
at TCP.onread (net.js:601:20)
name: 'error',
length: 202,
severity: 'ERROR',
code: '23505',
detail: 'Key (id)=(80073079) already exists.',
hint: undefined,
position: undefined,
internalPosition: undefined,
internalQuery: undefined,
where: undefined,
schema: 'public',
table: 'repository',
column: undefined,
dataType: undefined,
constraint: 'repository_pkey',
file: 'nbtinsert.c',
line: '434',
routine: '_bt_check_unique' }
Postgres code generated by knex:
insert into "repository" ("description", "full_name", "html_url", "id", "language", "owner_id", "stargazers_count") values ('Node.js JavaScript runtime :sparkles::turtle::rocket::sparkles:', 'nodejs/node', 'https://github.com/nodejs/node', 27193779, 'JavaScript', 9950313, 56009)
insert into "repository" ("description", "full_name", "html_url", "id", "language", "owner_id", "stargazers_count") values (':closed_book:《Node.js 包教不包会》 by alsotang', 'alsotang/node-lessons', 'https://github.com/alsotang/node-lessons', 24812854, 'JavaScript', 1147375, 13989)
insert into "repository" ("description", "full_name", "html_url", "id", "language", "owner_id", "stargazers_count") values ('Node.js based forum software built for the modern web', 'NodeBB/NodeBB', 'https://github.com/NodeBB/NodeBB', 9603889, 'JavaScript', 4449608, 9399)
insert into "repository" ("description", "full_name", "html_url", "id", "language", "owner_id", "stargazers_count") values (':baby_chick:Nodeclub 是使用 Node.js 和 MongoDB 开发的社区系统', 'cnodejs/nodeclub', 'https://github.com/cnodejs/nodeclub', 3447593, 'JavaScript', 1455983, 7907)
insert into "repository" ("description", "full_name", "html_url", "id", "language", "owner_id", "stargazers_count") values ('Mysterium Node - VPN server and client for Mysterium Network', 'mysteriumnetwork/node', 'https://github.com/mysteriumnetwork/node', 80073079, 'Go', 23056638, 478)
Knex schema for repository:
return knex.schema.createTable('repository', (table) => {
table.integer('id').primary();
table.integer('owner_id');
table.foreign('owner_id').references('user.id').onDelete('CASCADE').onUpdate('CASCADE');
table.string('full_name');
table.string('description');
table.string('html_url');
table.string('language');
table.integer('stargazers_count');
})
Code run to insert Repository:
const fn = composeMany(withOwner, removeIrrelevantProperties, defaultLanguageAndDescToString, saveAndPublish);
const tRepos = r.map(fn);
return Promise.all(tRepos);
const saveAndPublish = (r) => {
return User
.insert(r.owner)
.catch(e => console.log('Error saving User', e))
.then(() => {
const { owner, ...repo } = r;
const q = Repository.insert(repo);
console.log(q.toQuery());
return q;
})
.catch(e => {
console.log('Error saving repo', e)}
);
Sounds like your database already had a row inserted with primary key id == 80073079.
To be sure about it, try querying the DB rows with that key just before inserting. I just wonder how those ids are generated, since you are clearly not using an id sequence for them.
It is possible that the input data the IDs were fetched from is corrupted and has duplicate ids.

Sequelize and 2 join tables, trouble

We've got a problem. We cannot execute a SQL SELECT on below structure using sequelize.
This is our structure, see below image.
We are trying to execute this sequelize query:
query = {
'attributes': [models.sequelize.fn('count', models.sequelize.col('sent_deals.user_id'))],
order: [['alert.keywords', ' DESC']],
attributes: ['alert.keywords'],
include: [
{
model: models.alert,
attributes: ['keywords']
}, {
where: {
'products.sent_deal.created_at': {
between: [moment(start).startOf('day').format('YYYY-MM-DD HH:mm'), moment(end).endOf('day').format('YYYY-MM-DD HH:mm')]
}
},
model: models.product,
attributes: ['id']
}
]
};
models.userAlerts.findAll(query)
But then we receive the following error: "error: missing FROM-clause entry for table "alert"", because we're trying to select the attribute alert.keywords also outside the include[], but still it DID work in the previous version of Sequelize JS! And now we cannot order by alert anymore.... :( It always returns ONE alert because of the "belongs to" connection, so theoretically it has to work.
My guess is because the query doesn't execute a direct join, but does a SELECT FROM on a subquery, see below:
SELECT "userAlerts".*
,"alert"."id" AS "alert.id"
,"alert"."keywords" AS "alert.keywords"
,"products"."id" AS "products.id"
,"products.sent_deal"."user_alert_id" AS "products.sent_deal.user_alert_id"
,"products.sent_deal"."deal_id" AS "products.sent_deal.deal_id"
,"products.sent_deal"."created_at" AS "products.sent_deal.created_at"
FROM (
SELECT "userAlerts"."id"
,"userAlerts"."user_id"
,"userAlerts"."alert_id"
,"userAlerts"."activationToken"
,"userAlerts"."activatedAt"
,"userAlerts"."createdAt"
,"userAlerts"."updatedAt"
FROM "user_alerts" AS "userAlerts"
WHERE (
SELECT "products.sent_deal"."user_alert_id"
FROM "sent_deals" AS "products.sent_deal"
INNER JOIN "deals" AS "products" ON "products"."id" = "products.sent_deal"."deal_id"
WHERE "userAlerts"."id" = "products.sent_deal"."user_alert_id"
AND (
"products.sent_deal"."created_at" BETWEEN '2014-11-17 00:00'
AND '2014-12-02 23:59'
) LIMIT 1
) IS NOT NULL
ORDER BY "alert"."keywords" DESC LIMIT 1
) AS "userAlerts"
LEFT JOIN "alerts" AS "alert" ON "alert"."id" = "userAlerts"."alert_id"
INNER JOIN (
"sent_deals" AS "products.sent_deal" INNER JOIN "deals" AS "products" ON "products"."id" = "products.sent_deal"."deal_id"
) ON "userAlerts"."id" = "products.sent_deal"."user_alert_id"
AND (
"products.sent_deal"."created_at" BETWEEN '2014-11-17 00:00'
AND '2014-12-02 23:59'
)
ORDER BY "alert"."keywords" DESC
Having the keywords attribute on the include, should be enough, there is no need to add it to the outer query
attributes: [models.sequelize.fn('count', models.sequelize.col('sent_deals.user_id'))],
order: [['alert.keywords', ' DESC']],
attributes: ['alert.keywords'], <-- This line is superfluous
include: [
{
model: models.alert,
attributes: ['keywords']
}, {
where: {
'products.sent_deal.created_at': {
between: [moment(start).startOf('day').format('YYYY-MM-DD HH:mm'), moment(end).endOf('day').format('YYYY-MM-DD HH:mm')]
}
},
model: models.product,
attributes: ['id']
}
]
Shouldn't you be including SentDeal in order to query products.sent_deal? This issue is probably better suited for an issue on the sequelize GH