Sails.js + Postgres: issue with transactions - postgresql

I am trying to implement a transaction in Sails 0.10.5 using Postgres as database, but the operations are not committed (or rolled back) at the end.
This is a simple transaction scenario I wrote down as a test (using async.js):
testTransaction: function(uri) {
  var testFile = {
    uri: uri,
    details: { 'firstDetail': 'value' }
  };

  async.auto({
    begin_transaction: function(callback) {
      DataFile.query('BEGIN TRANSACTION;', callback);
    },
    new_data_file: ['begin_transaction', function(callback) {
      DataFile.create(testFile).exec(callback);
    }]
  }, function(error, results) {
    if (error) {
      console.log(error.message);
      DataFile.query('ROLLBACK;', function(e, r) {
        return error.message;
      });
      return;
    }
    DataFile.query('COMMIT;', function(e, r) {
      console.log("Saved file: " + results.new_data_file);
      if (e) {
        return "Error during commit";
      }
      return results.new_data_file;
    });
  });
}
Then I run the service without errors, but no new DataFile entity is added to the Postgres table. If I check the Postgres log, I find the following:
2014-12-04 10:35:01 GMT 7984 548038d0.1f30LOG: statement: BEGIN TRANSACTION;
2014-12-04 10:35:01 GMT 7977 548038d0.1f29LOG: execute <unnamed>: INSERT INTO "data_file" ("uri", "details", "created_at", "updated_at") values ($1, $2, $3, $4) RETURNING *
2014-12-04 10:35:01 GMT 7977 548038d0.1f29DETAIL: parameters: $1 = '/just/another/test/uri', $2 = '{"firstDetail":"value"}', $3 = '2014-12-04 10:35:01+00', $4 = '2014-12-04 10:35:01+00'
2014-12-04 10:35:01 GMT 7983 548038d0.1f2fLOG: statement: COMMIT;
2014-12-04 10:35:01 GMT 7983 548038d0.1f2fWARNING: there is no transaction in progress
So I get the warning "there is no transaction in progress".
The third element in each log line is the process ID.
Apparently the COMMIT statement is issued by a different process (7983) than the one that issued the BEGIN (7984). Might this be the issue? How can I force the same process to be used when dealing with transactions in Sails?

Transactions rely on several queries being made with the same connection, but Waterline uses a different connection for every query, which is why you're getting the "there is no transaction in progress" message - the second query is using a different connection, which doesn't have an in-progress transaction.
There's no way to work around this, other than to set the poolSize to 1 (so every query must use the same connection).
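In config terms that would look something like the sketch below; the connection name, credentials, and file layout are placeholders, and poolSize is the option the workaround above refers to:
// config/connections.js (sketch, not verified against your setup)
module.exports.connections = {
  somePostgresServer: {
    adapter: 'sails-postgresql',
    host: 'localhost',
    user: 'dbuser',
    password: 'dbpassword',
    database: 'mydb',
    poolSize: 1 // every Waterline query now shares one connection
  }
};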
Here's the library we use for transactions: https://github.com/Shyp/pg-transactions. You won't have access to the helper methods (.find(), .update(), etc.), but at least it's possible.
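If you want to see the single-connection idea without the helper library, here is a minimal sketch using the pg module directly; the connection string is a placeholder, and the table and columns are taken from the question's log:
var pg = require('pg');

function createDataFileInTransaction(testFile, cb) {
  // One client = one connection, so BEGIN, INSERT and COMMIT all run in the same session
  var client = new pg.Client('postgres://user:pass@localhost/mydb');
  client.connect(function(err) {
    if (err) return cb(err);
    client.query('BEGIN', function(err) {
      if (err) { client.end(); return cb(err); }
      client.query(
        'INSERT INTO "data_file" ("uri", "details", "created_at", "updated_at") VALUES ($1, $2, now(), now()) RETURNING *',
        [testFile.uri, JSON.stringify(testFile.details)],
        function(err, result) {
          if (err) {
            // Roll back on the same connection that issued BEGIN
            return client.query('ROLLBACK', function() { client.end(); cb(err); });
          }
          client.query('COMMIT', function(err) {
            client.end();
            cb(err, result && result.rows[0]);
          });
        }
      );
    });
  });
}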

Related

I want to insert with mikro-orm, but it doesn't find my table :c (TableNotFoundException)

So
Console:
yarn dev
yarn run v1.22.10
$ nodemon dist/index.js
[nodemon] 2.0.7
[nodemon] to restart at any time, enter `rs`
[nodemon] watching path(s): *.*
[nodemon] watching extensions: js,mjs,json
[nodemon] starting `node dist/index.js`
[discovery] ORM entity discovery started, using ReflectMetadataProvider
[discovery] - processing entity Post
[discovery] - entity discovery finished, found 1 entities, took 21 ms
[info] MikroORM successfully connected to database postgres on postgresql://postgres:*****@127.0.0.1:5432
[query] begin
[query] insert into "post" ("created_at", "title", "updated_at") values ('2021-04-05T21:04:23.126Z', 'my first post', '2021-04-05T21:04:23.126Z') returning "_id" [took 12 ms]
[query] rollback
TableNotFoundException: insert into "post" ("created_at", "title", "updated_at") values ('2021-04-05T21:04:23.126Z', 'my first post', '2021-04-05T21:04:23.126Z') returning "_id" - relation "post" does not exist
at PostgreSqlExceptionConverter.convertException (P:\.Projektek\lireddit-server\node_modules\@mikro-orm\postgresql\PostgreSqlExceptionConverter.js:36:24)
at PostgreSqlDriver.convertException (P:\.Projektek\lireddit-server\node_modules\@mikro-orm\core\drivers\DatabaseDriver.js:194:54)
at P:\.Projektek\lireddit-server\node_modules\@mikro-orm\core\drivers\DatabaseDriver.js:198:24
at processTicksAndRejections (internal/process/task_queues.js:93:5)
at async PostgreSqlDriver.nativeInsert (P:\.Projektek\lireddit-server\node_modules\@mikro-orm\knex\AbstractSqlDriver.js:150:21)
at async ChangeSetPersister.persistNewEntity (P:\.Projektek\lireddit-server\node_modules\@mikro-orm\core\unit-of-work\ChangeSetPersister.js:55:21)
at async ChangeSetPersister.executeInserts (P:\.Projektek\lireddit-server\node_modules\@mikro-orm\core\unit-of-work\ChangeSetPersister.js:24:13)
at async UnitOfWork.commitCreateChangeSets (P:\.Projektek\lireddit-server\node_modules\@mikro-orm\core\unit-of-work\UnitOfWork.js:496:9)
at async UnitOfWork.persistToDatabase (P:\.Projektek\lireddit-server\node_modules\@mikro-orm\core\unit-of-work\UnitOfWork.js:458:13)
at async PostgreSqlConnection.transactional (P:\.Projektek\lireddit-server\node_modules\@mikro-orm\knex\AbstractSqlConnection.js:53:25)
at async UnitOfWork.commit (P:\.Projektek\lireddit-server\node_modules\@mikro-orm\core\unit-of-work\UnitOfWork.js:183:17)
at async SqlEntityManager.flush (P:\.Projektek\lireddit-server\node_modules\@mikro-orm\core\EntityManager.js:486:9)
at async SqlEntityManager.persistAndFlush (P:\.Projektek\lireddit-server\node_modules\@mikro-orm\core\EntityManager.js:438:9)
previous error: insert into "post" ("created_at", "title", "updated_at") values ('2021-04-05T21:04:23.126Z', 'my first post', '2021-04-05T21:04:23.126Z') returning "_id" - relation "post" does not exist
at Parser.parseErrorMessage (P:\.Projektek\lireddit-server\node_modules\pg-protocol\dist\parser.js:278:15)
at Parser.handlePacket (P:\.Projektek\lireddit-server\node_modules\pg-protocol\dist\parser.js:126:29)
at Parser.parse (P:\.Projektek\lireddit-server\node_modules\pg-protocol\dist\parser.js:39:38)
at Socket.<anonymous> (P:\.Projektek\lireddit-server\node_modules\pg-protocol\dist\index.js:10:42)
at Socket.emit (events.js:315:20)
at Socket.EventEmitter.emit (domain.js:467:12)
at addChunk (internal/streams/readable.js:309:12)
at readableAddChunk (internal/streams/readable.js:284:9)
at Socket.Readable.push (internal/streams/readable.js:223:10)
at TCP.onStreamRead (internal/stream_base_commons.js:188:23) {
length: 166,
severity: 'ERROR',
code: '42P01',
detail: undefined,
hint: undefined,
position: '13',
internalPosition: undefined,
internalQuery: undefined,
where: undefined,
schema: undefined,
table: undefined,
column: undefined,
dataType: undefined,
constraint: undefined,
file: 'd:\\pginstaller_13.auto\\postgres.windows-x64\\src\\backend\\parser\\parse_relation.c',
line: '1376',
routine: 'parserOpenTable'
}
Index.ts:
import { MikroORM } from "@mikro-orm/core";
import { __prod__ } from "./constants";
import { Post } from "./entities/Post";
import mikroConfig from "./mikro-orm.config";

const main = async () => {
  const orm = await MikroORM.init(mikroConfig);
  await orm.getMigrator().up;

  const post = orm.em.create(Post, { title: "my first post" });
  await orm.em.persistAndFlush(post);
};

main().catch((err) => {
  console.error(err);
});
Post.ts:
import { Entity, PrimaryKey, Property } from "@mikro-orm/core";

@Entity()
export class Post {
  @PrimaryKey()
  _id!: number;

  @Property({ type: "date" })
  createdAt = new Date();

  @Property({ type: "date", onUpdate: () => new Date() })
  updatedAt = new Date();

  @Property({ type: "text" })
  title!: string;
}
mikro-orm.config.ts:
import { __prod__ } from "./constants";
import { Post } from "./entities/Post";
import { MikroORM } from "@mikro-orm/core";
import path from "path";

export default {
  migrations: {
    path: path.join(__dirname, "./migrations"),
    pattern: /^[\w-]+\d+\.[tj]s$/,
  },
  entities: [Post],
  dbName: "postgres",
  debug: !__prod__,
  type: "postgresql",
  password: "hellothere",
} as Parameters<typeof MikroORM.init>[0];
And the migration I created with npx mikro-orm migration:create:
import { Migration } from '@mikro-orm/migrations';

export class Migration20210405205411 extends Migration {
  async up(): Promise<void> {
    this.addSql('create table "post" ("_id" serial primary key, "created_at" timestamptz(0) not null, "updated_at" timestamptz(0) not null, "title" text not null);');
  }
}
After that I'm compiling it to JS, by the way. I guess the problem is somewhere in my code; I can give you more info if needed. Please help, I've been trying to fix this bug for 5 hours :/
Btw, I'm doing Ben Awad's 14-hour fullstack tutorial, if that matters.
The TableNotFoundException happens when you try to add data before initializing the table's schema (or structure).
Passing the --initial flag as in Mosh's answer did not work for me, possibly because I am passing a username and password in ./mikro-orm.config.ts.
I used Mikro-ORM's SchemaGenerator to initialize the table as seen here in the official docs.
Add the following lines before adding data to post in your main function in index.ts:
const generator = orm.getSchemaGenerator();
await generator.updateSchema();
The main function in index.ts should now look like this:
const main = async () => {
  const orm = await MikroORM.init(mikroConfig);
  await orm.getMigrator().up;

  const generator = orm.getSchemaGenerator();
  await generator.updateSchema();

  const post = orm.em.create(Post, { title: "my first post" });
  await orm.em.persistAndFlush(post);
};
updateSchema creates the table or updates it based on ./entities/Post.ts. This could cause issues when the Post file is updated, though I haven't run into any while following Ben's tutorial. Still, I'd recommend creating ./create-schema.ts and running it when needed, as shown in the official docs.
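For completeness, here is a rough create-schema.ts along the lines of the SchemaGenerator page in the official docs; the file name and the choice between updating and dropping/recreating the schema are assumptions, not the tutorial's exact code:
// create-schema.ts (sketch)
import { MikroORM } from "@mikro-orm/core";
import mikroConfig from "./mikro-orm.config";

(async () => {
  const orm = await MikroORM.init(mikroConfig);
  const generator = orm.getSchemaGenerator();

  // either update the existing schema in place...
  await generator.updateSchema();
  // ...or drop and recreate it from the entity metadata:
  // await generator.dropSchema();
  // await generator.createSchema();

  await orm.close(true);
})();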
I have had the same issue. This is what I did:
I deleted the migrations folder as well as the dist folder
I ran npx mikro-orm migration:create --initial
After that, I restarted yarn watch and yarn dev and it worked for me.
Notice the --initial flag. I would recommend checking the official documentation. The migrations table is used to keep track of already executed migrations. When you only run npx mikro-orm migration:create, that table will not be created, and therefore MikroORM is unable to check whether the migration for the Post entity has already been performed (which includes creating the respective table in the database).
Ben does not use the --initial flag in his tutorial; he might have already run it prior to the tutorial.
I had a similar problem myself (Also doing Ben Awad's tutorial).
I used Mikro-ORM's schema generator to initialize the table like in Fares47's Answer, but the problem still persisted.
It wasn't until I set my user to have Superuser permissions that it started working.
I am using PostgreSQL for my database, which I installed using Homebrew. If you have a similar setup, here is what I did:
Start up psql in your terminal using psql postgres. If you want, you can view your users and check their permissions by typing \du in the shell. Then, to change the permissions for a user, use the command ALTER ROLE <username> WITH SUPERUSER;. Make sure you include a semicolon or else it will not run the command.
Check this article out for more info on psql commands.
I had the same problem and solved it by installing ts-node in the project:
npm i -D ts-node
and setting useTsNode to true in package.json.
The problem is that the MikroORM CLI only adds .ts config paths to configPaths if the useTsNode property is true and ts-node is installed (see the package.json sketch below).
Another problem I had was that the regex in the pattern property in mikro-orm.config.ts was wrong because of a typo.
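For reference, the relevant package.json section looks roughly like this; the configPaths values are an assumption based on the usual src/dist layout, so adjust them to your project:
{
  "mikro-orm": {
    "useTsNode": true,
    "configPaths": [
      "./src/mikro-orm.config.ts",
      "./dist/mikro-orm.config.js"
    ]
  }
}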
If none of the suggested steps solved it for you, simply:
quit yarn watch and yarn dev
run this command from the command line
npx mikro-orm migration:up
Now restart watch and dev and you should be good.
from https://mikro-orm.io/docs/migrations/#migration-class
I also experienced this. And like Fares47 said, it's possibly because I passed the username and password in ./mikro-orm.config.ts.
My solution was simply to execute the SQL command generated in the ./src/migrations/Migration<NUMBERS>.ts file in the PostgreSQL terminal.
Here is the command that I execute in the database,
create table "post" ("id" serial primary key, "created_at" timestamptz(0) not null, "updated_at" timestamptz(0) not null, "title" text not null);
Just like what they suggested in the doc,
A safe approach would be generating the SQL on development server and
saving it into SQL Migration files that are executed manually on the
production server.

AWS RDS PostgreSQL - copying from/to csv files on EC2 instance

I've run into a problem that I haven't been able to fix for a few days.
The thing is, I have the following architecture:
Two EC2 instances which are nodes running the Trifacta application (an application for data scientists),
An AWS RDS PostgreSQL instance.
Since the newest version, this Trifacta application uses a new database schema and performs some database migrations at application start. During startup, some tables are copied into *.csv files and then copied back into tables from these *.csv files.
It's all okay when run against a local database, because the superuser role in PostgreSQL allows such actions.
When it comes to performing it on an AWS RDS PostgreSQL instance, it fails with the following errors:
Error running query COPY (select "id" from workspaces) TO '/tmp/workspaces.csv' DELIMITER ',' CSV HEADER; error: must be superuser to COPY to or from a file
at Connection.parseE (/opt/trifacta/migration-framework/node_modules/pg/lib/connection.js:614:13)
at Connection.parseMessage (/opt/trifacta/migration-framework/node_modules/pg/lib/connection.js:413:19)
at Socket.<anonymous> (/opt/trifacta/migration-framework/node_modules/pg/lib/connection.js:129:22)
at Socket.emit (events.js:315:20)
at addChunk (_stream_readable.js:295:12)
at readableAddChunk (_stream_readable.js:271:9)
at Socket.Readable.push (_stream_readable.js:212:10)
at TCP.onStreamRead (internal/stream_base_commons.js:186:23) {
length: 178,
severity: 'ERROR',
code: '42501',
detail: undefined,
hint: "Anyone can COPY to stdout or from stdin. psql's \\copy command also works for anyone.",
position: undefined,
internalPosition: undefined,
internalQuery: undefined,
where: undefined,
schema: undefined,
table: undefined,
column: undefined,
dataType: undefined,
constraint: undefined,
file: 'copy.c',
line: '905',
routine: 'DoCopy'
}
That's just the first one; there are a lot of them. I did some research on this and figured out why it's happening: AWS uses the rds_superuser role instead of superuser, and the privileges of this role aren't sufficient for copying from/to the local filesystem.
From the psql console it can be done using \copy instead of COPY, but in my case that isn't helpful, because Trifacta does it by executing SQL queries from its *.js files, and as far as I know it isn't possible to run a \copy query from anywhere other than the psql CLI.
At @IMSoP's suggestion, I am adding the code of the Trifacta *.js file where these actions are performed:
ConnectUtils.copyQuery = function(query, connection, options = {}) {
  ensure.notNull(connection.base.DriverName, 'connection driver name');
  ensure.notNull(options.tableName, 'table name');

  const table = options.tableName;
  const filePath = ConnectUtils.getOutputFilePath(table, options);

  if (connection.base.DriverName === DATABASE_JS_TYPE[MYSQL]) {
    return `${query} INTO OUTFILE \'${filePath}\' FIELDS TERMINATED BY ',' OPTIONALLY ENCLOSED BY '"' LINES TERMINATED BY '\n'`;
  } else if (connection.base.DriverName === DATABASE_JS_TYPE[POSTGRES]) {
    return `COPY (${query}) TO '${filePath}' DELIMITER ',' CSV HEADER;`;
  } else if (connection.base.DriverName === DATABASE_JS_TYPE[SQLITE]) {
    return query;
  }
  return;
};

ConnectUtils.loadQuery = function(connection, options = {}) {
  ensure.notNull(connection.base.DriverName, 'connection driver name');
  ensure.notNull(connection.base.Database, 'connection database');
  ensure.notNull(options.tableName, 'table name');

  const table = options.tableName;
  const filePath = ConnectUtils.getOutputFilePath(table, options);

  if (connection.base.DriverName === DATABASE_JS_TYPE[MYSQL]) {
    return `LOAD DATA INFILE \'${filePath}\' INTO TABLE ${
      connection.base.Database
    }.${table} FIELDS TERMINATED BY ',' ENCLOSED BY '\"' LINES TERMINATED BY '\n' IGNORE 1 ROWS;`;
  } else if (connection.base.DriverName === DATABASE_JS_TYPE[POSTGRES]) {
    return `COPY ${table} FROM '${filePath}' DELIMITER ',' CSV HEADER;`;
  }
  return;
};
${filePath} is a path on the EC2 instance and ${table} are the tables on the AWS RDS instance. From your answers before I edited my question, I assume there is no way to work around this, as this script is trying to reach ${filePath} as a path on the AWS RDS instance. Right?
Thanks for reading.
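(Not an answer for Trifacta's own scripts, but for anyone hitting the same rds_superuser limitation from Node: the hint in the error, "Anyone can COPY to stdout or from stdin", points at a client-side alternative, i.e. streaming the COPY through the connection instead of writing to a path on the RDS host. Below is a minimal sketch assuming the pg and pg-copy-streams modules; the query and file path are taken from the question and are placeholders.)
const fs = require('fs');
const { Pool } = require('pg');
const { to: copyTo } = require('pg-copy-streams');

async function dumpWorkspaces() {
  const pool = new Pool(); // connection settings come from PG* environment variables here
  const client = await pool.connect();
  try {
    // COPY ... TO STDOUT streams rows to the client, so nothing is written on the RDS host
    const source = client.query(
      copyTo(`COPY (SELECT "id" FROM workspaces) TO STDOUT DELIMITER ',' CSV HEADER`)
    );
    await new Promise((resolve, reject) => {
      source.on('error', reject);
      source
        .pipe(fs.createWriteStream('/tmp/workspaces.csv')) // local path on the EC2 instance
        .on('finish', resolve)
        .on('error', reject);
    });
  } finally {
    client.release();
    await pool.end();
  }
}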

Aurora Serverless (Postgresql) - execute statement timeout 502

I am building a Lambda API connecting to AWS Aurora Serverless Postgres.
I followed the AWS instructions to set up Aurora Serverless with Postgres compatibility (the tutorial is for MySQL but still useful for Postgres): https://aws.amazon.com/getting-started/tutorials/configure-connect-serverless-mysql-database-aurora/
I added port 5432 to the inbound rules of the security group.
I also use data-api-client to query the database (https://github.com/jeremydaly/data-api-client).
I built the API Lambda in the Serverless Framework, set the timeout to 1 min, and added the AmazonRDSDataFullAccess role.
My lambda code (built in Serverless framework) is simple:
async function query_db(_sql) {
  const data = require('data-api-client')({
    secretArn: constants.DBSecretsStoreArn,
    resourceArn: constants.DBAuroraClusterArn,
    database: constants.DatabaseName
  });
  try {
    let result = await data.query(_sql);
    return result.records;
  } catch (error) {
    console.log('Lambda :: query_db :: Error: ' + error);
    return error;
  }
}

async function run() {
  let sql = 'SELECT * FROM products LIMIT 10';
  let result = await query_db(sql);
  console.log('result: ' + JSON.stringify(result));
  return callback(null, {
    headers: {
      'Access-Control-Allow-Origin': '*'
    },
    statusCode: 200,
    body: JSON.stringify({ msg: 'done' })
  });
}
Result:
It ran successfully locally (serverless-offline).
After deploying, it timed out and returned 502 with error "Internal server error".
Any suggestion is appreciated.
The data-api-client doesn't officially support Postgres, yet.
https://github.com/jeremydaly/data-api-client/issues/27
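As a rough fallback sketch: the underlying RDS Data API can also be called directly through the AWS SDK, which does work with Postgres-compatible Aurora Serverless. The ARNs and database name below reuse the question's constants module (its path is assumed); the rest is an illustration, not a verified fix for the 502:
const AWS = require('aws-sdk');
const constants = require('./constants'); // the question's own constants module (assumed path)
const rdsData = new AWS.RDSDataService();

async function query_db(_sql) {
  const result = await rdsData.executeStatement({
    resourceArn: constants.DBAuroraClusterArn,
    secretArn: constants.DBSecretsStoreArn,
    database: constants.DatabaseName,
    sql: _sql,
    includeResultMetadata: true
  }).promise();
  return result.records;
}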

Delete record based on condition from mongo database using jmeter

I am trying to delete records from a Mongo database using a JMeter JSR223 Groovy sampler, but I am getting an error.
Request:
--delete the records which are retrieved between the insert dates
MongoCollection Collection = database.getCollection("TEST_COMM");
Collection.deleteMany{
  $and: [
    {
      "insertDate": {
        $gte: ISODate("2019-10-15T15:16:45.328+0000")
      }
    },
    {
      "insertDate": {
        $lte: ISODate("2019-10-15T17:02:44.017+0000")
      }
    }
  ]
}
Response:
Script1.groovy: 50: expecting '}', found ':' # line 50, column 25.
"insertDate": {
^
Can someone help resolve this issue?

User Assertion: 1: Update query failed -- RUNNER_DEAD

We are using MongoDB (v2.6.4) to process some data and everything works great except, once in a while, we get a weird RUNNER_DEAD exception...
MongoDB.Driver.WriteConcernException: WriteConcern detected an error ' Update query failed -- RUNNER_DEAD'. (Response was { "lastOp" : { "$timestamp" : NumberLong("6073471510486450182") }, "connectionId" : 49, "err" : " Update query failed -- RUNNER_DEAD", "code" : 1, "n" : 0, "ok" : 1.0 }).
This is the method that causes the exception:
private void UpdateEntityClassName(EntityClassName myEntity) {
    var dateTimeNow = DateTime.UtcNow;
    var update = Update<EntityClassName>.Set(p => p.Data, myEntity.Data)
        ...some more Sets...
        .Set(p => p.MetaData.LastModifiedDateTime, dateTimeNow);
    var result = _myCollection.Update(Query.EQ("_id", myEntity.Identifier), update, UpdateFlags.Upsert);
}
Exception in MongoDB log:
2014-10-23T13:51:29.989-0500 [conn45] update Database.Table query: { _id: "SameID" } update: { $set: { Data: BinData(0, SomeData...), ...more fields... MetaData.LastModifiedDateTime: new Date(1414090294910) } } nmoved:1 nMatched:1 nModified:1 keyUpdates:0 numYields:0 locks(micros) w:2344 2ms
2014-10-23T13:51:29.989-0500 [conn49] User Assertion: 1: Update query failed -- RUNNER_DEAD
2014-10-23T13:51:29.989-0500 [conn46] update Database.Table query: { _id: "SameID" } update: { $set: { Data: BinData(0, SomeData...), ...more fields... MetaData.LastModifiedDateTime: new Date(1414090294926) } } nMatched:1 nModified:1 fastmod:1 keyUpdates:0 numYields:0 locks(micros) w:249 0ms
2014-10-23T13:51:29.989-0500 [conn49] update Database.Table query: { _id: "SameID" } update: { $set: { Data: BinData(0, SomeData...), ...more fields... MetaData.LastModifiedDateTime: new Date(1414090294864) } } nModified:0 keyUpdates:0 exception: Update query failed -- RUNNER_DEAD code:1 numYields:1 locks(micros) w:285 8ms
I found very little documentation about this exception, so any help is appreciated.
We are running this in a 3 machine replica set if that changes anything.
We've been running this code for a while and we didn't have that issue before (in our original tests) so we went back to MongoDB 2.4.9 (the one we first tested on) and we don't get this exception anymore. Any ideas as to what might have changed that causes this exception?
(Why couldn't you use a regular "update" using capped arrays to
limit the size of the array of queries rather than using some custom
logic?)
If you have multiple threads that are doing the same thing, your code
doesn't appear thread-safe - let's say that two threads try to update
the same object with _id XYZ but with different changes. Both fetch
the object, both add a new attribute/value to the array and now both
call save - the first one saves, but the second one's save overwrites
the first one.
But that's not likely to be related to your RUNNER_DEAD error -
that's more likely a case where either something is killing the
operation or dropping the collection you're writing to (or the index
being used).
Source: @Asya Kamsky's post.