How to return the serial primary key from a db insert to use for another db insert - postgresql

I'm using the Express generated template with PostgreSQL, and I have two REST route methods for creating consignments and tracking.
However, I want tracking to be updated on each consignment insert, and I need the serial primary key to do it. So from the createCon function I need the insert to return the id, which I can then use for the cid field in createConTracking.
routes/index.js file
var db = require('../queries');
router.post('/api/cons', db.createCon);
router.post('/api/cons/:id/tracking', db.createConTracking);
queries.js
var promise = require('bluebird');

var options = {
    promiseLib: promise
};

var pgp = require('pg-promise')(options);
var db = pgp(connectionString);

function createCon(req, res, next) {
    var conid = parseInt(req.body.conid);
    db.none('insert into consignments(conid, payterm,........)' +
            'values($1, $2, ......)',
        [conid, req.body.payterm,........])
        .then(function () {
            res.status(200)
                .json({
                    status: 'success',
                    message: 'Inserted one con'
                });
        })
        .catch(function (err) {
            return next(err);
        });
}
function createConTracking(req, res, next) {
    var cid = parseInt(req.params.id);
    var userid = req.user.email;
    var conid = parseInt(req.body.conid);
    db.none('insert into tracking(status, remarks, depot, userid, date, cid, conid)' +
            'values($1, $2, $3, $4, $5, $6, $7)',
        [req.body.status, req.body.remarks, req.body.depot, userid, req.body.date, cid, conid])
        .then(function (data) {
            res.status(200)
                .json({
                    data: data,
                    status: 'success',
                    message: 'Updated Tracking'
                });
        })
        .catch(function (err) {
            return next(err);
        });
}
DB
CREATE TABLE consignments (
    ID SERIAL PRIMARY KEY,
    conId INTEGER,
    payTerm VARCHAR,
    ........
);

CREATE TABLE tracking (
    ID SERIAL PRIMARY KEY,
    status VARCHAR,
    remarks VARCHAR,
    cid INTEGER
);

I'm the author of pg-promise.
You should execute multiple queries within a task (method task) when not changing data, or a transaction (method tx) when changing data. Since your example makes two changes to the database, it should be a transaction.
You would append RETURNING id to your first insert query and then use method one to indicate that you expect one row back.
function myRequestHandler(req, res, next) {
    db.tx(async t => {
        const id = await t.one('INSERT INTO consignments(...) VALUES(...) RETURNING id',
            [param1, etc], c => +c.id);
        return t.none('INSERT INTO tracking(...) VALUES(...)', [id, etc]);
    })
        .then(() => {
            res.status(200)
                .json({
                    status: 'success',
                    message: 'Inserted a consignment + tracking'
                });
        })
        .catch(error => {
            return next(error);
        });
}
In the example above we execute the two queries inside a transaction. For the first query we use the third parameter of method one for an easy return-value transformation, plus a numeric conversion (useful in case the key is 64-bit, like BIGSERIAL, which arrives from the driver as a string).
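For contrast, a read-only sequence of queries would go inside a task instead; a minimal sketch (not from the original answer; the query shapes are illustrative):
db.task(async t => {
    const con = await t.one('SELECT * FROM consignments WHERE id = $1', [conId]);
    const events = await t.any('SELECT * FROM tracking WHERE cid = $1', [conId]);
    return { con, events };
})
    .then(data => {
        // use data.con and data.events; no data was changed, so no transaction was needed
    })
    .catch(error => {
        // handle the error
    });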

Simply add a RETURNING clause to your INSERT statement. This clause allows you to return values from the record as it was actually inserted.
insert into consignments(conid, payterm,........)
values($1, $2, ......)
returning id;
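On the pg-promise side, a minimal sketch of consuming the returned id with method one (the shortened column list is illustrative):
db.one('insert into consignments(conid, payterm) values($1, $2) returning id',
    [conid, req.body.payterm])
    .then(row => {
        // row.id is the generated serial primary key,
        // ready to be used as cid in the tracking insert
    })
    .catch(err => next(err));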

Related

MongoDB Mutation Upsert - Can't Get Id of New Record on First Submit

I'm executing an upsert mutation on MongoDB to create a new document or update an existing document. If the document exists, the mutation returns the id as expected. If a new document is created, the mutation returns null (in both Apollo Sandbox and via console.log) on the initial return; on subsequent returns it will return the id. I need the id of the newly created document immediately (on the first return) so I can use it in subsequent actions.
Starting from the beginning here's the setup:
TYPEDEF
updateHourByEmployeeIdByJobDate(
    jobDate: String
    startTime: String
    endTime: String
    hoursWorked: String
    employee: String
): Hour
RESOLVER
updateHourByEmployeeIdByJobDate: async (
    parent,
    { jobDate, startTime, endTime, hoursWorked, employee },
    context
) => {
    // if (context.user) {
    console.log("resolver hours update = ");
    return Hour.findOneAndUpdate(
        { employee, jobDate },
        {
            jobDate,
            startTime,
            endTime,
            hoursWorked,
            employee,
        },
        {
            upsert: true,
        }
    );
    // }
    // throw new AuthenticationError("You need to be logged in!");
},
MUTATION
// UPDATE HOUR RECORD - CREATES DOCUMENT IF IT DOESN'T EXIST OR UPDATES IF IT DOES EXIST VIA THE UPSERT OPTION ON THE RESOLVER
export const UPDATE_HOURS_BYEMPLOYEEID_BYJOBDATE = gql`
    mutation UpdateHourByEmployeeIdByJobDate(
        $jobDate: String
        $startTime: String
        $endTime: String
        $hoursWorked: String
        $employee: String
    ) {
        updateHourByEmployeeIdByJobDate(
            jobDate: $jobDate
            startTime: $startTime
            endTime: $endTime
            hoursWorked: $hoursWorked
            employee: $employee
        ) {
            _id
        }
    }
`;
FRONT-END EXECUTION
const [mostRecentHourUpdateId, setMostRecentHoursUpdateId] = useState();

const [updateHours] = useMutation(UPDATE_HOURS_BYEMPLOYEEID_BYJOBDATE, {
    onCompleted: (data) => {
        console.log('mutation result #1 = ', data);
        setMostRecentHoursUpdateId(data?.updateHourByEmployeeIdByJobDate?._id);
        console.log('mutation result #2 = ', mostRecentHourUpdateId);
    },
});

// section update database - this mutation is an upsert...it either updates or creates a record
const handleUpdateDatabase = async (data) => {
    console.log(data);
    try {
        // eslint-disable-next-line
        const { data2 } = await updateHours({
            variables: {
                jobDate: moment(data.date).format("MMMM DD YYYY"), // "January 20 2023"
                startTime: `${data.startTime}:00 (MST)`, // "12:00:00 (MST)"
                endTime: `${data.endTime}:00 (MST)`, // "13:00:00 (MST)"
                hoursWorked: data.hours.toString(), // "3.0"
                employee: userId, // "6398fb54494aa98f85992da3"
            },
        });
        console.log('handle update database function = data = ', data2); // fix
    } catch (err) {
        console.error(err);
    }
    singleHoursRefetch();
};
I've tried using onCompleted as part of the mutation request as well as useEffect, not to mention running the mutation in Apollo Sandbox; same result in all scenarios. The alternative is to re-run the useQuery to get the most recently created record, but that seems fragile (if at some point the records are sorted differently), and it seems like I should be able to access the newly created record immediately as part of the mutation results.
You'll want to use { returnNewDocument: true }
Like this:
const getData = async () => {
    const returnedRecord = await Hour.findOneAndUpdate(
        { employee, jobDate },
        { jobDate, startTime, endTime, hoursWorked, employee },
        { upsert: true, returnNewDocument: true }
    );
    // do something with returnedRecord
};
getData();
For more information:
https://www.mongodb.com/docs/manual/reference/method/db.collection.findOneAndUpdate/
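Note that returnNewDocument: true is the option name for the MongoDB shell's db.collection.findOneAndUpdate documented at that link. If Hour is a Mongoose model (as the resolver code suggests), the equivalent option is new: true; a minimal sketch of the resolver under that assumption:
updateHourByEmployeeIdByJobDate: async (
    parent,
    { jobDate, startTime, endTime, hoursWorked, employee },
    context
) => {
    // with new: true, Mongoose returns the document as it is AFTER the update,
    // so an upsert-created document comes back with its _id on the first call
    // instead of null
    return Hour.findOneAndUpdate(
        { employee, jobDate },
        { jobDate, startTime, endTime, hoursWorked, employee },
        { upsert: true, new: true }
    );
},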

Saving aws S3 image location links to postgres db table

I have a Postgres db via Heroku, with a PUT route to save uploaded S3 bucket image links to the database; however, the links are not saving to the database table. There are no errors that I am receiving, but the links are simply not saving to the table with the update query I am calling. Can anyone say what could be wrong here?
// Here is the table schema
CREATE TABLE Users_Channel(
    id SERIAL PRIMARY KEY UNIQUE,
    userID INT UNIQUE,
    FOREIGN KEY(userID) REFERENCES Users(id),
    channelName varchar(255) UNIQUE,
    FOREIGN KEY(channelName) REFERENCES Users(Username),
    Profile_Avatar TEXT NULL,
    Slider_Pic1 TEXT NULL,
    Slider_Pic2 TEXT NULL,
    Slider_Pic3 TEXT NULL,
    Subscriber_Count INT NULL,
    UNIQUE(channelName, userID)
);
// Database update query to update a channel by channel name
async function updateChannel({
    channelname,
    profile_avatar,
    slider_pic1,
    slider_pic2,
    slider_pic3
}) {
    try {
        const { rows } = await client.query(
            `
            UPDATE users_channel
            SET profile_avatar=$2, slider_pic1=$3, slider_pic2=$4, slider_pic3=$5
            WHERE channelname=$1
            RETURNING *;
            `,
            [channelname, profile_avatar, slider_pic1, slider_pic2, slider_pic3]
        );
        return rows;
    } catch (error) {
        throw error;
    }
}
// API PUT route
usersRouter.put(
    "/myprofile/update/:channelname",
    profileUpdate,
    requireUser,
    async (req, res, next) => {
        const { channelname } = req.params;
        const pic1 = req.files["avatar"][0];
        const pic2 = req.files["slide1"][0];
        const pic3 = req.files["slide2"][0];
        const pic4 = req.files["slide3"][0];
        try {
            const result = await uploadFile(pic1);
            const result1 = await uploadFile(pic2);
            const result2 = await uploadFile(pic3);
            const result3 = await uploadFile(pic4);
            console.log(result, result1, result2, result3);
            const updateData = {
                profile_avatar: result.Location,
                slider_pic1: result1.Location,
                slider_pic2: result2.Location,
                slider_pic3: result3.Location,
            };
            console.log(updateData);
            const updatedchannel = await updateChannel(channelname, updateData);
            res.send({ channel: updatedchannel });
        } catch (error) {
            console.error("Could not update user profile", error);
            next(error);
        }
    }
);
Solved it: I had to rework my update query function as shown below. I took the curly brackets out of the parameter list (the function was expecting a single destructured object but was being called with two arguments) and destructured the photos object inside the function instead.
async function updateChannel(channelname, photos) {
    const { profile_avatar, slider_pic1, slider_pic2, slider_pic3 } = photos;
    try {
        const { rows } = await client.query(
            `
            UPDATE users_channel
            SET profile_avatar=$2, slider_pic1=$3, slider_pic2=$4, slider_pic3=$5
            WHERE channelname=$1
            RETURNING *;
            `,
            [channelname, profile_avatar, slider_pic1, slider_pic2, slider_pic3]
        );
        return rows;
    } catch (error) {
        throw error;
    }
}
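For reference, a sketch of why the original version failed silently: the function destructured a single object parameter, but the route called it with two arguments, so every destructured field was read off the channelname string and came back undefined:
// original:  async function updateChannel({ channelname, profile_avatar, ... })
// called as: updateChannel(channelname, updateData)
// => channelname, profile_avatar, etc. were all destructured from a string,
//    so the UPDATE ran with undefined values and matched no row
const updatedchannel = await updateChannel(channelname, updateData); // matches the reworked two-argument signature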

How to alter postgres sql constraint with Knex?

Table is created with:
exports.up = function (knex) {
    return knex.schema.createTable('organisations_users', (table) => {
        table.uuid('organisation_id').notNullable().references('id').inTable('organisations').onDelete('SET NULL').index();
    });
};

exports.down = function (knex) {
    return knex.schema.dropTableIfExists('organisations_users');
};
In another migration file I would like to alter the onDelete command to "CASCADE".
I tried (among other things):
exports.up = function (knex) {
    return knex.schema.alterTable('organisations_users', (table) => {
        table.uuid('organisation_id').alter().notNullable().references('id').inTable('organisations').onDelete('CASCADE').index();
    });
};
But then Knex states that the constraint already exists (which is true; that's why I want to alter it).
What would be the command for this? I'm also fine with a knex.raw string.
Thank you
Solved it by:
exports.up = function (knex) {
    return knex.schema.alterTable('organisations_users', async (table) => {
        // First drop the existing foreign key constraint
        await knex.raw('ALTER TABLE organisations_users DROP CONSTRAINT IF EXISTS organisations_users_organisation_id_foreign');
        // Then re-add the reference with the correct delete behaviour (was SET NULL, now CASCADE)
        table.uuid('organisation_id').alter().notNullable().references('id').inTable('organisations').onDelete('CASCADE');
    });
};

exports.down = function (knex) {
    return knex.schema.dropTableIfExists('organisations_users');
};
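Since a knex.raw string is also acceptable, the same swap can be expressed as a single raw ALTER TABLE (a sketch, reusing the generated constraint name from the solution above):
exports.up = function (knex) {
    return knex.raw(`
        ALTER TABLE organisations_users
            DROP CONSTRAINT IF EXISTS organisations_users_organisation_id_foreign,
            ADD CONSTRAINT organisations_users_organisation_id_foreign
                FOREIGN KEY (organisation_id) REFERENCES organisations (id)
                ON DELETE CASCADE;
    `);
};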

Knex not returning join SQL query

function list() {
    return knex('restaurants')
        .join('owners', 'restaurants.owner_id', 'owners.owner_id')
        .select('restaurants.restaurant_name', 'owners.owner_name', 'owners.email')
        .orderBy('owners.owner_name');
}

function listAverageRatingByOwner() {
    return knex('restaurants')
        .join('owners', 'restaurants.owner_id', 'owners.owner_id')
        .select('owners.owner_name')
        .avg('restaurants.rating');
}
In controller:
async function listAverageRatingByOwner(req, res, next) {
    const averageRatingByOwner = service.listAverageRatingByOwner();
    console.log(averageRatingByOwner);
    res.json();
}
I'm not sure why it doesn't return the join query result and instead returns an unresolved Promise.
Restaurants table:
restaurant_id (primary key)
restaurant_name (required string)
cuisine (required string)
address (required string)
rating (optional numeric)
owner_id (required foreign key)
Owners table:
owner_id (primary key)
owner_name (required string)
email (required string)
address (required string)
Knex query builder returns a Promise, therefore you should await it in order to get your result set.
// controller
async function listAverageRatingByOwner(req, res, next) {
    const averageRatingByOwner = await service.listAverageRatingByOwner();
    // ---------------------------^
    console.log(averageRatingByOwner);
    res.json();
}
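Separately (beyond the accepted fix, and assuming PostgreSQL), the aggregate query will also need a GROUP BY once the await is in place, since it selects owner_name alongside avg(); a sketch:
function listAverageRatingByOwner() {
    return knex('restaurants')
        .join('owners', 'restaurants.owner_id', 'owners.owner_id')
        .select('owners.owner_name')
        .avg('restaurants.rating as average_rating')
        .groupBy('owners.owner_name');
}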

How to search on a single OR multiple columns with TSVECTOR and TSQUERY

I used some boilerplate code (below) that creates a normalized tsvector _search column from all the columns I specify (in searchObjects) that I'd like full-text search on.
For the most part, this is fine. I'm using this in conjunction with Sequelize, so my query looks like:
const articles = await Article.findAndCountAll({
    where: {
        [Sequelize.Op.and]: [
            Sequelize.fn(
                'article._search @@ plainto_tsquery',
                'english',
                Sequelize.literal(':query')
            ),
            { status: STATUS_TYPE_ACTIVE }
        ]
    },
    replacements: { query: q }
});
Search index setup:
const vectorName = '_search';

const searchObjects = {
    articles: ['headline', 'cleaned_body', 'summary'],
    brands: ['name', 'cleaned_about'],
    products: ['name', 'cleaned_description']
};

module.exports = {
    up: async queryInterface =>
        await queryInterface.sequelize.transaction(t =>
            Promise.all(
                Object.keys(searchObjects).map(table =>
                    queryInterface.sequelize
                        .query(
                            `ALTER TABLE ${table} ADD COLUMN ${vectorName} TSVECTOR;`,
                            { transaction: t }
                        )
                        .then(() =>
                            queryInterface.sequelize.query(
                                `UPDATE ${table} SET ${vectorName} = to_tsvector('english', ${searchObjects[table].join(" || ' ' || ")});`,
                                { transaction: t }
                            )
                        )
                        .then(() =>
                            queryInterface.sequelize.query(
                                `CREATE INDEX ${table}_search ON ${table} USING gin(${vectorName});`,
                                { transaction: t }
                            )
                        )
                        .then(() =>
                            queryInterface.sequelize.query(
                                `CREATE TRIGGER ${table}_vector_update
                                 BEFORE INSERT OR UPDATE ON ${table}
                                 FOR EACH ROW EXECUTE PROCEDURE tsvector_update_trigger(${vectorName}, 'pg_catalog.english', ${searchObjects[table].join(', ')});`,
                                { transaction: t }
                            )
                        )
                        .error(console.log)
                )
            )
        ),
    down: async queryInterface =>
        await queryInterface.sequelize.transaction(t =>
            Promise.all(
                Object.keys(searchObjects).map(table =>
                    queryInterface.sequelize
                        .query(
                            `DROP TRIGGER ${table}_vector_update ON ${table};`,
                            { transaction: t }
                        )
                        .then(() =>
                            queryInterface.sequelize.query(
                                `DROP INDEX ${table}_search;`,
                                { transaction: t }
                            )
                        )
                        .then(() =>
                            queryInterface.sequelize.query(
                                `ALTER TABLE ${table} DROP COLUMN ${vectorName};`,
                                { transaction: t }
                            )
                        )
                )
            )
        )
};
The problem is that, because the code concatenates all the columns within each array of searchObjects, what gets stored is a single combined vector of all the columns in each array.
For example, on the articles table: headline, cleaned_body and summary are all part of that single generated _search vector.
Because of this, I can't really search by ONLY headline or ONLY cleaned_body, etc. I'd like to be able to search each column separately and also together.
The use case: in my search typeahead I only want to search on headline, but on my search results page I want to search on all the columns specified in searchObjects.
Can someone give me a hint on what I need to change? Should I create a new tsvector for each column?
If anyone is curious, here's how you can create a tsvector for each column:
// a transaction handle is needed for the { transaction } options below
const transaction = await queryInterface.sequelize.transaction();
try {
    for (const table in searchObjects) {
        for (const col of searchObjects[table]) {
            await queryInterface.sequelize.query(
                `ALTER TABLE ${table} ADD COLUMN ${col + vectorName} TSVECTOR;`,
                { transaction }
            );
            await queryInterface.sequelize.query(
                `UPDATE ${table} SET ${col + vectorName} = to_tsvector('english', ${col});`,
                { transaction }
            );
            await queryInterface.sequelize.query(
                `CREATE INDEX ${table}_${col}_search ON ${table} USING gin(${col + vectorName});`,
                { transaction }
            );
            await queryInterface.sequelize.query(
                `CREATE TRIGGER ${table}_${col}_vector_update
                 BEFORE INSERT OR UPDATE ON ${table}
                 FOR EACH ROW EXECUTE PROCEDURE tsvector_update_trigger(${col + vectorName}, 'pg_catalog.english', ${col});`,
                { transaction }
            );
        }
    }
    await transaction.commit();
} catch (err) {
    await transaction.rollback();
    throw err;
}
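With per-column vectors in place, the typeahead can then query just the headline vector (headline_search, following the col + vectorName naming above); a minimal sketch whose Sequelize wiring mirrors the query from the question and is illustrative:
// search ONLY the headline column, e.g. for the typeahead
const articles = await Article.findAndCountAll({
    where: {
        [Sequelize.Op.and]: [
            Sequelize.fn(
                'articles.headline_search @@ plainto_tsquery',
                'english',
                Sequelize.literal(':query')
            ),
            { status: STATUS_TYPE_ACTIVE }
        ]
    },
    replacements: { query: q }
});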