Mongo Shell print is not displaying? - mongodb

So I am trying to compare a simple comma delimited list to the documents in my collection. This is my code:
var file = cat("Price Level V.csv");
var skus = file.split("\n");

for (var i = 0; i < skus.length; i++) {
    var vasku = skus[i].split(',');
    db.getCollection('skus').findOne({sku: vasku[0]}, function(err, mydoc) {
        if (err)
            print(err);
        if (mydoc == null) {
            print('NF');
        } else if (mydoc.VA == vasku[1]) {
            print('Correct');
        } else {
            print('Incorrect');
        }
    });
}
For some reason, I am not seeing anything pop up in the shell for all my print statements. It should at least print 'Incorrect', right?

If the loop is entered and the skus collection is not empty, then this can happen if you misspell the name of the collection you are querying (I see that from time to time when someone writes the collection name in camelCase).
It's a long shot, but maybe the collection name in the db is actually skuss (a second 's' added for the plural form)?
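A quick way to rule that out (assuming the mongo or mongosh shell) is to list the collection names and check that the collection actually has documents:

db.getCollectionNames().filter(function (name) { return /sku/i.test(name); });
db.getCollection('skus').count();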

Related

How to take MongoDB backup without data [duplicate]

I have a MongoDB instance with a lot of data, and now I need to start up a new instance with the same structure but without data.
How can I get this done?
You can do that with the "query" option, with a query that does not return any document. Something like:
mongodump -q '{ "foo" : "bar" }'
This will dump all the databases and indexes; you can then run mongorestore to recreate them in another mongod instance.
See documentation:
http://docs.mongodb.org/manual/reference/program/mongodump/#cmdoption--query
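For completeness, a hedged example of the full round trip (the target host is a placeholder; mongodump writes to ./dump by default unless you pass --out):

mongodump -q '{ "foo" : "bar" }'
mongorestore --host target-host ./dump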
You can log into the mongo shell and run the following statements to generate createIndex statements for every collection. After that, run the generated statements to recreate the indexes.
var collectionList = db.getCollectionNames();
for (var index in collectionList) {
    var collection = collectionList[index];
    var cur = db.getCollection(collection).getIndexes();
    if (cur.length == 1) {
        // only the default _id index exists
        continue;
    }
    for (var index1 in cur) {
        var next = cur[index1];
        if (next["name"] == '_id_') {
            continue;
        }
        var unique = next["unique"] ? true : false;
        // createIndex takes a single options object, so unique and background belong together
        print("try{ db.getCollection(\"" + collection + "\").createIndex(" + JSON.stringify(next["key"]) + ", {unique: " + unique + ", background: true}) }catch(e){ print(e) }");
    }
}
Here is a really short and brilliant script to generate the index-backup statements:
print(`// Backup indexes of : ${db.getName()} : database`);
print(`use ${db.getName()};`);
db.getCollectionNames().forEach(function (collection) {
    db.getCollection(collection).getIndexes().forEach(function (index) {
        if (index.name === '_id_') return; // skip default _id indexes
        const keys = tojsononeline(index.key);
        delete index.id; delete index.key; delete index.v; delete index.ns;
        print(`db.${collection}.createIndex(${keys}, ${tojsononeline(index)});`);
    });
});
You can run it directly from mongo shell like this:
mongo --quiet mongodb://localhost:27017/mydatabase indexes-backup.js
Output looks like:
db.user.createIndex({"user.email":1}, {"name":"userEmail", "background":true});
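To apply the generated statements on another deployment, one option (connection strings are placeholders) is to capture the output to a file and then run its contents in a shell connected to the target database:

mongo --quiet mongodb://localhost:27017/mydatabase indexes-backup.js > restore-indexes.js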
Based on Ivan's answer, I improved the script by adding more options like expireAfterSeconds (which was crucial for me) and a flag variable to drop indexes before creating them. The dropFirst variable at the top of the script can be set to true to drop each index before creating it. Also, this script keeps the existing names of the indexes.
var dropFirst = false;

for (var collection of db.getCollectionNames()) {
    var indexes = db.getCollection(collection).getIndexes().filter(i => i.name !== '_id_');
    if (indexes.length === 0) continue;

    print(`\n// Collection: ${collection}`);
    for (var index of indexes) {
        var key = JSON.stringify(index.key);
        var opts = [`name: "${index.name}"`, 'background: true'];
        if (index['unique']) opts.push('unique: true');
        if (index['hidden']) opts.push('hidden: true');
        if (index['sparse']) opts.push('sparse: true');
        if (index['expireAfterSeconds'] !== undefined) opts.push(`expireAfterSeconds: ${index['expireAfterSeconds']}`);

        if (dropFirst) {
            print(`try { db.getCollection("${collection}").dropIndex(${key}); } catch(e) { print('failed to drop ${key}:', e); }`);
        }
        print(`try { db.getCollection("${collection}").createIndex(${key}, {${opts.join(', ')}}) } catch(e) { print('failed to create ${key}:', e) }`);
    }
}
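The improved script can be run the same way, for example by saving it to a file (the file name and connection string are placeholders) and redirecting the generated statements into another file:

mongosh --quiet "mongodb://localhost:27017/mydatabase" backup-indexes-v2.js > create-indexes.js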

MongoDB: Need help speeding up my collections comparison script that generates notification objects on diff

I have a primary collection that I use directly with my app, and a secondary collection that has the same data but is updated from our source daily. Each document represents a real-life oil well (relevant for my code sample).
Each daily update to the secondary collection may have changes in some of the properties of the documents compared to the primary collection (e.g. "status" property may go from active to inactive), or there may be entirely new documents added that day.
I have a script where I compare the secondary collection to the primary collection that ultimately updates the primary collection with the changes or newly added documents, but at the same time creates "notification" objects, which I push to user documents to alert them of changes in documents that they "follow" (or any new documents added that are relevant to them for various reasons). I also consolidate them by user at the end, so that I'm pushing just 1-2 combined notifications to each user instead of possibly 10-20.
My question is: is there a way to make my script more efficient, maybe by converting it all to MongoDB Query Language (MQL) instead of JavaScript for loops? It currently takes 1-2 hours on our collection of 39k documents.
(Note: I run this script manually in the mongosh shell, currently only once a month, but would like to eventually increase frequency, likely daily since that's what we're capable of.)
(Note 2: I'm currently only tracking document changes for the "operator" and "statusAbbr" properties, and then any "newWells". That is sufficient.)
(Note 3: I'm a newer programmer, and I'm happy to have made it "work", but now I want to learn how to make it better.)
let userNotifications = [];
let secondaryCollection = db.secondaryCollection.find().toArray();

// GENERATE NOTIFICATIONS (PER WELL) (multiple notifications per user per well possible)
(async () => {
    function notificationType(well, doc) {
        if (well) {
            if (well.statusAbbr != doc.statusAbbr) {
                return "wellStatusChanges";
            } else if (well.operator != doc.operator) {
                return "wellOperatorChanges";
            } else {
                return "";
            }
        } else {
            return "newWells";
        }
    }

    for (let doc of secondaryCollection) {
        let well = null;
        let result = "";

        if (db.primaryCollection.find({ndic: doc.ndic}).count() > 0) {
            // UPDATE DB (well's status/statusHistory || operator/operatorId/operatorHistory)
            well = db.primaryCollection.find({ndic: doc.ndic}).toArray()[0];
            doc._id = well._id;
            result = notificationType(well, doc); // wellStatusChanges || wellOperatorChanges || ""

            if (result == "wellStatusChanges") {
                // build the new history array first (unshift returns the new length, not the array)
                doc.statusHistory = [
                    {
                        date: new Date(),
                        statusNew: doc.statusAbbr,
                        statusOld: well.statusAbbr
                    },
                    ...well.statusHistory
                ];
                db.primaryCollection.findOneAndUpdate(
                    {_id: new ObjectId(doc._id)},
                    {$set: {statusHistory: doc.statusHistory, statusAbbr: doc.statusAbbr}}
                );
            } else if (result == "wellOperatorChanges") {
                if (!doc.operatorId) {
                    let operators = await db.operators.find({name: doc.operator}).toArray();
                    if (operators.length == 1) {
                        doc.operatorId = operators[0]._id;
                    } else {
                        let newOperator = await db.operators.insertOne({...operatorTemplate, name: doc.operator});
                        doc.operatorId = newOperator.insertedId;
                    }
                }
                doc.operatorHistory = [
                    {
                        date: new Date(),
                        operatorNew: doc.operator,
                        operatorIdNew: doc.operatorId,
                        operatorOld: well.operator,
                        operatorIdOld: well.operatorId,
                    },
                    ...well.operatorHistory
                ];
                db.primaryCollection.findOneAndUpdate(
                    {_id: new ObjectId(doc._id)},
                    {$set: {operatorHistory: doc.operatorHistory, operator: doc.operator, operatorId: new ObjectId(doc.operatorId)}}
                );
            }
        } else {
            result = notificationType(well, doc); // should be "newWells"
            doc = {...wellTemplate, ...doc};
            let createdDoc = await db.primaryCollection.insertOne(doc); //TODO: need to await ??
            doc._id = createdDoc.insertedId; // in ObjectId form
        }

        // (2) UPDATE USERNOTIFICATIONS (USER/ACCOUNT)
        if (result != "") {
            // script that determines who relevant users are for each change / new well added,
            // generates an individual notification for each change, and pushes to userNotifications
        }
    }
})();

// AGGREGATE NOTIFICATIONS (FROM ALL WELLS) & UPDATE USERS' NOTIFICATIONS
(async () => {
    // (1) script that consolidates notifications from userNotifications based on user
    // (2) FOR EACH CONSOLIDATION/USER, UPDATE USER
    await Promise.all(userNotificationsCombined.map(async (notif) => {
        try {
            let update = await db.users.findOneAndUpdate(
                {_id: new ObjectId(notif.userId)},
                {$push: {notifications: notif}}
            );
        } catch {
            console.log("couldn't find user ", notif.userId);
        }
    }));
})();
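A rough sketch of one way to cut down the per-document round trips, reusing the same collection and field names as above: read all matching primary documents in a single query with $in and batch the writes with bulkWrite (the notification and operator logic is omitted here):

const secondaryDocs = db.secondaryCollection.find().toArray();

// One read for all wells instead of one find() per secondary document
const primaryByNdic = {};
db.primaryCollection.find({ndic: {$in: secondaryDocs.map(d => d.ndic)}}).forEach(w => {
    primaryByNdic[w.ndic] = w;
});

const writes = [];
for (const doc of secondaryDocs) {
    const well = primaryByNdic[doc.ndic];
    if (!well) {
        writes.push({insertOne: {document: {...wellTemplate, ...doc}}}); // "newWells"
    } else if (well.statusAbbr != doc.statusAbbr) {
        writes.push({updateOne: { // "wellStatusChanges"
            filter: {_id: well._id},
            update: {
                $set: {statusAbbr: doc.statusAbbr},
                $push: {statusHistory: {$each: [{date: new Date(), statusNew: doc.statusAbbr, statusOld: well.statusAbbr}], $position: 0}}
            }
        }});
    }
    // "wellOperatorChanges" would be handled the same way
}
if (writes.length) db.primaryCollection.bulkWrite(writes, {ordered: false});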

Iterating Over an Array and Returning First Match from MongoDB using Mongoose

I have a collection of Users in my database with corresponding unique ids. I am writing a function that takes an array of ids as an argument, e.g:
["user_id1", "user_id2", "user_id3", "user_id4"].
I want my query to return the first and only the first match. i.e. Using the example above, if user_id2 and user_id4 were the only two matching users in the database, my result would only return user_id2. User ids that are not in the database are ignored.
My current approach is to use a while loop, but I wanted to see if there was a better solution provided by Mongoose.
Current Pseudo Code:
function findOneUser(userIdArr) {
    let user = 0;
    let returnedUser;
    while (!returnedUser || user < userIdArr.length) {
        let id = userIdArr[user];
        user = await User.findByID(id);
        user++;
    }
}
Try this. Use promises and the Mongoose findOne method:
let argumentArr = ["user_id1", "user_id2", "user_id3", "user_id4"];

let getUser = new Promise(function (resolve, reject) {
    for (let i = 0; i < argumentArr.length; i++) {
        User.findOne({_id: argumentArr[i]}).then(user => {
            if (user) { // findOne resolves with null when there is no match
                resolve(user);
            }
        }).catch(err => { reject(err); });
    }
});

getUser.then(
    (user) => { console.log(user); }, // expected result: the matched user document
    (err) => { console.log(err); }
);
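Note that the promise above resolves with whichever query happens to finish first, not necessarily the first id in the array. If you need strictly the first matching id in array order, a minimal alternative sketch (assuming Mongoose with async/await; the function name is illustrative):

async function findFirstMatchingUser(userIdArr) {
    for (const id of userIdArr) {
        const user = await User.findById(id).lean(); // null when not found
        if (user) return user;
    }
    return null; // no id in the array exists in the database
}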

Mongo - Replace references with embedded documents

I have a Collection with a nested attribute that is an array of ObjectId References. These refer to documents in another Collection.
I'd like to replace these references with the documents themselves, i.e. embed those documents where the references are now. I've tried with and without the .snapshot() option. The error may be happening because I'm updating a document while looping over it, and .snapshot() isn't available at that level.
My mongo-fu is low and I'm stuck on a call stack error. How can I do this?
Example code:
db.CollWithReferences.find({}).snapshot().forEach( function(document) {
    var doc_id = document._id;
    document.GroupsOfStuff.forEach( function(Group) {
        var docsToEmbed = db.CollOfThingsToEmbed.find({ _id: { $in: Group.ArrayOfReferenceObjectIds }});
        db.CollWithReferences.update({"_id": ObjectId(doc_id) },
            {$set: {"Group.ArrayOfReferenceObjectIds ": docsToEmbed}} );
    });
});
Gives this error:
{
"message" : "Maximum call stack size exceeded",
"stack" : "RangeError: Maximum call stack size exceeded" +
....}
I figure this is happening for one of two reasons. Either you are running out of memory by executing two queries in a for loop, or the update operation is being executed before the find operation has finished.
Either way, it is not a good idea to execute too many queries in a for loop as it can lead to this type of error.
I can't be sure if this will fix your problem as I don't know how many documents are in your collections, but it may work if you first get all the documents you need from the CollWithReferences collection, then everything you need from the CollOfThingsToEmbed collection. Then build a map from each _id in the CollOfThingsToEmbed collection to the document it corresponds to. You can then loop through each document you got from the CollWithReferences collection and mutate its GroupsOfStuff array, replacing each ObjectId in the ArrayOfReferenceObjectIds arrays with the whole document from the map you built. Then just update that document by setting GroupsOfStuff to its mutated value.
The following JavaScript code will do this (it could be organised better to have no logic in the global scope etc.):
var references = db.CollWithReferences.find({}).toArray();

function getReferenceIds(references) {
    var referenceIds = [];
    for (var i = 0; i < references.length; i++) {
        var groups = references[i].GroupsOfStuff;
        for (var j = 0; j < groups.length; j++) {
            var ids = groups[j].ArrayOfReferenceObjectIds;
            for (var k = 0; k < ids.length; k++) {
                referenceIds.push(ids[k]);
            }
        }
    }
    return referenceIds;
}

function buildIdMap(docs) {
    var map = {};
    for (var i = 0; i < docs.length; i++) {
        map[docs[i]._id.toString()] = docs[i];
    }
    return map;
}

var referenceIds = getReferenceIds(references);
var docsToEmbed = db.CollOfThingsToEmbed.find({_id: {$in: referenceIds}}).toArray();
var idMap = buildIdMap(docsToEmbed);

for (var i = 0; i < references.length; i++) {
    var groups = references[i].GroupsOfStuff;
    for (var j = 0; j < groups.length; j++) {
        // replace each ObjectId with the corresponding document from the map
        groups[j].ArrayOfReferenceObjectIds = groups[j].ArrayOfReferenceObjectIds.map(function(ref) {
            return idMap[ref.toString()];
        });
    }
    db.CollWithReferences.update(
        { _id: references[i]._id },
        { $set: { GroupsOfStuff: groups } }
    );
}
It would be better if it was possible to just do one bulk update, but as each document needs to be updated differently, this is not possible.
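That said, if the server supports db.collection.bulkWrite (MongoDB 3.2+), the per-document updates can at least be sent in a single batched call; a minimal sketch reusing the names from the code above:

// Collect one updateOne per parent document and send them all in one batch.
var ops = [];
for (var i = 0; i < references.length; i++) {
    ops.push({
        updateOne: {
            filter: { _id: references[i]._id },
            update: { $set: { GroupsOfStuff: references[i].GroupsOfStuff } }
        }
    });
}
if (ops.length) db.CollWithReferences.bulkWrite(ops);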

mongodb move documents from one collection to another collection

How can documents be moved from one collection to another in MongoDB? For example: I have a lot of documents in collection A and I want to move all documents older than 1 month to collection B (those documents should then no longer be in collection A).
Using aggregation we can copy documents, but what I am trying to do is move them.
What method can be used to move documents?
The bulk operations @markus-w-mahlberg showed (and @mark-mullin refined) are efficient but unsafe as written. If the bulkInsert fails, the bulkRemove will still continue. To make sure you don't lose any records when moving, use this instead:
function insertBatch(collection, documents) {
    var bulkInsert = collection.initializeUnorderedBulkOp();
    var insertedIds = [];
    var id;
    documents.forEach(function(doc) {
        id = doc._id;
        // Insert without raising an error for duplicates
        bulkInsert.find({_id: id}).upsert().replaceOne(doc);
        insertedIds.push(id);
    });
    bulkInsert.execute();
    return insertedIds;
}

function deleteBatch(collection, documents) {
    var bulkRemove = collection.initializeUnorderedBulkOp();
    documents.forEach(function(doc) {
        bulkRemove.find({_id: doc._id}).removeOne();
    });
    bulkRemove.execute();
}

function moveDocuments(sourceCollection, targetCollection, filter, batchSize) {
    print("Moving " + sourceCollection.find(filter).count() + " documents from " + sourceCollection + " to " + targetCollection);
    var count;
    while ((count = sourceCollection.find(filter).count()) > 0) {
        print(count + " documents remaining");
        var sourceDocs = sourceCollection.find(filter).limit(batchSize);
        var idsOfCopiedDocs = insertBatch(targetCollection, sourceDocs);
        var targetDocs = targetCollection.find({_id: {$in: idsOfCopiedDocs}});
        deleteBatch(sourceCollection, targetDocs);
    }
    print("Done!");
}
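An example call, with an illustrative date field and batch size:

var cutoff = new Date();
cutoff.setMonth(cutoff.getMonth() - 1);
moveDocuments(db.collectionA, db.collectionB, {yourDateField: {$lt: cutoff}}, 1000);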
Update 2
Please do NOT upvote this answer any more. As written, @jasongarber's answer is better in every respect.
Update
This answer by @jasongarber is a safer approach and should be used instead of mine.
Provided I understood you correctly and you want to move all documents older than 1 month, and you use MongoDB 2.6, there is no reason not to use bulk operations, which are the most efficient way of doing multiple operations I am aware of:
> var bulkInsert = db.target.initializeUnorderedBulkOp()
> var bulkRemove = db.source.initializeUnorderedBulkOp()
> var date = new Date()
> date.setMonth(date.getMonth() -1)
> db.source.find({"yourDateField":{$lt: date}}).forEach(
function(doc){
bulkInsert.insert(doc);
bulkRemove.find({_id:doc._id}).removeOne();
}
)
> bulkInsert.execute()
> bulkRemove.execute()
This should be pretty fast and it has the advantage that in case something goes wrong during the bulk insert, the original data still exists.
Edit
In order to prevent too much memory from being used, you can execute the bulk operations every x docs processed:
> var bulkInsert = db.target.initializeUnorderedBulkOp()
> var bulkRemove = db.source.initializeUnorderedBulkOp()
> var x = 10000
> var counter = 0
> var date = new Date()
> date.setMonth(date.getMonth() -1)
> db.source.find({"yourDateField":{$lt: date}}).forEach(
function(doc){
bulkInsert.insert(doc);
bulkRemove.find({_id:doc._id}).removeOne();
counter ++
if( counter % x == 0){
bulkInsert.execute()
bulkRemove.execute()
bulkInsert = db.target.initializeUnorderedBulkOp()
bulkRemove = db.source.initializeUnorderedBulkOp()
}
}
)
> bulkInsert.execute()
> bulkRemove.execute()
Insert and remove:
var documentsToMove = db.collectionA.find({});
documentsToMove.forEach(function(doc) {
    db.collectionB.insert(doc);
    db.collectionA.remove(doc);
});
note: this method might be quite slow for large collections or collections holding large documents.
$out is used to create a new collection with the data, so use $out:
db.oldCollection.aggregate([{$out : "newCollection"}])
then use drop
db.oldCollection.drop()
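Note that $out overwrites the target collection entirely. On MongoDB 4.2+ you could use $merge instead, which writes into an existing collection without replacing it; a hedged example:

db.oldCollection.aggregate([
    { $merge: { into: "newCollection" } } // matches on _id by default, inserts when not matched
])
db.oldCollection.drop()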
You can use a range query to get the data from sourceCollection, keep the cursor in a variable, loop over it, and insert the documents into the target collection:
var doc = db.sourceCollection.find({
    "Timestamp": {
        $gte: ISODate("2014-09-01T00:00:00Z"),
        $lt: ISODate("2014-10-01T00:00:00Z")
    }
});
doc.forEach(function(doc) {
    db.targetCollection.insert(doc);
});
Hope it helps!
First option (using mongodump)
1. Get a dump of the source collection:
mongodump -d db -c source_collection
2. Restore into the target collection:
mongorestore -d db -c target_collection dump/db/source_collection.bson
Second Option
Running aggregate
db.getCollection('source_collection').aggregate([ { $match: {"emailAddress" : "apitester#mailinator.com"} }, { $out: "target_collection" } ])
Third Option (slowest)
Running through a for loop:
db.getCollection('source_collection').find().forEach(function(doc) { db.getCollection('target_collection').insert(doc); });
print("Rollback Completed!");
Maybe from a performance point of view it's better to remove a lot of documents using one command (especially if you have indexes for the query part) rather than deleting them one by one.
For example:
// yourDateField is whatever field the date range applies to
db.source.find({yourDateField: {$gte: start, $lt: end}}).forEach(function(doc) {
    db.target.insert(doc);
});
db.source.remove({yourDateField: {$gte: start, $lt: end}});
This is a restatement of @Markus W Mahlberg's answer.
Returning the favor - as a function:
function moveDocuments(sourceCollection, targetCollection, filter) {
    var bulkInsert = targetCollection.initializeUnorderedBulkOp();
    var bulkRemove = sourceCollection.initializeUnorderedBulkOp();

    sourceCollection.find(filter).forEach(function(doc) {
        bulkInsert.insert(doc);
        bulkRemove.find({_id: doc._id}).removeOne();
    });

    bulkInsert.execute();
    bulkRemove.execute();
}
An example use
var x = {dsid:{$exists: true}};
moveDocuments(db.pictures,db.artifacts,x)
to move all documents that have top level element dsid from the pictures to the artifacts collection
Here's an update to @jasongarber's answer which uses the more recent mongo 'bulkWrite' operation (read the docs here), and also keeps the whole process asynchronous so you can run it as part of a wider script which depends on its completion.
async function moveDocuments (sourceCollection, targetCollection, filter) {
  const sourceDocs = await sourceCollection.find(filter)
  console.log(`Moving ${await sourceDocs.count()} documents from ${sourceCollection.collectionName} to ${targetCollection.collectionName}`)

  const idsOfCopiedDocs = await insertDocuments(targetCollection, sourceDocs)

  const targetDocs = await targetCollection.find({_id: {$in: idsOfCopiedDocs}})
  await deleteDocuments(sourceCollection, targetDocs)

  console.log('Done!')
}

async function insertDocuments (collection, documents) {
  const insertedIds = []
  const bulkWrites = []

  await documents.forEach(doc => {
    const {_id} = doc

    insertedIds.push(_id)
    bulkWrites.push({
      replaceOne: {
        filter: {_id},
        replacement: doc,
        upsert: true,
      },
    })
  })

  if (bulkWrites.length) await collection.bulkWrite(bulkWrites, {ordered: false})

  return insertedIds
}

async function deleteDocuments (collection, documents) {
  const bulkWrites = []

  await documents.forEach(({_id}) => {
    bulkWrites.push({
      deleteOne: {
        filter: {_id},
      },
    })
  })

  if (bulkWrites.length) await collection.bulkWrite(bulkWrites, {ordered: false})
}
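A hypothetical usage sketch with the Node.js driver (the URI, database, collection names, and filter are placeholders):

const { MongoClient } = require('mongodb')

async function run () {
  const client = await MongoClient.connect('mongodb://localhost:27017')
  try {
    const db = client.db('mydatabase')
    await moveDocuments(db.collection('source'), db.collection('target'), {archived: true})
  } finally {
    await client.close()
  }
}

run().catch(console.error)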
Up to MongoDB 4.0 you can use the copyTo() method (deprecated since MongoDB 3.0 and removed in 4.2) with the following syntax:
db.source_collection.copyTo("target_collection")
Then you can use the drop command to remove the old collection:
db.source_collection.drop()
I do like the response from @markus-w-mahlberg, however at times I have seen the need to keep it a bit simpler for people. As such I have a couple of functions below. You could naturally wrap things here with bulk operators as he did, but this code works with new and old Mongo systems equally.
function parseNS(ns){
    // Expects we are forcing people to not violate the rules and not doing "foodb.foocollection.month.day.year"; if they do, they need to use an array.
    if (ns instanceof Array){
        database = ns[0];
        collection = ns[1];
    }
    else{
        tNS = ns.split(".");
        if (tNS.length > 2){
            print('ERROR: NS had more than 1 period in it, please pass as an ["dbname","coll.name.with.dots"] !');
            return false;
        }
        database = tNS[0];
        collection = tNS[1];
    }
    return {database: database, collection: collection};
}

function insertFromCollection(sourceNS, destNS, query, batchSize, pauseMS){
    // Parse and check namespaces
    srcNS = parseNS(sourceNS);
    destNS = parseNS(destNS);
    if (srcNS == false || destNS == false){ return false; }

    batchBucket = new Array();
    totalToProcess = db.getSiblingDB(srcNS.database).getCollection(srcNS.collection).find(query, {_id: 1}).count();
    currentCount = 0;
    print("Processed " + currentCount + "/" + totalToProcess + "...");

    db.getSiblingDB(srcNS.database).getCollection(srcNS.collection).find(query).addOption(DBQuery.Option.noTimeout).forEach(function(doc){
        batchBucket.push(doc);
        if (batchBucket.length > batchSize){
            db.getSiblingDB(destNS.database).getCollection(destNS.collection).insert(batchBucket);
            currentCount += batchBucket.length;
            batchBucket = [];
            sleep(pauseMS);
            print("Processed " + currentCount + "/" + totalToProcess + "...");
        }
    });

    // flush any remaining documents that did not fill a whole batch
    if (batchBucket.length > 0){
        db.getSiblingDB(destNS.database).getCollection(destNS.collection).insert(batchBucket);
        currentCount += batchBucket.length;
    }
    print("Completed");
}

/** Example Usage:
    insertFromCollection("foo.bar", "foo2.bar", {"type":"archive"}, 1000, 20);
*/
You could obviously add a db.getSiblingDB(srcNS.database).getCollection(srcNS.collection).remove(query, true) if you wanted to also remove the records after they are copied to the new location. The code can easily be built like that to make it restartable.
I had 2,297 collections holding 15 million documents, but some collections were empty.
Using only copyTo the script failed, but with this optimization:
db.getCollectionNames().forEach(function(collname) {
    var c = db.getCollection(collname).count();
    if (c !== 0) {
        db.getCollection(collname).copyTo('master-collection');
        print('Copied collection ' + collname);
    }
});
everything works fine for me.
NB: copyTo is deprecated because it blocks read/write operations, so I think it is fine as long as you know the database is not usable during this operation.
In my case forEach didn't work, so I had to make some changes.
var mongoose = require('mongoose');

var kittySchema = new mongoose.Schema({
    name: String
});
var Kitten = mongoose.model('Kitten', kittySchema);

var catSchema = new mongoose.Schema({
    name: String
});
var Cat = mongoose.model('Cat', catSchema);
These are the models for both collections.
function Recursion() {
    Kitten.findOne().lean().exec(function (error, results) {
        if (error) {
            console.log("No object found");
            return;
        }
        if (!results) {
            // nothing left to move
            return;
        }
        var objectResponse = results;
        var RequiredId = objectResponse._id;
        delete objectResponse._id;
        var swap = new Cat(objectResponse);
        swap.save(function (err) {
            if (err) {
                return err;
            }
            else {
                console.log("SUCCESSFUL");
                Kitten.deleteOne({ _id: RequiredId }, function (err) {
                    if (!err) {
                        console.log('notification!');
                    }
                    else {
                        return err;
                    }
                });
                Recursion();
            }
        });
    });
}
I planned to archive 1000 records at a time using the bulk insert and bulk delete methods of pymongo.
For both source and target
create mongodb objects to connect to the database.
instantiate the bulk objects. Note: I created a backup of bulk objects too. This will help me to rollback the insertion or removal when an error occurs.
example:
For source
// replace this with mongodb object creation logic
source_db_obj = db_help.create_db_obj(source_db, source_col)
source_bulk = source_db_obj.initialize_ordered_bulk_op()
source_bulk_bak = source_db_obj.initialize_ordered_bulk_op()
For target
// replace this with mongodb object creation logic
target_db_obj = db_help.create_db_obj(target_db, target_col)
target_bulk = target_db_obj.initialize_ordered_bulk_op()
target_bulk_bak = target_db_obj.initialize_ordered_bulk_op()
Obtain the source records that match the filter criteria:
source_find_results = source_db_obj.find(filter)
Loop through the source records
create target and source bulk operations
Append archived_at field with the current datetime to the target collection
//replace this with the logic to obtain the UTCtime.
doc['archived_at'] = db_help.getUTCTime()
target_bulk.insert(document)
source_bulk.remove(document)
For rollback in case of any errors or exceptions, create the target_bulk_bak and source_bulk_bak operations:
target_bulk_bak.find({'_id': doc['_id']}).remove_one()
// remove the extra column before backing up the original document
doc.pop('archived_at', None)
source_bulk_bak.insert(doc)
When the record count reaches 1000, execute the target bulk insertion and source bulk removal. Note: this method takes the target_bulk and source_bulk objects for execution.
execute_bulk_insert_remove(source_bulk, target_bulk)
When an exception occurs, execute the target_bulk_bak removals and source_bulk_bak insertions. This rolls back the changes. Since MongoDB doesn't have rollback, I came up with this hack.
execute_bulk_insert_remove(source_bulk_bak, target_bulk_bak)
Finally re-initialize the source and target bulk and bulk_bak objects. This is necessary because you can use them only once.
Complete code
def execute_bulk_insert_remove(source_bulk, target_bulk):
    try:
        target_bulk.execute()
        source_bulk.execute()
    except BulkWriteError as bwe:
        raise Exception(
            "could not archive document, reason: {}".format(bwe.details))


def archive_bulk_immediate(filter, source_db, source_col, target_db, target_col):
    """
    filter: filter criteria for backup
    source_db: source database name
    source_col: source collection name
    target_db: target database name
    target_col: target collection name
    """
    count = 0
    bulk_count = 1000

    source_db_obj = db_help.create_db_obj(source_db, source_col)
    source_bulk = source_db_obj.initialize_ordered_bulk_op()
    source_bulk_bak = source_db_obj.initialize_ordered_bulk_op()

    target_db_obj = db_help.create_db_obj(target_db, target_col)
    target_bulk = target_db_obj.initialize_ordered_bulk_op()
    target_bulk_bak = target_db_obj.initialize_ordered_bulk_op()

    source_find_results = source_db_obj.find(filter)

    start = datetime.now()
    for doc in source_find_results:
        doc['archived_at'] = db_help.getUTCTime()

        target_bulk.insert(doc)
        source_bulk.find({'_id': doc['_id']}).remove_one()

        # prepare the rollback operations without the extra column
        target_bulk_bak.find({'_id': doc['_id']}).remove_one()
        doc.pop('archived_at', None)
        source_bulk_bak.insert(doc)

        count += 1
        if count % bulk_count == 0:
            logger.info("count: {}".format(count))
            try:
                execute_bulk_insert_remove(source_bulk, target_bulk)
            except BulkWriteError as bwe:
                execute_bulk_insert_remove(source_bulk_bak, target_bulk_bak)
                logger.info("Bulk Write Error: {}".format(bwe.details))
                raise

            # bulk objects can only be executed once, so re-initialize them
            source_bulk = source_db_obj.initialize_ordered_bulk_op()
            source_bulk_bak = source_db_obj.initialize_ordered_bulk_op()
            target_bulk = target_db_obj.initialize_ordered_bulk_op()
            target_bulk_bak = target_db_obj.initialize_ordered_bulk_op()

    end = datetime.now()
    logger.info("archived {} documents to {} in {}.".format(
        count, target_col, (end - start)))