How to copy collection from localhost to remote mongo instance - mongodb

db.runCommand({cloneCollection : "newdb.rep", from:"localhost:27017"})
I am connected to the remote Mongo instance now.
However, I understand this is mainly used to copy a collection from a remote instance to the local one, and not the other way round.
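For context, cloneCollection always pulls the named collection from the host given in from into the instance the command is run on. So one possibility (a sketch, assuming the remote server can actually reach your machine at a resolvable address, here the placeholder my-local-host) is to run the command while connected to the remote instance and point from at your local host:
// run while connected to the REMOTE instance; "my-local-host" is a placeholder
// for a hostname/address that the remote server can resolve
db.runCommand({cloneCollection: "newdb.rep", from: "my-local-host:27017"})
Note that the cloneCollection command was removed in MongoDB 4.2, so this only applies to older versions.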

You could use this script, which connects to the local and the remote database, then iterates over the collection and inserts the documents in bulks of 1000.
var localConnection = connect("localhost:27017/myDatabase");
var destinationConnection = connect("localhost:27020/myDatabase");
var documentLimit = 1000;

// replace myCollection with your collection name
var docCount = localConnection.myCollection.find({}).count();
var chunks = docCount / documentLimit;

for (var i = 0; i <= chunks; i++) {
    var bulk = destinationConnection.myCollection.initializeUnorderedBulkOp();
    localConnection.myCollection.find({}).snapshot()
        .limit(documentLimit).forEach(function (doc) {
            bulk.insert(doc);
        });
    bulk.execute();
}

@profesor79's script doesn't work with newer versions of MongoDB, or for collections containing more than 1000 documents, without some changes. It doesn't use skip(), so MongoDB throws a duplicate key error when it tries to insert the first 1000 docs again.
I have used the following script successfully on a collection containing around 20000 documents.
var localConnection = connect(
    "<db_user>:<db_password>@localhost:27017/<db_name>"
);
var destinationConnection = connect(
    "<db_user>:<db_password>@<remote_host>:<remote_port>/<remote_db>"
);
var documentLimit = 1000;
var docCount = localConnection.collection_name.count();
var chunks = docCount / documentLimit;

for (var i = 0; i <= chunks; i++) {
    var bulk = destinationConnection.collection_name.initializeUnorderedBulkOp();
    localConnection.collection_name
        .find({})
        // after each iteration skip 1000, 2000 and so on...
        .skip(i * documentLimit)
        .limit(documentLimit)
        .forEach(function (doc) {
            bulk.insert(doc);
        });
    bulk.execute();
}
Provide your own local and destination connection strings and documentLimit (i.e. number of documents to be processed as a chunk) and it should work.
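As a usage sketch (the file name copy_collection.js is just an example), you can save the script above to a file and run it with the legacy mongo shell; the connect() calls inside the script open the connections themselves, so no connection arguments are needed:
mongo --nodb copy_collection.js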


How do I terminate a cursor in MongoDB

I am trying to abort a query after it has run for a specific amount of time, so I use MongoDB's maxTimeMS() method on my query, but it is not working.
I expect the query to stop after 5 seconds, but it keeps working. How do I terminate my query after it has run for a specific time?
var sum = 0;
var inbox = db.getMongo().getDB("xxxx").getCollection("Inbox");
var oldInbox = db.getMongo().getDB("xxxx").getCollection("oldInbox");
var inboxCount = inbox.count();
var oldCount = oldInbox.count();
var dif = inboxCount - oldCount;
if (dif > 10000000) {
    var cursor = inbox.find().maxTimeMS(5000);
    cursor.sort({_id: -1}).forEach(function(uidDoc) {
        var uid = uidDoc.uid;
        var docsToMigrate = [];
        var idsToRemove = [];
        inbox.find({uid: uid}).sort({_id: -1}).skip(10).forEach(function(doc) {
            docsToMigrate.push(doc);
            idsToRemove.push(doc._id);
            var x = doc._id;
        });
        oldInbox.insert(docsToMigrate);
        inbox.remove({_id: {$in: idsToRemove}});
        sum = sum + idsToRemove.length;
        if (x = 0) {
            print(sum);
            cursor.close();
        }
    });
};
Based on the documentation, maxTimeMS is a limit on processing time within the database. It covers query planning, execution, and data serialization, but the timer stops while the cursor is idle. It is not wall-clock time, so it does not account for the execution time spent inside your forEach callback. Since your query is quite simple, the database-side processing time will likely never come close to the maxTimeMS limit, so the query is never stopped.
So to answer your question, you'll need to keep a timer elsewhere and manage it yourself using paginated queries.
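A minimal sketch of that idea in the mongo shell, reusing the inbox variable from the question and a hypothetical pageSize; the deadline is checked between pages, so the loop stops within one page of the time budget rather than exactly at it:
var deadline = new Date().getTime() + 5000;   // 5 second wall-clock budget
var pageSize = 1000;                          // hypothetical page size
var lastId = null;                            // paginate on _id instead of skip()

while (new Date().getTime() < deadline) {
    var query = lastId === null ? {} : {_id: {$lt: lastId}};
    var page = inbox.find(query).sort({_id: -1}).limit(pageSize).toArray();
    if (page.length === 0) break;             // no more documents

    page.forEach(function(doc) {
        lastId = doc._id;
        // ...process doc here...
    });
}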

Mongo Document Increment Sequence is skipping numbers

I'm currently having issues when querying for one of my Documents inside a Database through Meteor.
Using this line of code I'm trying to retrieve the next sequence number out of the DB. But it sometimes skips numbers randomly for some reason.
var col = MyCounters.findOne(type);
MyCounters.update(col._id, {$inc: {seq: 1}});
return col.seq;
Not getting any kind of errors server side.
Does anybody know what the issue might be?
I'm on Meteor 1.4+
====================
Update
I also update another Collection with the new value obtained from MyCounters collection, so it would be something like this:
var col = MyCounters.findOne(type);
MyCounters.update(col._id, {$inc: {seq: 1}});
var barId = col.seq;
// declare barObject + onInsertError
barObject.barId = barId;
// ...
FooCollection.insert(barObject, onInsertError);
And FooCollection ends up having skipped sequence numbers up to 5000 sometimes.
If you want to increment the value on that one document, you can use:
var col = MyCounters.findOne(type);
var valueOne = 1;
var nameItem = 'seq';
var inc = {};
inc[ nameItem ] = valueOne;
MyCounters.update({ _id: col._id }, { '$inc': inc } )
But if you want to increment based on the highest value across all documents in the MyCounters collection (max seq + 1), you can use:
var count = MyCounters.findOne({}, {sort:{seq:-1}}).seq;
count = count + 1;
MyCounters.update({_id:col._id}, {$set:{seq:count}})
I hope it works for you. Thanks.
refer to : https://stackoverflow.com/a/33968766/4287229

When copying individual items from one collection to another in MongoDB via a script, the resulting collection has fewer entries than the original

My test collection has 56 entities in it. When the following script is executed, the resulting collection has fewer entries than the original collection, and the number varies for each run. What would cause this issue, and is there a workaround for it?
var collectionToUpdate = 'testcollection';
var temporaryCollectionName = collectionToUpdate + '_old';

db.getCollection(collectionToUpdate).renameCollection(temporaryCollectionName);
var oldCollection = db.getCollection(temporaryCollectionName);

db.createCollection(collectionToUpdate);
var newCollection = db.getCollection(collectionToUpdate);

var count = 0;
oldCollection.find().forEach(
    function (element) {
        count++;
        newCollection.insert(element);
    }
);
print(count);
Versions used:
MongoDB - 3.2.8
RoboMongo - 0.9.0-RC10

How do I Bulk Add a Random Field to each record in MongoDB

There are a number of questions and answers about randomly ordering results or randomly getting a single record. The answers recommend adding a random field, creating an index on that field, and then doing a random draw. It looks like:
db.myindex.find().forEach(function(doc) {
    db.myindex.update({_id: doc._id}, {$set: {rand: Math.random()}});
})
This works great, but it takes several hours (lots and lots of data). It looks like it is limited by write locking, which makes sense since the update happens one record at a time. How do I do this in bulk? I tried:
var bulk = db.myindex.initializeUnorderedBulkOp();
bulk.find({}).update( { $set: { rand: Math.random() } } );
bulk.execute();
But it sets the rand field to the same value for every record! How do I fix this?
Edit: By the way, the reason I need to do this is because I get a huge BSON file from someone else and I need to import it frequently, so I can't wait multiple hours to get it updated.
Introduce a loop with the bulk operations sent to the server once per 1000 documents, or as many modifications as you can fit under the 64MB BSON limit:
var bulk = db.myindex.initializeOrderedBulkOp();
var counter = 0;

db.myindex.find().forEach(function(doc) {
    bulk.find({ "_id": doc._id }).updateOne({
        "$set": { "rand": Math.random() }
    });
    counter++;

    if (counter % 1000 == 0) {
        bulk.execute();
        bulk = db.myindex.initializeOrderedBulkOp();
    }
});

if (counter % 1000 != 0) {
    bulk.execute();
}
If the collection is just static data, and you're getting a BSON file from someone else, it might be quicker to stream the BSON file through a filter to generate a new BSON file that you can then load with mongorestore.
Here is a filter I wrote using Node.js that can process a BSON file at around 1 GB/min.
var bson = require('bson');
var BSON = new bson.BSONPure.BSON();
var BSONStream = require('bson-stream');
var fs = require('fs');
var sb = require('stream-buffers');

var rs = fs.createReadStream('tweets.bson');
var ws = fs.createWriteStream('tweets_random.bson', {flags: 'a'});

var writeBuffer = new sb.WritableStreamBuffer({
    initialSize: (1024 * 1024),
    incrementAmount: (10 * 1024)
});

rs.pipe(new BSONStream()).on('data', function(obj) {
    obj.rand = Math.random();
    writeBuffer.write(BSON.serialize(obj));
    if (writeBuffer.size() > (1024 * 1024)) {
        var size = writeBuffer.size();
        ws.write(writeBuffer.getContents(), function() {
            console.log("Wrote", size, "bytes");
            console.log("Buffer has:", writeBuffer.size(), "bytes left");
        });
    }
});
It might go faster if you modify the buffer size/increment parameters.
This is of course assuming that you have the luxury of reimporting your data.
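As a usage note, the generated file can then be loaded back with mongorestore (the database and collection names below are placeholders):
mongorestore -d mydb -c tweets tweets_random.bson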

mongodb move documents from one collection to another collection

How can documents be moved from one collection to another collection in MongoDB? For example: I have a lot of documents in collection A and I want to move all documents older than 1 month to collection B (and these documents should then no longer be in collection A).
Using aggregation we can copy documents, but what I am trying to do is move them.
What method can be used to move documents?
The bulk operations @markus-w-mahlberg showed (and @mark-mullin refined) are efficient but unsafe as written. If the bulkInsert fails, the bulkRemove will still continue. To make sure you don't lose any records when moving, use this instead:
function insertBatch(collection, documents) {
    var bulkInsert = collection.initializeUnorderedBulkOp();
    var insertedIds = [];
    var id;
    documents.forEach(function(doc) {
        id = doc._id;
        // Insert without raising an error for duplicates
        bulkInsert.find({_id: id}).upsert().replaceOne(doc);
        insertedIds.push(id);
    });
    bulkInsert.execute();
    return insertedIds;
}

function deleteBatch(collection, documents) {
    var bulkRemove = collection.initializeUnorderedBulkOp();
    documents.forEach(function(doc) {
        bulkRemove.find({_id: doc._id}).removeOne();
    });
    bulkRemove.execute();
}

function moveDocuments(sourceCollection, targetCollection, filter, batchSize) {
    print("Moving " + sourceCollection.find(filter).count() + " documents from " + sourceCollection + " to " + targetCollection);
    var count;
    while ((count = sourceCollection.find(filter).count()) > 0) {
        print(count + " documents remaining");
        sourceDocs = sourceCollection.find(filter).limit(batchSize);
        idsOfCopiedDocs = insertBatch(targetCollection, sourceDocs);
        targetDocs = targetCollection.find({_id: {$in: idsOfCopiedDocs}});
        deleteBatch(sourceCollection, targetDocs);
    }
    print("Done!");
}
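A usage sketch, assuming the collections from the question (collectionA and collectionB) and a hypothetical createdAt date field:
var cutoff = new Date();
cutoff.setMonth(cutoff.getMonth() - 1);

// move documents older than one month, 1000 at a time
moveDocuments(db.collectionA, db.collectionB, {createdAt: {$lt: cutoff}}, 1000);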
Update 2
Please do NOT upvote this answer any more. As written, @jasongarber's answer is better in every respect.
Update
The answer by @jasongarber is a safer approach and should be used instead of mine.
Provided I got you right and you want to move all documents older than 1 month, and you use MongoDB 2.6+, there is no reason not to use bulk operations, which are the most efficient way of doing multiple operations that I am aware of:
> var bulkInsert = db.target.initializeUnorderedBulkOp()
> var bulkRemove = db.source.initializeUnorderedBulkOp()
> var date = new Date()
> date.setMonth(date.getMonth() - 1)
> db.source.find({"yourDateField": {$lt: date}}).forEach(
    function(doc) {
        bulkInsert.insert(doc);
        bulkRemove.find({_id: doc._id}).removeOne();
    }
  )
> bulkInsert.execute()
> bulkRemove.execute()
This should be pretty fast and it has the advantage that in case something goes wrong during the bulk insert, the original data still exists.
Edit
In order to prevent too much memory from being used, you can execute the bulk operations every x docs processed:
> var bulkInsert = db.target.initializeUnorderedBulkOp()
> var bulkRemove = db.source.initializeUnorderedBulkOp()
> var x = 10000
> var counter = 0
> var date = new Date()
> date.setMonth(date.getMonth() - 1)
> db.source.find({"yourDateField": {$lt: date}}).forEach(
    function(doc) {
        bulkInsert.insert(doc);
        bulkRemove.find({_id: doc._id}).removeOne();
        counter++;
        if (counter % x == 0) {
            bulkInsert.execute();
            bulkRemove.execute();
            bulkInsert = db.target.initializeUnorderedBulkOp();
            bulkRemove = db.source.initializeUnorderedBulkOp();
        }
    }
  )
> bulkInsert.execute()
> bulkRemove.execute()
Insert and remove:
var documentsToMove = db.collectionA.find({});
documentsToMove.forEach(function(doc) {
    db.collectionB.insert(doc);
    db.collectionA.remove(doc);
});
note: this method might be quite slow for large collections or collections holding large documents.
$out is used to create a new collection with the data, so use $out:
db.oldCollection.aggregate([{$out : "newCollection"}])
then use drop
db.oldCollection.drop()
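Since the question is about moving only documents older than one month, here is a sketch of the same idea with a $match stage (createdAt is a hypothetical field name). Note that $out replaces the entire target collection rather than appending to it:
var cutoff = new Date();
cutoff.setMonth(cutoff.getMonth() - 1);

// write the matching documents to newCollection (overwriting it if it exists)
db.oldCollection.aggregate([
    { $match: { createdAt: { $lt: cutoff } } },
    { $out: "newCollection" }
]);

// then remove only the moved documents from the source
db.oldCollection.remove({ createdAt: { $lt: cutoff } });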
You can use a range query to get the data from sourceCollection, keep the cursor in a variable, loop over it, and insert the documents into the target collection:
var doc = db.sourceCollection.find({
    "Timestamp": {
        $gte: ISODate("2014-09-01T00:00:00Z"),
        $lt: ISODate("2014-10-01T00:00:00Z")
    }
});

doc.forEach(function(doc) {
    db.targetCollection.insert(doc);
});
Hope it helps!
First option (using mongodump)
1. Get a dump of the source collection:
mongodump -d db -c source_collection
2. Restore it into the target collection:
mongorestore -d db -c target_collection dump/db/source_collection.bson
Second option (using aggregation)
Run an aggregation with $out:
db.getCollection('source_collection').aggregate([
    { $match: { "emailAddress": "apitester@mailinator.com" } },
    { $out: "target_collection" }
])
Third option (slowest)
Loop through the documents with forEach:
db.getCollection('source_collection').find().forEach(function(doc) {
    db.getCollection('target_collection').insert(doc);
});
print("Rollback Completed!");
From a performance point of view it may be better to remove many documents using a single command (especially if you have indexes on the query fields) rather than deleting them one by one.
For example:
// "someDateField" is a placeholder; use the field your range query filters on
db.source.find({someDateField: {$gte: start, $lt: end}}).forEach(function(doc) {
    db.target.insert(doc);
});
db.source.remove({someDateField: {$gte: start, $lt: end}});
This is a restatement of @Markus W Mahlberg's answer.
Returning the favor - as a function:
function moveDocuments(sourceCollection, targetCollection, filter) {
    var bulkInsert = targetCollection.initializeUnorderedBulkOp();
    var bulkRemove = sourceCollection.initializeUnorderedBulkOp();
    sourceCollection.find(filter)
        .forEach(function(doc) {
            bulkInsert.insert(doc);
            bulkRemove.find({_id: doc._id}).removeOne();
        });
    bulkInsert.execute();
    bulkRemove.execute();
}
An example use
var x = {dsid:{$exists: true}};
moveDocuments(db.pictures,db.artifacts,x)
to move all documents that have a top-level element dsid from the pictures collection to the artifacts collection.
Here's an update to @jasongarber's answer which uses the more recent Mongo bulkWrite operation (see the MongoDB bulkWrite docs), and also keeps the whole process asynchronous so you can run it as part of a wider script that depends on its completion.
async function moveDocuments (sourceCollection, targetCollection, filter) {
  const sourceDocs = await sourceCollection.find(filter)
  console.log(`Moving ${await sourceDocs.count()} documents from ${sourceCollection.collectionName} to ${targetCollection.collectionName}`)

  const idsOfCopiedDocs = await insertDocuments(targetCollection, sourceDocs)

  const targetDocs = await targetCollection.find({_id: {$in: idsOfCopiedDocs}})
  await deleteDocuments(sourceCollection, targetDocs)

  console.log('Done!')
}

async function insertDocuments (collection, documents) {
  const insertedIds = []
  const bulkWrites = []

  await documents.forEach(doc => {
    const {_id} = doc

    insertedIds.push(_id)
    bulkWrites.push({
      replaceOne: {
        filter: {_id},
        replacement: doc,
        upsert: true,
      },
    })
  })

  if (bulkWrites.length) await collection.bulkWrite(bulkWrites, {ordered: false})

  return insertedIds
}

async function deleteDocuments (collection, documents) {
  const bulkWrites = []

  await documents.forEach(({_id}) => {
    bulkWrites.push({
      deleteOne: {
        filter: {_id},
      },
    })
  })

  if (bulkWrites.length) await collection.bulkWrite(bulkWrites, {ordered: false})
}
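A usage sketch with the Node.js driver (the connection string, database, and collection names are placeholders, and createdAt is a hypothetical date field):
const {MongoClient} = require('mongodb')

async function main () {
  // placeholder connection string, database, and collection names
  const client = await MongoClient.connect('mongodb://localhost:27017')
  const db = client.db('mydb')

  const cutoff = new Date()
  cutoff.setMonth(cutoff.getMonth() - 1)

  // move documents older than one month; adjust the filter to your schema
  await moveDocuments(db.collection('collectionA'), db.collection('collectionB'), {createdAt: {$lt: cutoff}})

  await client.close()
}

main().catch(console.error)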
You can use the copyTo helper (note that it is deprecated since MongoDB 3.0 and was removed in 4.2) with the following syntax:
db.source_collection.copyTo("target_collection")
Then you can use the drop command to remove the old collection:
db.source_collection.drop()
I do like the response from @markus-w-mahlberg, however at times I have seen the need to keep it a bit simpler for people. As such I have the couple of functions below. You could naturally wrap things here with bulk operators as he did, but this code works equally well with new and old Mongo systems.
function parseNS(ns){
    // Expects people not to violate the rules with "foodb.foocollection.month.day.year";
    // if they do, they need to pass an array instead.
    if (ns instanceof Array){
        database = ns[0];
        collection = ns[1];
    }
    else{
        tNS = ns.split(".");
        if (tNS.length > 2){
            print('ERROR: NS had more than 1 period in it, please pass as an ["dbname","coll.name.with.dots"] !');
            return false;
        }
        database = tNS[0];
        collection = tNS[1];
    }
    return {database: database, collection: collection};
}

function insertFromCollection(sourceNS, destNS, query, batchSize, pauseMS){
    // Parse and check namespaces
    srcNS = parseNS(sourceNS);
    destNS = parseNS(destNS);
    if (srcNS == false || destNS == false){ return false; }

    batchBucket = new Array();
    totalToProcess = db.getSiblingDB(srcNS.database).getCollection(srcNS.collection).find(query, {_id: 1}).count();
    currentCount = 0;
    print("Processed " + currentCount + "/" + totalToProcess + "...");
    db.getSiblingDB(srcNS.database).getCollection(srcNS.collection).find(query).addOption(DBQuery.Option.noTimeout).forEach(function(doc){
        batchBucket.push(doc);
        if (batchBucket.length > batchSize){
            db.getSiblingDB(destNS.database).getCollection(destNS.collection).insert(batchBucket);
            currentCount += batchBucket.length;
            batchBucket = [];
            sleep(pauseMS);
            print("Processed " + currentCount + "/" + totalToProcess + "...");
        }
    });
    // insert whatever is left in the final partial batch
    if (batchBucket.length > 0){
        db.getSiblingDB(destNS.database).getCollection(destNS.collection).insert(batchBucket);
        currentCount += batchBucket.length;
    }
    print("Completed");
}

/** Example Usage:
insertFromCollection("foo.bar", "foo2.bar", {"type":"archive"}, 1000, 20);
*/
You could obviously add a db.getSiblingDB(srcNS.database).getCollection(srcNS.collection).remove(query, true) call if you wanted to also remove the records after they are copied to the new location. The code can easily be built up like that to make it restartable.
I had 2297 collections holding 15 million documents in total, but some collections were empty.
Using only copyTo the script failed, but with this optimization:
db.getCollectionNames().forEach(function(collname) {
    var c = db.getCollection(collname).count();
    if (c !== 0) {
        db.getCollection(collname).copyTo('master-collection');
        print('Copied collection ' + collname);
    }
});
everything worked fine for me.
NB: copyTo is deprecated because it blocks read/write operations, so I think this is fine only if you know that the database is not in use during the operation.
In my case forEach didn't work, so I had to make some changes.
var kittySchema = new mongoose.Schema({
    name: String
});
var Kitten = mongoose.model('Kitten', kittySchema);

var catSchema = new mongoose.Schema({
    name: String
});
var Cat = mongoose.model('Cat', catSchema);
These are the models for both collections.
function Recursion() {
    Kitten.findOne().lean().exec(function (error, results) {
        if (!error) {
            var objectResponse = results;
            var RequiredId = objectResponse._id;
            delete objectResponse._id;
            var swap = new Cat(objectResponse);
            swap.save(function (err) {
                if (err) {
                    return err;
                }
                else {
                    console.log("SUCCESSFUL");
                    Kitten.deleteOne({ _id: RequiredId }, function (err) {
                        if (!err) {
                            console.log('notification!');
                        }
                        else {
                            return err;
                        }
                    });
                    Recursion();
                }
            });
        }
        if (error) {
            console.log("No object found");
            // return error;
        }
    });
}
I planned to archive 1000 records at a time using the bulk insert and bulk delete methods of pymongo.
For both source and target
create mongodb objects to connect to the database.
instantiate the bulk objects. Note: I created backup bulk objects too; these help me roll back the insertions or removals when an error occurs.
example:
For source
# replace this with mongodb object creation logic
source_db_obj = db_help.create_db_obj(source_db, source_col)
source_bulk = source_db_obj.initialize_ordered_bulk_op()
source_bulk_bak = source_db_obj.initialize_ordered_bulk_op()
For target
# replace this with mongodb object creation logic
target_db_obj = db_help.create_db_obj(target_db, target_col)
target_bulk = target_db_obj.initialize_ordered_bulk_op()
target_bulk_bak = target_db_obj.initialize_ordered_bulk_op()
Obtain the source records that match the filter criteria:
source_find_results = source_db_obj.find(filter)
Loop through the source records
create target and source bulk operations
Append an archived_at field with the current datetime to each document before inserting it into the target collection:
# replace this with the logic to obtain the UTC time
doc['archived_at'] = db_help.getUTCTime()
target_bulk.insert(document)
source_bulk.remove(document)
For rollback in case of any errors or exceptions, create target_bulk_bak and source_bulk_bak operations:
target_bulk_bak.find({'_id':doc['_id']}).remove_one()
source_bulk_bak.insert(doc)
# remove the extra column
doc.pop('archived_at', None)
When the record count reaches 1000, execute the target bulk insertion and source bulk removal. Note: this method takes the target_bulk and source_bulk objects for execution.
execute_bulk_insert_remove(source_bulk, target_bulk)
When an exception occurs, execute the target_bulk_bak removals and source_bulk_bak insertions. This rolls back the changes. Since MongoDB doesn't have rollback, I came up with this hack.
execute_bulk_insert_remove(source_bulk_bak, target_bulk_bak)
Finally re-initialize the source and target bulk and bulk_bak objects. This is necessary because you can use them only once.
Complete code
from datetime import datetime
from pymongo.errors import BulkWriteError
# db_help and logger are assumed to be provided elsewhere in the project

def execute_bulk_insert_remove(source_bulk, target_bulk):
    try:
        target_bulk.execute()
        source_bulk.execute()
    except BulkWriteError as bwe:
        # log and re-raise the original BulkWriteError so the caller can catch it
        logger.error("could not archive document, reason: {}".format(bwe.details))
        raise

def archive_bulk_immediate(filter, source_db, source_col, target_db, target_col):
    """
    filter: filter criteria for backup
    source_db: source database name
    source_col: source collection name
    target_db: target database name
    target_col: target collection name
    """
    count = 0
    bulk_count = 1000

    source_db_obj = db_help.create_db_obj(source_db, source_col)
    source_bulk = source_db_obj.initialize_ordered_bulk_op()
    source_bulk_bak = source_db_obj.initialize_ordered_bulk_op()

    target_db_obj = db_help.create_db_obj(target_db, target_col)
    target_bulk = target_db_obj.initialize_ordered_bulk_op()
    target_bulk_bak = target_db_obj.initialize_ordered_bulk_op()

    source_find_results = source_db_obj.find(filter)
    start = datetime.now()

    for doc in source_find_results:
        doc['archived_at'] = db_help.getUTCTime()
        target_bulk.insert(doc)
        source_bulk.find({'_id': doc['_id']}).remove_one()
        target_bulk_bak.find({'_id': doc['_id']}).remove_one()
        doc.pop('archived_at', None)
        source_bulk_bak.insert(doc)
        count += 1

        if count % bulk_count == 0:
            logger.info("count: {}".format(count))
            try:
                execute_bulk_insert_remove(source_bulk, target_bulk)
            except BulkWriteError as bwe:
                execute_bulk_insert_remove(source_bulk_bak, target_bulk_bak)
                logger.info("Bulk Write Error: {}".format(bwe.details))
                raise
            source_bulk = source_db_obj.initialize_ordered_bulk_op()
            source_bulk_bak = source_db_obj.initialize_ordered_bulk_op()
            target_bulk = target_db_obj.initialize_ordered_bulk_op()
            target_bulk_bak = target_db_obj.initialize_ordered_bulk_op()

    # flush any operations left in the final partial batch
    if count % bulk_count != 0:
        execute_bulk_insert_remove(source_bulk, target_bulk)

    end = datetime.now()
    logger.info("archived {} documents to {} in {}.".format(
        count, target_col, end - start))