Unordered bulk update records in MongoDB shell

I've got a collection consisting of millions of documents that resemble the following:
{
    _id: ObjectId('...'),
    value: "0.53",
    combo: [
        {
            h: 0,
            v: "0.42"
        },
        {
            h: 1,
            v: "1.32"
        }
    ]
}
The problem is that the values are stored as strings and I need to convert them to float/double.
I'm trying this and it works, but it'll take days to complete given the volume of data:
db.collection.find({}).forEach(function(obj) {
    if (typeof(obj.value) === "string") {
        obj.value = parseFloat(obj.value);
        db.collection.save(obj);
    }
    obj.combo.forEach(function(hv){
        if (typeof(hv.v) === "string") {
            hv.v = parseFloat(hv.v);
            db.collection.save(obj);
        }
    });
});
I came across bulk updates while reading the Mongo docs and I'm trying this:
var bulk = db.collection.initializeUnorderedBulkOp();
bulk.find({}).update(
    {
        $set: {
            "value": parseFloat("value")
        }
    });
bulk.execute();
This runs... but I get NaN as the value, because it's literally trying to convert the string "value" to a float. I've tried variations like this.value and "$value" to no avail. Plus, this approach only attempts to correct the top-level value field, not the ones in the array.
I'd appreciate any help. Thanks in advance!

Figured it out the following way:
1) To convert at the document level, I came across this post, and the reply by Markus paved the way to my solution:
var bulk = db.collection.initializeUnorderedBulkOp();
var myDocs = db.collection.find();
var ops = 0;
myDocs.forEach(
    function(myDoc) {
        bulk.find({ _id: myDoc._id }).updateOne(
            {
                $set : {
                    "value": parseFloat(myDoc.value)
                }
            }
        );
        if ((++ops % 1000) === 0){
            bulk.execute();
            bulk = db.collection.initializeUnorderedBulkOp();
        }
    }
);
bulk.execute();
2) The second part involved updating the values of the objects inside the array, and I discovered the syntax to do so in the accepted answer on this post. In my case, I knew that there were 24 values in the array. I ran this separately from the first query, and the result looked like:
var bulk = db.collection.initializeUnorderedBulkOp();
var myDocs = db.collection.find();
var ops = 0;
myDocs.forEach(
    function(myDoc) {
        bulk.find({ _id: myDoc._id }).update(
            {
                $set : {
                    "combo.0.v": parseFloat(myDoc.combo[0].v),
                    "combo.1.v": parseFloat(myDoc.combo[1].v),
                    "combo.2.v": parseFloat(myDoc.combo[2].v),
                    "combo.3.v": parseFloat(myDoc.combo[3].v),
                    "combo.4.v": parseFloat(myDoc.combo[4].v),
                    "combo.5.v": parseFloat(myDoc.combo[5].v),
                    "combo.6.v": parseFloat(myDoc.combo[6].v),
                    "combo.7.v": parseFloat(myDoc.combo[7].v),
                    "combo.8.v": parseFloat(myDoc.combo[8].v),
                    "combo.9.v": parseFloat(myDoc.combo[9].v),
                    "combo.10.v": parseFloat(myDoc.combo[10].v),
                    "combo.11.v": parseFloat(myDoc.combo[11].v),
                    "combo.12.v": parseFloat(myDoc.combo[12].v),
                    "combo.13.v": parseFloat(myDoc.combo[13].v),
                    "combo.14.v": parseFloat(myDoc.combo[14].v),
                    "combo.15.v": parseFloat(myDoc.combo[15].v),
                    "combo.16.v": parseFloat(myDoc.combo[16].v),
                    "combo.17.v": parseFloat(myDoc.combo[17].v),
                    "combo.18.v": parseFloat(myDoc.combo[18].v),
                    "combo.19.v": parseFloat(myDoc.combo[19].v),
                    "combo.20.v": parseFloat(myDoc.combo[20].v),
                    "combo.21.v": parseFloat(myDoc.combo[21].v),
                    "combo.22.v": parseFloat(myDoc.combo[22].v),
                    "combo.23.v": parseFloat(myDoc.combo[23].v)
                }
            }
        );
        if ((++ops % 1000) === 0){
            bulk.execute();
            bulk = db.collection.initializeUnorderedBulkOp();
        }
    }
);
bulk.execute();
Just to give an idea of the performance: the forEach was getting through around 900 documents a minute, which for 15 million records would literally have taken days! Not only that, but it was only converting the types at the document level, not at the array level. For that, I would have had to loop through each document and then through each array (15 million x 24 iterations)! With this approach (running both queries side by side), both completed in under 6 hours.
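A note for anyone whose arrays vary in length: the $set document can also be built dynamically instead of hard-coding each index. A rough, untested sketch of that variation (assuming every element's v needs converting):
var bulk = db.collection.initializeUnorderedBulkOp();
var ops = 0;
db.collection.find().forEach(function(myDoc) {
    var setDoc = {};
    // Build one "combo.<index>.v" entry per array element
    myDoc.combo.forEach(function(hv, i) {
        setDoc["combo." + i + ".v"] = parseFloat(hv.v);
    });
    bulk.find({ _id: myDoc._id }).updateOne({ $set: setDoc });
    if ((++ops % 1000) === 0) {
        bulk.execute();
        bulk = db.collection.initializeUnorderedBulkOp();
    }
});
bulk.execute();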
I hope this helps someone else.

Related

How to change data type of a field using mongo shell?

Is it possible to change the data type? E.g. I have a field 'user' and its data type is string. I need to change its data type to ObjectId.
I have tried the following but I'm getting an error:
> db.booking.find().foreach( function (x) { x.user = ObjectId(x.user); db.booking.save(x); });
2017-06-28T09:30:35.317+0000 E QUERY [thread1] TypeError: db.booking.find(...).foreach is not a function :
#(shell):1:1
>
The best way is to use the bulk operations API with .bulkWrite():
var ops = [];

db.booking.find({},{ "user": 1 }).forEach(doc => {
    doc.user = new ObjectId(doc.user.valueOf());
    ops.push({
        "updateOne": {
            "filter": { "_id": doc._id },
            "update": {
                "$set": { "user": doc.user }
            }
        }
    });
    if ( ops.length >= 500 ) {
        db.booking.bulkWrite(ops);
        ops = [];
    }
});

if ( ops.length > 0 ) {
    db.booking.bulkWrite(ops);
    ops = [];
}
As opposed to methods like .save(), this only updates the specified field, and it only commits to the server in "batches", so you replace the back-and-forth communication of one round trip per document with a single write and acknowledgement per batch. A batch size of 500 is reasonable, but the underlying driver and server will split batches at 1000 operations anyway.

How do I Bulk Add a Random Field to each record in MongoDB

There are a number of questions and answers about randomly ordering results or randomly getting a single record. The answers recommend adding a random field, creating an index on that field, and then doing a random draw. It looks like:
db.myindex.find().forEach(function(doc) {
    db.myindex.update({ _id: doc._id }, { $set: { rand: Math.random() } });
});
This works great, but it takes several hours (lots and lots of data). It looks like it is limited by write locking, which makes sense since the update happens for each record. How do I do this in bulk? I tried:
var bulk = db.myindex.initializeUnorderedBulkOp();
bulk.find({}).update( { $set: { rand: Math.random() } } );
bulk.execute();
But it sets the rand field to the same value for every record! How do I fix this?
Edit: By the way, the reason I need to do this is because I get a huge bson file from someone else and I need to import it frequently, so I can't wait multiple hours for it to be updated.
Introduce a loop, with the bulk operations sent to the server once per 1000 documents, or with as many modifications as you can fit under the 16MB BSON limit:
var bulk = db.myindex.initializeOrderedBulkOp();
var counter = 0;

db.myindex.find().forEach(function(doc) {
    bulk.find({ "_id": doc._id }).updateOne({
        "$set": { "rand": Math.random() }
    });
    counter++;
    if (counter % 1000 == 0) {
        bulk.execute();
        bulk = db.myindex.initializeOrderedBulkOp();
    }
});

if (counter % 1000 != 0){
    bulk.execute();
}
If the collection is just static data and you're getting a BSON file from someone else, it might be quicker to stream the BSON file through a filter to generate a new BSON file that you can then load with mongorestore.
Here is one that I wrote using nodeJS that can process a BSON file at around 1GB/min.
var bson = require('bson');
var BSON = new bson.BSONPure.BSON();
var BSONStream = require('bson-stream');
var fs = require('fs');
var sb = require('stream-buffers');

var rs = fs.createReadStream('tweets.bson');
var ws = fs.createWriteStream('tweets_random.bson', { flags: 'a' });

// Buffer serialized documents in memory and flush them in ~1MB chunks
var writeBuffer = new sb.WritableStreamBuffer({
    initialSize: (1024 * 1024),
    incrementAmount: (10 * 1024)
});

rs.pipe(new BSONStream()).on('data', function(obj) {
    obj.rand = Math.random();                  // add the random field to each document
    writeBuffer.write(BSON.serialize(obj));
    if (writeBuffer.size() > (1024 * 1024)) {
        var size = writeBuffer.size();
        ws.write(writeBuffer.getContents(), function() {
            console.log("Wrote", size, "bytes");
            console.log("Buffer has:", writeBuffer.size(), "bytes left");
        });
    }
});
It might go faster if you modify the buffer size/increment parameters.
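For example (the numbers here are purely illustrative, not tuned values), reusing the sb stream-buffers handle from the snippet above:
// Illustrative only: start with a larger buffer that grows in 1MB steps
var writeBuffer = new sb.WritableStreamBuffer({
    initialSize: (8 * 1024 * 1024),
    incrementAmount: (1024 * 1024)
});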
This is of course assuming that you have the luxury of reimporting your data.

mapreduce between consecutive documents

Setup:
I have a large collection where each document has the following fields:
Name - String
Begin - timestamp
End - timestamp
Problem:
I want to get the gaps between consecutive documents, using the map-reduce paradigm.
Approach:
I'm trying to build a new collection, mid, of document pairs; from that I can compute the differences using $unwind and Pair[1].Begin - Pair[0].End
function map() {
    emit(0, this);
}

function reduce(key, values) {
    var i = 0;
    var pairs = [];
    while (i < values.length - 1) {
        pairs.push([values[i], values[i + 1]]);
        i = i + 1;
    }
    return { "pairs": pairs };
}

db.collection.mapReduce(map, reduce, { sort: { Begin: 1 }, out: { replace: "mid" } })
This works with a limited number of documents because of the 16MB document cap. I'm not sure if I need to pull the collection into memory and do it there. How else can I approach this problem?
MongoDB's mapReduce handles what you propose differently from the method you are using to solve it. The key factor here is "keeping" the "previous" document in order to make the comparison to the next.
The mechanism that supports this is the "scope" functionality, which allows a sort of "global" variable to be used in the overall code. As you will see, with that in mind, what you are asking for needs no "reduction" at all, since there is no "grouping", just emission of document "pair" data:
db.collection.mapReduce(
    function() {
        if ( last == null ) {
            last = this;
        } else {
            emit(
                {
                    "start_id": last._id,
                    "end_id": this._id
                },
                this.Begin - last.End
            );
            last = this;
        }
    },
    function() {},  // no reduction required
    {
        "out": { "inline": 1 },
        "scope": { "last": null }
    }
)
By using a "global" to keep the last document, the code stays both simple and efficient. Use a collection for the output instead if the result is too large for inline output.
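For example, the same call writing the pairs to a collection instead of returning them inline (the output collection name "gaps" is only a placeholder):
db.collection.mapReduce(
    function() {
        if ( last == null ) {
            last = this;
        } else {
            emit(
                { "start_id": last._id, "end_id": this._id },
                this.Begin - last.End
            );
            last = this;
        }
    },
    function() {},  // still no reduction required
    {
        "out": { "replace": "gaps" },   // placeholder collection name
        "scope": { "last": null }
    }
)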

MongoDB MapReduce: Not working as expected for more than 1000 records

I wrote a mapreduce function where the records are emitted in the following format
{userid:<xyz>, {event:adduser, count:1}}
{userid:<xyz>, {event:login, count:1}}
{userid:<xyz>, {event:login, count:1}}
{userid:<abc>, {event:adduser, count:1}}
where userid is the key and the remaining are the value for that key.
After the MapReduce function, I want to get the result in following format
{userid:<xyz>,{events: [{adduser:1},{login:2}], allEventCount:3}}
To achieve this, I wrote the following reduce function.
I know this can be achieved with a group-by, both in the aggregation framework and in map-reduce, but we require similar functionality for a more complex scenario, so I am taking this approach.
var reducefn = function(key, values) {
    var result = { allEventCount: 0, events: [] };
    values.forEach(function(value) {
        var notfound = true;
        for (var n = 0; n < result.events.length; n++) {
            eventObj = result.events[n];
            for (ev in eventObj) {
                if (ev == value.event) {
                    result.events[n][ev] += value.allEventCount;
                    notfound = false;
                    break;
                }
            }
        }
        if (notfound == true) {
            var newEvent = {};
            newEvent[value.event] = 1;
            result.events.push(newEvent);
        }
        result.allEventCount += value.allEventCount;
    });
    return result;
}
This runs perfectly when I run it for 1000 records; when there are 3k or 10k records, the result I get is something like this:
{ "_id" : {...}, "value" :{"allEventCount" :30, "events" :[ { "undefined" : 1},
{"adduser" : 1 }, {"remove" : 3 }, {"training" : 1 }, {"adminlogin" : 1 },
{"downgrade" : 2 } ]} }
I'm not able to understand where this undefined came from, and the sum of the individual events is also less than allEventCount. All the docs in the collection have a non-empty event field, so there is no chance of undefined.
Mongo DB version -- 2.2.1
Environment -- Local machine, no sharding.
In the reduce function, why does the operation result.events[n][ev] += value.allEventCount; fail when the similar operation result.allEventCount += value.allEventCount; works?
The corrected answer, as suggested by johnyHK:
Reduce function:
var reducefn = function(key, values) {
    var result = { totEvents: 0, event: [] };
    values.forEach(function(value) {
        value.event.forEach(function(eventElem) {
            var notfound = true;
            for (var n = 0; n < result.event.length; n++) {
                eventObj = result.event[n];
                for (ev in eventObj) {
                    for (evv in eventElem) {
                        if (ev == evv) {
                            result.event[n][ev] += eventElem[evv];
                            notfound = false;
                            break;
                        }
                    }
                }
            }
            if (notfound == true) {
                result.event.push(eventElem);
            }
        });
        result.totEvents += value.totEvents;
    });
    return result;
}
The shape of the object you emit from your map function must be the same as the object returned from your reduce function, as the results of a reduce can get fed back into reduce when processing large numbers of docs (like in this case).
So you need to change your emit to emit docs like this:
{userid:<xyz>, {events:[{adduser: 1}], allEventCount:1}}
{userid:<xyz>, {events:[{login: 1}], allEventCount:1}}
and then update your reduce function accordingly.
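For illustration only, a map function consistent with that emit shape might look like this (the userid and event field names are taken from the example documents; adjust to your schema):
var mapfn = function() {
    var ev = {};
    ev[this.event] = 1;   // e.g. { adduser: 1 }
    emit(this.userid, { events: [ ev ], allEventCount: 1 });
};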

mongodb query with group()?

This is my collection structure:
coll {
    id: ...,
    fieldA: {
        fieldA1: [
            {
                ...
            }
        ],
        fieldA2: [
            {
                text: "ciao"
            },
            {
                text: "hello"
            }
        ]
    }
}
I want to extract all fieldA2 values in my collection, but if a fieldA2 value appears two or more times I want to show it only once.
I tried this:
db.runCommand({ distinct: 'coll', key: 'fieldA.fieldA2.text' })
but no luck; it returned all the fieldA1 entries in the collection. So I tried:
db.coll.group({
    key: { 'fieldA.fieldA2.text': 1 },
    cond: { },
    reduce: function ( curr, result ) { },
    initial: { }
})
But this returns an empty array...
How can I do this and see the execution time? Thank you very much.
Since you are running 2.0.4 (I recommend upgrading), you must run this through map-reduce (I think; maybe there is a better way). Something like:
map = function() {
    for (i in this.fieldA.fieldA2) {
        emit(this.fieldA.fieldA2[i].text, 1);
        // emit per text value so that this will group unique text values
    }
}

reduce = function(key, values) {
    // Now let's just do a simple count of how many times that text value was seen
    var count = 0;
    for (index in values) {
        count += values[index];
    }
    return count;
}
This will then give you a collection of documents where _id is the unique text value from fieldA2 and the value field is the number of times it appeared in the collection.
Again, this is a draft and is not tested.
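If it helps, a hypothetical invocation of the draft above (the output collection name 'fieldA2_counts' is just a placeholder):
// Run the map-reduce and write the counts to a placeholder output collection
db.coll.mapReduce(map, reduce, { out: "fieldA2_counts" });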
I think the answer is simpler than a map-reduce... if you just want distinct values plus the execution time, the following should work:
var startTime = new Date()
var values = db.coll.distinct('fieldA.fieldA2.text');
var endTime = new Date();
print("Took " + (endTime - startTime) + " ms");
That would result in a values array with a list of distinct fieldA.fieldA2.text values:
[ "ciao", "hello", "yo", "sayonara" ]
And a reported execution time:
Took 2 ms