MongoDB aggregate $group and $match with group result

I have a collection as follows
{
"_id" : ObjectId("553b2c740f12bb30f85bd41c"),
"symbol" : "EUR/GBP",
"order_id" : "PW_BarclaysTrades60530",
"ticket_id" : "PW_BarclaysTrades.60530",
"basketid" : "TESTBASKET-1428483828043",
"date_sent" : ISODate("2015-04-07T18:30:00.000Z"),
"destination" : "BarclaysTrades",
"order_price" : 0.0000000000000000,
"order_quantity" : 4000000.0000000000000000,
"order_type" : 1.0000000000000000,
"parent_quantity" : 250000000.0000000000000000,
"time_sent" : "09:03:48",
"side" : 1,
"tif" : "0",
"execution_id" : 88939,
"date_recvd" : ISODate("2015-04-07T18:30:00.000Z"),
"exe_quantity" : 50000.0000000000000000,
"time_recvd" : "09:03:48",
"execution_price" : 2.5000000000000000,
"execution_type" : 1
}
I would like to get the documents whose execution_price is greater than the average execution_price for each destination in the collection.
I am trying to aggregate as follows:
db.orders_by_symbol.aggregate([
    { $limit: 300000 },
    { $match: { destination: "PAPER" } },
    { $group: {
        _id: { Destination: "$destination" },
        avg_exec_price: { $avg: "$execution_price" },
        "data": { "$push": "$$ROOT" }
    }},
    { $unwind: "$data" },
    { $match: { execution_price: { $ne: "$avg_exec_price" } } },
    { $project: {
        _id: 0,
        symbol: "$data.symbol",
        destination: "$data.destination",
        execution_id: "$data.execution_id",
        exec_price: "$data.execution_price",
        avg_ex_price: "$avg_exec_price"
    }}
], { allowDiskUse: true })
I get the following result:
{
"result" : [
{
"symbol" : "EUR/GBP",
"destination" : "PAPER",
"execution_id" : 89109,
"exec_price" : 6.5000000000000000,
"avg_ex_price" : 95.0747920857049140
},
{
"symbol" : "EUR/GBP",
"destination" : "PAPER",
"execution_id" : 89110,
"exec_price" : 6.0000000000000000,
"avg_ex_price" : 95.0747920857049140
},
{
"symbol" : "EUR/GBP",
"destination" : "PAPER",
"execution_id" : 89111,
"exec_price" : 6.5000000000000000,
"avg_ex_price" : 95.0747920857049140
}
]
}
But when I replace the '$ne' operator with '$gt', no results are produced. Both exec_price and avg_ex_price are of double datatype, so I am not sure why it is not working as expected.
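The root cause: a query-style $match cannot compare two fields of the same document, so "$avg_exec_price" is treated as a literal string. $gt against that string matches nothing, while $ne matches everything. On MongoDB 3.6 and newer, $expr lets $match evaluate an aggregation comparison; a minimal sketch of the one stage to swap in after the $unwind (field paths taken from the pipeline above):

{ "$match": { "$expr": { "$gt": [ "$data.execution_price", "$avg_exec_price" ] } } }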

Using MongoDB Server 3.6 and newer:
var pipeline = [
{ "$match": { "destination": "PAPER" } },
{ "$facet": {
"average": [
{ "$group": {
"_id": null,
"avg_exec_price": { "$avg": "$execution_price" }
} }
],
"data": [
{ "$project": {
"_id": 0,
"symbol": 1,
"destination": 1,
"execution_id": 1,
"execution_price": 1
} }
]
} },
{ "$addFields": {
"average": { "$arrayElemAt": ["$average", 0] }
} },
{ "$addFields": {
"data": {
"$filter" : {
"input": {
"$map": {
"input": "$data",
"as": "el",
"in": {
"symbol": "$$el.symbol",
"destination": "$$el.symbol",
"execution_id": "$$el.symbol",
"exec_price": "$$el.execution_price",
"avg_exec_price": "$average.avg_exec_price"
}
}
},
"as": "doc",
"cond": {
"$gt" : [
"$$doc.exec_price",
"$$doc.avg_exec_price"
]
}
}
}
} },
{ "$unwind": "$data" },
{ "$replaceRoot": { "newRoot": "$data" } }
];
For MongoDB versions which do not support the above operators and pipelines, use the $project operator to create an additional field that stores the comparison of the two fields via the $gt aggregation operator:
var pipeline = [
{ "$match": {
"destination": "PAPER"
} },
{ "$group": {
"_id": null,
"avg_exec_price": { "$avg": "$execution_price" },
"data": { "$addToSet": "$$ROOT" }
} },
{ "$unwind": "$data" },
{ "$project": {
"_id": 0,
"data": 1,
"avg_exec_price": 1,
"isGreaterThanAverage": {
"$gt" : [ "$data.execution_price", "$avg_exec_price" ]
}
} },
{ "$match": {
"isGreaterThanAverage": true
} },
{ "$project": {
"_id": 0,
"symbol": "$data.symbol",
"destination": "$data.destination",
"execution_id": "$data.execution_id",
"exec_price": "$data.execution_price",
"avg_ex_price": "$avg_exec_price"
} }
];
Now to test the above aggregation, suppose you have the following minimal test-case collection:
db.test.insert([{
"symbol" : "EUR/GBP",
"destination" : "PAPER",
"execution_id" : 88939,
"execution_price" : 1.8
},
{
"symbol" : "EUR/GBP",
"destination" : "PAPER",
"execution_id" : 88921,
"execution_price" : 6.8
},
{
"symbol" : "USD/GBP",
"destination" : "foo",
"execution_id" : 88955,
"execution_price" : 3.1
},
{
"symbol" : "AUD/GBP",
"destination" : "PAPER",
"execution_id" : 88941,
"execution_price" : 1.1
},
{
"symbol" : "EUR/GBP",
"destination" : "PAPER",
"execution_id" : 88907,
"execution_price" : 9.4
}]);
Running the above aggregation
db.test.aggregate(pipeline);
will produce the result:
/* 0 */
{
"result" : [
{
"symbol" : "EUR/GBP",
"destination" : "PAPER",
"execution_id" : 88907,
"exec_price" : 9.4,
"avg_ex_price" : 4.775
},
{
"symbol" : "EUR/GBP",
"destination" : "PAPER",
"execution_id" : 88921,
"exec_price" : 6.8,
"avg_ex_price" : 4.775
}
],
"ok" : 1
}

After reading your question: you should use $cond in your aggregation, as below:
db.collectionName.aggregate({
"$match": {
"destination": "PAPER"
}
}, {
"$group": {
"_id": "$destination",
"avg_exec_price": {
"$avg": "$execution_price"
},
"data": {
"$push": "$$ROOT"
}
}
}, {
"$unwind": "$data"
}, {
"$group": {
"_id": "$_id",
"data": {
"$push": {
"check": {
"$cond": [{
"$gt": ["$data.execution_price", "$avg_exec_price"] // check in $cond if execution_price gt avg_exec_price
}, "$data", ""] //push data if true else blank
}
}
}
}
}, {
"$unwind": "$data"
}, {
"$match": {
"data.check": {
"$exists": true, // check data.check not empty or blank
"$ne": ""
}
}
}, {
"$project": {
"_id": "$_id",
"data": "$data.check"
}
}).pretty()
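A leaner variant of the same idea, assuming MongoDB 3.2 or newer, is to drop the below-average documents inside the group with $filter instead of pushing blanks and matching them out; a sketch:

db.collectionName.aggregate([
    { "$match": { "destination": "PAPER" } },
    { "$group": {
        "_id": "$destination",
        "avg_exec_price": { "$avg": "$execution_price" },
        "data": { "$push": "$$ROOT" }
    }},
    // Keep only the documents priced above the group's average
    { "$project": {
        "avg_exec_price": 1,
        "data": {
            "$filter": {
                "input": "$data",
                "as": "d",
                "cond": { "$gt": [ "$$d.execution_price", "$avg_exec_price" ] }
            }
        }
    }},
    { "$unwind": "$data" }
])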

Forming an array with aggregation in MongoDB

I have a document in MongoDB 3.4 with the following structure:
{
"_id" : ObjectId("5e3419e468d01013eadb83dc"),
"id_station" : "62",
"fiware_service" : null,
"fiware_servicepath" : null,
"id_fiware_name" : "CE_del_medio",
"attrName" : "15",
"attrType" : "float",
"attrValue" : 0.33,
"id_sensor_station_absolute" : "15_62",
"recvTimeTs" : 1580387045,
"recvTime" : "2020-01-30T12:24:05.00Z",
"id_fiware" : "15",
"sensor_type" : [
{
"name" : "id",
"type" : "String",
"value" : "15"
},
{
"name" : "img",
"type" : "String",
"value" : "assets/img/contrast.png"
},
{
"name" : "manufacturer",
"type" : "String",
"value" : "Hortisis"
},
{
"name" : "medida",
"type" : "String",
"value" : "mS/cm"
},
{
"name" : "name_comun",
"type" : "String",
"value" : "CE del medio"
},
{
"name" : "place",
"type" : "String",
"value" : "interior"
},
{
"name" : "timestamp",
"type" : "DateTime",
"value" : "2020-01-30T12:24:05.00Z"
},
{
"name" : "type",
"type" : "String",
"value" : "fertigation"
}
]
}
I need to convert the sensor_type field to an array with only one object, as follows:
{
"_id":"15_62",
"medidas":[
{
"_id":"5e3419e468d01013eadb83dc",
"marca":"Hortisis",
"modelo":"Estacion",
"fabricante":"Hortisis",
"id_station":"15",
"sensor_type":[
{
"name":"15",
"type":"fertigation",
"place":"interior",
"img":"assets/img/contrast.png",
"name_comun":"Temp. Suelo",
"medida":"mS/cm"
}
],
"attrName":"15",
"attrValue":0.33,
"recvTimeTs":1580387045,
"recvTime":"2020-01-30T12:24:05.00Z",
"id_sensor_station_absolute":"15_62"
}
]
}
As you can see, it reformats each sensor_type entry into a name: value pair.
I'm working with Node.js and Mongoose.
This is my query. First I match, sort, and keep only the latest document, and then I format the output with $project. The problem is that I don't know how to make $project produce that format: if I put "sensor_type": "$latest.attributes.name" it only shows the names, and I don't know how to produce the format mentioned above.
Datagreenhouse.aggregate([
{ "$match": { "id_sensor_station_absolute": { "$in": array3 } } }, // "id_station": { "$in": id_station },
{ "$sort": { "recvTime": -1 } },
{
"$group": {
"_id": "$id_sensor_station_absolute",
"latest": { "$first": "$$ROOT" },
}
},
{
"$project": {
"_id": 1,
"id_station": "$latest.id_station",
//"id_sensor_station_absolute": "$id_sensor_station_absolute",
"attrName": "$latest.attrName",
"attrValue": "$latest.attrValue",
"recvTimeTs": "$latest.recvTimeTs",
"recvTime": "$latest.recvTime",
"id_sensor_station_absolute": "$latest.id_sensor_station_absolute",
"sensor_type": "$latest.attributes",
"name": { $arrayElemAt: ["$latest.attributes", 0] },
"type": { $arrayElemAt: ["$latest.attributes", 1] },
"place": { $arrayElemAt: ["$latest.attributes", 2] },
"img": { $arrayElemAt: ["$latest.attributes", 1] },
"name_comun": { $arrayElemAt: ["$latest.attributes", 4] },
"medida": { $arrayElemAt: ["$latest.attributes", 3] },
"interfaz": { $arrayElemAt: ["$latest.attributes", 6] },
}
}
], (err, DatagreenhouseRecuperado) => {
if (err) return res.status(500).send({ message: 'Error al realizar la peticion' + err })
if (!DatagreenhouseRecuperado) return res.status(404).send({ message: 'Error el usuario no existe' })
res.status(200).send({ DatagreenhouseRecuperado })
})
Thank you for your help. Best regards.
Since version 3.4.4, MongoDB has provided a magnificent operator: $arrayToObject
This operator lets us transform an array of key/value pairs into an object.
The transformation, step by step:

// RAW DATA
"sensor_type" : [
    { "name" : "manufacturer", "type" : "String", "value" : "Hortisis" }
]

// after $map
"sensor_type" : [
    { "k" : "manufacturer", "v" : "Hortisis" }
]

// after $arrayToObject
"sensor_type" : { "manufacturer" : "Hortisis" }
db.datagreenhouses.aggregate([
{
"$match": {} // setup your match criteria
},
{
"$sort": {
"recvTime": -1
}
},
{
$group: {
_id: "$id_sensor_station_absolute",
medidas: {
$push: {
_id: "$_id",
"marca": "Hortisis", // don't know where you get this value
"modelo": "Estacion", // don't know where you get this value
"id_station": "$id_station",
"attrName": "$attrName",
"attrValue": "$attrValue",
"recvTimeTs": "$recvTimeTs",
"recvTime": "$recvTime",
"id_sensor_station_absolute": "$id_sensor_station_absolute",
"sensor_type": {
$arrayToObject: {
$map: {
input: "$sensor_type",
in: {
k: "$$this.name",
v: "$$this.value"
}
}
}
}
}
}
}
}
])
MongoPlayground
[
{
"_id": "15_62",
"medidas": [
{
"_id": ObjectId("5e3419e468d01013eadb83dc"),
"attrName": "15",
"attrValue": 0.33,
"id_sensor_station_absolute": "15_62",
"id_station": "62",
"marca": "Hortisis",
"modelo": "Estacion",
"recvTime": "2020-01-30T12:24:05.00Z",
"recvTimeTs": 1.580387045e+09,
"sensor_type": {
"id": "15",
"img": "assets/img/contrast.png",
"manufacturer": "Hortisis",
"medida": "mS/cm",
"name_comun": "CE del medio",
"place": "interior",
"timestamp": "2020-01-30T12:24:05.00Z",
"type": "fertigation"
}
}
]
}
]
All you need to do now is transform this easy-to-handle shape into the desired result ($unwind the medidas field, reshape it, then $group again), as sketched below.
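A minimal sketch of those follow-up stages, appended to the pipeline above; the mapping of name to $medidas.sensor_type.id is an assumption based on the example output:

{ "$unwind": "$medidas" },
{ "$addFields": {
    // reshape the flattened object into the one-element array form requested
    "medidas.sensor_type": [{
        "name": "$medidas.sensor_type.id",       // assumed mapping
        "type": "$medidas.sensor_type.type",
        "place": "$medidas.sensor_type.place",
        "img": "$medidas.sensor_type.img",
        "name_comun": "$medidas.sensor_type.name_comun",
        "medida": "$medidas.sensor_type.medida"
    }]
}},
{ "$group": { "_id": "$_id", "medidas": { "$push": "$medidas" } } }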
Note: If your MongoDB is older than version 3.4.4, follow this upgrade procedure:
Install MongoDB 3.4.4 or newer
Take a mongodump with the new MongoDB version
Stop the old MongoDB
Remove the /data directory (make a backup first)
Start the new MongoDB and run mongorestore

Combine results based on condition during group by

Mongo query generated from Java code:
{
"pipeline": [{
"$match": {
"Id": "09cd9a5a-85c5-4948-808b-20a52d92381a"
}
},
{
"$group": {
"_id": "$result",
"id": {
"$first": "$result"
},
"labelKey": {
"$first": {
"$ifNull": ["$result",
"$result"]
}
},
"value": {
"$sum": 1
}
}
}]
}
The field 'result' can have values like Approved, Rejected, null, and "" (empty string). What I am trying to achieve is combining the counts of null and empty together,
so that the empty-string id carries the count of both null and "", which here equals 4.
I'm sure there's a more "proper" way, but this is what I could quickly come up with:
[
{
"$group" : {
"_id" : "$result",
"id" : {
"$first" : "$result"
},
"labelKey" : {
"$first" : {
"$ifNull" : [
"$result",
"$result"
]
}
},
"value" : {
"$sum" : 1.0
}
}
},
{
"$group" : {
"_id" : {
"$cond" : [{
$or: [
{"$eq": ["$_id", "Approved"]},
{"$eq": ["$_id", "Rejected"]},
]}},
"$_id",
""
]
},
"temp" : {
"$push" : {
"_id" : "$_id",
"labelKey" : "$labelKey"
}
},
"count" : {
"$sum" : "$value"
}
}
},
{
"$unwind" : "$temp"
},
{
"$project" : {
"_id" : "$temp._id",
"labelKey": "$temp.labelKey",
"count" : "$count"
}
}
]
Since the second group runs on four documents at most, I don't feel too bad about doing this.
I have used $facet.
The MongoDB $facet stage lets you run several independent sub-pipelines within one stage of a pipeline, all using the same data. This means that you can run several aggregations that share the same preliminary and successive stages.
var queries = [{
"$match": {
"Id": "09cd9a5a-85c5-4948-808b-20a52d92381a"
}
},{
$facet: {//
"empty": [
{
$match : {
result : { $in : ['',null]}
}
},{
"$group" : {
"_id" : null,
value : { $sum : 1}
}
}
],
"non_empty": [
{
$match : {
result : { $nin : ['',null]}
}
},{
"$group" : {
"_id" : '$result',
value : { $sum : 1}
}
}
]
}
},
{
$project: {
results: {
$concatArrays: [ "$empty", "$non_empty" ]
}
}
}];
Output :
{
"results": [{
"_id": null,
"value": 52 // count of both '' and null.
}, {
"_id": "Approved",
"value": 83
}, {
"_id": "Rejected",
"value": 3661
}]
}
Changing the group _id as below solved the problem: $ifNull maps both null and missing values to "", so they fall into the same bucket as the empty string.
{
"$group": {
"_id": {
"$ifNull": ["$result", ""]
},
"id": {
"$first": "$result"
},
"labelKey": {
"$first": {
"$ifNull": ["$result",
"$result"]
}
},
"value": {
"$sum": 1
}
}
}
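A quick sanity check of that grouping against a hypothetical six-document sample (the collection name and values are illustrative, and the $match stage is omitted):

db.test.insertMany([
    { "result": "Approved" },
    { "result": "Rejected" },
    { "result": null },
    { "result": "" },
    { "result": null },
    { }   // 'result' missing entirely
])

Running just the $group stage above on these buckets "", null, and missing together (id and labelKey omitted here):

{ "_id": "", "value": 4 }
{ "_id": "Approved", "value": 1 }
{ "_id": "Rejected", "value": 1 }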

Is it recommended to use unwind when working with large amounts of data with nested documents in MongoDB [duplicate]

I am new to MongoDB and trying to work with nested documents. I have a query as below:
db.EndpointData.aggregate([
{ "$group" : { "_id" : "$EndpointId", "RequestCount" : { "$sum" : 1 }, "FirstActivity" : { "$min" : "$DateTime" }, "LastActivity" : { "$max" : "$DateTime" }, "Tags" : { "$push" : "$Tags" } } },
{ "$unwind" : "$Tags" },
{ "$unwind" : "$Tags" },
{ "$group" : { "_id" : "$_id", "RequestCount" : { "$first" : "$RequestCount" }, "Tags" : { "$push" : "$Tags" }, "FirstActivity" : { "$first" : "$FirstActivity" }, "LastActivity" : { "$first" : "$LastActivity" } } },
{ "$unwind" : "$Tags" },
{ "$unwind" : "$Tags.Sensors" },
{ "$group" : { "_id" : { "EndpointId" : "$_id", "Uid" : "$Tags.Uid", "Type" : "$Tags.Sensors.Type" }, "RequestCount" : { "$first" : "$RequestCount" }, "FirstActivity" : { "$first" : "$FirstActivity" }, "LastActivity" : { "$first" : "$LastActivity" } } },
{ "$group" : { "_id" : { "EndpointId" : "$_id.EndpointId", "Uid" : "$_id.Uid" }, "count" : { "$sum" : 1 }, "RequestCount" : { "$first" : "$RequestCount" }, "FirstActivity" : { "$first" : "$FirstActivity" }, "LastActivity" : { "$first" : "$LastActivity" } } },
{ "$group" : { "_id" : "$_id.EndpointId", "TagCount" : { "$sum" : 1 }, "SensorCount" : { "$sum" : "$count" }, "RequestCount" : { "$first" : "$RequestCount" }, "FirstActivity" : { "$first" : "$FirstActivity" }, "LastActivity" : { "$first" : "$LastActivity" } } }])
and my data structure is as below
{
"_id": "6aef51dfaf42ea1b70d0c4db",
"EndpointId": "98799bcc-e86f-4c8a-b340-8b5ed53caf83",
"DateTime": "2018-05-06T19:05:02.666Z",
"Url": "test",
"Tags": [
{
"Uid": "C1:3D:CA:D4:45:11",
"Type": 1,
"DateTime": "2018-05-06T19:05:02.666Z",
"Sensors": [
{
"Type": 1,
"Value": { "$numberDecimal": "-95" }
},
{
"Type": 2,
"Value": { "$numberDecimal": "-59" }
},
{
"Type": 3,
"Value": { "$numberDecimal": "11.029802536740132" }
}
]
},
{
"Uid": "C1:3D:CA:D4:45:11",
"Type": 1,
"DateTime": "2018-05-06T19:05:02.666Z",
"Sensors": [
{
"Type": 1,
"Value": { "$numberDecimal": "-92" }
},
{
"Type": 2,
"Value": { "$numberDecimal": "-59" }
}
]
}
]
}
This query works and returns correct results. I count the Tags, Sensors, and repeat times of each EndpointId. But the problem is that when I work with a large amount of data (about 10,000,000 documents) I run into memory problems. It seems that having four levels of $unwind causes the problem in this query. How can I reduce the unwinds in this query?
As long as your data has unique sensor and tag readings per document, which is what everything you have presented to date appears to have, then you simply don't need $unwind at all.
In fact, all you really need is a single $group:
db.endpoints.aggregate([
// In reality you would $match to limit the selection of documents
{ "$match": {
"DateTime": { "$gte": new Date("2018-05-01"), "$lt": new Date("2018-06-01") }
}},
{ "$group": {
"_id": "$EndpointId",
"FirstActivity" : { "$min" : "$DateTime" },
"LastActivity" : { "$max" : "$DateTime" },
"RequestCount": { "$sum": 1 },
"TagCount": {
"$sum": {
"$size": { "$setUnion": ["$Tags.Uid",[]] }
}
},
"SensorCount": {
"$sum": {
"$sum": {
"$map": {
"input": { "$setUnion": ["$Tags.Uid",[]] },
"as": "tag",
"in": {
"$size": {
"$reduce": {
"input": {
"$filter": {
"input": {
"$map": {
"input": "$Tags",
"in": {
"Uid": "$$this.Uid",
"Type": "$$this.Sensors.Type"
}
}
},
"cond": { "$eq": [ "$$this.Uid", "$$tag" ] }
}
},
"initialValue": [],
"in": { "$setUnion": [ "$$value", "$$this.Type" ] }
}
}
}
}
}
}
}
}}
])
Or if you actually do need to accumulate those "unique" values of "Sensors" and "Tags" from across different documents, then you still need initial $unwind statements to get the right grouping, but nowhere near as much as you presently have:
db.endpoints.aggregate([
// In reality you would $match to limit the selection of documents
{ "$match": {
"DateTime": { "$gte": new Date("2018-05-01"), "$lt": new Date("2018-06-01") }
}},
{ "$unwind": "$Tags" },
{ "$unwind": "$Tags.Sensors" },
{ "$group": {
"_id": {
"EndpointId": "$EndpointId",
"Uid": "$Tags.Uid",
"Type": "$Tags.Sensors.Type"
},
"FirstActivity": { "$min": "$DateTime" },
"LastActivity": { "$max": "$DateTime" },
"RequestCount": { "$addToSet": "$_id" }
}},
{ "$group": {
"_id": {
"EndpointId": "$_id.EndpointId",
"Uid": "$_id.Uid",
},
"FirstActivity": { "$min": "$FirstActivity" },
"LastActivity": { "$max": "$LastActivity" },
"count": { "$sum": 1 },
"RequestCount": { "$addToSet": "$RequestCount" }
}},
{ "$group": {
"_id": "$_id.EndpointId",
"FirstActivity": { "$min": "$FirstActivity" },
"LastActivity": { "$max": "$LastActivity" },
"TagCount": { "$sum": 1 },
"SensorCount": { "$sum": "$count" },
"RequestCount": { "$addToSet": "$RequestCount" }
}},
{ "$addFields": {
"RequestCount": {
"$size": {
"$reduce": {
"input": {
"$reduce": {
"input": "$RequestCount",
"initialValue": [],
"in": { "$setUnion": [ "$$value", "$$this" ] }
}
},
"initialValue": [],
"in": { "$setUnion": [ "$$value", "$$this" ] }
}
}
}
}}
],{ "allowDiskUse": true })
And from MongoDB 4.0 you can use $toString on the ObjectId within _id and simply merge the unique keys for those in order to keep the RequestCount, using $mergeObjects. This is cleaner and a bit more scalable than pushing nested array content and flattening it:
db.endpoints.aggregate([
// In reality you would $match to limit the selection of documents
{ "$match": {
"DateTime": { "$gte": new Date("2018-05-01"), "$lt": new Date("2018-06-01") }
}},
{ "$unwind": "$Tags" },
{ "$unwind": "$Tags.Sensors" },
{ "$group": {
"_id": {
"EndpointId": "$EndpointId",
"Uid": "$Tags.Uid",
"Type": "$Tags.Sensors.Type"
},
"FirstActivity": { "$min": "$DateTime" },
"LastActivity": { "$max": "$DateTime" },
"RequestCount": {
"$mergeObjects": {
"$arrayToObject": [[{ "k": { "$toString": "$_id" }, "v": 1 }]]
}
}
}},
{ "$group": {
"_id": {
"EndpointId": "$_id.EndpointId",
"Uid": "$_id.Uid",
},
"FirstActivity": { "$min": "$FirstActivity" },
"LastActivity": { "$max": "$LastActivity" },
"count": { "$sum": 1 },
"RequestCount": { "$mergeObjects": "$RequestCount" }
}},
{ "$group": {
"_id": "$_id.EndpointId",
"FirstActivity": { "$min": "$FirstActivity" },
"LastActivity": { "$max": "$LastActivity" },
"TagCount": { "$sum": 1 },
"SensorCount": { "$sum": "$count" },
"RequestCount": { "$mergeObjects": "$RequestCount" }
}},
{ "$addFields": {
"RequestCount": {
"$size": {
"$objectToArray": "$RequestCount"
}
}
}}
],{ "allowDiskUse": true })
Either form returns the same data, though the order of keys in the result may vary:
{
"_id" : "89799bcc-e86f-4c8a-b340-8b5ed53caf83",
"FirstActivity" : ISODate("2018-05-06T19:05:02.666Z"),
"LastActivity" : ISODate("2018-05-06T19:05:02.666Z"),
"RequestCount" : 2,
"TagCount" : 4,
"SensorCount" : 16
}
The result is obtained from these sample documents which you originally gave as a sample source in the original question on the topic:
{
"_id" : ObjectId("5aef51dfaf42ea1b70d0c4db"),
"EndpointId" : "89799bcc-e86f-4c8a-b340-8b5ed53caf83",
"DateTime" : ISODate("2018-05-06T19:05:02.666Z"),
"Url" : "test",
"Tags" : [
{
"Uid" : "C1:3D:CA:D4:45:11",
"Type" : 1,
"DateTime" : ISODate("2018-05-06T19:05:02.666Z"),
"Sensors" : [
{
"Type" : 1,
"Value" : NumberDecimal("-95")
},
{
"Type" : 2,
"Value" : NumberDecimal("-59")
},
{
"Type" : 3,
"Value" : NumberDecimal("11.029802536740132")
},
{
"Type" : 4,
"Value" : NumberDecimal("27.25")
},
{
"Type" : 6,
"Value" : NumberDecimal("2924")
}
]
},
{
"Uid" : "C1:3D:CA:D4:45:11",
"Type" : 1,
"DateTime" : ISODate("2018-05-06T19:05:02.666Z"),
"Sensors" : [
{
"Type" : 1,
"Value" : NumberDecimal("-95")
},
{
"Type" : 2,
"Value" : NumberDecimal("-59")
},
{
"Type" : 3,
"Value" : NumberDecimal("11.413037961112279")
},
{
"Type" : 4,
"Value" : NumberDecimal("27.25")
},
{
"Type" : 6,
"Value" : NumberDecimal("2924")
}
]
},
{
"Uid" : "E5:FA:2A:35:AF:DD",
"Type" : 1,
"DateTime" : ISODate("2018-05-06T19:05:02.666Z"),
"Sensors" : [
{
"Type" : 1,
"Value" : NumberDecimal("-97")
},
{
"Type" : 2,
"Value" : NumberDecimal("-58")
},
{
"Type" : 3,
"Value" : NumberDecimal("10.171658037099185")
}
]
}
]
}
/* 2 */
{
"_id" : ObjectId("5aef51e0af42ea1b70d0c4dc"),
"EndpointId" : "89799bcc-e86f-4c8a-b340-8b5ed53caf83",
"Url" : "test",
"Tags" : [
{
"Uid" : "E2:02:00:18:DA:40",
"Type" : 1,
"DateTime" : ISODate("2018-05-06T19:05:04.574Z"),
"Sensors" : [
{
"Type" : 1,
"Value" : NumberDecimal("-98")
},
{
"Type" : 2,
"Value" : NumberDecimal("-65")
},
{
"Type" : 3,
"Value" : NumberDecimal("7.845424441900629")
},
{
"Type" : 4,
"Value" : NumberDecimal("0.0")
},
{
"Type" : 6,
"Value" : NumberDecimal("3012")
}
]
},
{
"Uid" : "12:3B:6A:1A:B7:F9",
"Type" : 1,
"DateTime" : ISODate("2018-05-06T19:05:04.574Z"),
"Sensors" : [
{
"Type" : 1,
"Value" : NumberDecimal("-95")
},
{
"Type" : 2,
"Value" : NumberDecimal("-59")
},
{
"Type" : 3,
"Value" : NumberDecimal("12.939770381907275")
}
]
}
]
}
The bottom line is that you can either use the first form given here, which accumulates "within each document" and then "per endpoint" within a single stage and is the most optimal, or you actually need to identify things like the "Uid" on the tags or the "Type" on the sensors where those values occur more than once over any combination of documents grouped by the endpoint.
Your sample data supplied to date only shows that these values are "unique within each document", therefore the first given form would be most optimal if this is the case for all remaining data.
In the event that it is not, then "unwinding" the two nested arrays in order to "aggregate the detail across documents" is the only way to approach this. You can limit the date range or other criteria as most "queries" typically have some bounds and do not actually work on the "whole" collection data, but the main fact remains that arrays would be "unwound" creating essentially a document copy for every array member.
The point of the optimization is that you only need to do this "twice", as there are only two arrays. Doing successive $group, $unwind, $group stages is always a sure sign you are doing something really wrong. Once you "take something apart" you should only ever need to "put it back together" once, and the series of graded steps demonstrated here is that one approach which optimizes.
Outside of the scope of your question still remains:
Add other realistic constraints to the query to reduce the documents processed, maybe even do so in "batches" and combine results
Add the allowDiskUse option to the pipeline to let temporary storage be used (actually demonstrated in the commands above).
Consider that "nested arrays" are probably not the best storage method for the analysis you want to do. It's always more efficient, when you know you need to $unwind, to simply write the data in that "unwound" form directly into a collection, as sketched below.
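A minimal sketch of that materialization; the target collection name endpoint_sensors and the projected field names are illustrative, not from the question:

db.endpoints.aggregate([
    { "$unwind": "$Tags" },
    { "$unwind": "$Tags.Sensors" },
    { "$project": {
        "_id": 0,
        "EndpointId": 1,
        "DateTime": 1,
        "Uid": "$Tags.Uid",
        "SensorType": "$Tags.Sensors.Type",
        "Value": "$Tags.Sensors.Value"
    }},
    // Write the flattened documents out once; later analysis reads this collection
    { "$out": "endpoint_sensors" }
], { "allowDiskUse": true })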
If you're dealing with data on the order of 10,000,000 documents, you're going to run into aggregation pipeline size limits easily. Specifically, according to the MongoDB documentation, each pipeline stage is limited to 100MB of RAM. If each document contributes even 10 bytes to a stage, that's enough to hit the limit, and your documents would absolutely exceed that amount.
There are a few options available to you to resolve this problem:
1) You can use the allowDiskUse option as noted in the documentation.
2) You can project your documents further between unwind stages to limit document size (very unlikely to be enough on its own).
3) You can periodically generate summary documents on subsets of your data, and then perform your aggregations on those summary documents. If, for example, you run summary documents on subsets of size 1,000, you can reduce the number of documents in your pipelines from 10,000,000 to just 10,000.
4) You can look into sharding your collection and running these aggregate operations on a cluster to reduce the load on any single server.
Options 1 and 2 are both very short-term solutions. They're easy to implement, but won't help much in the long run. Options 3 and 4, however, are far more involved and trickier to implement, but will provide the greatest amount of scalability and are more likely to continue meeting your needs long-term.
Do be warned, however, that if you plan to approach option 4, you need to be very prepared. A sharded collection cannot be unsharded, and messing up can cause potentially irreparable data loss. Having a dedicated DBA with experience with MongoDB clusters is recommended.

How to find sum of total value from inner array?

{
"_id" : ObjectId("56fb04fd2e6bb8bc059287c9"),
"BillNo" : "Bill_001",
"DateP" : "12-12-2015",
"Type" : "Cash",
"Items" : [
{
"id" : NumberInt(1),
"ItemName" : "cement",
"Qty" : "100",
"Rate" : "10",
"Total" : "1000"
},
{
"id" : NumberInt(2),
"ItemName" : "steel",
"Qty" : "10",
"Rate" : "50",
"Total" : "500"
},
{
"id" : NumberInt(3),
"ItemName" : "sand",
"Qty" : "1",
"Rate" : "1500",
"Total" : "1500"
}
]
}
{
"_id" : ObjectId("56fb05382e6bb8bc059287ca"),
"BillNo" : "Bill_002",
"DateP" : "12-10-2015",
"Type" : "Cash",
"Items" : [
{
"id" : NumberInt(1),
"ItemName" : "Paint",
"Qty" : "50",
"Rate" : "100",
"Total" : "5000"
},
{
"id" : NumberInt(2),
"ItemName" : "Brush",
"Qty" : "5",
"Rate" : "10",
"Total" : "50"
}
]
}
The above collection stores all the purchase details in the main document, with the item details stored in the inner Items array. I need to get a result like the following using MongoDB; how do I find the total from the inner array?
Bill_001 3000
Bill_002 5050
Ideally in modern MongoDB you can use $map with $sum, both in its role as a $group accumulator and in its newer role of adding up the members of a supplied array:
db.collection.aggregate([
{ "$group": {
"_id": "$BillNo",
"Total": {
"$sum": {
"$sum": {
"$map": {
"input": "$Items",
"as": "item",
"in": "$$item.Total"
}
}
}
}
}}
])
Or just per document:
db.collection.aggregate([
{ "$group": {
"_id": "$_id",
"BillNo": { "$first": "$BillNo" },
"DateP": { "$first" "$DateP" },
"Type": { "$first": "$Type" }
"Total": {
"$sum": {
"$sum": {
"$map": {
"input": "$Items",
"as": "item",
"in": "$$item.Total"
}
}
}
}
}}
])
This uses the other accumulator, $first. Of course, you could really just $project with MongoDB 3.2:
db.collection.aggregate([
{ "$project": {
"BillNo": 1,
"DateP": 1,
"Type": 1,
"Total": {
"$sum": {
"$map": {
"input": "$Items",
"as": "item",
"in": "$$item.Total"
}
}
}
}}
])
In older versions you still need $unwind on the array first:
db.collection.aggregate([
{ "$unwind": "$Items" },
{ "$group": {
"_id": "$BillNo",
"Total": {
"$sum": "$Items.Total"
}
}}
])
Or if you are only adding per document:
db.collection.aggregate([
{ "$unwind": "$Items" },
{ "$group": {
"_id": "_id",
"BillNo": { "$first": "$BillNo" },
"DateP": { "$first": "$DateP" },
"Type": { "$first": "$Type" },
"Total": {
"$sum": "$Items.Total"
}
}}
])
But of course that only works once you actually fix the strings to be numeric values.
You can fix them like this:
var ops = [];
db.collection.find().forEach(function(doc) {
doc.Items.forEach(function(item) {
ops.push({
"updateOne": {
"filter": { "_id": doc._id, "Items.id": item.id },
"update": {
"$set": {
"Items.$.Qty": parseInt(item.Qty),
"Items.$.Rate": parseInt(item.Rate),
"Items.$Total": parseInt(item.Total)
}
}
}
});
// Send batch of updates
if ( ops.length == 1000 ) {
db.collection.bulkWrite(ops);
ops = [];
}
})
});
// Clear any unprocessed updates
if ( ops.length > 0 ) {
db.collection.bulkWrite(ops);
}

Aggregate with count of sub documents matching the condition and grouping

I've collections of documents as like as below:
{
"_id" : ObjectId("55d4410544c96d6f6578f893"),
"executionProject" : "Project1",
"suiteList" : [
{
"suiteStatus" : "PASS"
}
],
"runEndTime" : ISODate("2015-08-19T08:40:47.049Z"),
"runStartTime" : ISODate("2015-08-19T08:40:37.621Z"),
"runStatus" : "PASS",
"__v" : 1
}
{
"_id" : ObjectId("55d44eb4c0422e7b8bffe76b"),
"executionProject" : "Project1",
"suiteList" : [
{
"suiteStatus" : "PASS"
}
],
"runEndTime" : ISODate("2015-08-19T09:39:13.528Z"),
"runStartTime" : ISODate("2015-08-19T09:39:00.406Z"),
"runStatus" : "PASS",
"__v" : 1
}
{
"_id" : ObjectId("55d44f0bc0422e7b8bffe76f"),
"executionProject" : "Project1",
"suiteList" : [
{
"suiteStatus" : "FAIL"
}
],
"runEndTime" : ISODate("2015-08-19T09:46:31.108Z"),
"runStartTime" : ISODate("2015-08-19T09:40:27.377Z"),
"runStatus" : "PASS",
"__v" : 1
}
{
"_id" : ObjectId("55d463d0c0422e7b8bffe789"),
"executionProject" : "Project2",
"suiteList" : [
{
"suiteStatus" : "PASS"
},
{
"suiteStatus" : "PASS"
}
],
"runEndTime" : ISODate("2015-08-19T11:09:52.537Z"),
"runStartTime" : ISODate("2015-08-19T11:09:04.539Z"),
"runStatus" : "FAIL",
"__v" : 1
}
{
"_id" : ObjectId("55d464ebc0422e7b8bffe7c2"),
"executionProject" : "Project3",
"suiteList" : [
{
"suiteStatus" : "FAIL"
}
],
"runEndTime" : ISODate("2015-08-19T11:18:41.460Z"),
"runStartTime" : ISODate("2015-08-19T11:13:47.268Z"),
"runStatus" : "FAIL",
"__v" : 10
}
And I'm expecting output as follows:
[
{
"executionProject": "Project1",
"suite-pass": 0,
"suite-fail": 1,
"runEndTime": ISODate("2015-08-19T09:46:31.108Z")
},
{
"executionProject": "Project2",
"suite-pass": 2,
"suite-fail": 0,
"runEndTime": ISODate("2015-08-19T11:09:52.537Z")
},
{
"executionProject": "Project3",
"suite-pass": 0,
"suite-fail": 1,
"runEndTime": ISODate("2015-08-19T11:18:41.460Z")
},
]
I want to group by project and order by runEndTime and show the pass and fail counts of suiteList.
I tried this as suggested by Blakes in Mongodb: Group by element and show the sub-document count based on condition and sort the document by date:
db.testruns.aggregate([
{ "$sort": { "runEndTime": 1 } },
{ "$group": {
"_id": "$executionProject",
"suite-pass": {
"$last": {
"$cond": [
{ "$anyElementTrue": {
"$map": {
"input": "$suiteList",
"as": "suite",
"in": {
"$eq": [ "$$suite.suiteStatus", "PASS" ]
}
}
}},
1,
0
]
}
},
"suite-fail": {
"$last": {
"$cond": [
{ "$anyElementTrue": {
"$map": {
"input": "$suiteList",
"as": "suite",
"in": {
"$eq": [ "$$suite.suiteStatus", "FAIL" ]
}
}
}},
1,
0
]
}
},
"runEndTime": { "$last": "$runEndTime" }
}},
{ "$sort": { "runEndTime": 1 } }
]);
I was expecting the suite-pass count for Project2 as 2 since there are 2 elements in suiteList, but it returns 1.
You should have read the answer properly: there was already an alternate listing there, with an explanation of why the result you expected would differ from the approach you used.
Instead you want this one, which respects the possible multiple "PASS" or "FAIL" entries:
Model.aggregate(
[
{ "$sort": { "executionProject": 1, "runEndTime": 1 } },
{ "$group": {
"_id": "$executionProject",
"suiteList": { "$last": "$suiteList" },
"runEndTime": { "$last": "$runEndTime" }
}},
{ "$unwind": "$suiteList" },
{ "$group": {
"_id": "$_id",
"suite-pass": {
"$sum": {
"$cond": [
{ "$eq": [ "$suiteList.suiteStatus", "PASS" ] },
1,
0
]
}
},
"suite-fail": {
"$sum": {
"$cond": [
{ "$eq": [ "$suiteList.suiteStatus", "FAIL" ] },
1,
0
]
}
},
"runEndTime": {"$first": "$runEndTime"}
}},
{ "$sort": { "runEndTime": 1 }}
],
function(err,result) {
}
);
This is sort of a "combination" of approaches. The first stage gets the "last" document by runEndTime, as you were expecting. The next breaks down the array and this time actually "sums up" the possible occurrences of pass and fail; rather than just recording a 1 when either appears anywhere in the array, the actual "PASS" and "FAIL" entries are counted.
With results:
{
"_id" : "Project1",
"suite-pass" : 0,
"suite-fail" : 1,
"runEndTime" : ISODate("2015-08-19T09:46:31.108Z")
}
{
"_id" : "Project2",
"suite-pass" : 2,
"suite-fail" : 0,
"runEndTime" : ISODate("2015-08-19T11:09:52.537Z")
}
{
"_id" : "Project3",
"suite-pass" : 0,
"suite-fail" : 1,
"runEndTime" : ISODate("2015-08-19T11:18:41.460Z")
}
Unwind suiteList and use $sum in the group, as below:
db.testruns.aggregate({
"$unwind": "$suiteList"
}, {
"$group": {
"_id": "$executionProject",
"suite-pass": {
"$sum": {
"$cond": {
"if": {
"$eq": ["$suiteList.suiteStatus", "PASS"]
},
"then": 1,
"else": 0
}
}
},
"suite-fail": {
"$sum": {
"$cond": {
"if": {
"$eq": ["$suiteList.suiteStatus", "FAIL"]
},
"then": 1,
"else": 0
}
}
},
"runEndTime": {
"$last": "$runEndTime"
}
}
}, {
"$sort": {
"runEndTime": 1
}
})