Grouping multiple documents with nested array of objects in MongoDB

I have documents with this structure:
x = {
"scalar": 1,
"array": [
{"key": 1, "value": 2},
{"key": 2, "value": 3},
],
"array2": [
{"key": 1, "value": 2},
{"key": 2, "value": 3},
],
}
and
y = {
"scalar": 2,
"array": [
{"key": 1, "value": 3},
{"key": 3, "value": 0},
],
"array2": [
{"key": 1, "value": 3},
{"key": 3, "value": 0},
],
}
The end result I'm trying to get is this:
{
"scalar": 3, # SUM of scalar
"array": [
{"key": 1, "value": 5}, # SUM by key = 1
{"key": 2, "value": 3},
{"key": 3, "value": 0},
],
"array2": [
{"key": 1, "value": 5}, # SUM by key = 1
{"key": 2, "value": 3},
{"key": 3, "value": 0},
],
}
I've tried using a double $unwind and then a $group with $push. I'm thinking of using $reduce to get the final results.

Query
One way to do it is with $facet. You want three separate groupings, and $facet can do that by breaking the pipeline into three separate parts so the unwinds don't get mixed. I think this is the simplest way to do it.
db.collection.aggregate([
{
"$facet": {
"scalar": [
{
"$project": {
"scalar": 1
}
},
{
"$group": {
"_id": null,
"sum": {
"$sum": "$scalar"
}
}
},
{
"$unset": [
"_id"
]
}
],
"array": [
{
"$project": {
"array": 1
}
},
{
"$unwind": {
"path": "$array"
}
},
{
"$group": {
"_id": "$array.key",
"sum": {
"$sum": "$array.value"
}
}
},
{
"$project": {
"_id": 0,
"key": "$_id",
"value": "$sum"
}
}
],
"array2": [
{
"$project": {
"array2": 1
}
},
{
"$unwind": {
"path": "$array2"
}
},
{
"$group": {
"_id": "$array2.key",
"sum": {
"$sum": "$array2.value"
}
}
},
{
"$project": {
"_id": 0,
"key": "$_id",
"value": "$sum"
}
}
]
}
},
{
"$set": {
"scalar": {
"$arrayElemAt": [
"$scalar.sum",
0
]
}
}
}
])
An alternative is to unwind both arrays, but then the unwinds and groups get mixed together, which I think makes things more complicated.
Also, I don't think $reduce can be used for grouping in MongoDB, because we can't construct dynamic paths.
Say we group-reduce and the accumulator holds this data (key=key, value=value):
{"1": 5, "2": 3}
When we then see {"key": 1, "value": 5}, how can we check whether that accumulator already contains 1 as a key? We can't construct dynamic paths like $$this.1. The only way is to convert the object to an array and back to an object, which would be very slow.
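For illustration, a minimal sketch of that object-to-array round trip (the collection name totals and the field name totalsByKey are hypothetical):
// hypothetical accumulator document: { totalsByKey: { "1": 5, "2": 3 } }
db.totals.aggregate([
  // turn the object into an array of { k, v } pairs so keys become plain data
  { "$set": { "pairs": { "$objectToArray": "$totalsByKey" } } },
  // ...a $filter / $map over "$pairs" could look up or update a given key here...
  // then fold the (possibly modified) pairs back into an object
  { "$set": { "totalsByKey": { "$arrayToObject": "$pairs" } } }
])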

Related

Calculate running total across for different groups by day

I'm trying to aggregate a collection of transactions into a running total of holdings per owner by day.
The initial collection looks like this:
[
{ "to": "A", "from": "0", "ts": 1 },
{ "to": "A", "from": "0", "ts": 1 },
{ "to": "B", "from": "0", "ts": 1 },
{ "to": "B", "from": "0", "ts": 2 },
{ "to": "C", "from": "0", "ts": 3 },
{ "to": "A", "from": "B", "ts": 4 }
]
What I would like to get is something like this:
[
{
"ts": 1,
"holdings": [
{ "owner": "0", "holdings": -3 },
{ "owner": "A", "holdings": 2 },
{ "owner": "B", "holdings": 1 }
]
},
{
"ts": 2,
"holdings": [
{ "owner": "0", "holdings": -4 },
{ "owner": "A", "holdings": 2 },
{ "owner": "B", "holdings": 2 }
]
},
{
"ts": 4,
"holdings": [
{ "owner": "0", "holdings": -5 },
{ "owner": "A", "holdings": 3 },
{ "owner": "B", "holdings": 1 },
{ "owner": "C", "holdings": 1 }
]
}
]
I've already figured out how to generate this for a single ts that I set manually, but I don't know how to do it across all ts values.
The aggregation pipeline for a single ts looks like this:
db.collection.aggregate([
// start with: { "to": "A", "from": "0", "ts": 1 }
{
// create a doc with an array with subset of fields:
// { "_id": ObjectId("5a934e000102030405000000"),
// "data": [ { "change": 1, "owner": "A", "ts": "1" },
// { "change": -1, "owner": "0", "ts": "1" } ] }
$project: {
data: [
{
owner: '$to',
ts: '$ts',
change: 1,
},
{
owner: '$from',
ts: '$ts',
change: -1,
},
],
},
},
{
// unwind the array into 2 docs:
// { "_id": ObjectId("5a934e000102030405000000"), "data": { "change": 1, "owner": "A", "ts": "1" } },
// { "_id": ObjectId("5a934e000102030405000000"), "data": { "change": -1, "owner": "0", "ts": "1" } },
$unwind: '$data',
},
{
// use data as root:
// { "data": { "change": 1, "owner": "A", "ts": "1" } },
// { "data": { "change": -1, "owner": "0", "ts": "1" } }
$replaceRoot: {
newRoot: '$data',
},
},
{
// select day to calc totals
$match: {
ts: {
$lt: 6,
},
},
},
{
// sum totals, grouped by owner
$group: {
_id: '$owner',
//_id: null,
holdings: {
$sum: '$change',
},
},
},
])
This gives the correct result for a particular day (selected in the match stage). I don't understand how I can now generalize that to all days.
One way to do it is using $setWindowFields, which has built-in cumulative accumulation:
db.collection.aggregate([
{
$project: {
ts: "$ts",
data: [{owner: "$to", change: 1}, {owner: "$from", change: -1}]
}
},
{$unwind: "$data"},
{
$group: {
_id: {ts: "$ts", owner: "$data.owner"},
holdings: {$sum: "$data.change"}
}
},
{
$setWindowFields: {
partitionBy: "$_id.owner",
sortBy: {"_id.ts": 1},
output: {
cumulativeHoldings: {
$sum: "$holdings",
window: {documents: ["unbounded", "current"]}
}
}
}
},
{
$group: {
_id: "$_id.ts",
holdings: {$push: {owner: "$_id.owner", holdings: "$cumulativeHoldings"}}
}
}
])
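If the output should exactly match the requested shape, two more stages could be appended after the final $group (a sketch; the field names follow the pipeline above):
// rename _id back to ts and sort by day
{ $project: { _id: 0, ts: "$_id", holdings: 1 } },
{ $sort: { ts: 1 } }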

Group by an optional field in mongodb

I would like to independently group the results of an or clause, including overlap. The data set is rather large so running 2 queries sequentially will result in an undesirable wait time. I am hoping I can somehow project which clause returned the corresponding data. Given this data set:
[
{
"_id": 1,
"item": "abc",
"name": "Michael",
"price": NumberDecimal("10"),
"quantity": NumberInt("2"),
"date": ISODate("2014-03-01T08:00:00Z")
},
{
"_id": 2,
"item": "jkl",
"name": "Toby",
"price": NumberDecimal("20"),
"quantity": NumberInt("1"),
"date": ISODate("2014-03-01T09:00:00Z")
},
{
"_id": 3,
"item": "xyz",
"name": "Keith",
"price": NumberDecimal("5"),
"quantity": NumberInt("10"),
"date": ISODate("2014-03-15T09:00:00Z")
},
{
"_id": 4,
"item": "abc",
"name": "Dwight",
"price": NumberDecimal("5"),
"quantity": NumberInt("20"),
"date": ISODate("2014-04-04T11:21:39.736Z")
},
{
"_id": 5,
"item": "abc",
"name": "Ryan",
"price": NumberDecimal("10"),
"quantity": NumberInt("10"),
"date": ISODate("2014-04-04T21:23:13.331Z")
},
{
"_id": 6,
"item": "def",
"name": "Jim",
"price": NumberDecimal("7.5"),
"quantity": NumberInt("5"),
"date": ISODate("2015-06-04T05:08:13Z")
},
{
"_id": 7,
"item": "abc",
"name": "Keith",
"price": NumberDecimal("7.5"),
"quantity": NumberInt("10"),
"date": ISODate("2015-09-10T08:43:00Z")
},
{
"_id": 8,
"item": "abc",
"name": "Michael",
"price": NumberDecimal("10"),
"quantity": NumberInt("5"),
"date": ISODate("2016-02-06T20:20:13Z")
},
]
I would like to receive this result:
[{
"_id": {
"name": "Keith"
},
"count": 2
},
{
"_id": {
"item": "abc",
},
"count": 5
}]
Here is what I have tried so far:
db.collection.aggregate([
{
$match: {
$or: [
{
item: "abc"
},
{
name: "Keith"
}
]
}
},
{
$group: {
_id: {
item: "$item",
name: "$name"
},
count: {
$sum: 1
}
}
}
])
You can use $facet to run multiple aggregation pipelines within a single stage, in this way:
Using $facet there are two "outputs": one grouped by name and the other by item.
Each one has multiple stages:
First, a $match to process only the documents you want.
Then, a $group with _id set to the name or item, and $sum: 1 to get the total count.
db.collection.aggregate([
{
"$facet": {
"groupByName": [
{
"$match": {"name": "Keith"}
},
{
"$group": {"_id": "$name","count": {"$sum": 1}}
}
],
"groupByItem": [
{
"$match": {"item": "abc"}
},
{
"$group": {"_id": "$item","count": {"$sum": 1}}
}
]
}
}
])
The output is:
{
"groupByItem": [
{
"_id": "abc",
"count": 5
}
],
"groupByName": [
{
"_id": "Keith",
"count": 2
}
]
}
Here is another way, using $facet and then flattening the results back into plain documents:
db.n.aggregate([
  {
    $facet: {
      names: [
        { $match: { name: "Keith" } },
        { $group: { _id: { name: "$name" }, count: { $sum: 1 } } }
      ],
      items: [
        { $match: { item: "abc" } },
        { $group: { _id: { item: "$item" }, count: { $sum: 1 } } }
      ]
    }
  },
  { $project: { namesANDitems: { $concatArrays: ["$names", "$items"] } } },
  { $unwind: "$namesANDitems" },
  { $replaceRoot: { newRoot: "$namesANDitems" } }
]).pretty()
{ "_id" : { "name" : "Keith" }, "count" : 2 }
{ "_id" : { "item" : "abc" }, "count" : 5 }
Explained:
You create two pipelines via $facet.
In each facet pipeline, $match selects what you need to group: pipe1 = names, pipe2 = items.
$concatArrays joins the arrays from the two pipelines into a single array named "namesANDitems".
$unwind turns each element of that array into its own document.
$replaceRoot removes the temporary namesANDitems wrapper, so you are left with only the two objects as requested.

MongoDB, How to associate array fields for statistics

Example JSON:
{
"groups": [
{
"_id": 1,
"name": "g1"
},
{
"_id": 2,
"name": "g2"
}
],
"items": [
{
"_id": 1,
"name": "item1",
"gid": 1
},
{
"_id": 2,
"name": "item2",
"gid": 2
}
]
}
How can I associate the two arrays and count the items per group? I tried to use aggregate, but I didn't get the results I wanted.
Required Result:
Or, ideally, each group directly includes all the items associated with it, like this:
{"groups": [
{
"_id": 1,
"name": "g1",
"count": 1,
"items": [
{
"_id": 1,
"name": "item1"
}
]
},
{
"_id": 2,
"name": "g2",
"count": 1,
"items": [
{
"_id": 2,
"name": "item2"
}
]
}
]}
db.getCollection('collection').aggregate([
{$unwind:{
path:"$groups",
preserveNullAndEmptyArrays:true
}},
{$unwind:{
path:"$items",
preserveNullAndEmptyArrays:true
}},
{$redact: {$cond: [{
$eq: [
"$groups._id",
"$items.gid"
]
},
"$$KEEP",
"$$PRUNE"
]
}
},
{$project:{
_id:1,
groups_id:"$groups._id",
group_name:"$groups.name",
item_data:{
_id:"$items._id",
name:"$items.name",
}
}},
{
$group:{
_id:"$groups_id",
name:{$first:"$group_name"},
count:{$sum:1},
items:{$push:"$item_data"}
}
}
])
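For what it's worth, here is a sketch of one way to associate the two arrays without any $unwind, using $map with a nested $filter (it assumes the collection and field names from the question):
db.getCollection('collection').aggregate([
  {
    $project: {
      _id: 0,
      groups: {
        $map: {
          input: "$groups",
          as: "g",
          in: {
            $let: {
              // collect the items whose gid matches this group's _id
              vars: {
                matched: {
                  $filter: {
                    input: "$items",
                    as: "i",
                    cond: { $eq: ["$$i.gid", "$$g._id"] }
                  }
                }
              },
              in: {
                _id: "$$g._id",
                name: "$$g.name",
                count: { $size: "$$matched" },
                items: {
                  $map: {
                    input: "$$matched",
                    as: "i",
                    in: { _id: "$$i._id", name: "$$i.name" }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
])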

MongoDB - Average fields values over documents

I have a collection with this schema:
{
"fields":
{
"field1": [
{"name": "abc", "value": 2},
{"name": "bcd", "value": 4},
{"name": "cde", "value": 6}
],
"field2": [
{"name": "dec", "value": 3},
{"name": "das", "value": 8},
{"name": "pam", "value": 10}
]
}
},
{
"fields":
{
"field1": [
{"name": "abc", "value": 7},
{"name": "cde", "value": 12}
],
"field2": [
{"name": "dec", "value": 3},
{"name": "das", "value": 8},
{"name": "pam", "value": 10}
]
}
}
What I'm trying to obtain is, e.g., the average of the values of all members of 'field1', counting a member as 0 in any document where it does not exist (like 'bcd').
So in this example I should get:
{
'_id': 'abc',
'avg': 4.5
},
{
'_id': 'bcd',
'avg': 2
},
{
'_id': 'cde',
'avg': 9
}
I wrote this aggregation query but I'm pretty sure there is something wrong with it:
db.statuses.aggregate([
{
$unwind: '$fields.field1'
},
{
$group: {
_id: '$fields.field1.name',
avg: {
$avg: '$fields.field1.value'
}
}
},
{
$sort: {
avg: -1
}
}
])
I think I should add a step before the average calculation in which I build an array of all values for each name (using 0 when the name does not exist in a document), and then compute the average over these arrays. Am I right?
How could I do this?
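One possible sketch, assuming the goal described above: instead of padding with zeros, divide each name's total by the overall number of documents, which gives the same average. The collection name statuses is taken from the query in the question.
db.statuses.aggregate([
  {
    $facet: {
      // per-name sums across all documents
      sums: [
        { $unwind: "$fields.field1" },
        { $group: { _id: "$fields.field1.name", sum: { $sum: "$fields.field1.value" } } }
      ],
      // total number of documents, so missing names effectively count as 0
      total: [
        { $count: "n" }
      ]
    }
  },
  { $unwind: "$sums" },
  {
    $project: {
      _id: "$sums._id",
      avg: { $divide: ["$sums.sum", { $arrayElemAt: ["$total.n", 0] }] }
    }
  },
  { $sort: { avg: -1 } }
])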

MongoDB projection. Operator $add field|expression array awareness or after $slice

I've got a collection that looks like:
[{
"org": "A",
"type": "simple",
"payFor": 3,
"price": 100
},
{
"org": "A",
"type": "custom",
"payFor": 2,
"price": 115
},
{
"org": "B",
"type": "simple",
"payFor": 1,
"price": 110
},
{
"org": "B",
"type": "custom",
"payFor": 2,
"price": 200
},
{
"org": "B",
"type": "custom",
"payFor": 4,
"price": 220
}]
I need a query that groups by "org" and, for each "type", sums only the first "payFor" prices.
I'm trying to use the result of the $slice operator inside $add, but this does not work.
Pipeline:
[{
"$group": {
"_id": {
"org": "$org",
"type": "$type"
},
"payFor": {
"$max": "$payFor"
},
"count": {
"$sum": 1
},
"prices": {
"$push": "$price"
}
}
},
{
"$group": {
"_id": "$_id.org",
"payments": {
"$push": {
"type": "$_id.type",
"forFirst": "$payFor",
"sum": {
"$cond": [
{
"$gte": [
"$payFor",
"$count"
]
},
{
"$add": {
"$prices": {
"$slice": "$count"
}
}
},
{
"$add": "$prices"
}
]
}
}
}
}
}]
I know that it is possible to unwind the prices and pick only the first "payFor" of them, but the real collections are much richer than the example above and that operation would add unnecessary overhead.
I need some advice from the community, please. Thanks.
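For reference, a sketch of what the intended expression might look like, assuming the goal is to sum only the first payFor pushed prices per type: the aggregation $slice operator takes the array and a count, and $sum (rather than $add) totals an array. The $cond is then unnecessary, since $slice returns the whole array when the count exceeds its length; a preceding $sort may still be needed if "first" must follow a specific document order.
db.collection.aggregate([
  {
    "$group": {
      "_id": { "org": "$org", "type": "$type" },
      "payFor": { "$max": "$payFor" },
      "prices": { "$push": "$price" }
    }
  },
  {
    "$group": {
      "_id": "$_id.org",
      "payments": {
        "$push": {
          "type": "$_id.type",
          "forFirst": "$payFor",
          // keep the first "payFor" prices and total them
          "sum": { "$sum": { "$slice": ["$prices", "$payFor"] } }
        }
      }
    }
  }
])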