Querying MongoDB for some chart data - my pipeline seems convoluted

This is a long question. If you bother answering, I will be extra grateful.
I have some time series data that I am trying to query to create various charts. The data format isn't the simplest, but I think my aggregation pipeline is getting a bit out of hand. I am planning to use Chart.js to visualise the data on the client.
I will post a sample of my data below as well as my pipeline, with the desired output.
My question is in two parts - answering either one could solve the problem.
Does Chart.js accept data formats other than an array of numbers per row? This would mean my pipeline could do less work.
My pipeline doesn't quite get to the result I need. Can you recommend any alterations to get the correct result from my pipeline? Is there a simpler way to get my desired output format?
Sample data
Here is a real data sample - a brand with one facebook account and one twitter account. There is some data for some dates in June. Lots of null day and month fields have been omitted.
Brand
[{
"_id": "5943f427e7c11ac3ad3652b0",
"name": "Brand1",
"facebookAccounts": [
"5943f427e7c11ac3ad3652ac",
],
"twitterAccounts": [
"5943f427e7c11ac3ad3652aa",
],
}]
FacebookAccounts
[
{
"_id" : "5943f427e7c11ac3ad3652ac"
"name": "Brand 1 Name",
"years": [
{
"date": "2017-01-01T00:00:00.000Z",
"months": [
{
"date": "2017-06-01T00:00:00.000Z",
"days": [
{
"date": "2017-06-16T00:00:00.000Z",
"likes": 904025,
},
{
"date": "2017-06-17T00:00:00.000Z",
"likes": null,
},
{
"date": "2017-06-18T00:00:00.000Z",
"likes": 904345,
},
],
},
],
}
]
}
]
Twitter accounts
[
{
"_id": "5943f427e7c11ac3ad3652aa",
"name": "Brand 1 Name",
"vendorId": "twitterhandle",
"years": [
{
"date": "2017-01-01T00:00:00.000Z",
"months": [
{
"date": "2017-06-01T00:00:00.000Z",
"days": [
{
"date": "2017-06-16T00:00:00.000Z",
"followers": 69390,
},
{
"date": "2017-06-17T00:00:00.000Z",
"followers": 69397,
{
"date": "2017-06-18T00:00:00.000Z",
"followers": 69428,
},
{
"date": "2017-06-19T00:00:00.000Z",
"followers": 69457,
},
]
},
],
}
]
}
]
The query
For this example, I want, for each brand, a daily sum of facebook likes and twitter followers between June 16th and June 18th. So here, the required format is:
{
brand: "Brand1",
date: ["2017-06-16T00:00:00.000Z", "2017-06-17T00:00:00.000Z", "2017-06-18T00:00:00.000Z"],
stat: [973415, 69397, 973773]
}
The pipeline
The pipeline is made more convoluted by the population steps, but I accept that complexity as necessary. Here are the steps:
db.getCollection('brands').aggregate([
{ $match: { _id: { $in: [ObjectId("5943f427e7c11ac3ad3652b0") ] } } },
// Unwind all relevant account types. Make one row per account
{ $project: {
accounts: { $setUnion: [ '$facebookAccounts', '$twitterAccounts' ] } ,
name: '$name'
}
},
{ $unwind: '$accounts' },
// populate the accounts.
// These transform the arrays of facebookAccount ObjectIds into the objects described above.
{ $lookup: { from: 'facebookaccounts', localField: 'accounts', foreignField: '_id', as: 'facebookAccounts' } },
{ $lookup: { from: 'twitteraccounts', localField: 'accounts', foreignField: '_id', as: 'twitterAccounts' } },
// unwind the populated accounts. Back to one record per account.
{ $unwind: { path: '$facebookAccounts', preserveNullAndEmptyArrays: true } },
{ $unwind: { path: '$twitterAccounts', preserveNullAndEmptyArrays: true } },
// unwind to the granularity we want. Here it is one record per day per account per brand.
{ $unwind: { path: '$facebookAccounts.years', preserveNullAndEmptyArrays: true } },
{ $unwind: { path: '$facebookAccounts.years.months', preserveNullAndEmptyArrays: true } },
{ $unwind: { path: '$facebookAccounts.years.months.days', preserveNullAndEmptyArrays: true } },
{ $unwind: { path: '$twitterAccounts.years', preserveNullAndEmptyArrays: true } },
{ $unwind: { path: '$twitterAccounts.years.months', preserveNullAndEmptyArrays: true } },
{ $unwind: { path: '$twitterAccounts.years.months.days', preserveNullAndEmptyArrays: true } },
// Filter each one between dates
{ $match: { $or: [
{ $and: [
{ 'facebookAccounts.years.months.days.date': { $gte: new Date('2017-06-16') } } ,
{ 'facebookAccounts.years.months.days.date': { $lte: new Date('2017-06-18') } }
]},
{ $and: [
{ 'twitterAccounts.years.months.days.date': { $gte: new Date('2017-06-16') } } ,
{ 'twitterAccounts.years.months.days.date': { $lte: new Date('2017-06-18') } }
]}
] }},
// Build stats and date arrays for each account
{ $group: {
_id: '$accounts',
brandId: { $first: '$_id' },
brandName: { $first: '$name' },
stat: {
$push: {
$sum: {
$add: [
{ $ifNull: ['$facebookAccounts.years.months.days.likes', 0] },
{ $ifNull: ['$twitterAccounts.years.months.days.followers', 0] }
]
}
}
},
date: { $push: { $ifNull: ['$facebookAccounts.years.months.days.date', '$twitterAccounts.years.months.days.date'] } } ,
}}
])
This gives me the output format
[{
_id: accountId, // facebook
brandName: 'Brand1',
date: ["2017-06-16T00:00:00.000Z", "2017-06-17T00:00:00.000Z", "2017-06-18T00:00:00.000Z"],
stat: [904025, null, 904345]
},
{
_id: accountId, // twitter
brandName: 'Brand1',
date: ["2017-06-16T00:00:00.000Z", "2017-06-17T00:00:00.000Z", "2017-06-18T00:00:00.000Z"],
stat: [69457, 69390, 69397]
}]
So I now need to perform column-wise addition on my stat properties. And there I am stuck - I feel like there should be a more pipeline-friendly way to sum these than column-wise addition.
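For illustration, the column-wise addition I would otherwise have to do client-side looks something like this (a sketch, with rows standing for the two output documents above):
// sum the stat arrays of the facebook and twitter rows position by position
var stat = rows[0].stat.map(function (value, i) {
    return rows.reduce(function (sum, row) { return sum + (row.stat[i] || 0); }, 0);
});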
Note I accept the extra work that the population required and am happy with that. Most of the repetition is done programmatically.
Thank you if you've gotten this far.

I can trim a lot of fat out of this and keep it compatible with the operators available in MongoDB 3.2 (which you must be running at least, given preserveNullAndEmptyArrays) with a few simple actions - mostly by joining the arrays immediately after $lookup, which is the best place to do it:
Short Optimize
db.brands.aggregate([
{ "$lookup": {
"from": "facebookaccounts",
"localField": "facebookAccounts",
"foreignField": "_id",
"as": "facebookAccounts"
}},
{ "$lookup": {
"from": "twitteraccounts",
"localField": "twitterAccounts",
"foreignField": "_id",
"as": "twitterAccounts"
}},
{ "$project": {
"name": 1,
"all": {
"$concatArrays": [ "$facebookAccounts", "$twitterAccounts" ]
}
}},
{ "$match": {
"all.years.months.days.date": {
"$gte": new Date("2017-06-16"), "$lte": new Date("2017-06-18")
}
}},
{ "$unwind": "$all" },
{ "$unwind": "$all.years" },
{ "$unwind": "$all.years.months" },
{ "$unwind": "$all.years.months.days" },
{ "$match": {
"all.years.months.days.date": {
"$gte": new Date("2017-06-16"), "$lte": new Date("2017-06-18")
}
}},
{ "$group": {
"_id": {
"brand": "$name",
"date": "$all.years.months.days.date"
},
"total": {
"$sum": {
"$sum": [
{ "$ifNull": [ "$all.years.months.days.likes", 0 ] },
{ "$ifNull": [ "$all.years.months.days.followers", 0 ] }
]
}
}
}},
{ "$sort": { "_id": 1 } },
{ "$group": {
"_id": "$_id.brand",
"date": { "$push": "$_id.date" },
"stat": { "$push": "$total" }
}}
])
This gives the result:
{
"_id" : "Brand1",
"date" : [
ISODate("2017-06-16T00:00:00Z"),
ISODate("2017-06-17T00:00:00Z"),
ISODate("2017-06-18T00:00:00Z")
],
"stat" : [
973415,
69397,
973773
]
}
With MongoDB 3.4 we could probably speed it up a "little" more by filtering the arrays and breaking them down before we eventually $unwind to make this work across documents, or maybe even not worry about going across documents at all if the "name" from "brands" is unique. The pipeline operations to compact down the arrays "in place" are quite cumbersome to code, though, even if a "little" better on performance.
You seem to be doing this "per brand" or for a small sample, so it's likely of little consequence.
As for the Chart.js data format, I have not been able to confirm that it accepts anything other than the array format used here, but again this should have little bearing.
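For what it's worth, here is a minimal sketch of feeding that result into a Chart.js 2.x line chart, assuming doc is the aggregated result document shown above and myChart is an existing canvas element:
var ctx = document.getElementById('myChart'); // assumed canvas element id
new Chart(ctx, {
    type: 'line',
    data: {
        // one label per date, one numeric array per dataset
        labels: doc.date.map(function (d) { return d.toISOString().slice(0, 10); }),
        datasets: [{ label: doc._id, data: doc.stat }]
    }
});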
The main point I see addressed is we can easily move away from your previous output that separated the "facebook" and "twitter" data, and simply aggregate by date moving all the data together "before" the arrays are constructed.
That last point then obviates the need for further "convoluted" operations to attempt to "merge" those two documents and the arrays produced.
Alternate Optimize
As an alternate approach, where this does not in fact aggregate across documents, you can essentially do the "filter" on the arrays in place and then simply sum and reshape the received result in client code.
db.brands.aggregate([
{ "$lookup": {
"from": "facebookaccounts",
"localField": "facebookAccounts",
"foreignField": "_id",
"as": "facebookAccounts"
}},
{ "$lookup": {
"from": "twitteraccounts",
"localField": "twitterAccounts",
"foreignField": "_id",
"as": "twitterAccounts"
}},
{ "$project": {
"name": 1,
"all": {
"$map": {
"input": { "$concatArrays": [ "$facebookAccounts", "$twitterAccounts" ] },
"as": "all",
"in": {
"years": {
"$map": {
"input": "$$all.years",
"as": "year",
"in": {
"months": {
"$map": {
"input": "$$year.months",
"as": "month",
"in": {
"days": {
"$filter": {
"input": "$$month.days",
"as": "day",
"cond": {
"$and": [
{ "$gte": [ "$$day.date", new Date("2017-06-16") ] },
{ "$lte": [ "$$day.date", new Date("2017-06-18") ] }
]
}
}
}
}
}
}
}
}
}
}
}
}
}}
]).map(doc => {
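// flatten all[].years[].months[].days[] into a single array of day entries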
doc.all = [].concat.apply([],[].concat.apply([],[].concat.apply([],doc.all.map(d => d.years)).map(d => d.months)).map(d => d.days));
doc.all = doc.all.reduce((a,b) => {
if ( a.findIndex( d => d.date.valueOf() == b.date.valueOf() ) != -1 ) {
a[a.findIndex( d => d.date.valueOf() == b.date.valueOf() )].stat += (b.hasOwnProperty('likes')) ? (b.likes || 0) : (b.followers || 0);
} else {
a = a.concat([{ date: b.date, stat: (b.hasOwnProperty('likes')) ? (b.likes || 0) : (b.followers || 0) }]);
}
return a;
},[]);
doc.date = doc.all.map(d => d.date);
doc.stat = doc.all.map(d => d.stat);
delete doc.all;
return doc;
})
This really leaves all the things that "need" to happen on the server, on the server. And it's then a fairly trivial task to "flatten" the array and process to "sum up" and reshape it. This would mean less load on the server, and the data returned is not really that much greater per document.
Gives the same result of course:
[
{
"_id" : ObjectId("5943f427e7c11ac3ad3652b0"),
"name" : "Brand1",
"date" : [
ISODate("2017-06-16T00:00:00Z"),
ISODate("2017-06-17T00:00:00Z"),
ISODate("2017-06-18T00:00:00Z")
],
"stat" : [
973415,
69397,
973773
]
}
]
Committing to the Diet
The biggest problem you really have is with the multiple collections and the heavily nested documents. Neither of these is doing you any favors here, and with larger results they will cause real performance problems.
The nesting in particular is completely unnecessary, and not very maintainable either, since there are limitations to "update" operations where you have nested arrays. See the positional $ operator documentation, as well as the many posts about this.
Instead you really want a single collection with all those "days" entries in it. You can always work with that source easily for query as well as aggregation purposes and it should look something like this:
{
"_id" : ObjectId("5948cd5cd6eb0b7d6ac38097"),
"date" : ISODate("2017-06-16T00:00:00Z"),
"likes" : 904025,
"__t" : "Facebook",
"account" : ObjectId("5943f427e7c11ac3ad3652ac")
}
{
"_id" : ObjectId("5948cd5cd6eb0b7d6ac38098"),
"date" : ISODate("2017-06-17T00:00:00Z"),
"likes" : null,
"__t" : "Facebook",
"account" : ObjectId("5943f427e7c11ac3ad3652ac")
}
{
"_id" : ObjectId("5948cd5cd6eb0b7d6ac38099"),
"date" : ISODate("2017-06-18T00:00:00Z"),
"likes" : 904345,
"__t" : "Facebook",
"account" : ObjectId("5943f427e7c11ac3ad3652ac")
}
{
"_id" : ObjectId("5948cd5cd6eb0b7d6ac3809a"),
"date" : ISODate("2017-06-16T00:00:00Z"),
"followers" : 69390,
"__t" : "Twitter",
"account" : ObjectId("5943f427e7c11ac3ad3652aa")
}
{
"_id" : ObjectId("5948cd5cd6eb0b7d6ac3809b"),
"date" : ISODate("2017-06-17T00:00:00Z"),
"followers" : 69397,
"__t" : "Twitter",
"account" : ObjectId("5943f427e7c11ac3ad3652aa")
}
{
"_id" : ObjectId("5948cd5cd6eb0b7d6ac3809c"),
"date" : ISODate("2017-06-18T00:00:00Z"),
"followers" : 69428,
"__t" : "Twitter",
"account" : ObjectId("5943f427e7c11ac3ad3652aa")
}
{
"_id" : ObjectId("5948cd5cd6eb0b7d6ac3809d"),
"date" : ISODate("2017-06-19T00:00:00Z"),
"followers" : 69457,
"__t" : "Twitter",
"account" : ObjectId("5943f427e7c11ac3ad3652aa")
}
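An added benefit of this flat structure is that updating a single day's figure is a one-level operation, with no nested positional updates to fight. A sketch (the likes value is illustrative, and social is the same collection name used in the $lookup below):
db.social.updateOne(
    { "account" : ObjectId("5943f427e7c11ac3ad3652ac"), "date" : ISODate("2017-06-17T00:00:00Z") },
    { "$set" : { "likes" : 904100 } }
)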
Combining those referenced in the brands collection as well:
{
"_id" : ObjectId("5943f427e7c11ac3ad3652b0"),
"name" : "Brand1",
"accounts" : [
ObjectId("5943f427e7c11ac3ad3652ac"),
ObjectId("5943f427e7c11ac3ad3652aa")
]
}
Then you simply aggregate like this:
db.brands.aggregate([
{ "$lookup": {
"from": "social",
"localField": "accounts",
"foreignField": "account",
"as": "accounts"
}},
{ "$unwind": "$accounts" },
{ "$match": {
"accounts.date": {
"$gte": new Date("2017-06-16"), "$lte": new Date("2017-06-18")
}
}},
{ "$group": {
"_id": {
"brand": "$name",
"date": "$accounts.date"
},
"stat": {
"$sum": {
"$sum": [
{ "$ifNull": [ "$accounts.likes", 0 ] },
{ "$ifNull": [ "$accounts.followers", 0 ] }
]
}
}
}},
{ "$sort": { "_id": 1 } },
{ "$group": {
"_id": "$_id.brand",
"date": { "$push": "$_id.date" },
"stat": { "$push": "$stat" }
}}
])
This is actually the most efficient thing you can do, and it's mostly because of what actually happens on the server. We need to look at the "explain" output to see what happens to the pipeline here:
{
"$lookup" : {
"from" : "social",
"as" : "accounts",
"localField" : "accounts",
"foreignField" : "account",
"unwinding" : {
"preserveNullAndEmptyArrays" : false
},
"matching" : {
"$and" : [
{
"date" : {
"$gte" : ISODate("2017-06-16T00:00:00Z")
}
},
{
"date" : {
"$lte" : ISODate("2017-06-18T00:00:00Z")
}
}
]
}
}
}
This is what happens when you send $lookup -> $unwind -> $match to the server, as the latter two stages are "hoisted" into the $lookup itself. This reduces the results in the actual "query" run on the collection to be joined.
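You can verify this yourself by running the pipeline with the explain option - a sketch, noting that the exact output shape varies by server version:
db.brands.aggregate(
    [ /* the same pipeline as above */ ],
    { "explain": true }
)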
Without that sequence, then $lookup potentially pulls in "a lot of data" with no constraint, and would break the 16MB BSON limit under most normal loads.
So not only is the process a lot more simple in the altered form, it actually "scales" where the present structure will not. This is something that you seriously should consider.

Related

Pivot mongo data in aggregation pipeline after $addToSet: one set becomes keys, one set values

I have a pipeline so far that looks like this:
db.getCollection("Members").aggregate(
[
{
"$match" : {
"member.MemberInfo.BusinessUnitCode" : "20"
}
},
{
"$group" : {
"_id" : "$_id",
"items" : {
"$addToSet" : {
"Type" : "$member.MemberIdentifications.MemberIdentification.IdentificationType",
"Value" : "$member.MemberIdentifications.MemberIdentification.Identifier"
}
}
}
}
])
And I have results that are:
{
"_id" : ObjectId("53ad60c1600f5b241c693cbd"),
"items" : [
{
"Type" : [
"Medicaid ID",
"Medicare ID",
"Amisys ID",
"Social Security Number",
"Market Prominence ID",
"MBI",
"MPI"
],
"Value" : [
"221075***",
"450807099M",
"C0004125301",
"45*******",
"N00020269104",
"3K13EA8EY99",
"17296217"
]
}
]}
What I want is this:
{
"_id" : ObjectId("53ad60c1600f5b241c693cbd"),
"Medicaid ID": "221075501",
"Medicare ID": "450807099M",
"Amisys ID": "C0004125301",
"Social Security Number": "45*******",
"Market Prominence ID": "N00020269104",
"MBI": "3K13EA8EY99",
"MPI": "17296217"
}
In table view this would look like (obviously with many more records):
_id Medicaid ID Medicare ID Amisys ID Social Security Number Market
53ad60c1600f5b241c693cbd 221075*** 450807099M C0004125301 45******* N00020269104
I'm not sure what next steps to take. I've looked at a similar question about pivoting data in Mongo to make rows columns, but it's a different case and it doesn't really apply to mine. What I'm basically doing is pivoting from the two sets I created so one set becomes the keys and one set becomes the values of one new document.
You may solve it as you would with a for loop. For your aggregation, add 3 steps.
Explanation
$unwind: "$items" will create single document for each item.
$range: [0,{$size: "$items.Type"},1] returns [0,1,2,3,4,5,6]
We create array with this structure [{ k:"Type[i]", v:"Value[i]" }]
If we use $arrayToObject + $replaceRoot we get desired result
db.Members.aggregate([
{
$unwind: "$items"
},
{
$project: {
_id: 1,
data: {
$map: {
input: {
$range: [
0,
{
$size: "$items.Type"
},
1
]
},
in: {
k: {
$arrayElemAt: [
"$items.Type",
"$$this"
]
},
v: {
$arrayElemAt: [
"$items.Value",
"$$this"
]
}
}
}
}
}
},
{
$replaceRoot: {
newRoot: {
$arrayToObject: {
$concatArrays: [
[
{
k: "_id",
v: "$_id"
}
],
"$data"
]
}
}
}
}
])
MongoPlayground
NOTE: You can get the desired result with the $unwind approach explained here.
EDIT:
Try to change:
db.Members.aggregate([
{
"$match": {
"member.MemberInfo.BusinessUnitCode": "20"
}
},
{
"$group": {
"_id": "$_id",
"items": {
"$push": {
"k": "$member.MemberIdentifications.MemberIdentification.IdentificationType",
"v": "$member.MemberIdentifications.MemberIdentification.Identifier"
}
}
}
},
{
$replaceRoot: {
newRoot: {
$arrayToObject: {
$concatArrays: [
[
{
k: "_id",
v: "$_id"
}
],
"$items"
]
}
}
}
}
])

How to do LEFT JOIN in MongoDB aggregate function? [duplicate]

I have a collection of users where each document has following structure:
{
"_id": "<id>",
"login": "xxx",
"solved": [
{
"problem": "<problemID>",
"points": 10
},
...
]
}
The field solved may be empty or contain arbitrarily many subdocuments. My goal is to get a list of users together with the total score (sum of points), where users that haven't solved any problem yet are assigned a total score of 0. Is it possible to do this with a single query (ideally using the aggregation framework)?
I was trying to use the following query in the aggregation framework:
{ "$group": {
"_id": "$_id",
"login": { "$first": "$login" },
"solved": { "$addToSet": { "points": 0 } }
} }
{ "$unwind": "$solved" }
{ "$group": {
"_id": "$_id",
"login": { "$first": "$login" },
"solved": { "$sum": "$solved.points" }
} }
However I am getting following error:
exception: The top-level _id field is the only field currently supported for exclusion
Thank you in advance
With MongoDB 3.2 and newer, the $unwind operator has some options, and in particular the preserveNullAndEmptyArrays option will solve this.
If this option is set to true and if the path is null, missing, or an empty array, $unwind outputs the document. If false, $unwind does not output a document if the path is null, missing, or an empty array. In your case, set it to true:
db.collection.aggregate([
{ "$unwind": {
"path": "$solved",
"preserveNullAndEmptyArrays": true
} },
{ "$group": {
"_id": "$_id",
"login": { "$first": "$login" },
"solved": { "$sum": "$solved.points" }
} }
])
Here is the solution - it assumes that the field "solved" is either absent, equal to null, or holds an array of solved problems and scores. The case it does not handle is "solved" being an empty array - although that is a simple additional adjustment, shown in the sketch after the code below.
project = {$project : {
"s" : {
"$ifNull" : [
"$solved",
[
{
"points" : 0
}
]
]
},
"login" : 1
}
};
unwind={$unwind:"$s"};
group= { "$group" : {
"_id" : "$_id",
"login" : {
"$first" : "$login"
},
"score" : {
"$sum" : "$s.points"
}
}
}
db.students.aggregate( [ project, unwind, group ] );
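For completeness, the empty-array case could be covered by swapping the $ifNull for a $cond - a sketch under the same assumptions, reusing the unwind and group stages from above:
project = {$project : {
    "s" : {
        "$cond" : [
            // missing, null, or empty array all collapse to size 0
            { "$eq" : [ { "$size" : { "$ifNull" : [ "$solved", [] ] } }, 0 ] },
            [ { "points" : 0 } ],
            "$solved"
        ]
    },
    "login" : 1
}};
db.students.aggregate( [ project, unwind, group ] );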
$lookup, then $unwind inside the looked-up array, which could be empty
let posts = await Post.aggregate<ActivityDoc>([
{
$match: {
_id: new mongoose.Types.ObjectId(req.params.id),
},
},
{
$lookup: {
from: 'users',
localField: 'user',
foreignField: '_id',
as: 'user',
},
},
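// NOTE: $lookup always returns 'user' as an array; the first $unwind below
// flattens it to a single subdocument, and the second unwinds each follower
// while preserving posts whose author has no followers at all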
{
$unwind: '$user',
},
{
$unwind: {
path: '$user.follower',
preserveNullAndEmptyArrays: true,
},
},
{
$match: {
$or: [
{
$and: [
{
'privacy.mode': {
$eq: PrivacyMode.EveryOne,
},
},
],
},
{
$and: [
{
'privacy.mode': {
$eq: PrivacyMode.MyCircle,
},
},
{
'user.follower.id': {
$eq: req.currentUser?.id,
},
},
],
},
],
},
},
]);

MongoDB: How to get add filter main collection by second collection using $lookup [duplicate]

How can I add a filter after a $lookup, or is there any other method to do this?
My data collection test is:
{ "_id" : ObjectId("570557d4094a4514fc1291d6"), "id" : 100, "value" : "0", "contain" : [ ] }
{ "_id" : ObjectId("570557d4094a4514fc1291d7"), "id" : 110, "value" : "1", "contain" : [ 100 ] }
{ "_id" : ObjectId("570557d4094a4514fc1291d8"), "id" : 120, "value" : "1", "contain" : [ 100 ] }
{ "_id" : ObjectId("570557d4094a4514fc1291d9"), "id" : 121, "value" : "2", "contain" : [ 100, 120 ] }
I select id 100 and aggregate the childs:
db.test.aggregate([ {
$match : {
id: 100
}
}, {
$lookup : {
from : "test",
localField : "id",
foreignField : "contain",
as : "childs"
}
}]);
I get back:
{
"_id":ObjectId("570557d4094a4514fc1291d6"),
"id":100,
"value":"0",
"contain":[ ],
"childs":[ {
"_id":ObjectId("570557d4094a4514fc1291d7"),
"id":110,
"value":"1",
"contain":[ 100 ]
},
{
"_id":ObjectId("570557d4094a4514fc1291d8"),
"id":120,
"value":"1",
"contain":[ 100 ]
},
{
"_id":ObjectId("570557d4094a4514fc1291d9"),
"id":121,
"value":"2",
"contain":[ 100, 120 ]
}
]
}
But I want only the childs that match "value: 1".
At the end I expect this result:
{
"_id":ObjectId("570557d4094a4514fc1291d6"),
"id":100,
"value":"0",
"contain":[ ],
"childs":[ {
"_id":ObjectId("570557d4094a4514fc1291d7"),
"id":110,
"value":"1",
"contain":[ 100 ]
},
{
"_id":ObjectId("570557d4094a4514fc1291d8"),
"id":120,
"value":"1",
"contain":[ 100 ]
}
]
}
The question here is actually about something different and does not need $lookup at all. But for anyone arriving here purely from the title of "filtering after $lookup", these are the techniques for you:
MongoDB 3.6 - Sub-pipeline
db.test.aggregate([
{ "$match": { "id": 100 } },
{ "$lookup": {
"from": "test",
"let": { "id": "$id" },
"pipeline": [
{ "$match": {
"value": "1",
"$expr": { "$in": [ "$$id", "$contain" ] }
}}
],
"as": "childs"
}}
])
Earlier - $lookup + $unwind + $match coalescence
db.test.aggregate([
{ "$match": { "id": 100 } },
{ "$lookup": {
"from": "test",
"localField": "id",
"foreignField": "contain",
"as": "childs"
}},
{ "$unwind": "$childs" },
{ "$match": { "childs.value": "1" } },
{ "$group": {
"_id": "$_id",
"id": { "$first": "$id" },
"value": { "$first": "$value" },
"contain": { "$first": "$contain" },
"childs": { "$push": "$childs" }
}}
])
If you question why would you $unwind as opposed to using $filter on the array, then read Aggregate $lookup Total size of documents in matching pipeline exceeds maximum document size for all the detail on why this is generally necessary and far more optimal.
For releases of MongoDB 3.6 and onwards, then the more expressive "sub-pipeline" is generally what you want to "filter" the results of the foreign collection before anything gets returned into the array at all.
Back to the answer though which actually describes why the question asked needs "no join" at all....
Original
Using $lookup like this is not the most "efficient" way to do what you want here. But more on this later.
As a basic concept, just use $filter on the resulting array:
db.test.aggregate([
{ "$match": { "id": 100 } },
{ "$lookup": {
"from": "test",
"localField": "id",
"foreignField": "contain",
"as": "childs"
}},
{ "$project": {
"id": 1,
"value": 1,
"contain": 1,
"childs": {
"$filter": {
"input": "$childs",
"as": "child",
"cond": { "$eq": [ "$$child.value", "1" ] }
}
}
}}
]);
Or use $redact instead:
db.test.aggregate([
{ "$match": { "id": 100 } },
{ "$lookup": {
"from": "test",
"localField": "id",
"foreignField": "contain",
"as": "childs"
}},
{ "$redact": {
"$cond": {
"if": {
"$or": [
{ "$eq": [ "$value", "0" ] },
{ "$eq": [ "$value", "1" ] }
]
},
"then": "$$DESCEND",
"else": "$$PRUNE"
}
}}
]);
Both get the same result:
{
"_id":ObjectId("570557d4094a4514fc1291d6"),
"id":100,
"value":"0",
"contain":[ ],
"childs":[ {
"_id":ObjectId("570557d4094a4514fc1291d7"),
"id":110,
"value":"1",
"contain":[ 100 ]
},
{
"_id":ObjectId("570557d4094a4514fc1291d8"),
"id":120,
"value":"1",
"contain":[ 100 ]
}
]
}
Bottom line is that $lookup itself cannot "yet" query to only select certain data. So all "filtering" needs to happen after the $lookup.
But really for this type of "self join" you are better off not using $lookup at all and avoiding the overhead of an additional read and "hash-merge" entirely. Just fetch the related items and $group instead:
db.test.aggregate([
{ "$match": {
"$or": [
{ "id": 100 },
{ "contain.0": 100, "value": "1" }
]
}},
{ "$group": {
"_id": {
"$cond": {
"if": { "$eq": [ "$value", "0" ] },
"then": "$id",
"else": { "$arrayElemAt": [ "$contain", 0 ] }
}
},
"value": { "$first": { "$literal": "0"} },
"childs": {
"$push": {
"$cond": {
"if": { "$ne": [ "$value", "0" ] },
"then": "$$ROOT",
"else": null
}
}
}
}},
{ "$project": {
"value": 1,
"childs": {
"$filter": {
"input": "$childs",
"as": "child",
"cond": { "$ne": [ "$$child", null ] }
}
}
}}
])
Which only comes out a little different because I deliberately removed the extraneous fields. Add them in yourself if you really want to:
{
"_id" : 100,
"value" : "0",
"childs" : [
{
"_id" : ObjectId("570557d4094a4514fc1291d7"),
"id" : 110,
"value" : "1",
"contain" : [ 100 ]
},
{
"_id" : ObjectId("570557d4094a4514fc1291d8"),
"id" : 120,
"value" : "1",
"contain" : [ 100 ]
}
]
}
So the only real issue here is "filtering" the null entries from the array, which were created when the current document was the parent while processing items to $push.
What you also seem to be missing here is that the result you are looking for does not need aggregation or "sub-queries" at all. The structure that you have concluded, or possibly found elsewhere, is "designed" so that you can get a "node" and all of its "children" in a single query request.
That means just the "query" is all that is really needed, and the data collection (which is all that is happening, since no content is really being "reduced") is just a function of iterating the cursor result:
var result = {};
db.test.find({
"$or": [
{ "id": 100 },
{ "contain.0": 100, "value": "1" }
]
}).sort({ "contain.0": 1 }).forEach(function(doc) {
if ( doc.id == 100 ) {
result = doc;
result.childs = []
} else {
result.childs.push(doc)
}
})
printjson(result);
This does exactly the same thing:
{
"_id" : ObjectId("570557d4094a4514fc1291d6"),
"id" : 100,
"value" : "0",
"contain" : [ ],
"childs" : [
{
"_id" : ObjectId("570557d4094a4514fc1291d7"),
"id" : 110,
"value" : "1",
"contain" : [
100
]
},
{
"_id" : ObjectId("570557d4094a4514fc1291d8"),
"id" : 120,
"value" : "1",
"contain" : [
100
]
}
]
}
And serves as proof that all you really need to do here is issue the "single" query to select both the parent and children. The returned data is just the same, and all you are doing on either server or client is "massaging" it into another collected format.
This is one of those cases where you can get "caught up" in thinking of how you did things in a "relational" database, and not realize that since the way the data is stored has "changed", you no longer need to use the same approach.
That is exactly the point of the documentation example "Model Tree Structures with Child References", whose structure makes it easy to select parents and children within one query.

Using the aggregation framework to compare array element overlap

I have a collections with documents structured like below:
{
carrier: "abc",
flightNumber: 123,
dates: [
ISODate("2015-01-01T00:00:00Z"),
ISODate("2015-01-02T00:00:00Z"),
ISODate("2015-01-03T00:00:00Z")
]
}
I would like to search the collection to see if there are any documents with the same carrier and flightNumber that also have dates in the dates array that overlap. For example:
{
carrier: "abc",
flightNumber: 123,
dates: [
ISODate("2015-01-01T00:00:00Z"),
ISODate("2015-01-02T00:00:00Z"),
ISODate("2015-01-03T00:00:00Z")
]
},
{
carrier: "abc",
flightNumber: 123,
dates: [
ISODate("2015-01-03T00:00:00Z"),
ISODate("2015-01-04T00:00:00Z"),
ISODate("2015-01-05T00:00:00Z")
]
}
If the above records were present in the collection I would like to return them because they both have carrier: abc, flightNumber: 123 and they also have the date ISODate("2015-01-03T00:00:00Z") in the dates array. If this date were not present in the second document then neither should be returned.
Typically I would do this by grouping and counting like below:
db.flights.aggregate([
{
$group: {
_id: { carrier: "$carrier", flightNumber: "$flightNumber" },
uniqueIds: { $addToSet: "$_id" },
count: { $sum: 1 }
}
},
{
$match: {
count: { $gt: 1 }
}
}
])
But I'm not sure how I could modify this to look for array overlap. Can anyone suggest how to achieve this?
You $unwind the array if you want to look at the contents as "grouped" within them:
db.flights.aggregate([
{ "$unwind": "$dates" },
{ "$group": {
"_id": { "carrier": "$carrier", "flightnumber": "$flightnumber", "date": "$dates" },
"count": { "$sum": 1 },
"_ids": { "$addToSet": "$_id" }
}},
{ "$match": { "count": { "$gt": 1 } } },
{ "$unwind": "$_ids" },
{ "$group": { "_id": "$_ids" } }
])
That does in fact tell you in which documents the "overlap" resides, because the "same dates", along with the other grouping key values you are concerned about, have a "count" that occurs more than once - indicating the overlap.
Anything after the $match is really just for "presentation" as there is no point reporting the same _id value for multiple overlaps if you just want to see the overlaps. In fact if you want to see them together it would probably be best to leave the "grouped set" alone.
Now you could add a $lookup to that if retrieving the actual documents was important to you:
db.flights.aggregate([
{ "$unwind": "$dates" },
{ "$group": {
"_id": { "carrier": "$carrier", "flightnumber": "$flightnumber", "date": "$dates" },
"count": { "$sum": 1 },
"_ids": { "$addToSet": "$_id" }
}},
{ "$match": { "count": { "$gt": 1 } } },
{ "$unwind": "$_ids" },
{ "$group": { "_id": "$_ids" } },
}},
{ "$lookup": {
"from": "flights",
"localField": "_id",
"foreignField": "_id",
"as": "_ids"
}},
{ "$unwind": "$_ids" },
{ "$replaceRoot": {
"newRoot": "$_ids"
}}
])
And even do a $replaceRoot or $project to make it return the whole document. Or you could have even done $addToSet with $$ROOT if it was not a problem for size.
But the overall point is covered in the first three pipeline stages, or mostly in just the "first". If you want to work with arrays "across documents", then the primary operator is still $unwind.
Alternately for a more "reporting" like format:
db.flights.aggregate([
{ "$addFields": { "copy": "$$ROOT" } },
{ "$unwind": "$dates" },
{ "$group": {
"_id": {
"carrier": "$carrier",
"flightNumber": "$flightNumber",
"dates": "$dates"
},
"count": { "$sum": 1 },
"_docs": { "$addToSet": "$copy" }
}},
{ "$match": { "count": { "$gt": 1 } } },
{ "$group": {
"_id": {
"carrier": "$_id.carrier",
"flightNumber": "$_id.flightNumber",
},
"overlaps": {
"$push": {
"date": "$_id.dates",
"_docs": "$_docs"
}
}
}}
])
Which would report the overlapped dates within each group and tell you which documents contained the overlap:
{
"_id" : {
"carrier" : "abc",
"flightNumber" : 123.0
},
"overlaps" : [
{
"date" : ISODate("2015-01-03T00:00:00.000Z"),
"_docs" : [
{
"_id" : ObjectId("5977f9187dcd6a5f6a9b4b97"),
"carrier" : "abc",
"flightNumber" : 123.0,
"dates" : [
ISODate("2015-01-03T00:00:00.000Z"),
ISODate("2015-01-04T00:00:00.000Z"),
ISODate("2015-01-05T00:00:00.000Z")
]
},
{
"_id" : ObjectId("5977f9187dcd6a5f6a9b4b96"),
"carrier" : "abc",
"flightNumber" : 123.0,
"dates" : [
ISODate("2015-01-01T00:00:00.000Z"),
ISODate("2015-01-02T00:00:00.000Z"),
ISODate("2015-01-03T00:00:00.000Z")
]
}
]
}
]
}

How to find document and single subdocument matching given criterias in MongoDB collection

I have a collection of products. Each product contains an array of items.
> db.products.find().pretty()
{
"_id" : ObjectId("54023e8bcef998273f36041d"),
"shop" : "shop1",
"name" : "product1",
"items" : [
{
"date" : "01.02.2100",
"purchasePrice" : 1,
"sellingPrice" : 10,
"count" : 15
},
{
"date" : "31.08.2014",
"purchasePrice" : 10,
"sellingPrice" : 1,
"count" : 5
}
]
}
So, can you please give me advice on how I can query MongoDB to retrieve all products with only the single item whose date equals the date I pass to the query as a parameter?
The result for "31.08.2014" must be:
{
"_id" : ObjectId("54023e8bcef998273f36041d"),
"shop" : "shop1",
"name" : "product1",
"items" : [
{
"date" : "31.08.2014",
"purchasePrice" : 10,
"sellingPrice" : 1,
"count" : 5
}
]
}
What you are looking for is the positional $ operator and "projection". For a single field you need to match the required array element using "dot notation", for more than one field use $elemMatch:
db.products.find(
{ "items.date": "31.08.2014" },
{ "shop": 1, "name":1, "items.$": 1 }
)
Or the $elemMatch for more than one matching field:
db.products.find(
{ "items": {
"$elemMatch": { "date": "31.08.2014", "purchasePrice": 1 }
}},
{ "shop": 1, "name":1, "items.$": 1 }
)
These work for a single array element only though and only one will be returned. If you want more than one array element to be returned from your conditions then you need more advanced handling with the aggregation framework.
db.products.aggregate([
{ "$match": { "items.date": "31.08.2014" } },
{ "$unwind": "$items" },
{ "$match": { "items.date": "31.08.2014" } },
{ "$group": {
"_id": "$_id",
"shop": { "$first": "$shop" },
"name": { "$first": "$name" },
"items": { "$push": "$items" }
}}
])
Or possibly in shorter/faster form since MongoDB 2.6 where your array of items contains unique entries:
db.products.aggregate([
{ "$match": { "items.date": "31.08.2014" } },
{ "$project": {
"shop": 1,
"name": 1,
"items": {
"$setDifference": [
{ "$map": {
"input": "$items",
"as": "el",
"in": {
"$cond": [
{ "$eq": [ "$$el.date", "31.08.2014" ] },
"$$el",
false
]
}
}},
[false]
]
}
}}
])
Or possibly with $redact, but a little contrived:
db.products.aggregate([
{ "$match": { "items.date": "31.08.2014" } },
{ "$redact": {
"$cond": [
{ "$eq": [ { "$ifNull": [ "$date", "31.08.2014" ] }, "31.08.2014" ] },
"$$DESCEND",
"$$PRUNE"
]
}}
])
In more modern releases, you would use $filter:
db.products.aggregate([
{ "$match": { "items.date": "31.08.2014" } },
{ "$addFields": {
"items": {
"input": "$items",
"cond": { "$eq": [ "$$this.date", "31.08.2014" ] }
}
}}
])
And with multiple conditions, use $elemMatch in the $match and $and within the $filter:
db.products.aggregate([
{ "$match": {
"$elemMatch": { "date": "31.08.2014", "purchasePrice": 1 }
}},
{ "$addFields": {
"items": {
"input": "$items",
"cond": {
"$and": [
{ "$eq": [ "$$this.date", "31.08.2014" ] },
{ "$eq": [ "$$this.purchasePrice", 1 ] }
]
}
}
}}
])
So it just depends on whether you always expect a single element to match or multiple elements, and then which approach is better. But where possible the .find() method will generally be faster since it lacks the overhead of the other operations, which in those last two forms do not lag that far behind at all.
As a side note, your "dates" are represented as strings which is not a very good idea going forward. Consider changing these to proper Date object types, which will greatly help you in the future.
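A one-off conversion in the shell could look like this - a sketch, assuming the DD.MM.YYYY strings shown in the question:
db.products.find({ "items.date": { "$type": 2 } }).forEach(function(doc) { // type 2 = string
    doc.items.forEach(function(item) {
        var p = item.date.split("."); // "31.08.2014" -> [ "31", "08", "2014" ]
        item.date = new Date(Date.UTC(+p[2], +p[1] - 1, +p[0]));
    });
    db.products.update({ "_id": doc._id }, { "$set": { "items": doc.items } });
});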
Based on Neil Lunn's code, I work with this solution; it automatically includes all first-level keys (but you could also exclude keys if you want):
db.products.find(
{ "items.date": "31.08.2014" },
{ items: { $elemMatch: { date: "31.08.2014" } } }
)
With multiple requirements:
db.products.find(
{ "items": {
"$elemMatch": { "date": "31.08.2014", "purchasePrice": 1 }
}},
{ items: { $elemMatch: { "date": "31.08.2014", "purchasePrice": 1 } } },
)
Mongo supports dot notation for sub-queries.
See: http://docs.mongodb.org/manual/reference/glossary/#term-dot-notation
Depending on your driver, you want something like:
db.products.find({"items.date":"31.08.2014"});
Note that the attribute is in quotes for dot notation, even if usually your driver doesn't require this.