MongoDB aggregate - average on specific values in array of documents

I'm currently working on a database with the following structure:
{"_id" : ObjectId("1abc2"),
"startdatetime" : ISODate("2016-09-11T18:00:37Z"),
"diveValues" : [
{
"temp" : 15.269,
"depth" : 0.0,
},
{
"temp" : 14.779257384,
"depth" : 1.0,
},
{
"temp" : 14.3940253165,
"depth" : 2.0,
},
{
"temp" : 13.9225795455,
"depth" : 3.0,
},
{
"temp" : 13.8214431818,
"depth" : 4.0,
},
{
"temp" : 13.6899553571,
"depth" : 5.0,
}
]}
The database has information about the depth in metres under water and the temperature at a given depth. This is stored in the "diveValues" array. I have been successful in averaging over all depths between two dates, both monthly averages and daily averages. What I'm having a serious issue with is getting the average between two depths, say between 1 and 4 metres, for every month over the last 6 months.
Here is an example of average temperature for each month from January to June, for all depths:
db.collection.aggregate(
[
{$unwind:"$diveValues"},
{$match:
{'startdatetime':
{$gt:new ISODate("2016-01-10T06:00:29Z"),
$lt:new ISODate("2016-06-10T06:00:29Z")}
}
},
{$group:
{_id:
{ year: { $year: "$startdatetime" },
month: { $month: "$startdatetime" }},
avgTemp: { $avg: "$diveValues.temp" }}
},
{$sort:{_id:1}}
]
)
Resulting in:
{ "_id" : { "year" : 2016, "month" : 1 }, "avgTemp" : 7.575706502958313 }
{ "_id" : { "year" : 2016, "month" : 3 }, "avgTemp" : 6.85037457740135 }
{ "_id" : { "year" : 2016, "month" : 4 }, "avgTemp" : 7.215702831902588 }
{ "_id" : { "year" : 2016, "month" : 5 }, "avgTemp" : 9.153453683614638 }
{ "_id" : { "year" : 2016, "month" : 6 }, "avgTemp" : 11.497953009390237 }
Now, I cannot seem to figure out how to get the average temperature between 1 and 4 metres for the same period.
I have been trying to group the values by the wanted depths, but have not managed it - more often than not ending up with bad syntax. Also, if I'm not wrong, the $match stage would return all of a dive's depths as long as the dive has values for 1 and 4 metres, so that will not work by itself.
With the find() tool I am using $slice to return the values I want from the array - but I have not been successful combining it with aggregate().
Is there a way to solve this? Thanks in advance, much appreciated!

You'd need to place your $match stage before $unwind to optimize your aggregation operation. Doing an $unwind on the whole collection could potentially cause performance issues, since it produces a copy of each document per array entry. That uses more memory (aggregation pipeline stages are subject to a memory cap, 100MB per stage in recent MongoDB versions) and thus takes "time" to produce the flattened documents as well as "time" to process them. Hence it's better to limit the number of documents getting into the pipeline before flattening them.
db.collection.aggregate([
{
"$match": {
"startdatetime": {
"$gt": new ISODate("2016-01-10T06:00:29Z"),
"$lt": new ISODate("2016-06-10T06:00:29Z")
},
"diveValues.depth": { "$gte": 1, "$lte": 4 }
}
},
{ "$unwind": "$diveValues" },
{ "$match": { "diveValues.depth": { "$gte": 1, "$lte": 4 } } },
{
"$group": {
"_id": {
"year": { "$year": "$startdatetime" },
"month": { "$month": "$startdatetime" }
},
"avgTemp": { "$avg": "$diveValues.temp" }
}
}
])
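If a pipeline still hits the per-stage memory limit despite the early $match, the aggregate() call accepts the standard allowDiskUse option, which lets blocking stages spill to disk. A minimal sketch of the call shape (stages elided):
db.collection.aggregate(
    [ /* the stages shown above */ ],
    { "allowDiskUse": true }
)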
If you want the results to contain the average temps for all depths as well as for the 1-4 metre range, then you would need to run this pipeline, which uses the $cond ternary operator to feed the $avg operator only the accumulated temperatures within a group that fall inside the depth range:
db.collection.aggregate([
{
"$match": {
"startdatetime": {
"$gt": new ISODate("2016-01-10T06:00:29Z"),
"$lt": new ISODate("2016-06-10T06:00:29Z")
}
}
},
{ "$unwind": "$diveValues" },
{
"$group": {
"_id": {
"year": { "$year": "$startdatetime" },
"month": { "$month": "$startdatetime" }
},
"avgTemp": { "$avg": "$diveValues.temp" },
"avgTempDepth1-4": {
"$avg": {
"$cond": [
{
"$and": [
{ "$gte": [ "$diveValues.depth", 1 ] },
{ "$lte": [ "$diveValues.depth", 4 ] }
]
},
"$diveValues.temp",
null
]
}
}
}
}
])
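This works because $avg ignores null and other non-numeric values rather than counting them toward the denominator, so feeding null for out-of-range depths does not skew the average. A quick check with literal values (hypothetical numbers, not from the data above):
db.collection.aggregate([
    { "$limit": 1 },
    { "$project": { "_id": 0, "demo": { "$avg": [ 1, null, 3 ] } } }
])
// -> { "demo" : 2 }, not 4/3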

First of all, the date $match should be placed at the beginning of the pipeline so that indexes can be used.
Now, to the question: you just need to filter the depth interval like you did with the dates, using a second $match after the $unwind:
db.col.aggregate([
{"$match": {
'startdatetime': {
"$gt": new ISODate("2016-01-10T06:00:29Z"),
"$lt": new ISODate("2016-11-10T06:00:29Z")
}
}},
{"$unwind": "$diveValues"},
{"$match": {
"diveValues.depth": {
"$gte": 1.0,
"$lt": 4.0
}
}},
{"$group": {
"_id": {
"year": {"$year": "$startdatetime" },
"month": {"$month": "$startdatetime" }
},
"avgTemp": { "$avg": "$diveValues.temp" }}
}
])
This will give you the average only for the chosen depth interval.
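On the index point raised above: the leading $match on dates can only use an index if one exists on the date field. A minimal sketch, using this answer's collection name:
db.col.createIndex({ "startdatetime": 1 })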

Related

Aggregating by field value on MongoDB

I have a collection composed of documents similar to the one below:
{
"_id" : ObjectId("5dc916a72440b14b3f0ec096"),
"date" : ISODate("2019-11-11T11:07:03.968+03:00"),
"actions" : [
{
"type" : "Type1",
"action" : true
},
{
"type" : "Type2",
"action" : true
},
{
"type" : "Type3",
"action" : false
}
]
}
I am trying to count all the action types based on the boolean value of the actions.action property.
This is what I have so far:
db.Actions.aggregate(
{
$group: {
_id: {
year: { $year: "$date" },
month: { $month: "$date" },
day: { $dayOfMonth: "$date" },
},
count: { $sum: 1 }
}
}
);
As you can see this only gives me the count of the documents in the collection grouped by the action date.
What I need is something like this:
{
"_id" : {
"year" : 2019,
"month" : 10,
"day" : 13
},
"Type1": 300,
"Type2": 200,
"Type3": 120,
"count" : 305
}
Is this possible with a query, or should I go in the direction of creating a cursor and aggregating the values with it?
db.Actions.aggregate([
// Unwind to de-normalize the array
{ "$unwind": "$actions" },
// Group on both day and "type"
{ "$group": {
"_id": {
"date": {
"$toDate": {
"$subtract": [
{ "$toLong": "$date" },
{ "$mod": [{ "$toLong": { "$toDate": "$date" } }, 1000 * 60 * 60 * 24 ] }
]
}
},
"type": "$actions.type"
},
"total": { "$sum": { "$toLong": "$actions.action" } }
}},
// Roll-up the grouping to just by "day"
{ "$group": {
"_id": "$_id.date",
"data": { "$push": { "k": "$_id.type", "v": "$total" } }
}},
// Convert to key/value output
{ "$replaceRoot": {
"newRoot": {
"$mergeObjects": [
{ "_id": "$_id", "count": { "$sum": "$data.v" } },
{ "$arrayToObject": "$data" }
]
}
}}
])
To summarize:
The $unwind is needed simply because you want to "group" on a value which is inside an array of the document. Using it "de-normalizes" the array, essentially making each array element into a new document carrying the same parent properties of the document in which that array resides. In simple terms, you get a "copy" of the containing document for every array member, as a new document.
The next $group basically uses a "Date math" approach to round to a single day. This is a bit prettier than methods like $year and $month etc, and actually returns a Date object, which your client language of choice will understand.
Of course this is a compound grouping key, meaning that the other part is the type field from the array of actions. And since you only want true results to count, we apply $toLong in order to translate the Boolean into a numeric value for $sum (which basically means "count" when the values are 0 and 1). In older releases you could also do this using $cond, but the simple type conversion is much easier to read for intent.
The rest of this is basically about translating to the expected "key/value" output of the question. Really, you got the desired result in the very first $group operation, but to be "key/value" you need to put all those results into an array (by "date" of course) using $push, and then convert that array into the root document using the $arrayToObject operator.
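To make that last step concrete, here is $arrayToObject in isolation on literal k/v pairs (hypothetical values, not from the data above). Note the outer array wrapping, which is required when passing a literal array to the operator:
db.Actions.aggregate([
    { "$limit": 1 },
    { "$project": {
        "_id": 0,
        "demo": { "$arrayToObject": [[ { "k": "Type1", "v": 300 }, { "k": "Type2", "v": 200 } ]] }
    }}
])
// -> { "demo" : { "Type1" : 300, "Type2" : 200 } }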

Group by day with Multiple Date Fields

I have documents stored into MongoDB like this :
{
"_id" : "XBpNKbdGSgGfnC2MJ",
"po" : 72134185,
"machine" : 40940,
"location" : "02A01",
"inDate" : ISODate("2017-07-19T06:10:13.059Z"),
"requestDate" : ISODate("2017-07-19T06:17:04.901Z"),
"outDate" : ISODate("2017-07-19T06:30:34Z")
}
And I want to get the count of documents, by day, for both inDate and outDate.
I can retrieve on one side the count of documents by inDate day and, on the other side, the count by outDate, but I would like both at once.
Currently, I use this pipeline:
$group: {
    _id: {
        year: { $year: '$inDate' },
        month: { $month: '$inDate' },
        day: { $dayOfMonth: '$inDate' },
    },
    count: { $sum: 1 },
},
which gives:
{ "_id" : { "year" : 2017, "month" : 7, "day" : 24 }, "count" : 1 }
{ "_id" : { "year" : 2017, "month" : 7, "day" : 21 }, "count" : 11 }
{ "_id" : { "year" : 2017, "month" : 7, "day" : 19 }, "count" : 20 }
But I would like, if it's possible:
{ "_id" : { "year" : 2017, "month" : 7, "day" : 24 }, "countIn" : 1, "countOut" : 4 }
{ "_id" : { "year" : 2017, "month" : 7, "day" : 21 }, "countIn" : 11, "countOut" : 23 }
{ "_id" : { "year" : 2017, "month" : 7, "day" : 19 }, "countIn" : 20, "countOut" : 18 }
Any idea?
Many thanks :-)
You can also split the documents at the source, by essentially combining each value into an array of entries by "type" for "in" and "out". You can do this simply using $map with $cond to select the fields, then $unwind the array, and then determine which field to "count" by inspecting the type with $cond again:
collection.aggregate([
{ "$project": {
"dates": {
"$filter": {
"input": {
"$map": {
"input": [ "in", "out" ],
"as": "type",
"in": {
"type": "$$type",
"date": {
"$cond": {
"if": { "$eq": [ "$$type", "in" ] },
"then": "$inDate",
"else": "$outDate"
}
}
}
}
},
"as": "dates",
"cond": { "$ne": [ "$$dates.date", null ] }
}
}
}},
{ "$unwind": "$dates" },
{ "$group": {
"_id": {
"year": { "$year": "$dates.date" },
"month": { "$month": "$dates.date" },
"day": { "$dayOfMonth": "$dates.date" }
},
"countIn": {
"$sum": {
"$cond": {
"if": { "$eq": [ "$dates.type", "in" ] },
"then": 1,
"else": 0
}
}
},
"countOut": {
"$sum": {
"$cond": {
"if": { "$eq": [ "$dates.type", "out" ] },
"then": 1,
"else": 0
}
}
}
}}
])
That's a safe way to do this that does not risk breaking the BSON limit, no matter what size of data you send at it.
Personally I would rather run as separate processes and "combine" the aggregated results separately, but that would depend on the environment you are running in, which is not mentioned in the question.
For an example of "parallel" execution, you can structure it in Meteor somewhere along these lines:
import { Meteor } from 'meteor/meteor';
import { Source } from '../imports/source';
import { Target } from '../imports/target';
Meteor.startup(async () => {
// code to run on server at startup
await Source.remove({});
await Target.remove({});
console.log('Removed');
Source.insert({
"_id" : "XBpNKbdGSgGfnC2MJ",
"po" : 72134185,
"machine" : 40940,
"location" : "02A01",
"inDate" : new Date("2017-07-19T06:10:13.059Z"),
"requestDate" : new Date("2017-07-19T06:17:04.901Z"),
"outDate" : new Date("2017-07-19T06:30:34Z")
});
console.log('Inserted');
await Promise.all(
["In","Out"].map( f => new Promise((resolve,reject) => {
let cursor = Source.rawCollection().aggregate([
{ "$match": { [`${f.toLowerCase()}Date`]: { "$exists": true } } },
{ "$group": {
"_id": {
"year": { "$year": `$${f.toLowerCase()}Date` },
"month": { "$month": `$${f.toLowerCase()}Date` },
"day": { "$dayOfYear": `$${f.toLowerCase()}Date` }
},
[`count${f}`]: { "$sum": 1 }
}}
]);
cursor.on('data', async (data) => {
cursor.pause();
data.date = data._id;
delete data._id;
await Target.upsert(
{ date: data.date },
{ "$set": data }
);
cursor.resume();
});
cursor.on('end', () => resolve('done'));
cursor.on('error', (err) => reject(err));
}))
);
console.log('Mapped');
let targets = await Target.find().fetch();
console.log(targets);
});
Which is essentially going to output to the target collection, as was mentioned in the comments, like:
{
"_id" : "XdPGMkY24AcvTnKq7",
"date" : {
"year" : 2017,
"month" : 7,
"day" : 200
},
"countIn" : 1,
"countOut" : 1
}
Riiiight. I came up with the following query. Admittedly, I have seen simpler and nicer ones in my life but it certainly gets the job done:
db.getCollection('test').aggregate
(
{
$facet: // split aggregation into two pipelines
{
"in": [
{ "$match": { "inDate": { "$ne": null } } }, // get rid of null values
{ $group: { "_id": { "y": { "$year": "$inDate" }, "m": { "$month": "$inDate" }, "d": { "$dayOfMonth": "$inDate" } }, "cIn": { $sum : 1 } } }, // compute sum per inDate
],
"out": [
{ "$match": { "outDate": { "$ne": null } } }, // get rid of null values
{ $group: { "_id": { "y": { "$year": "$outDate" }, "m": { "$month": "$outDate" }, "d": { "$dayOfMonth": "$outDate" } }, "cOut": { $sum : 1 } } }, // compute sum per outDate
]
}
},
{ $project: { "result": { $setUnion: [ "$in", "$out" ] } } }, // merge results into new array
{ $unwind: "$result" }, // unwind array into individual documents
{ $replaceRoot: { newRoot: "$result" } }, // get rid of the additional field level
{ $group: { _id: { year: "$_id.y", "month": "$_id.m", "day": "$_id.d" }, "countIn": { $sum: "$cIn" }, "countOut": { $sum: "$cOut" } } } // group into final result
)
As always with MongoDB aggregations you can get an idea of what's going on by simply reducing the projection stages step by step starting from the end of the query.
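A sketch of that debugging technique: build the pipeline as an array and run progressively longer prefixes of it to see what each stage emits. The two stages here are stand-ins, not the full query above:
var pipeline = [
    { "$match": { "inDate": { "$ne": null } } },
    { "$group": { "_id": null, "count": { "$sum": 1 } } }
];
for (var n = 1; n <= pipeline.length; n++) {
    print("after stage " + n + ":");
    db.getCollection('test').aggregate(pipeline.slice(0, n)).forEach(printjson);
}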
EDIT:
As you can see in the comments below there was a bit of a discussion around document size limits and the general applicability of this solution.
So let's look at those aspects in greater detail, and let's also compare the performance of the $facet-based solution to the one based on $map (suggested by @NeilLunn to avoid potential document size issues).
I created 2 million test records that have random dates assigned to both the "inDate" and the "outDate" field:
{
"_id" : ObjectId("597857e0fa37b3f66959571a"),
"inDate" : ISODate("2016-07-29T22:00:00.000Z"),
"outDate" : ISODate("1988-07-14T22:00:00.000Z")
}
The date range covered ran from 01.01.1970 all the way to 01.01.2050; that's a total of 29220 distinct days. Given the random distribution of the 2 million test records across this time range, both queries can be expected to return the full 29220 possible results (which both did).
Then I ran both queries five times after restarting my single MongoDB instance freshly and the results in milliseconds I got looked like this:
$facet: 5663, 5400, 5380, 5460, 5520
$map: 9648, 9134, 9058, 9085, 9132
I also measured the size of the single document returned by the facet stage which was 3.19MB so reasonably far away from the MongoDB document size limit (16MB at the time of writing) which, however, only applies to the result document anyway and wouldn't be a problem during pipeline processing.
Bottom line: If you want performance, use the solution suggested here. Be careful about the document size limit, though, in particular if your use case is not the exact one described in the question above (e.g. when you need to collect even more/bigger data). Also, I am not sure if in a sharded scenario both solutions still expose the same performance characteristics...

Group and count over a start and end range

If I have data in the following format:
[
{
_id: 1,
startDate: ISODate("2017-01-1T00:00:00.000Z"),
endDate: ISODate("2017-02-25T00:00:00.000Z"),
type: 'CAR'
},
{
_id: 2,
startDate: ISODate("2017-02-17T00:00:00.000Z"),
endDate: ISODate("2017-03-22T00:00:00.000Z"),
type: 'HGV'
}
]
Is it possible to retrieve data grouped by 'type', but also with a count of the type for each month in a given date range, e.g. between 2017/1/1 and 2017/4/1, which would return:
[
{
_id: 'CAR',
monthCounts: [
/*January*/
{
from: ISODate("2017-01-1T00:00:00.000Z"),
to: ISODate("2017-01-31T23:59:59.999Z"),
count: 1
},
/*February*/
{
from: ISODate("2017-02-1T00:00:00.000Z"),
to: ISODate("2017-02-28T23:59:59.999Z"),
count: 1
},
/*March*/
{
from: ISODate("2017-03-1T00:00:00.000Z"),
to: ISODate("2017-03-31T23:59:59.999Z"),
count: 0
},
]
},
{
_id: 'HGV',
monthCounts: [
{
from: ISODate("2017-01-1T00:00:00.000Z"),
to: ISODate("2017-01-31T23:59:59.999Z"),
count: 0
},
{
from: ISODate("2017-02-1T00:00:00.000Z"),
to: ISODate("2017-02-28T23:59:59.999Z"),
count: 1
},
{
from: ISODate("2017-03-1T00:00:00.000Z"),
to: ISODate("2017-03-31T23:59:59.999Z"),
count: 1
},
]
}
]
The returned format is not really important, but what I am trying to achieve is in a single query to retrieve a number of counts for the same grouping (one per month). The input could be simply a start and end date to report from or more likely it could be an array of the date ranges to group by.
The algorithm for this is to basically "iterate" values across the interval between the two dates. MongoDB has a couple of ways to deal with this: what has always been available with mapReduce(), and newer features available to the aggregate() method.
I'm going to expand on your selection to deliberately show an overlapping month, since your examples did not have one. This will result in the "HGV" values appearing in "three" months of output.
{
"_id" : 1,
"startDate" : ISODate("2017-01-01T00:00:00Z"),
"endDate" : ISODate("2017-02-25T00:00:00Z"),
"type" : "CAR"
}
{
"_id" : 2,
"startDate" : ISODate("2017-02-17T00:00:00Z"),
"endDate" : ISODate("2017-03-22T00:00:00Z"),
"type" : "HGV"
}
{
"_id" : 3,
"startDate" : ISODate("2017-02-17T00:00:00Z"),
"endDate" : ISODate("2017-04-22T00:00:00Z"),
"type" : "HGV"
}
Aggregate - Requires MongoDB 3.4
db.cars.aggregate([
{ "$addFields": {
"range": {
"$reduce": {
"input": { "$map": {
"input": { "$range": [
{ "$trunc": {
"$divide": [
{ "$subtract": [ "$startDate", new Date(0) ] },
1000
]
}},
{ "$trunc": {
"$divide": [
{ "$subtract": [ "$endDate", new Date(0) ] },
1000
]
}},
60 * 60 * 24
]},
"as": "el",
"in": {
"$let": {
"vars": {
"date": {
"$add": [
{ "$multiply": [ "$$el", 1000 ] },
new Date(0)
]
}
},
"in": {
"$add": [
{ "$multiply": [ { "$year": "$$date" }, 100 ] },
{ "$month": "$$date" }
]
}
}
}
}},
"initialValue": [],
"in": {
"$cond": {
"if": { "$in": [ "$$this", "$$value" ] },
"then": "$$value",
"else": { "$concatArrays": [ "$$value", ["$$this"] ] }
}
}
}
}
}},
{ "$unwind": "$range" },
{ "$group": {
"_id": {
"type": "$type",
"month": "$range"
},
"count": { "$sum": 1 }
}},
{ "$sort": { "_id": 1 } },
{ "$group": {
"_id": "$_id.type",
"monthCounts": {
"$push": { "month": "$_id.month", "count": "$count" }
}
}}
])
The key to making this work is the $range operator, which takes values for a "start" and an "end" as well as an "interval" to apply. The result is an array of values from the "start", incremented until the "end" is reached.
We use this with startDate and endDate to generate the possible dates in between those values. You will note that we need to do some math here, since $range only takes a 32-bit integer, but we can divide the millisecond timestamps down to seconds so the values fit.
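As a minimal illustration of $range on its own (literal values, nothing from the documents): the start is inclusive, the end is exclusive, and the third argument is the step:
db.cars.aggregate([
    { "$limit": 1 },
    { "$project": { "_id": 0, "demo": { "$range": [ 0, 10, 2 ] } } }
])
// -> { "demo" : [ 0, 2, 4, 6, 8 ] }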
Because we want "months", the operations applied extract the month and year values from the generated range. We actually generate the range as the "days" in between since "months" are difficult to deal with in math. The subsequent $reduce operation takes only the "distinct months" from the date range.
The result therefore of the first aggregation pipeline stage is a new field in the document which is an "array" of all the distinct months covered between startDate and endDate. This gives an "iterator" for the rest of the operation.
By "iterator" I mean than when we apply $unwind we get a copy of the original document for every distinct month covered in the interval. This then allows the following two $group stages to first apply a grouping to the common key of "month" and "type" in order to "total" the counts via $sum, and next $group makes the key just the "type" and puts the results in an array via $push.
This gives the result on the above data:
{
"_id" : "HGV",
"monthCounts" : [
{
"month" : 201702,
"count" : 2
},
{
"month" : 201703,
"count" : 2
},
{
"month" : 201704,
"count" : 1
}
]
}
{
"_id" : "CAR",
"monthCounts" : [
{
"month" : 201701,
"count" : 1
},
{
"month" : 201702,
"count" : 1
}
]
}
Note that the coverage of "months" is only present where there is actual data. Whilst possible to produce zero values over a range, it requires quite a bit of wrangling to do so and is not very practical. If you want zero values then it is better to add that in post processing in the client once the results have been retrieved.
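A sketch of that client-side zero-fill, shown on a literal result document for brevity; the months list is whatever full coverage you want:
var months = [ 201701, 201702, 201703, 201704 ];
var doc = { "_id": "CAR", "monthCounts": [
    { "month": 201701, "count": 1 }, { "month": 201702, "count": 1 }
]};
var have = {};
doc.monthCounts.forEach(function (mc) { have[mc.month] = true; });
months.forEach(function (m) {
    if (!have[m]) doc.monthCounts.push({ "month": m, "count": 0 });
});
doc.monthCounts.sort(function (a, b) { return a.month - b.month; });
printjson(doc);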
If you really have your heart set on the zero values, then you should separately query for $min and $max values, and pass these in to "brute force" the pipeline into generating the copies for each supplied possible range value.
So this time the "range" is built externally to all documents, and you then use a $cond statement in the accumulator to see if the current data falls within the grouped range produced. Also, since the generation is "external", we really don't need the MongoDB 3.4 operator $range, so this can be applied to earlier versions as well:
// Get min and max separately
var ranges = db.cars.aggregate(
{ "$group": {
"_id": null,
"startRange": { "$min": "$startDate" },
"endRange": { "$max": "$endDate" }
}}
).toArray()[0]
// Make the range array externally from all possible values
var range = [];
for ( var d = new Date(ranges.startRange.valueOf()); d <= ranges.endRange; d.setUTCMonth(d.getUTCMonth()+1)) {
var v = ( d.getUTCFullYear() * 100 ) + d.getUTCMonth()+1;
range.push(v);
}
// Run conditional aggregation
db.cars.aggregate([
{ "$addFields": { "range": range } },
{ "$unwind": "$range" },
{ "$group": {
"_id": {
"type": "$type",
"month": "$range"
},
"count": {
"$sum": {
"$cond": {
"if": {
"$and": [
{ "$gte": [
"$range",
{ "$add": [
{ "$multiply": [ { "$year": "$startDate" }, 100 ] },
{ "$month": "$startDate" }
]}
]},
{ "$lte": [
"$range",
{ "$add": [
{ "$multiply": [ { "$year": "$endDate" }, 100 ] },
{ "$month": "$endDate" }
]}
]}
]
},
"then": 1,
"else": 0
}
}
}
}},
{ "$sort": { "_id": 1 } },
{ "$group": {
"_id": "$_id.type",
"monthCounts": {
"$push": { "month": "$_id.month", "count": "$count" }
}
}}
])
Which produces the consistent zero fills for all possible months on all groupings:
{
"_id" : "HGV",
"monthCounts" : [
{
"month" : 201701,
"count" : 0
},
{
"month" : 201702,
"count" : 2
},
{
"month" : 201703,
"count" : 2
},
{
"month" : 201704,
"count" : 1
}
]
}
{
"_id" : "CAR",
"monthCounts" : [
{
"month" : 201701,
"count" : 1
},
{
"month" : 201702,
"count" : 1
},
{
"month" : 201703,
"count" : 0
},
{
"month" : 201704,
"count" : 0
}
]
}
MapReduce
All versions of MongoDB support mapReduce, and the simple case of the "iterator" as mentioned above is handled by a for loop in the mapper. We can get output equivalent to everything up to the first $group from above by simply doing:
db.cars.mapReduce(
function () {
for ( var d = this.startDate; d <= this.endDate;
d.setUTCMonth(d.getUTCMonth()+1) )
{
var m = new Date(0);
m.setUTCFullYear(d.getUTCFullYear());
m.setUTCMonth(d.getUTCMonth());
emit({ id: this.type, date: m},1);
}
},
function(key,values) {
return Array.sum(values);
},
{ "out": { "inline": 1 } }
)
Which produces:
{
"_id" : {
"id" : "CAR",
"date" : ISODate("2017-01-01T00:00:00Z")
},
"value" : 1
},
{
"_id" : {
"id" : "CAR",
"date" : ISODate("2017-02-01T00:00:00Z")
},
"value" : 1
},
{
"_id" : {
"id" : "HGV",
"date" : ISODate("2017-02-01T00:00:00Z")
},
"value" : 2
},
{
"_id" : {
"id" : "HGV",
"date" : ISODate("2017-03-01T00:00:00Z")
},
"value" : 2
},
{
"_id" : {
"id" : "HGV",
"date" : ISODate("2017-04-01T00:00:00Z")
},
"value" : 1
}
So it does not have the second grouping to compound the results into arrays, but we did produce the same basic aggregated output.
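If you do want the array-per-type shape from mapReduce as well, a small client-side reshape of the inline results does it. A sketch re-using the same map and reduce logic (the loop copies the date first so the source value is not mutated):
var res = db.cars.mapReduce(
    function () {
        for ( var d = new Date(this.startDate.valueOf()); d <= this.endDate;
            d.setUTCMonth(d.getUTCMonth()+1) )
        {
            var m = new Date(0);
            m.setUTCFullYear(d.getUTCFullYear());
            m.setUTCMonth(d.getUTCMonth());
            emit({ id: this.type, date: m }, 1);
        }
    },
    function (key, values) {
        return Array.sum(values);
    },
    { "out": { "inline": 1 } }
);
var grouped = {};
res.results.forEach(function (r) {
    (grouped[r._id.id] = grouped[r._id.id] || []).push(
        { "month": r._id.date, "count": r.value }
    );
});
printjson(grouped);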

MongoDB Aggregation pipeline problems

I'm new to mongoDB and am having difficulty getting my head around aggregation pipelines.
I have created a database that holds information regarding my stock trading. In a cut-down version, one document from my portfolio collection looks a bit like this:
{
    "date" : ISODate("2015-12-31T15:50:00.000Z"),
    "time" : 1550,
    "aum" : 1000000,
    "basket" : [
        {
            "_id" : "Microsoft",
            "shares" : 10,
            "price" : 56.53,
            "fx" : 1.0
        },
        ...
        {
            "_id" : "GOOG.N",
            "shares" : 20,
            "price" : 759.69,
            "fx" : 1.0
        }
    ]
}
So, for each day, I keep track of my assets under management (aum) and a list of all the positions I hold with the current price. What I need to do is to calculate the daily net and gross exposure for the portfolio as a percentage of aum. Net exposure is simply:
sum(shares*price*fx)/aum
over all the stocks. Gross exposure is:
abs(shares*price*fx)/aum
(a negative position means a short position). I need to do this as a single query using the aggregation framework. I have tried a number of queries but none seem to work, so clearly I'm just wandering around in the dark. Can anyone give some guidance?
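For concreteness, the net-exposure arithmetic on just the two positions shown (ignoring the elided ones) works out like this:
var aum = 1000000;
var net = ( 10 * 56.53 * 1.0 + 20 * 759.69 * 1.0 ) / aum;
print(net); // ( 565.3 + 15193.8 ) / 1000000 ≈ 0.0158, i.e. about 1.6% of AUM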
My query looks like this
db.strategy.aggregate(
// Pipeline
[
// Stage 1
{
$project: {
"_id": 0,
"date":1,
"time":1,
"aum":1,
"strategyName":1,
"gExposure": {$divide: ["$grossExposure","$aum"]}
}
},
// Stage 2
{
$group: {
_id :{ date:"$date",time:"$time",strategyName:"$strategyName"},
grossExposure: { $sum: { $abs: {$multiply: [ "$basket.sysCurShares","$basket.price","$basket.fx" ] } }}
}
},
// Stage 3
{
$sort: {
"_id.date": 1, "_id.time": 1, "_id.strategyName": 1
}
}
]
);
The query runs but the calculated value is zero. My projection isn't working as I'd expect either, as I would like all the data flattened into a two-dimensional table.
Since the basket field is an array, you need to flatten it using $unwind before running the $group aggregate operation. Also, create a new field in a $project stage that holds the per-position exposure before the $group pipeline (keeping aum in the projection so it survives to the later stages). Continuing from your previous attempt, you could try the following pipeline:
db.strategy.aggregate([
{ "$unwind": "$basket" },
{
"$project": {
"date": 1,
"time": 1,
"strategyName": 1,
"exposure": {
"$multiply": ["$basket.sysCurShares", "$basket.price", "$basket.fx"]
}
}
},
{
"$group": {
"_id": {
"date": "$date",
"time": "$time",
"strategyName": "$strategyName"
},
"totalExposure": { "$sum": "$exposure" },
"aum": { "$first": "$aum" }
}
},
{
"$project": {
"_id": 0,
"date": "$_id.date",
"time": "$_id.time",
"strategyName": "$_id.strategyName",
"netExposure": { "$divide": ["$totalExposure", "$aum"] },
"grossExposure": {
"$abs": { "$divide": ["$totalExposure", "$aum"] }
}
}
},
{ "$sort": { "date": 1, "time": 1, "strategyName": 1 } }
]);
You can do the same with MongoDB 3.4 in a single stage:
db.strategy.aggregate([
  {
    $project: {
      "date": 1,
      "time": 1,
      "strategyName": 1,
      "netExposure": {
        "$divide": [
          { "$reduce": {
            "input": "$basket",
            "initialValue": 0,
            "in": { "$add": [ { "$multiply": [ "$$this.fx", "$$this.shares", "$$this.price" ] }, "$$value" ] }
          }},
          "$aum"
        ]
      },
      "grossExposure": {
        "$abs": {
          "$divide": [
            { "$reduce": {
              "input": "$basket",
              "initialValue": 0,
              "in": { "$add": [ { "$multiply": [ "$$this.fx", "$$this.shares", "$$this.price" ] }, "$$value" ] }
            }},
            "$aum"
          ]
        }
      }
    }
  },
  { "$sort": { "date": 1, "time": 1, "strategyName": 1 } }
]);
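To see the $reduce pattern in isolation, here is a minimal sketch with a literal input array (hypothetical values): it accumulates 1*2 + 3*4 = 14:
db.strategy.aggregate([
    { "$limit": 1 },
    { "$project": {
        "_id": 0,
        "demo": {
            "$reduce": {
                "input": [ { "a": 1, "b": 2 }, { "a": 3, "b": 4 } ],
                "initialValue": 0,
                "in": { "$add": [ "$$value", { "$multiply": [ "$$this.a", "$$this.b" ] } ] }
            }
        }
    }}
])
// -> { "demo" : 14 }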

Mongo aggregation within intervals of time

I have some log data stored in a mongo collection that includes basic information as a request_id and the time it was added to the collection, for example:
{
"_id" : ObjectId("55ae6ea558a5d3fe018b4568"),
"request_id" : "030ac9f1-aa13-41d1-9ced-2966b9a6g5c3",
"time" : ISODate("2015-07-21T16:00:00.00Z")
}
I was wondering if I could use the aggregation framework to aggregate some statistical data. I would like to get the counts of the objects created within each interval of N minutes for the last X hours.
So the output which I need for 10-minute intervals for the last hour should be something like the following:
{ "_id" : 0, "time" : ISODate("2015-07-21T15:00:00.00Z"), "count" : 67 }
{ "_id" : 0, "time" : ISODate("2015-07-21T15:10:00.00Z"), "count" : 113 }
{ "_id" : 0, "time" : ISODate("2015-07-21T15:20:00.00Z"), "count" : 40 }
{ "_id" : 0, "time" : ISODate("2015-07-21T15:30:00.00Z"), "count" : 10 }
{ "_id" : 0, "time" : ISODate("2015-07-21T15:40:00.00Z"), "count" : 32 }
{ "_id" : 0, "time" : ISODate("2015-07-21T15:50:00.00Z"), "count" : 34 }
I would use that to get data for graphs.
Any advice is appreciated!
There are a couple of ways of approaching this depending on which output format best suits your needs. The main note is that with the "aggregation framework" itself, you cannot actually return something "cast" as a date, but you can get values that are easily reconstructed into a Date object when processing results in your API.
The first approach is to use the "Date Aggregation Operators" available to the aggregation framework:
db.collection.aggregate([
{ "$match": {
"time": { "$gte": startDate, "$lt": endDate }
}},
{ "$group": {
"_id": {
"year": { "$year": "$time" },
"dayOfYear": { "$dayOfYear": "$time" },
"hour": { "$hour": "$time" },
"minute": {
"$subtract": [
{ "$minute": "$time" },
{ "$mod": [ { "$minute": "$time" }, 10 ] }
]
}
},
"count": { "$sum": 1 }
}}
])
Which returns a composite key for _id containing all the values you want for a "date". Alternatively, if the range is only ever within a single "hour", just use the "minute" part and work out the actual date based on the startDate of your range selection.
Or you can just use plain "Date math" to get the milliseconds since "epoch", which can again be fed to a Date constructor directly:
db.collection.aggregate([
{ "$match": {
"time": { "$gte": startDate, "$lt": endDate }
}},
{ "$group": {
"_id": {
"$subtract": [
{ "$subtract": [ "$time", new Date(0) ] },
{ "$mod": [
{ "$subtract": [ "$time", new Date(0) ] },
1000 * 60 * 10
]}
]
},
"count": { "$sum": 1 }
}}
])
In all cases, what you do not want to do is use $project before actually applying $group. As a "pipeline stage", $project must "cycle" through all the selected documents and "transform" the content.
This takes time, and adds to the total execution of the query. You can simply apply the expressions directly in the $group, as has been shown.
Or if you are really "pure" about a Date object being returned without post-processing, then you can always use mapReduce, since the JavaScript functions allow recasting as a date. It is slower than the aggregation framework, though, and of course there is no cursor response:
db.collection.mapReduce(
function() {
var date = new Date(
this.time.valueOf()
- ( this.time.valueOf() % ( 1000 * 60 * 10 ) )
);
emit(date,1);
},
function(key,values) {
return Array.sum(values);
},
{ "out": { "inline": 1 } }
)
Your best bet is using aggregation though, as transforming the response is quite easy. Here the "Date math" form of the grouping is used, so the _id converts straight back into a Date object:
db.collection.aggregate([
{ "$match": {
"time": { "$gte": startDate, "$lt": endDate }
}},
{ "$group": {
"_id": {
"year": { "$year": "$time" },
"dayOfYear": { "$dayOfYear": "$time" },
"hour": { "$hour": "$time" },
"minute": {
"$subtract": [
{ "$minute": "$time" },
{ "$mod": [ { "$minute": "$time" }, 10 ] }
]
}
},
"count": { "$sum": 1 }
}}
]).forEach(function(doc) {
doc._id = new Date(doc._id);
printjson(doc);
})
And then you have your interval grouping output with real Date objects.
Something like this?
pipeline = [
{"$project":
{"date": {
"year": {"$year": "$time"},
"month": {"$month": "$time"},
"day": {"$dayOfMonth": "$time"},
"hour": {"$hour": "$time"},
"minute": {"$subtract": [
{"$minute": "$time"},
{"$mod": [{"$minute": "$time"}, 10]}
]}
}}
},
{"$group": {"_id": "$date", "count": {"$sum": 1}}}
]
Example:
> db.foo.insert({"time": new Date(2015, 7, 21, 22, 21)})
> db.foo.insert({"time": new Date(2015, 7, 21, 22, 23)})
> db.foo.insert({"time": new Date(2015, 7, 21, 22, 45)})
> db.foo.insert({"time": new Date(2015, 7, 21, 22, 33)})
> db.foo.aggregate(pipeline)
and output:
{ "_id" : { "year" : 2015, "month" : 8, "day" : 21, "hour" : 20, "minute" : 40 }, "count" : 1 }
{ "_id" : { "year" : 2015, "month" : 8, "day" : 21, "hour" : 20, "minute" : 20 }, "count" : 2 }
{ "_id" : { "year" : 2015, "month" : 8, "day" : 21, "hour" : 20, "minute" : 30 }, "count" : 1 }
A pointer in lieu of a concrete answer: you can very easily do it for minutes, hours and given periods using the date aggregation operators. Every 10 minutes will be a bit trickier, but likely possible with some wrangling. Nevertheless, the aggregation will be slow as nuts on large data sets.
I would suggest extracting the minutes post-insert:
{
"_id" : ObjectId("55ae6ea558a5d3fe018b4568"),
"request_id" : "030ac9f1-aa13-41d1-9ced-2966b9a6g5c3",
"time" : ISODate("2015-07-21T16:00:00.00Z"),
"minutes": 16
}
And even though it sounds utterly absurd, adding quartiles and sextiles or whatever that N might be:
{
"_id" : ObjectId("55ae6ea558a5d3fe018b4568"),
"request_id" : "030ac9f1-aa13-41d1-9ced-2966b9a6g5c3",
"time" : ISODate("2015-07-21T16:00:00.00Z"),
"minutes": 16,
"quartile: 1,
"sextile: 2,
}
First try doing a $divide on the minutes. It doesn't do ceil and floor, but check out:
Is there a floor function in Mongodb aggregation framework?
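For what it's worth, MongoDB 3.2+ does ship a $floor operator, which covers that gap. A sketch of computing the 10-minute bucket directly from the stored time:
db.collection.aggregate([
    { "$project": {
        "bucket": {
            "$multiply": [
                { "$floor": { "$divide": [ { "$minute": "$time" }, 10 ] } },
                10
            ]
        }
    }}
])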