MongoDB: print the count of unique values from multiple fields

I have the following documents in a collection (let's call it myCollection):
{
"_id": {
"$oid": "601a75a0c9a338f09f238816"
},
"Sample": "lie50",
"Chromosome": "chr10",
"Position": {
"$numberLong": "47663"
},
"Reference": "C",
"Mutation": "T",
"Run": "Run_test",
"SYMBOL": "TUBB8"
},
{
"_id": {
"$oid": "601a75a0c9a338f09f238817"
},
"Sample": "lie50",
"Chromosome": "chr10",
"Position": {
"$numberLong": "47876"
},
"Reference": "T",
"Mutation": "C",
"Run": "Run_test",
"SYMBOL": "TUBB8"
},
{
"_id": {
"$oid": "601a75a0c9a338f09f238818"
},
"Sample": "lie50",
"Chromosome": "chr10",
"Position": {
"$numberLong": "48005"
},
"Reference": "G",
"Mutation": "A",
"Run": "Run_test",
"SYMBOL": "TUBB8"
},
{
"_id": {
"$oid": "601a75a0c9a338f09f238819"
},
"Sample": "lie12",
"Chromosome": "chr10",
"Position": {
"$numberLong": "48005"
},
"Reference": "G",
"Mutation": "A",
"Run": "Run_test",
"SYMBOL": "TUBB8"
}
I am interested in printing the count of distinct values across the fields Chromosome, Position, Reference, and Mutation. This means counting the unique combinations of the following entries:
"Chromosome": "chr10", "Position": 47663, "Reference": "C", "Mutation": "T"
"Chromosome": "chr10", "Position": 47876, "Reference": "T", "Mutation": "C"
"Chromosome": "chr10", "Position": 48005, "Reference": "G", "Mutation": "A"
"Chromosome": "chr10", "Position": 48005, "Reference": "G", "Mutation": "A"
which should be 3 distinct rows.
I have checked multiple questions like this one on how to print the distinct values of one field or how to use $unwind/$project.
For the latter, I thought: why not concatenate the 4 fields and then print the number of results using length with $unwind/$project?
I managed to get this far:
db.myCollection.aggregate([
  {
    $group: {
      _id: null,
      newfield: {
        $addToSet: {
          $concat: [
            "$Chromosome", "_",
            { "$toString": "$Position" }, "_",
            "$Reference", "_",
            "$Mutation"
          ]
        }
      }
    }
  },
  { $unwind: "$newfield" },
  { $project: { _id: 0 } }
]).length
However, adding .length to this query does not return anything, whereas without it the query returns:
{ "newfield" : "chr10_47663_C_T" }
{ "newfield" : "chr10_47876_T_C" }
{ "newfield" : "chr10_48005_G_A" }
For reference, my actual data contains 2 billion documents.

The fields should be placed in the _id of the $group stage, and a $count stage should then be used to get the total number of groups instead of returning all the documents (note that aggregate() returns a cursor, not an array, which is why .length returned nothing):
db.myCollection.aggregate([
{
$group: {
_id: {
Chromosome: "$Chromosome",
Position: "$Position",
Reference: "$Reference",
Mutation: "$Mutation"
}
}
},
{ $count: "count" }
])
Playground
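Since the real collection has around 2 billion documents, the $group stage may well exceed the aggregation memory limit. In that case the same pipeline can be run with allowDiskUse enabled; a sketch of the same query with just that extra option:
db.myCollection.aggregate(
  [
    {
      $group: {
        _id: {
          Chromosome: "$Chromosome",
          Position: "$Position",
          Reference: "$Reference",
          Mutation: "$Mutation"
        }
      }
    },
    { $count: "count" }
  ],
  // allow $group to spill to temporary files on disk instead of failing on the memory limit
  { allowDiskUse: true }
)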

Related

Calculate running total across different groups by day

I'm trying to aggregate a collection of transactions into a running total of holdings per owner by day.
The initial collection looks like this:
[
{ "to": "A", "from": "0", "ts": 1 },
{ "to": "A", "from": "0", "ts": 1 },
{ "to": "B", "from": "0", "ts": 1 },
{ "to": "B", "from": "0", "ts": 2 },
{ "to": "C", "from": "0", "ts": 3 },
{ "to": "A", "from": "B", "ts": 4 }
]
What I would like to get is something like this:
[
{
"ts": 1,
"holdings": [
{ "owner": "0", "holdings": -3 },
{ "owner": "A", "holdings": 2 },
{ "owner": "B", "holdings": 1 }
]
},
{
"ts": 2,
"holdings": [
{ "owner": "0", "holdings": -4 },
{ "owner": "A", "holdings": 2 },
{ "owner": "B", "holdings": 2 }
]
},
{
"ts": 4,
"holdings": [
{ "owner": "0", "holdings": -5 },
{ "owner": "A", "holdings": 3 },
{ "owner": "B", "holdings": 1 },
{ "owner": "C", "holdings": 1 }
]
}
]
I've already worked out how to generate this for a single ts that I set manually, but I don't know how to do it across all ts values.
The aggregation pipeline for a single ts looks like this:
db.collection.aggregate([
// start with: { "to": "A", "from": "0", "ts": 1 }
{
// create a doc with an array with subset of fields:
// { "_id": ObjectId("5a934e000102030405000000"),
// "data": [ { "change": 1, "owner": "A", "ts": "1" },
// { "change": -1, "owner": "0", "ts": "1" } ] }
$project: {
data: [
{
owner: '$to',
ts: '$ts',
change: 1,
},
{
owner: '$from',
ts: '$ts',
change: -1,
},
],
},
},
{
// unwind the array into 2 docs:
// { "_id": ObjectId("5a934e000102030405000000"), "data": { "change": 1, "owner": "A", "ts": "1" } },
// { "_id": ObjectId("5a934e000102030405000000"), "data": { "change": -1, "owner": "0", "ts": "1" } },
$unwind: '$data',
},
{
// use data as root:
// { "data": { "change": 1, "owner": "A", "ts": "1" } },
// { "data": { "change": -1, "owner": "0", "ts": "1" } }
$replaceRoot: {
newRoot: '$data',
},
},
{
// select day to calc totals
$match: {
ts: {
$lt: 6,
},
},
},
{
// sum totals, grouped by owner
$group: {
_id: '$owner',
//_id: null,
holdings: {
$sum: '$change',
},
},
},
])
This gives the correct result for a particular day (selected in the match stage). I don't understand how I can now generalize that to all days.
One way to do it is with $setWindowFields (available since MongoDB 5.0), which has a built-in cumulative accumulation:
db.collection.aggregate([
{
$project: {
ts: "$ts",
data: [{owner: "$to", change: 1}, {owner: "$from", change: -1}]
}
},
{$unwind: "$data"},
{
$group: {
_id: {ts: "$ts", owner: "$data.owner"},
holdings: {$sum: "$data.change"}
}
},
{
$setWindowFields: {
partitionBy: "$_id.owner",
sortBy: {"_id.ts": 1},
output: {
cumulativeHoldings: {
$sum: "$holdings",
window: {documents: ["unbounded", "current"]}
}
}
}
},
{
$group: {
_id: "$_id.ts",
holdings: {$push: {owner: "$_id.owner", holdings: "$cumulativeHoldings"}}
}
}
])
Playground
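If the result needs to match the requested shape exactly (a ts field instead of _id, ordered by day), two more stages can be appended to the pipeline above, for example:
// appended after the final $group stage
{ $sort: { _id: 1 } },                              // order the groups by ts
{ $project: { _id: 0, ts: "$_id", holdings: 1 } }   // expose _id as ts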

Group by an optional field in mongodb

I would like to independently group the results of an $or clause, including the overlap. The data set is rather large, so running two queries sequentially results in an undesirable wait time. I am hoping I can somehow project which clause returned the corresponding data. Given this data set:
[
{
"_id": 1,
"item": "abc",
"name": "Michael",
"price": NumberDecimal("10"),
"quantity": NumberInt("2"),
"date": ISODate("2014-03-01T08:00:00Z")
},
{
"_id": 2,
"item": "jkl",
"name": "Toby",
"price": NumberDecimal("20"),
"quantity": NumberInt("1"),
"date": ISODate("2014-03-01T09:00:00Z")
},
{
"_id": 3,
"item": "xyz",
"name": "Keith",
"price": NumberDecimal("5"),
"quantity": NumberInt("10"),
"date": ISODate("2014-03-15T09:00:00Z")
},
{
"_id": 4,
"item": "abc",
"name": "Dwight",
"price": NumberDecimal("5"),
"quantity": NumberInt("20"),
"date": ISODate("2014-04-04T11:21:39.736Z")
},
{
"_id": 5,
"item": "abc",
"name": "Ryan",
"price": NumberDecimal("10"),
"quantity": NumberInt("10"),
"date": ISODate("2014-04-04T21:23:13.331Z")
},
{
"_id": 6,
"item": "def",
"name": "Jim",
"price": NumberDecimal("7.5"),
"quantity": NumberInt("5"),
"date": ISODate("2015-06-04T05:08:13Z")
},
{
"_id": 7,
"item": "abc",
"name": "Keith",
"price": NumberDecimal("7.5"),
"quantity": NumberInt("10"),
"date": ISODate("2015-09-10T08:43:00Z")
},
{
"_id": 8,
"item": "abc",
"name": "Michael",
"price": NumberDecimal("10"),
"quantity": NumberInt("5"),
"date": ISODate("2016-02-06T20:20:13Z")
},
]
I would like to receive this result:
[{
"_id": {
"name": "Keith"
},
"count": 2
},
{
"_id": {
"item": "abc",
},
"count": 5
}]
Here is what I have tried so far:
db.collection.aggregate([
{
$match: {
$or: [
{
item: "abc"
},
{
name: "Keith"
}
]
}
},
{
$group: {
_id: {
item: "$item",
name: "$name"
},
count: {
$sum: 1
}
}
}
])
You can use $facet to run multiple aggregation pipelines within a single stage, in this way:
Using $facet there are two "outputs": one grouped by name and the other by item.
Each one consists of multiple stages:
First, $match to process only the documents you want.
Then, $group with _id set to the name or the item, and a count field ($sum: 1) to get the total.
db.collection.aggregate([
{
"$facet": {
"groupByName": [
{
"$match": {"name": "Keith"}
},
{
"$group": {"_id": "$name","count": {"$sum": 1}}
}
],
"groupByItem": [
{
"$match": {"item": "abc"}
},
{
"$group": {"_id": "$item","count": {"$sum": 1}}
}
]
}
}
])
Example here
The output is:
{
"groupByItem": [
{
"_id": "abc",
"count": 5
}
],
"groupByName": [
{
"_id": "Keith",
"count": 2
}
]
}
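One caveat for a large data set: the sub-pipelines of $facet cannot use indexes, so the $match stages inside the facets scan every incoming document. A common workaround, assuming indexes exist on item and name, is to put a single $or $match in front of the $facet so the initial filtering is indexed and the facet matches only re-split the already reduced set:
db.collection.aggregate([
  // this first $match can use the (assumed) indexes on item / name;
  // the $match stages inside $facet then only re-filter the reduced set
  { "$match": { "$or": [{ "item": "abc" }, { "name": "Keith" }] } },
  {
    "$facet": {
      "groupByName": [
        { "$match": { "name": "Keith" } },
        { "$group": { "_id": "$name", "count": { "$sum": 1 } } }
      ],
      "groupByItem": [
        { "$match": { "item": "abc" } },
        { "$group": { "_id": "$item", "count": { "$sum": 1 } } }
      ]
    }
  }
])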
Here is another approach that returns the two documents exactly as requested:
mongos> db.n.aggregate([
  {
    $facet: {
      names: [
        { $match: { name: "Keith" } },
        { $group: { _id: { name: "$name" }, count: { $sum: 1 } } }
      ],
      items: [
        { $match: { item: "abc" } },
        { $group: { _id: { item: "$item" }, count: { $sum: 1 } } }
      ]
    }
  },
  { $project: { "namesANDitems": { $concatArrays: ["$names", "$items"] } } },
  { $unwind: "$namesANDitems" },
  { $replaceRoot: { newRoot: "$namesANDitems" } }
]).pretty()
{ "_id" : { "name" : "Keith" }, "count" : 2 }
{ "_id" : { "item" : "abc" }, "count" : 5 }
mongos>
Explained:
You create two pipelines via $facet.
In each facet pipeline, $match what you need to group: pipe1 = names, pipe2 = items.
Join the arrays from the two pipelines into a single array named "namesANDitems".
Unwind the combined array so each element becomes its own document ($unwind).
Use $replaceRoot to remove the temporary namesANDitems wrapper so you have only the two objects, as requested.

Combining unique elements of arrays without $unwind

I would like to get the unique elements of all arrays in a collection. Consider the following collection:
[
{
"collection": "collection",
"myArray": [
{
"name": "ABC",
"code": "AB"
},
{
"name": "DEF",
"code": "DE"
}
]
},
{
"collection": "collection",
"myArray": [
{
"name": "GHI",
"code": "GH"
},
{
"name": "DEF",
"code": "DE"
}
]
}
]
I can achieve this by using $unwind and $group like this:
db.collection.aggregate([
{
$unwind: "$myArray"
},
{
$group: {
_id: null,
data: {
$addToSet: "$myArray"
}
}
}
])
And get the output:
[
{
"_id": null,
"data": [
{
"code": "GH",
"name": "GHI"
},
{
"code": "DE",
"name": "DEF"
},
{
"code": "AB",
"name": "ABC"
}
]
}
]
However, the array "myArray" will have a lot of elements (about 6), and the number of documents passed into this stage of the pipeline will be about 600, so unwinding the array would give a total of 3600 documents being processed. I would like to know if there's a way to achieve the same result without unwinding.
You can use the aggregation below:
db.collection.aggregate([
{ "$group": {
"_id": null,
"data": { "$push": "$myArray" }
}},
{ "$project": {
"data": {
"$reduce": {
"input": "$data",
"initialValue": [],
"in": { "$setUnion": ["$$this", "$$value"] }
}
}
}}
])
Output
[
{
"_id": null,
"data": [
{
"code": "AB",
"name": "ABC"
},
{
"code": "DE",
"name": "DEF"
},
{
"code": "GH",
"name": "GHI"
}
]
}
]

Group by in MongoDB and get all matching documents with latest date of each group

Below is the data that I have in my DB:
{ "School": "A",
"Class": "A",
"Student": "XXX",
"Date": ISODate("2018-01-31T00:00:00.000Z")
},
{ "School": "A",
"Class": "B",
"Student": "YYY",
"Date": ISODate("2018-01-10T00:00:00.000Z")
},
{ "School": "A",
"Class": "C",
"Student": "ZZZ",
"Date": ISODate("2018-01-31T00:00:00.000Z")
},
{ "School": "B",
"Class": "A",
"Student": "ABC",
"Date": ISODate("2019-01-31T00:00:00.000Z")
},
{ "School": "B",
"Class": "B",
"Student": "DEF",
"Date": ISODate("2019-01-31T00:00:00.000Z")
},
{ "School": "B",
"Class": "C",
"Student": "GHI",
"Date": ISODate("2019-02-03T00:00:00.000Z")
}
My objective is to get all the documents in each 'School' group that have the latest 'Date' within that group, not the latest 'Date' of the whole DB.
Expected Result:
{ "School": "A",
"Class": "A",
"Student": "XXX",
"Date": ISODate("2018-01-31T00:00:00.000Z")
},
{ "School": "A",
"Class": "C",
"Student": "ZZZ",
"Date": ISODate("2018-01-31T00:00:00.000Z")
},
{ "School": "B",
"Class": "C",
"Student": "GHI",
"Date": ISODate("2019-02-03T00:00:00.000Z")
}
I have tried using:
db.myDB.aggregate([
{ "$sort": {
"School":1,
"Date":1
}
},
{ "$group": {"_id": {School:"$School"},
"fullDocument": {
$push:
{School: "$School", Class: "$Class", Date: "$Date"}
},
"LatestDate": {"$max": "$Date"}
}
}
])
What I get in 'fullDocument' still includes all the documents, not just the ones with the latest 'Date'.
First, you need to group on the basis of School and push the entire document for each school into an array by using $$ROOT.
Then you just need to keep the documents whose Date is the max Date in that array, using $filter:
db.collection.aggregate([
{
$group: {
_id: "$School",
data: {
"$push": "$$ROOT"
}
}
},
{
$project: {
data: {
$filter: {
input: "$data",
as: "item",
cond: {
$eq: [
"$$item.Date",
{
$max: "$data.Date"
}
]
}
}
}
}
}
])
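This returns one document per school with the matching documents inside a data array. If the flat list from the expected result is wanted instead, the already-filtered array can be unwound cheaply and promoted, for example:
// appended after the $project stage above
{ $unwind: "$data" },
{ $replaceRoot: { newRoot: "$data" } }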

MongoDB projection. Operator $add field|expression array awareness or after $slice

I've got a collection that looks like this:
[{
"org": "A",
"type": "simple",
"payFor": 3,
"price": 100
},
{
"org": "A",
"type": "custom",
"payFor": 2,
"price": 115
},
{
"org": "B",
"type": "simple",
"payFor": 1,
"price": 110
},
{
"org": "B",
"type": "custom",
"payFor": 2,
"price": 200
},
{
"org": "B",
"type": "custom",
"payFor": 4,
"price": 220
}]
I need to produce, with a query, a result grouped by "org" where the payment for each "type" covers only the first "payFor" prices.
I'm trying to use the result of the $slice operator inside $add, but this does not work.
Pipeline:
[{
"$group": {
"_id": {
"org": "$org",
"type": "$type"
},
"payFor": {
"$max": "$payFor"
},
"count": {
"$sum": 1
},
"prices": {
"$push": "$price"
}
}
},
{
"$group": {
"_id": "$_id.org",
"payments": {
"$push": {
"type": "$_id.type",
"forFirst": "$payFor",
"sum": {
"$cond": [
{
"$gte": [
"$payFor",
"$count"
]
},
{
"$add": {
"$prices": {
"$slice": "$count"
}
}
},
{
"$add": "$prices"
}
]
}
}
}
}
}]
I know that it is possible to unwind the prices and pick only the first "payFor" of them, but the real collections are much richer than the example above and that operation would introduce unnecessary overhead.
I need some advice from the community, please. Thanks.
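For what it's worth, a minimal sketch of the non-$unwind direction the question is reaching for: inside an aggregation expression, the first payFor prices can be taken with $slice and summed with $sum (rather than $add), along these lines:
db.collection.aggregate([
  {
    "$group": {
      "_id": { "org": "$org", "type": "$type" },
      "payFor": { "$max": "$payFor" },
      "prices": { "$push": "$price" }
    }
  },
  {
    "$group": {
      "_id": "$_id.org",
      "payments": {
        "$push": {
          "type": "$_id.type",
          "forFirst": "$payFor",
          // $slice takes the first payFor prices; if payFor exceeds the array
          // length it returns the whole array, so no $cond is needed
          "sum": { "$sum": { "$slice": ["$prices", "$payFor"] } }
        }
      }
    }
  }
])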