I have a MongoDB collection for weather data where each document consists of about 50 different weather parameter fields. Simple example below:
{
"wind":7,
"swell":6,
"temp":32,
...
"50th_field":32
}
If I only need one field from all documents, say temp, my query would be this:
db.weather.find({},{ temp: 1})
So internally, does MongoDB have to fetch the entire document for just the 1 field that was requested (projected)? Wouldn't that be an expensive operation?
I tried benchmarking timings with MongoDB Compass, but the time required was <1 ms, so I couldn't tell.
MongoDB will read the whole document from storage; however, only the temp field (and _id) will be transmitted over the network to the client. If your documents are rather big, overall performance should be better when you project only the fields you need.
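One way to verify this yourself (a minimal sketch using the weather collection from the question) is to check totalDocsExamined in the explain output:
db.weather.explain('executionStats').find({}, { temp: 1 })
// With an empty filter the winning plan is a COLLSCAN: every document
// is read from storage and the projection is applied afterwards, so
// totalDocsExamined should equal the number of documents in the
// collection, even though only temp and _id are returned.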
Yes. Here is how to avoid it:
Create an index on temp.
Filter on temp in the query (e.g. { temp: { $ne: null } }).
Turn off _id in the projection (necessary, because _id is not in the index).
Run:
db.coll.find({ temp: { $ne: null } }, { temp: 1, _id: 0 })
An empty filter {} triggers a COLLSCAN, because the planner tries to match the query fields against the index.
With { temp: { $ne: null } } as the filter and { temp: 1, _id: 0 } as the projection, the planner can tell: "Oh, I only need temp", so the index alone can answer the query.
The planner should arguably be smart enough to see that {} with the projection { temp: 1, _id: 0 } also only needs the index, but it isn't.
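A quick sketch of the contrast (same hypothetical weather collection, with an index on temp):
db.weather.createIndex({ temp: 1 })

// Covered: filter and projection are both answerable from the index,
// so the plan is a PROJECTION stage over an IXSCAN, with no FETCH.
db.weather.explain().find({ temp: { $ne: null } }, { temp: 1, _id: 0 })

// Not covered: with an empty filter the planner falls back to a
// collection scan despite the identical projection.
db.weather.explain().find({}, { temp: 1, _id: 0 })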
Basically, using a projection to limit fields is always faster than fetching the full document. You can even use a covered query to avoid examining the documents at all (no disk I/O for documents) and achieve better performance.
Check the executionStats of the demo below: totalDocsExamined was 0! But you must remove the _id field in the projection, because it's not included in the index.
See also:
https://docs.mongodb.com/manual/core/query-optimization/#covered-query
> db.test.insertOne({name: 'TJT'})
{
"acknowledged" : true,
"insertedId" : ObjectId("5faa0c8469dffee69357dde3")
}
> db.test.createIndex({name: 1})
{
"createdCollectionAutomatically" : false,
"numIndexesBefore" : 1,
"numIndexesAfter" : 2,
"ok" : 1
}
> db.test.explain('executionStats').find({name: 'TJT'}, {_id: 0, name: 1})
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "memo.test",
"indexFilterSet" : false,
"parsedQuery" : {
"name" : {
"$eq" : "TJT"
}
},
"winningPlan" : {
"stage" : "PROJECTION",
"transformBy" : {
"_id" : 0,
"name" : 1
},
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"name" : 1
},
"indexName" : "name_1",
"isMultiKey" : false,
"multiKeyPaths" : {
"name" : [ ]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"name" : [
"[\"TJT\", \"TJT\"]"
]
}
}
},
"rejectedPlans" : [ ]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 1,
"executionTimeMillis" : 0,
"totalKeysExamined" : 1,
"totalDocsExamined" : 0,
"executionStages" : {
"stage" : "PROJECTION",
"nReturned" : 1,
"executionTimeMillisEstimate" : 0,
"works" : 2,
"advanced" : 1,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 1,
"invalidates" : 0,
"transformBy" : {
"_id" : 0,
"name" : 1
},
"inputStage" : {
"stage" : "IXSCAN",
"nReturned" : 1,
"executionTimeMillisEstimate" : 0,
"works" : 2,
"advanced" : 1,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 1,
"invalidates" : 0,
"keyPattern" : {
"name" : 1
},
"indexName" : "name_1",
"isMultiKey" : false,
"multiKeyPaths" : {
"name" : [ ]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"name" : [
"[\"TJT\", \"TJT\"]"
]
},
"keysExamined" : 1,
"seeks" : 1,
"dupsTested" : 0,
"dupsDropped" : 0,
"seenInvalidated" : 0
}
}
}
}
Related
I have MongoDBs in two environments.
The differences between them are:
test MongoDB version: 3.2.20; prod MongoDB version: 4.0.18
In the test env the first stage of the query plan is LIMIT, while in prod it is SORT.
In my test env the query is very quick and totalDocsExamined == limit.
They both hit the index:
{
"v" : 1,
"key" : {
"appIds" : 1,
"ctime" : -1,
"background" : 1
},
"name" : "appIds_1_ctime_-1_background_1",
"ns" : "newsmine.newstoapp"
}
query: db.newstoapp.find({"appIds":{"$in":[999]}}).sort({"ctime":-1}).limit(10).explain('executionStats')
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "newsmine.newstoapp",
"indexFilterSet" : false,
"parsedQuery" : {
"appIds" : {
"$in" : [
999
]
}
},
"winningPlan" : {
"stage" : "LIMIT",
"limitAmount" : 10,
"inputStage" : {
"stage" : "FETCH",
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"appIds" : 1,
"ctime" : -1,
"background" : 1
},
"indexName" : "appIds_1_ctime_-1_background_1",
"isMultiKey" : true,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1,
"direction" : "forward",
"indexBounds" : {
"appIds" : [
"[999.0, 999.0]"
],
"ctime" : [
"[MaxKey, MinKey]"
],
"background" : [
"[MinKey, MaxKey]"
]
}
}
}
},
"rejectedPlans" : [ ]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 10,
"executionTimeMillis" : 0,
"totalKeysExamined" : 10,
"totalDocsExamined" : 10,
"executionStages" : {
"stage" : "LIMIT",
"nReturned" : 10,
"executionTimeMillisEstimate" : 0,
"works" : 11,
"advanced" : 10,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 1,
"invalidates" : 0,
"limitAmount" : 10,
"inputStage" : {
"stage" : "FETCH",
"nReturned" : 10,
"executionTimeMillisEstimate" : 0,
"works" : 10,
"advanced" : 10,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 0,
"invalidates" : 0,
"docsExamined" : 10,
"alreadyHasObj" : 0,
"inputStage" : {
"stage" : "IXSCAN",
"nReturned" : 10,
"executionTimeMillisEstimate" : 0,
"works" : 10,
"advanced" : 10,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 0,
"invalidates" : 0,
"keyPattern" : {
"appIds" : 1,
"ctime" : -1,
"background" : 1
},
"indexName" : "appIds_1_ctime_-1_background_1",
"isMultiKey" : true,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1,
"direction" : "forward",
"indexBounds" : {
"appIds" : [
"[999.0, 999.0]"
],
"ctime" : [
"[MaxKey, MinKey]"
],
"background" : [
"[MinKey, MaxKey]"
]
},
"keysExamined" : 10,
"dupsTested" : 10,
"dupsDropped" : 0,
"seenInvalidated" : 0
}
}
}
},
"serverInfo" : {
"host" : "",
"port" : ,
"version" : "3.2.20",
"gitVersion" : "a7a144f40b70bfe290906eb33ff2714933544af8"
},
"ok" : 1
}
In my prod env, the same query is slow:
query: datamongo:PRIMARY> db.newstoapp.find({"appIds":{"$in":[1460]}}).sort({"ctime":-1}).limit(10).explain('executionStats')
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "newsmine.newstoapp",
"indexFilterSet" : false,
"parsedQuery" : {
"appIds" : {
"$eq" : 1460
}
},
"winningPlan" : {
"stage" : "SORT",
"sortPattern" : {
"ctime" : -1
},
"limitAmount" : 10,
"inputStage" : {
"stage" : "SORT_KEY_GENERATOR",
"inputStage" : {
"stage" : "FETCH",
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"appIds" : 1,
"ctime" : -1,
"background" : 1
},
"indexName" : "appIds_1_ctime_-1_background_1",
"isMultiKey" : true,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1,
"direction" : "forward",
"indexBounds" : {
"appIds" : [
"[1460.0, 1460.0]"
],
"ctime" : [
"[MaxKey, MinKey]"
],
"background" : [
"[MinKey, MaxKey]"
]
}
}
}
}
},
"rejectedPlans" : [
{
"stage" : "SORT",
"sortPattern" : {
"ctime" : -1
},
"limitAmount" : 10,
"inputStage" : {
"stage" : "SORT_KEY_GENERATOR",
"inputStage" : {
"stage" : "FETCH",
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"appIds" : 1
},
"indexName" : "appIds_1",
"isMultiKey" : true,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1,
"direction" : "forward",
"indexBounds" : {
"appIds" : [
"[1460.0, 1460.0]"
]
}
}
}
}
},
{
"stage" : "LIMIT",
"limitAmount" : 10,
"inputStage" : {
"stage" : "FETCH",
"filter" : {
"appIds" : {
"$eq" : 1460
}
},
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"ctime" : 1
},
"indexName" : "ctime_1",
"isMultiKey" : false,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1,
"direction" : "backward",
"indexBounds" : {
"ctime" : [
"[MaxKey, MinKey]"
]
}
}
}
}
]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 10,
"executionTimeMillis" : 40,
"totalKeysExamined" : 405,
"totalDocsExamined" : 405,
"executionStages" : {
"stage" : "SORT",
"nReturned" : 10,
"executionTimeMillisEstimate" : 3,
"works" : 418,
"advanced" : 10,
"needTime" : 407,
"needYield" : 0,
"saveState" : 9,
"restoreState" : 9,
"isEOF" : 1,
"invalidates" : 0,
"sortPattern" : {
"ctime" : -1
},
"memUsage" : 8471,
"memLimit" : 33554432,
"limitAmount" : 10,
"inputStage" : {
"stage" : "SORT_KEY_GENERATOR",
"nReturned" : 405,
"executionTimeMillisEstimate" : 3,
"works" : 407,
"advanced" : 405,
"needTime" : 1,
"needYield" : 0,
"saveState" : 9,
"restoreState" : 9,
"isEOF" : 1,
"invalidates" : 0,
"inputStage" : {
"stage" : "FETCH",
"nReturned" : 405,
"executionTimeMillisEstimate" : 3,
"works" : 406,
"advanced" : 405,
"needTime" : 0,
"needYield" : 0,
"saveState" : 9,
"restoreState" : 9,
"isEOF" : 1,
"invalidates" : 0,
"docsExamined" : 405,
"alreadyHasObj" : 0,
"inputStage" : {
"stage" : "IXSCAN",
"nReturned" : 405,
"executionTimeMillisEstimate" : 1,
"works" : 406,
"advanced" : 405,
"needTime" : 0,
"needYield" : 0,
"saveState" : 9,
"restoreState" : 9,
"isEOF" : 1,
"invalidates" : 0,
"keyPattern" : {
"appIds" : 1,
"ctime" : -1,
"background" : 1
},
"indexName" : "appIds_1_ctime_-1_background_1",
"isMultiKey" : true,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1,
"direction" : "forward",
"indexBounds" : {
"appIds" : [
"[1460.0, 1460.0]"
],
"ctime" : [
"[MaxKey, MinKey]"
],
"background" : [
"[MinKey, MaxKey]"
]
},
"keysExamined" : 405,
"seeks" : 1,
"dupsTested" : 405,
"dupsDropped" : 0,
"seenInvalidated" : 0
}
}
}
}
},
"serverInfo" : {
"host" : "",
"port" : ,
"version" : "4.0.18",
"gitVersion" : "6883bdfb8b8cff32176b1fd176df04da9165fd67"
},
"ok" : 1,
"operationTime" : Timestamp(1629988625, 146),
"$clusterTime" : {
"clusterTime" : Timestamp(1629988625, 146),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
I followed Joe's advice and changed my index so that ctime comes before appIds,
but it does not work very well. There are still slow-query log entries for the new query, and it's hard to figure out why it is slow:
2021-10-19T22:38:16.918+0800 I COMMAND [conn2434281164] command newsmine.newstoapp command: find { find: "newstoapp", filter: { appIds: { $elemMatch: { $in: [ 2433 ] } }, ctime: { $gte: 0 } }, sort: { ctime: -1 }, hint: { ctime: -1, appIds: 1 }, skip: 0, limit: 50, batchSize: 50, $readPreference: { mode: "secondaryPreferred" }, $db: "newsmine" } planSummary: IXSCAN { ctime: -1, appIds: 1 } keysExamined:1471582 docsExamined:50 cursorExhausted:1 numYields:11496 nreturned:50 reslen:34043 locks:{ Global: { acquireCount: { r: 11497 } }, Database: { acquireCount: { r: 11497 } }, Collection: { acquireCount: { r: 11497 } } } storage:{ data: { bytesRead: 44958, timeReadingMicros: 618 } } protocol:op_query 7038ms
The cause of the slowness is that MongoDB 4.0.18 has a blocking SORT stage, so all matching documents must be found, retrieved, and sorted in memory before the requested batch can be returned.
In prior versions of MongoDB it was found that, under certain conditions, using a multi-key index to support a sort would produce incorrect results.
I never fully understood those conditions or why the results were incorrect, so if you are able to find those details, please edit or comment.
Prior to MongoDB 3.4 the index metadata contained a boolean value to indicate whether or not the index was multi-key (indexed a field that contained an array for at least one document).
MongoDB 3.4 introduced a new index version that also keeps track of which fields in the index are multi-key.
MongoDB 3.6 introduced a change to sorting to avoid the situations where results would be incorrect. This is why your query has a sort stage and is taking longer.
There are a couple of things you could try to get back to the previous behavior without a blocking sort:
Drop and rebuild the index.
The existing index is version 1, which does not track multi-key paths. When rebuilding, the index should be created at version 2, which does track these, and may permit the query executor to use the index for sorting.
Create a new index with ctime before appIds.
A multi-key index has an entry in the index for each value in the indexed array, which may cause the query planner to assume the array field will disrupt sorting on a following key.
An index on {ctime:-1, appIds:1, background:1} would place the sort key ahead of the multi-key field, and while this may require reading more of the index, it may also permit the query executor to use the index for sorting.
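A minimal shell sketch of both options (assuming the collection and index names from the question; check the current state with db.newstoapp.getIndexes() first, and be careful rebuilding indexes on a busy production node):
// Option 1: drop and rebuild the existing index. On a modern server the
// rebuilt index is created at index version 2, which tracks multi-key
// paths per field.
db.newstoapp.dropIndex("appIds_1_ctime_-1_background_1")
db.newstoapp.createIndex({ appIds: 1, ctime: -1, background: 1 })

// Option 2: create a new index with the sort key ahead of the
// multi-key field, so the index order can feed the sort directly.
db.newstoapp.createIndex({ ctime: -1, appIds: 1, background: 1 })

// Re-check the plan; ideally the blocking SORT stage disappears.
db.newstoapp.find({ appIds: { $in: [1460] } }).sort({ ctime: -1 }).limit(10).explain('executionStats')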
I have around 10 million documents in MongoDB.
I'm trying to search for text inside the db with db.outMessage.find({ "text" : /.*m.*/ }), but it takes too long (around 30 seconds) with no results, while a search for existing text takes less than a second.
I tried putting an index on text, with the same result.
db.outMessage.find({ "text" : /.*m.*/}).explain(true)
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "notification_center.outMessage",
"indexFilterSet" : false,
"parsedQuery" : {
"text" : {
"$regex" : ".*m.*"
}
},
"winningPlan" : {
"stage" : "FETCH",
"inputStage" : {
"stage" : "IXSCAN",
"filter" : {
"text" : {
"$regex" : ".*m.*"
}
},
"keyPattern" : {
"text" : 1
},
"indexName" : "text",
"isMultiKey" : false,
"multiKeyPaths" : {
"text" : [ ]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"text" : [
"[\"\", {})",
"[/.*m.*/, /.*m.*/]"
]
}
}
},
"rejectedPlans" : [ ]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 0,
"executionTimeMillis" : 14354,
"totalKeysExamined" : 10263270,
"totalDocsExamined" : 0,
"executionStages" : {
"stage" : "FETCH",
"nReturned" : 0,
"executionTimeMillisEstimate" : 12957,
"works" : 10263271,
"advanced" : 0,
"needTime" : 10263270,
"needYield" : 0,
"saveState" : 80258,
"restoreState" : 80258,
"isEOF" : 1,
"invalidates" : 0,
"docsExamined" : 0,
"alreadyHasObj" : 0,
"inputStage" : {
"stage" : "IXSCAN",
"filter" : {
"text" : {
"$regex" : ".*m.*"
}
},
"nReturned" : 0,
"executionTimeMillisEstimate" : 12461,
"works" : 10263271,
"advanced" : 0,
"needTime" : 10263270,
"needYield" : 0,
"saveState" : 80258,
"restoreState" : 80258,
"isEOF" : 1,
"invalidates" : 0,
"keyPattern" : {
"text" : 1
},
"indexName" : "text",
"isMultiKey" : false,
"multiKeyPaths" : {
"text" : [ ]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"text" : [
"[\"\", {})",
"[/.*m.*/, /.*m.*/]"
]
},
"keysExamined" : 10263270,
"seeks" : 1,
"dupsTested" : 0,
"dupsDropped" : 0,
"seenInvalidated" : 0
}
},
"allPlansExecution" : [ ]
},
"serverInfo" : {
"host" : "acsdptest.arabiacell.net",
"port" : 27017,
"version" : "3.4.7",
"gitVersion" : "cf38c1b8a0a8dca4a11737581beafef4fe120bcd"
},
The index will essentially be a list of all the values of the text field, in lexicographical order, i.e. sorted character by character starting from the first letter.
Since the query executor has no way to predict which values might contain an 'm', it must examine all of the index entries.
In the case of this query, that means 10,263,270 index keys were examined, after being read from disk if the index was not already in the cache.
If this is actually a keyword search and not a single-letter match, then instead of $regex you might be able to make use of the $text query operator, which requires a text index.
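A minimal sketch of that alternative (collection and field names from the question; the keyword is a made-up example, and note that $text matches whole words/stems, not arbitrary substrings):
// Create a text index on the text field (one text index per collection).
db.outMessage.createIndex({ text: "text" })

// Keyword search that can use the text index instead of examining
// every key the way /.*m.*/ does.
db.outMessage.find({ $text: { $search: "message" } })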
I have a huge collection (1,000 million documents) and I would like to search for and remove the records older than a timestamp.
I have an index created on the field lastUpdatedTime
db.MyCol.remove({"lastUpdatedTime" : {$lt: ISODate("2016-10-06T00:00:00Z") }})
The above remove query times out, and I modified it to use bulk operations as well:
Failed execution of command 'delete' with id 4334 on connection
'connectionId{localValue:13, serverValue:22}' to server 'XXXXX:27017'
with exception 'com.mongodb.MongoSocketReadTimeoutException: Timeout
while receiving message'
I understand Mongo doesn't support limit in remove yet, so I am implementing something like the below:
// Read 10K records older than the cutoff
BasicDBObject query = new BasicDBObject();
query.append("lastUpdatedTime",
        new BasicDBObject("$lte", new Timestamp(cal.getTimeInMillis())));
DBCursor cursorDocBuilder = myCol.find(query).limit(10000);
// Collect their _ids
BasicDBList inList = new BasicDBList();
while (cursorDocBuilder.hasNext()) {
    inList.add(cursorDocBuilder.next().get("_id"));
}
// Construct the $in clause and remove the batch
BasicDBObject deleteQuery = new BasicDBObject();
deleteQuery.put("_id", new BasicDBObject("$in", inList));
WriteResult result = myCol.remove(deleteQuery);
What would be a good batch size to remove using the $in clause?
Would it be better to fire multiple remove statements instead of one big one with a large $in clause?
I think deleting the top N records is an everyday situation in a database. Is there a better way of achieving this?
P.S.: I can use multiple threads to clean up. I don't want to overload the database, as I anticipate heavy read/write operations on the same collection.
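For reference, here is a minimal mongo-shell sketch of the same batched-delete loop (collection and field names from the question; the 10,000 batch size is just the question's starting point and should be tuned):
var cutoff = ISODate("2016-10-06T00:00:00Z");
var removed;
do {
    // Fetch the next batch of _ids via the lastUpdatedTime index.
    var ids = db.MyCol.find({ lastUpdatedTime: { $lt: cutoff } }, { _id: 1 })
                      .limit(10000)
                      .map(function (doc) { return doc._id; });
    // Remove that batch; stop once no old documents remain.
    removed = ids.length > 0 ? db.MyCol.remove({ _id: { $in: ids } }).nRemoved : 0;
} while (removed > 0);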
Adding the explain() output for fetching 1,000 records:
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "XXX",
"indexFilterSet" : false,
"parsedQuery" : {
"lastUpdatedTime" : {
"$lt" : ISODate("2016-10-06T00:00:00Z")
}
},
"winningPlan" : {
"stage" : "LIMIT",
"limitAmount" : 1000,
"inputStage" : {
"stage" : "FETCH",
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"lastUpdatedTime" : 1
},
"indexName" : "lastUpdatedTime_1",
"isMultiKey" : false,
"multiKeyPaths" : {
"lastUpdatedTime" : [ ]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"lastUpdatedTime" : [
"(true, new Date(1475712000000))"
]
}
}
}
},
"rejectedPlans" : [ ]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 1000,
"executionTimeMillis" : 200,
"totalKeysExamined" : 1000,
"totalDocsExamined" : 1000,
"executionStages" : {
"stage" : "LIMIT",
"nReturned" : 1000,
"executionTimeMillisEstimate" : 201,
"works" : 1001,
"advanced" : 1000,
"needTime" : 0,
"needYield" : 0,
"saveState" : 10,
"restoreState" : 10,
"isEOF" : 1,
"invalidates" : 0,
"limitAmount" : 1000,
"inputStage" : {
"stage" : "FETCH",
"nReturned" : 1000,
"executionTimeMillisEstimate" : 201,
"works" : 1000,
"advanced" : 1000,
"needTime" : 0,
"needYield" : 0,
"saveState" : 10,
"restoreState" : 10,
"isEOF" : 0,
"invalidates" : 0,
"docsExamined" : 1000,
"alreadyHasObj" : 0,
"inputStage" : {
"stage" : "IXSCAN",
"nReturned" : 1000,
"executionTimeMillisEstimate" : 0,
"works" : 1000,
"advanced" : 1000,
"needTime" : 0,
"needYield" : 0,
"saveState" : 10,
"restoreState" : 10,
"isEOF" : 0,
"invalidates" : 0,
"keyPattern" : {
"lastUpdatedTime" : 1
},
"indexName" : "lastUpdatedTime_1",
"isMultiKey" : false,
"multiKeyPaths" : {
"lastUpdatedTime" : [ ]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"lastUpdatedTime" : [
"(true, new Date(1475712000000))"
]
},
"keysExamined" : 1000,
"seeks" : 1,
"dupsTested" : 0,
"dupsDropped" : 0,
"seenInvalidated" : 0
}
}
}
}
I have been running a Mongo test server for a few weeks now and everything has been going well. I had a query to get a count of deleted files, and it was running in the sub-1-second range across 140 million documents (44 GB of data). Today, all of a sudden, that same query is taking a while. Running explain on the find query shows it is using the index that is in place, but it still takes a long time. The weird thing is that this isn't the only query that appears to do this. If I run a query that returns 1 item, it's almost instantaneous; when it returns a set of documents, it takes forever (multiple minutes). Below are the query and the explain output from the Mongo shell. I've removed the rejected plan to save space, but can put it back if needed. I'm new to Mongo, so any help is greatly appreciated. I also have another query that I've been waiting on for 24 minutes that is just a search on an EventID field, which is an indexed field.
db.EventEntries.find({IsDelete:true, TimeCreated:{$gte:ISODate('2017-06-30')}}).explain('executionStats')
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "Events.EventEntries",
"indexFilterSet" : false,
"parsedQuery" : {
"$and" : [
{
"IsDelete" : {
"$eq" : true
}
},
{
"TimeCreated" : {
"$gte" : ISODate("2017-06-30T00:00:00Z")
}
}
]
},
"winningPlan" : {
"stage" : "FETCH",
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"IsDelete" : 1,
"TimeCreated" : 1
},
"indexName" : "IsDelete_1_TimeCreated_1",
"isMultiKey" : true,
"multiKeyPaths" : {
"IsDelete" : [ ],
"TimeCreated" : [
"TimeCreated"
]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"IsDelete" : [
"[true, true]"
],
"TimeCreated" : [
"[new Date(1498780800000), new Date(9223372036854775807)]"
]
}
}
}
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 138570,
"executionTimeMillis" : 177933,
"totalKeysExamined" : 138570,
"totalDocsExamined" : 138570,
"executionStages" : {
"stage" : "FETCH",
"nReturned" : 138570,
"executionTimeMillisEstimate" : 177834,
"works" : 138571,
"advanced" : 138570,
"needTime" : 0,
"needYield" : 0,
"saveState" : 8887,
"restoreState" : 8887,
"isEOF" : 1,
"invalidates" : 0,
"docsExamined" : 138570,
"alreadyHasObj" : 0,
"inputStage" : {
"stage" : "IXSCAN",
"nReturned" : 138570,
"executionTimeMillisEstimate" : 561,
"works" : 138571,
"advanced" : 138570,
"needTime" : 0,
"needYield" : 0,
"saveState" : 8887,
"restoreState" : 8887,
"isEOF" : 1,
"invalidates" : 0,
"keyPattern" : {
"IsDelete" : 1,
"TimeCreated" : 1
},
"indexName" : "IsDelete_1_TimeCreated_1",
"isMultiKey" : true,
"multiKeyPaths" : {
"IsDelete" : [ ],
"TimeCreated" : [
"TimeCreated"
]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"IsDelete" : [
"[true, true]"
],
"TimeCreated" : [
"[new Date(1498780800000), new Date(9223372036854775807)]"
]
},
"keysExamined" : 138570,
"seeks" : 1,
"dupsTested" : 138570,
"dupsDropped" : 0,
"seenInvalidated" : 0
}
}
},
"serverInfo" : {
"host" : "",
"port" : ,
"version" : "3.4.4",
"gitVersion" : "888390515874a9debd1b6c5d36559ca86b44babd"
},
"ok" : 1
}
I am having difficulty persuading Mongo to run a distinct query that looks like it should be covered by the indexes without fetching a large number of documents in the collection.
My documents have the general form:
{
_tenantId: 'someString',
_productCategory: 'some string from a smallish set'
...
}
I have an index on (_tenantId, _productCategory).
I want to find out what the set of distinct product categories is for a given tenant, so the query is:
db.products.distinct( '_productCategory', { _tenantId: '463171c3-d15f-4699-893d-3046327f8e1f'})
This runs rather slowly (several seconds for a collection of around half a million products against a local DB, which is Mongo 3.2.9). Against our pre-production SaaS-based Mongo (which is probably more memory-constrained than my local instance, which has free run of my machine), it takes several tens of seconds for the same data.
Explaining the query yields:
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "engage-prod.products",
"indexFilterSet" : false,
"parsedQuery" : {
"_tenantId" : {
"$eq" : "463171c3-d15f-4699-893d-3046327f8e1f"
}
},
"winningPlan" : {
"stage" : "FETCH",
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"_tenantId" : 1,
"_productCategory" : 1
},
"indexName" : "_tenantId_1__productCategory_1",
"isMultiKey" : false,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1,
"direction" : "forward",
"indexBounds" : {
"_tenantId" : [
"[\"463171c3-d15f-4699-893d-3046327f8e1f\", \"463171c3-d15f-4699-893d-3046327f8e1f\"]"
],
"_productCategory" : [
"[MinKey, MaxKey]"
]
}
}
},
"rejectedPlans" : [ ]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 406871,
"executionTimeMillis" : 358,
"totalKeysExamined" : 406871,
"totalDocsExamined" : 406871,
"executionStages" : {
"stage" : "FETCH",
"nReturned" : 406871,
"executionTimeMillisEstimate" : 80,
"works" : 406872,
"advanced" : 406871,
"needTime" : 0,
"needYield" : 0,
"saveState" : 3178,
"restoreState" : 3178,
"isEOF" : 1,
"invalidates" : 0,
"docsExamined" : 406871,
"alreadyHasObj" : 0,
"inputStage" : {
"stage" : "IXSCAN",
"nReturned" : 406871,
"executionTimeMillisEstimate" : 40,
"works" : 406872,
"advanced" : 406871,
"needTime" : 0,
"needYield" : 0,
"saveState" : 3178,
"restoreState" : 3178,
"isEOF" : 1,
"invalidates" : 0,
"keyPattern" : {
"_tenantId" : 1,
"_productCategory" : 1
},
"indexName" : "_tenantId_1__productCategory_1",
"isMultiKey" : false,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1,
"direction" : "forward",
"indexBounds" : {
"_tenantId" : [
"[\"463171c3-d15f-4699-893d-3046327f8e1f\", \"463171c3-d15f-4699-893d-3046327f8e1f\"]"
],
"_productCategory" : [
"[MinKey, MaxKey]"
]
},
"keysExamined" : 406871,
"dupsTested" : 0,
"dupsDropped" : 0,
"seenInvalidated" : 0
}
}
},
"serverInfo" : {
"host" : "Stevens-MacBook-Pro.local",
"port" : 27017,
"version" : "3.2.9",
"gitVersion" : "22ec9e93b40c85fc7cae7d56e7d6a02fd811088c"
},
"ok" : 1
}
Note that even though it runs an IXSCAN it still returns over 400K documents (nReturned).
If I create a compound field _productTenantAndCategory containing a lexical concatenation (with a : separator) and index it as a single-field index, then the query:
db.products.explain('executionStats').distinct( '_productTenantAndCategory', { _productTenantAndCategory: {$gte: '463171c3-d15f-4699-893d-3046327f8e1f',$lt: '463171c3-d15f-4699-893d-3046327f8e1g'}})
works entirely within the index and yields:
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "engage-prod.products",
"indexFilterSet" : false,
"parsedQuery" : {
"$and" : [
{
"_productTenantAndCategory" : {
"$lt" : "463171c3-d15f-4699-893d-3046327f8e1g"
}
},
{
"_productTenantAndCategory" : {
"$gte" : "463171c3-d15f-4699-893d-3046327f8e1f"
}
}
]
},
"winningPlan" : {
"stage" : "PROJECTION",
"transformBy" : {
"_id" : 0,
"_productTenantAndCategory" : 1
},
"inputStage" : {
"stage" : "DISTINCT_SCAN",
"keyPattern" : {
"_productTenantAndCategory" : 1
},
"indexName" : "_productTenantAndCategory_1",
"isMultiKey" : false,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1,
"direction" : "forward",
"indexBounds" : {
"_productTenantAndCategory" : [
"[\"463171c3-d15f-4699-893d-3046327f8e1f\", \"463171c3-d15f-4699-893d-3046327f8e1g\")"
]
}
}
},
"rejectedPlans" : [ ]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 62,
"executionTimeMillis" : 0,
"totalKeysExamined" : 63,
"totalDocsExamined" : 0,
"executionStages" : {
"stage" : "PROJECTION",
"nReturned" : 62,
"executionTimeMillisEstimate" : 0,
"works" : 63,
"advanced" : 62,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 1,
"invalidates" : 0,
"transformBy" : {
"_id" : 0,
"_productTenantAndCategory" : 1
},
"inputStage" : {
"stage" : "DISTINCT_SCAN",
"nReturned" : 62,
"executionTimeMillisEstimate" : 0,
"works" : 63,
"advanced" : 62,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 1,
"invalidates" : 0,
"keyPattern" : {
"_productTenantAndCategory" : 1
},
"indexName" : "_productTenantAndCategory_1",
"isMultiKey" : false,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1,
"direction" : "forward",
"indexBounds" : {
"_productTenantAndCategory" : [
"[\"463171c3-d15f-4699-893d-3046327f8e1f\", \"463171c3-d15f-4699-893d-3046327f8e1g\")"
]
},
"keysExamined" : 63
}
}
},
"serverInfo" : {
"host" : "Stevens-MacBook-Pro.local",
"port" : 27017,
"version" : "3.2.9",
"gitVersion" : "22ec9e93b40c85fc7cae7d56e7d6a02fd811088c"
},
"ok" : 1
}
Having to build single-field indexes with manually compounded keys for all the aggregation queries I need is not a very desirable path to follow. Since all the information is present in the compound index I started with, why can't Mongo execute the original distinct query covered by that index? Is there anything I can do in the way of query optimization to overcome this?
Note: This is actually a sub-problem of a slightly more complex one involving an aggregation pipeline to actually count the number of occurrences of each category, but I am restricting my question for now to the simpler distinct query, since it seems to capture the essence of the failure to use an index that should cover things (which I was also seeing in the aggregation pipeline case) while being a simpler overall query.
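For reference, a minimal sketch of the aggregation-pipeline variant mentioned in the note (field and collection names from the question; on 3.2 this typically exhibits the same FETCH-heavy behavior rather than a covered scan):
db.products.aggregate([
    { $match: { _tenantId: '463171c3-d15f-4699-893d-3046327f8e1f' } },
    // Count the occurrences of each category for this tenant.
    { $group: { _id: '$_productCategory', count: { $sum: 1 } } }
])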