I have a Mongo collection which holds millions of IoT device data points.
The structure of a document looks like this:
{ ObjectID:"...", device:"DEVICE3", topic:"TEMP", vhost:"client1", date:ISODate("2017-08-23T08:00:00.000Z"), value:23.5 }
I have a REST API with a request that finds all the devices for one specific vhost.
The request looks like this: db.data.distinct("device", { vhost:"client1" })
I added an index on vhost and device, db.data.createIndex( { vhost:1, device:1 }), but the query still examines a lot of documents. What kind of index can I use to optimize this request?
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 15848881,
"executionTimeMillis" : 42425,
"totalKeysExamined" : 15848881,
"totalDocsExamined" : 15848881,
"executionStages" : {
"stage" : "FETCH",
"nReturned" : 15848881,
"executionTimeMillisEstimate" : 36240,
"works" : 15848882,
"advanced" : 15848881,
"needTime" : 0,
"needYield" : 0,
"saveState" : 123949,
"restoreState" : 123949,
"isEOF" : 1,
"invalidates" : 0,
"docsExamined" : 15848881,
"alreadyHasObj" : 0,
"inputStage" : {
"stage" : "IXSCAN",
"nReturned" : 15848881,
"executionTimeMillisEstimate" : 9890,
"works" : 15848882,
"advanced" : 15848881,
"needTime" : 0,
"needYield" : 0,
"saveState" : 123949,
"restoreState" : 123949,
"isEOF" : 1,
"invalidates" : 0,
"keyPattern" : {
"vhost" : 1,
"device" : 1
},
"indexName" : "vhost_1_device_1",
"isMultiKey" : false,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1,
"direction" : "forward",
"indexBounds" : {
"vhost" : [
"[\"client1\", \"client1\"]"
],
"device" : [
"[MinKey, MaxKey]"
]
},
"keysExamined" : 15848881,
"dupsTested" : 0,
"dupsDropped" : 0,
"seenInvalidated" : 0
}
}
},
In the end, there are about 30 distinct devices.
EDIT:
Here is the queryPlanner output, as requested:
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "thingsplay.data",
"indexFilterSet" : false,
"parsedQuery" : {
"vhost" : {
"$eq" : "client1"
}
},
"winningPlan" : {
"stage" : "FETCH",
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"vhost" : 1,
"device" : 1
},
"indexName" : "vhost_1_device_1",
"isMultiKey" : false,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1,
"direction" : "forward",
"indexBounds" : {
"vhost" : [
"[\"client1\", \"client1\"]"
],
"device" : [
"[MinKey, MaxKey]"
]
}
}
},
"rejectedPlans" : [ ]
},
The result:
[
"F000105",
"F000107",
"F000109",
"F000110",
"F000113",
"F000119",
"F000121",
"F000124",
"F000128",
"F000131",
"F000134",
"F000138",
"F000144",
"F000146",
"F000147",
"F000148",
"F000149",
"F000150",
"F000153",
"F000155",
"F000156",
"F000159",
"F000161",
"F000164",
"F000166",
"F000167",
"F000168",
"F000169",
"F000170",
"F000171",
"F000172",
"F000181",
"F000183",
"F000184",
"F000187",
"F000190",
"F000192",
"F000193",
"F000203",
"F000204",
"F000205",
"F000208",
"F000209",
"F000215",
"F000221",
"F000223",
"F000243",
"F000249",
"F000250",
"F000251",
"F000253",
"F000255",
"S0E190E",
"S0E1A45",
"S0E1AC0",
"SYS_STATUS_ID",
"TS4D9292",
"TS4D9294",
"TS4D9296",
"TS4D9297",
"TS4D9298",
"TS4D9299",
"TS4D929B",
"TS4D929D",
"TS4D929F",
"TS4D92A0",
"TS4D92A2",
"TS4D92A6",
"TS4D92AA",
"TS4D92B1",
"TS4D92B2",
"TS4D92B3",
"TS4D92B4",
"TS4D92C2"
]
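One thing worth checking, as a hedged sketch using the collection and index above: explain the distinct command itself and see whether the server can choose a covered DISTINCT_SCAN over { vhost: 1, device: 1 } instead of the IXSCAN + FETCH shown above; on some server versions an equivalent $match + $group aggregation can also be answered largely from that index.
db.runCommand({
    explain: { distinct: "data", key: "device", query: { vhost: "client1" } },
    verbosity: "executionStats"
})
// aggregation alternative; whether it avoids fetching documents depends on the server version
db.data.aggregate([
    { $match: { vhost: "client1" } },
    { $group: { _id: "$device" } }
])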
I have been running a Mongo test server for a few weeks now and everything has been going well. I had a query to get a count of deleted files and it was running in the sub-1-second range for 140 million documents (44 GB of data). Today, all of a sudden, that same query is taking a while. Running explain on the find query shows it is using the index that is in place, but it still takes a long time. The weird thing is that this isn't the only query behaving this way. If I run a query that returns 1 item, it's almost instantaneous; when it's returning a set of documents it takes forever (multiple minutes). Below is the query and the explain output from the Mongo shell. I've removed the rejected plan to save space, but can put it back if needed. I'm new to Mongo, so any help is greatly appreciated. I also have another query that I've been waiting on for 24 minutes that is just a search on an EventID field, which is an indexed field.
db.EventEntries.find({IsDelete:true, TimeCreated:{$gte:ISODate('2017-06-30')}}).explain('executionStats')
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "Events.EventEntries",
"indexFilterSet" : false,
"parsedQuery" : {
"$and" : [
{
"IsDelete" : {
"$eq" : true
}
},
{
"TimeCreated" : {
"$gte" : ISODate("2017-06-30T00:00:00Z")
}
}
]
},
"winningPlan" : {
"stage" : "FETCH",
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"IsDelete" : 1,
"TimeCreated" : 1
},
"indexName" : "IsDelete_1_TimeCreated_1",
"isMultiKey" : true,
"multiKeyPaths" : {
"IsDelete" : [ ],
"TimeCreated" : [
"TimeCreated"
]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"IsDelete" : [
"[true, true]"
],
"TimeCreated" : [
"[new Date(1498780800000), new Date(9223372036854775807)]"
]
}
}
},
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 138570,
"executionTimeMillis" : 177933,
"totalKeysExamined" : 138570,
"totalDocsExamined" : 138570,
"executionStages" : {
"stage" : "FETCH",
"nReturned" : 138570,
"executionTimeMillisEstimate" : 177834,
"works" : 138571,
"advanced" : 138570,
"needTime" : 0,
"needYield" : 0,
"saveState" : 8887,
"restoreState" : 8887,
"isEOF" : 1,
"invalidates" : 0,
"docsExamined" : 138570,
"alreadyHasObj" : 0,
"inputStage" : {
"stage" : "IXSCAN",
"nReturned" : 138570,
"executionTimeMillisEstimate" : 561,
"works" : 138571,
"advanced" : 138570,
"needTime" : 0,
"needYield" : 0,
"saveState" : 8887,
"restoreState" : 8887,
"isEOF" : 1,
"invalidates" : 0,
"keyPattern" : {
"IsDelete" : 1,
"TimeCreated" : 1
},
"indexName" : "IsDelete_1_TimeCreated_1",
"isMultiKey" : true,
"multiKeyPaths" : {
"IsDelete" : [ ],
"TimeCreated" : [
"TimeCreated"
]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"IsDelete" : [
"[true, true]"
],
"TimeCreated" : [
"[new Date(1498780800000), new Date(9223372036854775807)]"
]
},
"keysExamined" : 138570,
"seeks" : 1,
"dupsTested" : 138570,
"dupsDropped" : 0,
"seenInvalidated" : 0
}
}
},
"serverInfo" : {
"host" : "",
"port" : ,
"version" : "3.4.4",
"gitVersion" : "888390515874a9debd1b6c5d36559ca86b44babd"
},
"ok" : 1
}
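For what it's worth, the stats above show the IXSCAN itself is estimated at about 561 ms while the FETCH stage accounts for roughly 177 s, which points at the time being spent loading the documents themselves rather than walking the index. A minimal sketch, using the same collection and filter as above, that asks only for the count plus its explain, so you can see whether the server still has to fetch documents:
// count only, no documents returned to the client
db.EventEntries.find({ IsDelete: true, TimeCreated: { $gte: ISODate("2017-06-30") } }).count()
// explain the count to see whether it can be answered from the index alone
db.EventEntries.explain("executionStats").count({ IsDelete: true, TimeCreated: { $gte: ISODate("2017-06-30") } })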
I am having difficulty persuading Mongo to run a distinct query that looks like it should be covered by the indexes without it fetching a large number of documents from the collection.
My documents have the general form:
{
_tenantId: 'someString',
_productCategory: 'some string from a smallish set'
...
}
I have an index on (_tenantId, _productCategory).
I want to find out what the set of distinct product categories is for a given tenant, so the query is:
db.products.distinct( '_productCategory', { _tenantId: '463171c3-d15f-4699-893d-3046327f8e1f'})
This runs rather slowly (several seconds for a collection of around half a million products against a local DB, which is Mongo 3.2.9). Against our pre-production SaaS-based Mongo (which is probably more memory-constrained than my local instance, which has free run of my machine) it takes several tens of seconds for the same data.
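For reference, explain output like the one below can be obtained by wrapping the distinct call, the same form used for the workaround query further down (a sketch):
db.products.explain("executionStats").distinct("_productCategory", { _tenantId: "463171c3-d15f-4699-893d-3046327f8e1f" })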
Explaining the query yields:
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "engage-prod.products",
"indexFilterSet" : false,
"parsedQuery" : {
"_tenantId" : {
"$eq" : "463171c3-d15f-4699-893d-3046327f8e1f"
}
},
"winningPlan" : {
"stage" : "FETCH",
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"_tenantId" : 1,
"_productCategory" : 1
},
"indexName" : "_tenantId_1__productCategory_1",
"isMultiKey" : false,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1,
"direction" : "forward",
"indexBounds" : {
"_tenantId" : [
"[\"463171c3-d15f-4699-893d-3046327f8e1f\", \"463171c3-d15f-4699-893d-3046327f8e1f\"]"
],
"_productCategory" : [
"[MinKey, MaxKey]"
]
}
}
},
"rejectedPlans" : [ ]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 406871,
"executionTimeMillis" : 358,
"totalKeysExamined" : 406871,
"totalDocsExamined" : 406871,
"executionStages" : {
"stage" : "FETCH",
"nReturned" : 406871,
"executionTimeMillisEstimate" : 80,
"works" : 406872,
"advanced" : 406871,
"needTime" : 0,
"needYield" : 0,
"saveState" : 3178,
"restoreState" : 3178,
"isEOF" : 1,
"invalidates" : 0,
"docsExamined" : 406871,
"alreadyHasObj" : 0,
"inputStage" : {
"stage" : "IXSCAN",
"nReturned" : 406871,
"executionTimeMillisEstimate" : 40,
"works" : 406872,
"advanced" : 406871,
"needTime" : 0,
"needYield" : 0,
"saveState" : 3178,
"restoreState" : 3178,
"isEOF" : 1,
"invalidates" : 0,
"keyPattern" : {
"_tenantId" : 1,
"_productCategory" : 1
},
"indexName" : "_tenantId_1__productCategory_1",
"isMultiKey" : false,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1,
"direction" : "forward",
"indexBounds" : {
"_tenantId" : [
"[\"463171c3-d15f-4699-893d-3046327f8e1f\", \"463171c3-d15f-4699-893d-3046327f8e1f\"]"
],
"_productCategory" : [
"[MinKey, MaxKey]"
]
},
"keysExamined" : 406871,
"dupsTested" : 0,
"dupsDropped" : 0,
"seenInvalidated" : 0
}
}
},
"serverInfo" : {
"host" : "Stevens-MacBook-Pro.local",
"port" : 27017,
"version" : "3.2.9",
"gitVersion" : "22ec9e93b40c85fc7cae7d56e7d6a02fd811088c"
},
"ok" : 1
}
Note that even though it runs an IXSCAN it still returns over 400K documents (nReturned).
If I create a compound field _productTenantAndCategory containing a lexical concatenation of the two values (with a : separator) and index that as a single-field index, then the query:
db.products.explain('executionStats').distinct( '_productTenantAndCategory', { _productTenantAndCategory: {$gte: '463171c3-d15f-4699-893d-3046327f8e1f',$lt: '463171c3-d15f-4699-893d-3046327f8e1g'}})
works entirely within the index and yields:
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "engage-prod.products",
"indexFilterSet" : false,
"parsedQuery" : {
"$and" : [
{
"_productTenantAndCategory" : {
"$lt" : "463171c3-d15f-4699-893d-3046327f8e1g"
}
},
{
"_productTenantAndCategory" : {
"$gte" : "463171c3-d15f-4699-893d-3046327f8e1f"
}
}
]
},
"winningPlan" : {
"stage" : "PROJECTION",
"transformBy" : {
"_id" : 0,
"_productTenantAndCategory" : 1
},
"inputStage" : {
"stage" : "DISTINCT_SCAN",
"keyPattern" : {
"_productTenantAndCategory" : 1
},
"indexName" : "_productTenantAndCategory_1",
"isMultiKey" : false,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1,
"direction" : "forward",
"indexBounds" : {
"_productTenantAndCategory" : [
"[\"463171c3-d15f-4699-893d-3046327f8e1f\", \"463171c3-d15f-4699-893d-3046327f8e1g\")"
]
}
}
},
"rejectedPlans" : [ ]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 62,
"executionTimeMillis" : 0,
"totalKeysExamined" : 63,
"totalDocsExamined" : 0,
"executionStages" : {
"stage" : "PROJECTION",
"nReturned" : 62,
"executionTimeMillisEstimate" : 0,
"works" : 63,
"advanced" : 62,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 1,
"invalidates" : 0,
"transformBy" : {
"_id" : 0,
"_productTenantAndCategory" : 1
},
"inputStage" : {
"stage" : "DISTINCT_SCAN",
"nReturned" : 62,
"executionTimeMillisEstimate" : 0,
"works" : 63,
"advanced" : 62,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 1,
"invalidates" : 0,
"keyPattern" : {
"_productTenantAndCategory" : 1
},
"indexName" : "_productTenantAndCategory_1",
"isMultiKey" : false,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1,
"direction" : "forward",
"indexBounds" : {
"_productTenantAndCategory" : [
"[\"463171c3-d15f-4699-893d-3046327f8e1f\", \"463171c3-d15f-4699-893d-3046327f8e1g\")"
]
},
"keysExamined" : 63
}
}
},
"serverInfo" : {
"host" : "Stevens-MacBook-Pro.local",
"port" : 27017,
"version" : "3.2.9",
"gitVersion" : "22ec9e93b40c85fc7cae7d56e7d6a02fd811088c"
},
"ok" : 1
}
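For completeness, here is a sketch of how the manually compounded field above could be backfilled and indexed; a hypothetical one-off shell script using the field names from this question, not an endorsement of the approach:
// backfill the compounded key on existing documents
db.products.find({}, { _tenantId: 1, _productCategory: 1 }).forEach(function (doc) {
    db.products.update(
        { _id: doc._id },
        { $set: { _productTenantAndCategory: doc._tenantId + ":" + doc._productCategory } }
    );
});
// single-field index that the DISTINCT_SCAN above relies on
db.products.createIndex({ _productTenantAndCategory: 1 })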
Having to build single-field indexes with manually compounded keys for all the aggregation queries I need is not a very desirable path to follow. Since all the information is present in the compound index I started with, why can't Mongo execute the original distinct query covered by that index? Is there anything I can do in the way of query optimization to overcome this?
Note: this is actually a sub-problem of a slightly more complex one involving an aggregation pipeline to count the number of occurrences of each category, but I am restricting my question for now to the simpler distinct query, since it seems to capture the essence of the failure to use an index that should cover things (which I was also seeing in the aggregation pipeline case) while being a simpler overall query.
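For reference, the counting aggregation mentioned in the note would look roughly like this (a sketch; on 3.2 the $match can use the { _tenantId: 1, _productCategory: 1 } index, but the grouping still ends up reading the matching documents, which is the same failure to stay inside the index):
db.products.aggregate([
    { $match: { _tenantId: "463171c3-d15f-4699-893d-3046327f8e1f" } },
    { $group: { _id: "$_productCategory", count: { $sum: 1 } } }
])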
Background
I have a collection of users with structure of documents like this:
{
"_id" : ObjectId("54e61137cca5d2ff0a8b4567"),
"login" : "test1",
"emails" : [
{
"email" : "test1#example.com",
"is_primary" : true,
"_id" : ObjectId("57baf3e97323afb2688e639c")
},
{
"email" : "test1_1#example.com",
"is_primary" : false,
"_id" : ObjectId("57baf3e97323afb2688e639d")
}
]
}
Indexes:
{
"v" : 1,
"key" : {
"login" : 1
},
"name" : "login_1",
"ns" : "mydb.users",
"background" : true
},
{
"v" : 1,
"key" : {
"emails.email" : 1
},
"name" : "emails.email_1",
"ns" : "mydb.users"
}
The collection contains ~700000 documents.
Scenario
To explain the search for users by login, I run:
rs0:PRIMARY> db.users.explain('executionStats').find({'login' : /test123123123/})
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "mydb.users",
"indexFilterSet" : false,
"parsedQuery" : {
"login" : /test123123123/
},
"winningPlan" : {
"stage" : "FETCH",
"inputStage" : {
"stage" : "IXSCAN",
"filter" : {
"login" : /test123123123/
},
"keyPattern" : {
"login" : 1
},
"indexName" : "login_1",
"isMultiKey" : false,
"direction" : "forward",
"indexBounds" : {
"login" : [
"[\"\", {})",
"[/test123123123/, /test123123123/]"
]
}
}
},
"rejectedPlans" : [ ]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 0,
"executionTimeMillis" : 1040,
"totalKeysExamined" : 698993,
"totalDocsExamined" : 0,
"executionStages" : {
"stage" : "FETCH",
"nReturned" : 0,
"executionTimeMillisEstimate" : 930,
"works" : 698994,
"advanced" : 0,
"needTime" : 698993,
"needFetch" : 0,
"saveState" : 5460,
"restoreState" : 5460,
"isEOF" : 1,
"invalidates" : 0,
"docsExamined" : 0,
"alreadyHasObj" : 0,
"inputStage" : {
"stage" : "IXSCAN",
"filter" : {
"login" : /test123123123/
},
"nReturned" : 0,
"executionTimeMillisEstimate" : 920,
"works" : 698993,
"advanced" : 0,
"needTime" : 698993,
"needFetch" : 0,
"saveState" : 5460,
"restoreState" : 5460,
"isEOF" : 1,
"invalidates" : 0,
"keyPattern" : {
"login" : 1
},
"indexName" : "login_1",
"isMultiKey" : false,
"direction" : "forward",
"indexBounds" : {
"login" : [
"[\"\", {})",
"[/test123123123/, /test123123123/]"
]
},
"keysExamined" : 698993,
"dupsTested" : 0,
"dupsDropped" : 0,
"seenInvalidated" : 0,
"matchTested" : 0
}
}
},
"serverInfo" : {
"host" : "myhost",
"port" : 27017,
"version" : "3.0.12",
"gitVersion" : "33934938e0e95d534cebbaff656cde916b9c3573"
},
"ok" : 1
}
As you can see, executionStats.executionStages.inputStage.nReturned is 0 and executionStats.totalDocsExamined is also 0. That's OK; I guess there are no documents with a login like the one entered. But if I want to search users by email, I do the following:
rs0:PRIMARY> db.users.explain('executionStats').find({'emails.email' : /test123123123/})
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "mydb.users",
"indexFilterSet" : false,
"parsedQuery" : {
"emails.email" : /test123123123/
},
"winningPlan" : {
"stage" : "FETCH",
"filter" : {
"emails.email" : /test123123123/
},
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"emails.email" : 1
},
"indexName" : "emails.email_1",
"isMultiKey" : true,
"direction" : "forward",
"indexBounds" : {
"emails.email" : [
"[\"\", {})",
"[/test123123123/, /test123123123/]"
]
}
}
},
"rejectedPlans" : [ ]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 0,
"executionTimeMillis" : 7666,
"totalKeysExamined" : 699016,
"totalDocsExamined" : 698993,
"executionStages" : {
"stage" : "FETCH",
"filter" : {
"emails.email" : /test123123123/
},
"nReturned" : 0,
"executionTimeMillisEstimate" : 7355,
"works" : 699017,
"advanced" : 0,
"needTime" : 699016,
"needFetch" : 0,
"saveState" : 5462,
"restoreState" : 5462,
"isEOF" : 1,
"invalidates" : 0,
"docsExamined" : 698993,
"alreadyHasObj" : 0,
"inputStage" : {
"stage" : "IXSCAN",
"nReturned" : 698993,
"executionTimeMillisEstimate" : 1630,
"works" : 699016,
"advanced" : 698993,
"needTime" : 23,
"needFetch" : 0,
"saveState" : 5462,
"restoreState" : 5462,
"isEOF" : 1,
"invalidates" : 0,
"keyPattern" : {
"emails.email" : 1
},
"indexName" : "emails.email_1",
"isMultiKey" : true,
"direction" : "forward",
"indexBounds" : {
"emails.email" : [
"[\"\", {})",
"[/test123123123/, /test123123123/]"
]
},
"keysExamined" : 699016,
"dupsTested" : 699016,
"dupsDropped" : 23,
"seenInvalidated" : 0,
"matchTested" : 0
}
}
},
"serverInfo" : {
"host" : "myhost",
"port" : 27017,
"version" : "3.0.12",
"gitVersion" : "33934938e0e95d534cebbaff656cde916b9c3573"
},
"ok" : 1
}
And here executionStats.executionStages.inputStage.nReturned (and executionStats.totalDocsExamined) is equal to 698993 (executionStats.nReturned is 0, as in the first query).
Question
Why does a search using the multikey index (emails.email) return my whole collection at the IXSCAN stage, so that the FETCH stage then touches the whole collection, while a search using the non-multikey index (login) scans only the expected values at the IXSCAN stage and gives me what I want at the FETCH stage?
UPDATE: when I use a regular expression of the form /^smth/ rather than /smth/, the scan on the emails.email field also returns 0 elements. Why do a multikey and an ordinary index give me different results for a regular expression like /smth/?
Because it is a multikey index, as explained here:
When a query filter specifies an exact match for an array as a whole, MongoDB can use the multikey index to look up the first element of the query array but cannot use the multikey index scan to find the whole array. Instead, after using the multikey index to look up the first element of the query array, MongoDB retrieves the associated documents and filters for documents whose array matches the array in the query.
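You can also see the effect in the two explains above: for the non-multikey login index the regex filter sits inside the IXSCAN stage, so no documents are fetched, while for the multikey emails.email index the filter is only applied at the FETCH stage, so every document behind the scanned keys is loaded. A quick way to compare, as a sketch using the same collection and field, is to explain an anchored and an unanchored regex side by side:
// anchored regex: the planner can use a tight prefix range as the index bounds
db.users.explain("executionStats").find({ "emails.email": /^test123123123/ })
// unanchored regex: the bounds cover the whole string range, so every key is examined
db.users.explain("executionStats").find({ "emails.email": /test123123123/ })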
I would like to ask about MongoDB indexes. Can I use different indexes for the find and the sort? For example, I have two indexes:
(a:-1)
(b:1,c:1)
Which indexes does this statement use?
find({ $and: [ { a: { $gt: 30 } }, { a: { $lt: 50 } } ] }).sort({ c: 1 })
Can I use different indexes for the find and the sort?
After reading a bit more into this, you will see at the bottom of the documentation page on index intersection: http://docs.mongodb.org/manual/core/index-intersection/#index-intersection-and-sort
Index intersection does not apply when the sort() operation requires an index completely separate from the query predicate.
So no, even if you created an index of {c:1} it could not be used independently to intersect with {a:-1}.
Which indexes does this statement use?
In this case only the index on a ({a:-1}) will be used.
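A minimal way to verify this yourself is to explain the query with both indexes in place and look for an in-memory SORT stage in the winning plan (a sketch; db.coll is a placeholder collection name):
db.coll.createIndex({ a: -1 })
db.coll.createIndex({ b: 1, c: 1 })
// neither index can provide the { c: 1 } order for this predicate,
// so the winning plan will typically include a blocking SORT stage
db.coll.find({ a: { $gt: 30, $lt: 50 } }).sort({ c: 1 }).explain("executionStats")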
An index on a single field is called a Single Field Index. Creating multiple single-field indexes to boost your query and sort performance won't help much; you should use Compound Indexes instead.
Check the MongoDB documentation: https://docs.mongodb.com/manual/core/index-compound/
If you want to learn how to index your fields and how to measure the performance of your queries, check this tutorial on YouTube: https://dplink.app/nxLgvk7lR
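As a hedged illustration of the compound-index suggestion above (db.coll is again a placeholder collection; following the usual equality-sort-range guideline, putting the sort key before the range key lets the sort order come from the index, though whether that is actually faster depends on your data):
db.coll.createIndex({ c: 1, a: 1 })
// the sort order now comes from the index, so no blocking SORT stage is needed
db.coll.find({ a: { $gt: 30, $lt: 50 } }).sort({ c: 1 }).explain("executionStats")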
Suppose I have a persons collection with documents like the one below:
{
dob:
{ age : 50} ,
gender : "male" ,
phone : ""
}
Now I create the indexes below:
1 : db.persons.createIndex({"dob.age" : -1})
2 : db.persons.createIndex({gender : 1 , phone : 1})
Now if I execute a query like yours:
db.persons.explain("executionStats").find({$and : [ {"dob.age" : {$lt : 50} } ,
{"dob.age" : {$gt : 30} } ] } ).sort({phone : 1 })
I will get the execution stats below:
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "college.persons",
"indexFilterSet" : false,
"parsedQuery" : {
"$and" : [
{
"dob.age" : {
"$lt" : 50
}
},
{
"dob.age" : {
"$gt" : 30
}
}
]
},
"queryHash" : "22FEA299",
"planCacheKey" : "5E8F38C1",
"winningPlan" : {
"stage" : "SORT",
"sortPattern" : {
"phone" : 1
},
"inputStage" : {
"stage" : "SORT_KEY_GENERATOR",
"inputStage" : {
"stage" : "FETCH",
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"dob.age" : -1
},
"indexName" : "dob.age_-1",
"isMultiKey" : false,
"multiKeyPaths" : {
"dob.age" : [ ]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"dob.age" : [
"(50.0, 30.0)"
]
}
}
}
}
},
"rejectedPlans" : [ ]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 1734,
"executionTimeMillis" : 10,
"totalKeysExamined" : 1734,
"totalDocsExamined" : 1734,
"executionStages" : {
"stage" : "SORT",
"nReturned" : 1734,
"executionTimeMillisEstimate" : 0,
"works" : 3471,
"advanced" : 1734,
"needTime" : 1736,
"needYield" : 0,
"saveState" : 27,
"restoreState" : 27,
"isEOF" : 1,
"sortPattern" : {
"phone" : 1
},
"memUsage" : 1914799,
"memLimit" : 33554432,
"inputStage" : {
"stage" : "SORT_KEY_GENERATOR",
"nReturned" : 1734,
"executionTimeMillisEstimate" : 0,
"works" : 1736,
"advanced" : 1734,
"needTime" : 1,
"needYield" : 0,
"saveState" : 27,
"restoreState" : 27,
"isEOF" : 1,
"inputStage" : {
"stage" : "FETCH",
"nReturned" : 1734,
"executionTimeMillisEstimate" : 0,
"works" : 1735,
"advanced" : 1734,
"needTime" : 0,
"needYield" : 0,
"saveState" : 27,
"restoreState" : 27,
"isEOF" : 1,
"docsExamined" : 1734,
"alreadyHasObj" : 0,
"inputStage" : {
"stage" : "IXSCAN",
"nReturned" : 1734,
"executionTimeMillisEstimate" : 0,
"works" : 1735,
"advanced" : 1734,
"needTime" : 0,
"needYield" : 0,
"saveState" : 27,
"restoreState" : 27,
"isEOF" : 1,
"keyPattern" : {
"dob.age" : -1
},
"indexName" : "dob.age_-1",
"isMultiKey" : false,
"multiKeyPaths" : {
"dob.age" : [ ]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"dob.age" : [
"(50.0, 30.0)"
]
},
"keysExamined" : 1734,
"seeks" : 1,
"dupsTested" : 0,
"dupsDropped" : 0
}
}
}
}
},
"serverInfo" : {
"host" : "RGGYSLT-0483",
"port" : 27017,
"version" : "4.2.0",
"gitVersion" : "a4b751dcf51dd249c5865812b390cfd1c0129c30"
},
"ok" : 1
}
This means the data was fetched with an IXSCAN on the single-field {"dob.age" : -1} index first and then sorted separately on the other field.
But the moment I change the query to sort on the fields for which the compound index already exists, things change: now the {"dob.age" : -1} index gets rejected.
In MongoDB the winning plan is the one that can produce the first 100 documents most quickly, and MongoDB caches that plan for the query shape. The cache is purged after 1000 document insertions, an index rebuild, a server restart, or the creation of a new index.
Hence which index gets used depends on the winning plan.
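For clarity, the modified query whose explain output follows would look something like this (a sketch reconstructed from that output, sorting on the compound index's own key pattern):
db.persons.explain("executionStats").find({$and : [ {"dob.age" : {$lt : 50} } , {"dob.age" : {$gt : 30} } ] } ).sort({gender : 1, phone : 1 })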
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "college.persons",
"indexFilterSet" : false,
"parsedQuery" : {
"$and" : [
{
"dob.age" : {
"$lt" : 50
}
},
{
"dob.age" : {
"$gt" : 30
}
}
]
},
"queryHash" : "DA8248FA",
"planCacheKey" : "E779554F",
"winningPlan" : {
"stage" : "FETCH",
"filter" : {
"$and" : [
{
"dob.age" : {
"$lt" : 50
}
},
{
"dob.age" : {
"$gt" : 30
}
}
]
},
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"gender" : 1,
"phone" : 1
},
"indexName" : "gender_1_phone_1",
"isMultiKey" : false,
"multiKeyPaths" : {
"gender" : [ ],
"phone" : [ ]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"gender" : [
"[MinKey, MaxKey]"
],
"phone" : [
"[MinKey, MaxKey]"
]
}
}
},
"rejectedPlans" : [
{
"stage" : "SORT",
"sortPattern" : {
"gender" : 1,
"phone" : 1
},
"inputStage" : {
"stage" : "SORT_KEY_GENERATOR",
"inputStage" : {
"stage" : "FETCH",
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"dob.age" : -1
},
"indexName" : "dob.age_-1",
"isMultiKey" : false,
"multiKeyPaths" : {
"dob.age" : [ ]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"dob.age" : [
"(50.0, 30.0)"
]
}
}
}
}
}
]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 1734,
"executionTimeMillis" : 12,
"totalKeysExamined" : 5002,
"totalDocsExamined" : 5002,
"executionStages" : {
"stage" : "FETCH",
"filter" : {
"$and" : [
{
"dob.age" : {
"$lt" : 50
}
},
{
"dob.age" : {
"$gt" : 30
}
}
]
},
"nReturned" : 1734,
"executionTimeMillisEstimate" : 0,
"works" : 5003,
"advanced" : 1734,
"needTime" : 3268,
"needYield" : 0,
"saveState" : 41,
"restoreState" : 41,
"isEOF" : 1,
"docsExamined" : 5002,
"alreadyHasObj" : 0,
"inputStage" : {
"stage" : "IXSCAN",
"nReturned" : 5002,
"executionTimeMillisEstimate" : 0,
"works" : 5003,
"advanced" : 5002,
"needTime" : 0,
"needYield" : 0,
"saveState" : 41,
"restoreState" : 41,
"isEOF" : 1,
"keyPattern" : {
"gender" : 1,
"phone" : 1
},
"indexName" : "gender_1_phone_1",
"isMultiKey" : false,
"multiKeyPaths" : {
"gender" : [ ],
"phone" : [ ]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"gender" : [
"[MinKey, MaxKey]"
],
"phone" : [
"[MinKey, MaxKey]"
]
},
"keysExamined" : 5002,
"seeks" : 1,
"dupsTested" : 0,
"dupsDropped" : 0
}
}
},
"serverInfo" : {
"host" : "RGGYSLT-0483",
"port" : 27017,
"version" : "4.2.0",
"gitVersion" : "a4b751dcf51dd249c5865812b390cfd1c0129c30"
},
"ok" : 1
}
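If you want to compare the two plans yourself, you can force the rejected index with hint() and look at the execution stats (a sketch using the collection and index names above):
db.persons.find({ "dob.age" : { $gt : 30, $lt : 50 } }).sort({ gender : 1, phone : 1 }).hint({ "dob.age" : -1 }).explain("executionStats")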