Improve slow query count mongodb - mongodb

I'm trying to improve the performance of a count query (used to calculate pagination for display on a screen) on a collection of 1138633 documents. The query examines 391232 documents for 364497 returned, but it takes ~2 s to execute, which I think is too long.
My query looks like this:
db.myCollection.count({
    "$or" : [
        { "field_1" : { "$lte" : 1.0 } },
        { "field_1" : { "$eq" : null } }
    ],
    "field_2" : { "$eq" : false },
    "field_3" : { "$ne" : true },
    "field_4" : { "$eq" : "fr-FR" },
    "field_5" : { "$ne" : null },
    "field_6" : { "$ne" : null },
    "field_7" : { "$gte" : ISODate("2016-10-14T00:00:00.000Z") }
})
field_1 is a number, field_2 and field_3 are booleans, field_5 is a string, and field_6 is an ObjectId that refers to a collection of 2 documents.
Here are my indexes (db.myCollection.getIndexes()):
[
{
"v" : 2,
"key" : {
"_id" : 1
},
"name" : "_id_",
"ns" : "db.myCollection"
},
{
"v" : 2,
"key" : {
"field_6" : 1,
"field_7" : -1
},
"name" : "field_6_1_field_7_-1",
"ns" : "db.myCollection",
"background" : true
},
{
"v" : 2,
"key" : {
"field_7" : 1
},
"name" : "field_7_1",
"background" : true,
"ns" : "db.myCollection"
},
{
"v" : 2,
"key" : {
"field_6" : 1
},
"name" : "field_6_1",
"ns" : "db.myCollection",
"background" : true
},
{
"v" : 2,
"key" : {
"field_1" : 1.0
},
"name" : "field_1_1",
"ns" : "db.myCollection"
}
]
I tried everything, like forcing an index using hint and changing the order of the query (and the order of the compound index), but nothing works.
Does anyone have an idea of what I can try to improve the execution time of this query? Do you need more details, like the executionStats?
Thanks.
EDIT: More detail. I calculated how many documents are matched by each clause, and here are my results:
field_6: 391232
field_1 lte 1: 721005
field_1 eq null: 417625
field_5: 819688
field_4: 1123301
field_2: 1138620
field_7: 1138630 (all documents)
field_3: 1138630 (all documents)
I reordered my query to match the order above and now get ~1.82 s (0.2 s gained).
I assume the problem is that my indexes are wrong.
For the index details in explain, do you know which section I have to check? Here is what I found in the execution plan about my indexes (a way to pull the full executionStats is sketched after this plan):
"inputStage" : {
"stage" : "IXSCAN",
"nReturned" : 391232,
"executionTimeMillisEstimate" : 427,
"works" : 391234,
"advanced" : 391232,
"needTime" : 1,
"needYield" : 0,
"saveState" : 3060,
"restoreState" : 3060,
"isEOF" : 1,
"invalidates" : 0,
"keyPattern" : {
"field_6" : 1,
"field_7" : -1
},
"indexName" : "field_6_1_field_7_-1",
"isMultiKey" : false,
"multiKeyPaths" : {
"field_6" : [],
"field_7" : []
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"field_6" : [
"[MinKey, null)",
"(null, MaxKey]"
],
"field_7" : [
"[new Date(9223372036854775807), new Date(1491350400000)]"
]
},
"keysExamined" : 391233,
"seeks" : 2,
"dupsTested" : 0,
"dupsDropped" : 0,
"seenInvalidated" : 0
}
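For reference, the full executionStats for the count can be obtained in the mongo shell roughly like this (a sketch; the filter is abbreviated here, so substitute the full predicate from the question; the index name for the hint comes from getIndexes() above):
// Sketch: explain the count with executionStats (abbreviated filter; use the full predicate from above)
db.myCollection.explain("executionStats").count({
    "field_4" : "fr-FR",
    "field_7" : { "$gte" : ISODate("2016-10-14T00:00:00.000Z") }
})
// Sketch: force a specific index while counting, via the cursor form
db.myCollection.find({ "field_4" : "fr-FR" }).hint("field_6_1_field_7_-1").count()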

Related

mongodb is not using an index for a find command

I have approximately 40M documents in a mongo collection. There is an index on the location.country field:
MongoDB Enterprise cluster-0-shard-0:PRIMARY> db.cases.getIndexes()
[
{
"v" : 2,
"key" : {
"_id" : 1
},
"name" : "_id_"
},
//...
{
"v" : 2,
"key" : {
"location.country" : -1
},
"name" : "countriesIdx",
"collation" : {
"locale" : "en_US",
"caseLevel" : false,
"caseFirst" : "off",
"strength" : 2,
"numericOrdering" : false,
"alternate" : "non-ignorable",
"maxVariable" : "punct",
"normalization" : false,
"backwards" : false,
"version" : "57.1"
}
},
//...
]
But queries don't use it:
MongoDB Enterprise cluster-0-shard-0:PRIMARY> db.cases.find({'location.country':'ghana'}).explain({verbosity: 'executionStats'})
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "covid19.cases",
"indexFilterSet" : false,
"parsedQuery" : {
"location.country" : {
"$eq" : "ghana"
}
},
"winningPlan" : {
"stage" : "COLLSCAN",
"filter" : {
"location.country" : {
"$eq" : "ghana"
}
},
"direction" : "forward"
},
"rejectedPlans" : [ ]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 0,
"executionTimeMillis" : 195892,
"totalKeysExamined" : 0,
"totalDocsExamined" : 39264034,
"executionStages" : {
"stage" : "COLLSCAN",
"filter" : {
"location.country" : {
"$eq" : "ghana"
}
},
"nReturned" : 0,
"executionTimeMillisEstimate" : 99032,
"works" : 39264036,
"advanced" : 0,
"needTime" : 39264035,
"needYield" : 0,
"saveState" : 39503,
"restoreState" : 39503,
"isEOF" : 1,
"direction" : "forward",
"docsExamined" : 39264034
},
"allPlansExecution" : [ ]
},
"serverInfo" : {
"host" : "cluster-0-shard-00-01-vwhx6.mongodb.net",
"port" : 27017,
"version" : "4.4.8",
"gitVersion" : "83b8bb8b6b325d8d8d3dfd2ad9f744bdad7d6ca0"
},
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1629732226, 1),
"signature" : {
"hash" : BinData(0,"piKWDwLDv7FRcnwCe51PZDLR4UM="),
"keyId" : NumberLong("6958739380580122625")
}
},
"operationTime" : Timestamp(1629732226, 1)
}
Do I need to set up the index differently or do something else to get MongoDB to use the index? I have tried hinting that it should, but it still does a COLLSCAN. While the examples I've shown above use mongosh, the behaviour is the same in my Node app using Mongoose.
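For completeness, the hint attempt looked roughly like this (a sketch; the index name is taken from the getIndexes() output above):
db.cases.find({ "location.country" : "ghana" }).hint("countriesIdx").explain("executionStats")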

MongoDB - count() takes too long despite using an index

I have a collection with 62k documents in it. The same collection has a bunch of indexes too, most of them simple, single-field ones. What I am observing is that the following query takes extremely long to return:
db.jobs.count({"status":"complete","$or":[{"groups":{"$exists":false}},{"groups":{"$size":0}},{"groups":{"$in":["5e65ffc2a1e6ef0007bc5fa8"]}}]})
The executionStats for the above query are as follows
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "xxxxxx.jobs",
"indexFilterSet" : false,
"parsedQuery" : {
"$and" : [
{
"$or" : [
{
"groups" : {
"$size" : 0
}
},
{
"groups" : {
"$eq" : "5e65ffc2a1e6ef0007bc5fa8"
}
},
{
"$nor" : [
{
"groups" : {
"$exists" : true
}
}
]
}
]
},
{
"status" : {
"$eq" : "complete"
}
}
]
},
"winningPlan" : {
"stage" : "FETCH",
"filter" : {
"$or" : [
{
"groups" : {
"$size" : 0
}
},
{
"groups" : {
"$eq" : "5e65ffc2a1e6ef0007bc5fa8"
}
},
{
"$nor" : [
{
"groups" : {
"$exists" : true
}
}
]
}
]
},
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"status" : 1,
"groups" : 1
},
"indexName" : "status_1_groups_1",
"isMultiKey" : true,
"multiKeyPaths" : {
"status" : [ ],
"groups" : [
"groups"
]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"status" : [
"[\"complete\", \"complete\"]"
],
"groups" : [
"[MinKey, MaxKey]"
]
}
}
},
"rejectedPlans" : [
{
"stage" : "FETCH",
"filter" : {
"$or" : [
{
"groups" : {
"$size" : 0
}
},
{
"groups" : {
"$eq" : "5e65ffc2a1e6ef0007bc5fa8"
}
},
{
"$nor" : [
{
"groups" : {
"$exists" : true
}
}
]
}
]
},
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"status" : 1
},
"indexName" : "status_1",
"isMultiKey" : false,
"multiKeyPaths" : {
"status" : [ ]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"status" : [
"[\"complete\", \"complete\"]"
]
}
}
}
]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 62092,
"executionTimeMillis" : 9992,
"totalKeysExamined" : 62092,
"totalDocsExamined" : 62092,
"executionStages" : {
"stage" : "FETCH",
"filter" : {
"$or" : [
{
"groups" : {
"$size" : 0
}
},
{
"groups" : {
"$eq" : "5e65ffc2a1e6ef0007bc5fa8"
}
},
{
"$nor" : [
{
"groups" : {
"$exists" : true
}
}
]
}
]
},
"nReturned" : 62092,
"executionTimeMillisEstimate" : 9929,
"works" : 62093,
"advanced" : 62092,
"needTime" : 0,
"needYield" : 0,
"saveState" : 682,
"restoreState" : 682,
"isEOF" : 1,
"invalidates" : 0,
"docsExamined" : 62092,
"alreadyHasObj" : 0,
"inputStage" : {
"stage" : "IXSCAN",
"nReturned" : 62092,
"executionTimeMillisEstimate" : 60,
"works" : 62093,
"advanced" : 62092,
"needTime" : 0,
"needYield" : 0,
"saveState" : 682,
"restoreState" : 682,
"isEOF" : 1,
"invalidates" : 0,
"keyPattern" : {
"status" : 1,
"groups" : 1
},
"indexName" : "status_1_groups_1",
"isMultiKey" : true,
"multiKeyPaths" : {
"status" : [ ],
"groups" : [
"groups"
]
},
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 2,
"direction" : "forward",
"indexBounds" : {
"status" : [
"[\"complete\", \"complete\"]"
],
"groups" : [
"[MinKey, MaxKey]"
]
},
"keysExamined" : 62092,
"seeks" : 1,
"dupsTested" : 62092,
"dupsDropped" : 0,
"seenInvalidated" : 0
}
}
},
"serverInfo" : {
"host" : "xxxxxxx",
"port" : 27017,
"version" : "3.6.15",
"gitVersion" : "xxxxxx"
},
"ok" : 1}
What I am trying to understand is why the FETCH stage takes 10 seconds when the index scan in the input stage takes 60 ms. Since I am eventually doing a count(), I don't really need MongoDB to return the documents; I only need it to sum up the number of matching keys and give me the grand total.
Any idea what I am doing wrong?
The query explained there was not a count; it returned quite a few documents:
"nReturned" : 62092,
The estimated execution time for each stage suggests that the index scan took about 60 ms, and fetching the documents from disk took the additional 9.8 seconds.
There are a couple of reasons this count required fetching the documents:
Key existence cannot be fully determined from the index
The {"$exists":false} predicate is also troublesome. When building an index the value for a document contains the value of each indexed field. There is no value for "nonexistent", so it uses null. Since a document that contains a field whose value is explicitly set to null should not match {"$exists":false}, the query executor must load each document from disk in order to determine if the field was null nor nonexistent. This means that a COUNTSCAN stage cannot be used, which further means that all of the documents to be counted must be loaded from disk.
The $or predicate does not ensure exclusivity
The query executor cannot know ahead of time that the clauses in the $or are mutually exclusive. They are in your query, but in the general case it is possible for a single document to match more than one clause in the $or, so the query executor must load the documents to ensure deduplication.
So how to eliminate the fetch stage?
If you were to query with only the $in clause, or with only the $size clause, you should find the count is derived from the index scan without needing to load any documents.
That is, if you were to run these queries separately from the client and sum the results, you should find that the overall execution time is less than for the query that requires fetching:
db.jobs.count({"status":"complete","groups":{"$size":0}})
db.jobs.count({"status":"complete","groups":{"$in":["5e65ffc2a1e6ef0007bc5fa8"]}})
For the {"groups":{"$exists":false}} predicate, you might modify the data slightly, such as ensure that the field always exists, but assign it a value that means "undefined" that can be indexed and queried.
As an example, if you were to run the following update, the groups field would then exist in all documents:
db.jobs.update({"groups":{"$exists":false}},{"$set":{"groups":false}})
And you could get the equivalent of the above count by running these 2 queries, which should both be covered by an index scan and which together should run faster than the query that requires loading documents:
db.jobs.count({"status":"complete","groups":{"$size":0}})
db.jobs.count({"status":"complete","groups":{"$in":[false, "5e65ffc2a1e6ef0007bc5fa8"]}})
Alternatively, the same count can be expressed as an aggregation:
db.jobs.aggregate([
    { "$match" : { "status" : "complete",
                   "$or" : [
                       { "groups" : { "$exists" : false } },
                       { "groups" : { "$in" : [ "5e65ffc2a1e6ef0007bc5fa8" ] } },
                       { "groups" : { "$size" : 0 } }
                   ] } },
    { "$count" : "total" }
])
If you can somehow avoid the empty-array case, then the following query can be used: db.jobs.count({"status":"complete", "groups": { $in: [ null, "5e65ffc2a1e6ef0007bc5fa8" ] } })
In a query, null matches both explicit null values and missing fields, so it covers the $exists: false case.
Also: I'd suggest using ObjectId instead of a string as the type for the groups field.
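For example, if the stored values were converted to ObjectId, the query would look like this (a sketch, assuming the conversion has been done):
db.jobs.count({ "status" : "complete", "groups" : { "$in" : [ null, ObjectId("5e65ffc2a1e6ef0007bc5fa8") ] } })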
Update
$size never hits an index!
You can use the following query:
db.jobs.count({ "status" : "complete", "$or" : [
    { "groups" : [] },
    { "groups" : { "$in" : [ null, "5e65ffc2a1e6ef0007bc5fa8" ] } }
] })

MongoDB geospatial index on $center

Collection Schema
{
"_id" : ObjectId("5d3562bf1b48d90ea4b06a74"),
"name" : "19",
"location" : {
"type" : "Point",
"coordinates" : [
50.0480208,
30.5239127
]
}
}
Indexes
> db.places.getIndexes()
[
{
"v" : 2,
"key" : {
"_id" : 1
},
"name" : "_id_",
"ns" : "test.places"
},
{
"v" : 2,
"key" : {
"location" : "2dsphere"
},
"name" : "location_2dsphere",
"ns" : "test.places",
"2dsphereIndexVersion" : 3
}
]
There are 2 million documents stored in the collection.
First I ran a query like this:
db.places.find({ location: {$geoWithin: { $center: [[60.0478308, 40.5237227], 10] } }})
But it takes 2 seconds, so I examined the query via explain():
> db.places.find({ location: {$geoWithin: { $center: [[60.0478308, 40.5237227], 10] } }}).explain('executionStats')
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "test.places",
"indexFilterSet" : false,
"parsedQuery" : {
"location" : {
"$geoWithin" : {
"$center" : [
[
60.0478308,
40.5237227
],
10
]
}
}
},
"winningPlan" : {
"stage" : "COLLSCAN",
"filter" : {
"location" : {
"$geoWithin" : {
"$center" : [
[
60.0478308,
40.5237227
],
10
]
}
}
},
"direction" : "forward"
},
"rejectedPlans" : [ ]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 1414213,
"executionTimeMillis" : 2093,
"totalKeysExamined" : 0,
"totalDocsExamined" : 2000000,
"executionStages" : {
"stage" : "COLLSCAN",
"filter" : {
"location" : {
"$geoWithin" : {
"$center" : [
[
60.0478308,
40.5237227
],
10
]
}
}
},
"nReturned" : 1414213,
"executionTimeMillisEstimate" : 1893,
"works" : 2000002,
"advanced" : 1414213,
"needTime" : 585788,
"needYield" : 0,
"saveState" : 15681,
"restoreState" : 15681,
"isEOF" : 1,
"invalidates" : 0,
"direction" : "forward",
"docsExamined" : 2000000
}
},
"serverInfo" : {
"host" : "Johnui-iMac",
"port" : 27017,
"version" : "4.0.3",
"gitVersion" : "7ea530946fa7880364d88c8d8b6026bbc9ffa48c"
},
"ok" : 1
}
As you can see, the query stage is COLLSCAN.
I wondered about this since I had already created an index on the location field, but it doesn't seem to be used.
So I created more indexes:
"v" : 2,
"key" : {
"location.coordinates" : 1
},
"name" : "location.coordinates_1",
"ns" : "test.places"
},
{
"v" : 2,
"key" : {
"location" : 1
},
"name" : "location_1",
"ns" : "test.places"
}
]
But those don't work either.
Is there any issue with my index configuration?
You seem to have created a 2dsphere index on your location field, but the MongoDB docs on $center specify that:
Only the 2d geospatial index supports $center.
Therefore, I suggest you create a 2d index on the location field, and the scan will then be performed using this index.
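A minimal sketch of that suggestion; since the documents store GeoJSON points, the legacy 2d index would go on the coordinates array rather than on the location object itself:
db.places.createIndex({ "location.coordinates" : "2d" })
db.places.find({ "location.coordinates" : { "$geoWithin" : { "$center" : [ [ 60.0478308, 40.5237227 ], 10 ] } } }).explain("executionStats")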

MongoDB: What index should I use?

I have a highscore MongoDB collection that contains documents such as:
{username:"Bob",score:10,category:"mostLikes"}
{username:"John",score:32,category:"mostLikes"}
{username:"Bob",score:2,category:"leastDeaths"}
The goal is to fetch the top 100 (sorted) of a specific category.
Important: Certain highscore categories are ascending (lower is better ex: leastDeaths) and others are descending (bigger is better ex: mostLikes). This means that depending on the category, I want either the 100 biggest scores or the 100 lowest scores.
There are two main queries in my application:
db.highscore.find({category:category}, {}).limit(100).sort({ score: 1 /*or -1*/ });
db.highscore.find({username:username});
What index would you recommend?
Would keeping ascending categories and descending categories in separate collections result in better performance?
Note: I do not want to have one collection per category.
I did some tests locally with some sample datasets, and I think the best option would be to create an index on category, score, and username ("category_1_score_1_username_1").
Creating an index on these fields gives you a covered query, so the documents are returned directly from the index.
Find my analysis below.
> db.usr.find();
{ "_id" : ObjectId("57bd20630744bd376277a795"), "username" : "Bob", "score" : 10, "category" : "mostLikes" }
{ "_id" : ObjectId("57bd20630744bd376277a796"), "username" : "John", "score" : 32, "category" : "mostLikes" }
{ "_id" : ObjectId("57bd20630744bd376277a797"), "username" : "Bob1", "score" : 2, "category" : "leastDeaths" }
{ "_id" : ObjectId("57bd20630744bd376277a798"), "username" : "John2", "score" : 132, "category" : "mostLikes" }
{ "_id" : ObjectId("57bd20630744bd376277a799"), "username" : "Bob3", "score" : 20, "category" : "leastDeaths" }
{ "_id" : ObjectId("57bd20630744bd376277a79a"), "username" : "John4", "score" : 132, "category" : "mostLikes" }
{ "_id" : ObjectId("57bd20630744bd376277a79b"), "username" : "Bob5", "score" : 22, "category" : "leastDeaths" }
{ "_id" : ObjectId("57bd20630744bd376277a79c"), "username" : "John6", "score" : 322, "category" : "mostLikes" }
{ "_id" : ObjectId("57bd20630744bd376277a79d"), "username" : "Bob7", "score" : 232, "category" : "leastDeaths" }
{ "_id" : ObjectId("57bd20630744bd376277a79e"), "username" : "John8", "score" : 3112, "category" : "mostLikes" }
{ "_id" : ObjectId("57bd20630744bd376277a79f"), "username" : "Bob4", "score" : 222, "category" : "leastDeaths" }
{ "_id" : ObjectId("57bd20630744bd376277a7a0"), "username" : "John22", "score" : 3210, "category" : "mostLikes" }
{ "_id" : ObjectId("57bd20630744bd376277a7a1"), "username" : "Bob33", "score" : 2111, "category" : "leastDeaths" }
Indexes:
> db.usr.getIndexes();
{
"v" : 1,
"key" : {
"category" : 1,
"score" : 1,
"username" : 1
},
"name" : "category_1_score_1_username_1",
"ns" : "test.usr"
}
]
>
Now you can modify your query slightly to make it a covered query:
db.usr.find({"category":"mostLikes"},{"_id":0,"score":-1,"category":1,"username":1}).sort({"score":1}).explain("executionStats");
Output of Execution Stats:
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "test.usr",
"indexFilterSet" : false,
"parsedQuery" : {
"category" : {
"$eq" : "mostLikes"
}
},
"winningPlan" : {
"stage" : "PROJECTION",
"transformBy" : {
"_id" : 0,
"score" : -1,
"category" : 1,
"username" : 1
},
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"category" : 1,
"score" : 1,
"username" : 1
},
"indexName" : "category_1_score_1_username_1",
"isMultiKey" : false,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1,
"direction" : "forward",
"indexBounds" : {
"category" : [
"[\"mostLikes\", \"mostLikes\"]"
],
"score" : [
"[MinKey, MaxKey]"
],
"username" : [
"[MinKey, MaxKey]"
]
}
}
},
"rejectedPlans" : [ ]
},
"executionStats" : {
"executionSuccess" : true,
"nReturned" : 7,
"executionTimeMillis" : 0,
"totalKeysExamined" : 7,
"totalDocsExamined" : 0,
"executionStages" : {
"stage" : "PROJECTION",
"nReturned" : 7,
"executionTimeMillisEstimate" : 0,
"works" : 8,
"advanced" : 7,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 1,
"invalidates" : 0,
"transformBy" : {
"_id" : 0,
"score" : -1,
"category" : 1,
"username" : 1
},
"inputStage" : {
"stage" : "IXSCAN",
"nReturned" : 7,
"executionTimeMillisEstimate" : 0,
"works" : 8,
"advanced" : 7,
"needTime" : 0,
"needYield" : 0,
"saveState" : 0,
"restoreState" : 0,
"isEOF" : 1,
"invalidates" : 0,
"keyPattern" : {
"category" : 1,
"score" : 1,
"username" : 1
},
"indexName" : "category_1_score_1_username_1",
"isMultiKey" : false,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1,
"direction" : "forward",
"indexBounds" : {
"category" : [
"[\"mostLikes\", \"mostLikes\"]"
],
"score" : [
"[MinKey, MaxKey]"
],
"username" : [
"[MinKey, MaxKey]"
]
},
"keysExamined" : 7,
"dupsTested" : 0,
"dupsDropped" : 0,
"seenInvalidated" : 0
}
}
},
"serverInfo" : {
"host" : "L4156409",
"port" : 27017,
"version" : "3.2.5",
"gitVersion" : "34e65e5383f7ea1726332cb175b73077ec4a1b02"
},
"ok" : 1
}
>
As you can see in the output, the number of documents scanned is 0, and the records are fetched directly from the index. Thus choosing this index would be your best bet for the first query.
For the second query, simply create an index on the username field and that should solve it for you.
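A minimal sketch of both suggested indexes and the two queries they serve (collection name taken from the question; the projection excludes _id so the first query can be covered):
db.highscore.createIndex({ "category" : 1, "score" : 1, "username" : 1 })
db.highscore.createIndex({ "username" : 1 })
// Top 100 of a category; use sort({ score: -1 }) for descending categories
db.highscore.find({ "category" : "mostLikes" }, { "_id" : 0, "category" : 1, "score" : 1, "username" : 1 }).sort({ "score" : 1 }).limit(100)
// All scores for a user
db.highscore.find({ "username" : "Bob" })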
HTH.

MongoDB sort winningPlan overrides hint

I created a collection with three fields as described below. After that, I created an index on the second field and executed a search using sort and hint operations.
Why, even when using a hint on the previously created index, does MongoDB set SORT as the winning plan?
I believed that filtering the data by some criteria first and then sorting the result would be better, right?
Collection
> db.values.find()
{ "_id" : ObjectId("5763ffebe5a81f569b1005e5"), "field1" : "A", "field2" : "B", "field3" : "C" }
Indexes
> db.values.getIndexes()
[
{
"v" : 1,
"key" : {
"_id" : 1
},
"name" : "_id_",
"ns" : "peftest.values"
},
{
"v" : 1,
"key" : {
"field2" : 1
},
"name" : "field2_1",
"ns" : "peftest.values"
}
]
Query and Explain
> db.values.find({field2:"B"}).sort({field1:1}).hint({field2:1}).explain()
{
"queryPlanner" : {
"plannerVersion" : 1,
"namespace" : "peftest.values",
"indexFilterSet" : false,
"parsedQuery" : {
"field2" : {
"$eq" : "B"
}
},
"winningPlan" : {
"stage" : "SORT",
"sortPattern" : {
"field1" : 1
},
"inputStage" : {
"stage" : "SORT_KEY_GENERATOR",
"inputStage" : {
"stage" : "FETCH",
"inputStage" : {
"stage" : "IXSCAN",
"keyPattern" : {
"field2" : 1
},
"indexName" : "field2_1",
"isMultiKey" : false,
"isUnique" : false,
"isSparse" : false,
"isPartial" : false,
"indexVersion" : 1,
"direction" : "forward",
"indexBounds" : {
"field2" : [
"[\"B\", \"B\"]"
]
}
}
}
}
},
"rejectedPlans" : [ ]
},
"serverInfo" : {
"host" : "apstrd14501d.intraservice.corp",
"port" : 27017,
"version" : "3.2.4",
"gitVersion" : "e2ee9ffcf9f5a94fad76802e28cc978718bb7a30"
},
"ok" : 1
}
I think the plan is what you expect, but you are looking at it from the wrong perspective :)
The input stage of the sort is an index scan, so the query plan uses the index first and then passes the resulting data to the sort.
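As an aside, if the goal is to avoid the in-memory SORT stage entirely, a compound index covering both the filter field and the sort field would let the index return the documents already ordered; a minimal sketch (not part of the original setup):
db.values.createIndex({ "field2" : 1, "field1" : 1 })
db.values.find({ "field2" : "B" }).sort({ "field1" : 1 }).explain()
// The winning plan should now be FETCH -> IXSCAN with no SORT stage.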