Documents not expiring using TTL in mongodb 2.6.7 - mongodb

I'm trying the TTL feature in a mongo shell but I cannot seem to make it work. I have triple-checked everything against the documentation.
Here is what I did:
MongoDB shell version: 2.6.7
connecting to: test
> db.streamers.getIndexes()
[
{
"v" : 1,
"key" : {
"_id" : 1
},
"name" : "_id_",
"ns" : "test.streamers"
},
{
"v" : 1,
"key" : {
"room" : 1
},
"name" : "room_1",
"ns" : "test.streamers",
"background" : true,
"safe" : null
},
{
"v" : 1,
"key" : {
"lastAlive" : 1
},
"name" : "lastAlive_1",
"ns" : "test.streamers",
"expiresAfterSeconds" : 60,
"background" : true,
"safe" : null
}
]
> db.streamers.insert({ _id: "hello", room: "yop", lastAlive: new Date() })
WriteResult({ "nInserted" : 1 })
[waiting for a while here...]
> db.streamers.find({ _id: "hello" })
{ "_id" : "hello", "room" : "yop", "lastAlive" : ISODate("2015-02-18T13:03:02.836Z") }
> new Date()
ISODate("2015-02-18T13:50:50.403Z")
So clearly, the document is not being removed even after waiting for more than an hour. db.currentOp() returns an empty array as well.
This is a dev environment so mongodb is in a standalone configuration with default settings. A getParameter on ttlMonitorEnabled returns true so that's fine too.
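For reference, that setting was checked with the usual getParameter form, roughly:
> db.adminCommand({ getParameter: 1, ttlMonitorEnabled: 1 })
{ "ttlMonitorEnabled" : true, "ok" : 1 }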
What is wrong here?

There was a typo in your index creation.
The setting expiresAfterSeconds should be expireAfterSeconds.
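A minimal fix on a dev collection like this, assuming the index can simply be rebuilt, is to drop the misnamed index and recreate it with the correct option name (using the 2.6-era ensureIndex helper):
> db.streamers.dropIndex("lastAlive_1")
> db.streamers.ensureIndex({ lastAlive: 1 }, { expireAfterSeconds: 60, background: true })
Note that the TTL monitor only sweeps roughly once every 60 seconds, so expired documents can linger briefly past the cutoff.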

Related

Slow query when sorting and filtering

I'm using MongoDB version 3.6.5. I would like to query a collection and then sort the results by date. I'm working on what I think is a pretty large dataset, currently 195,064,301 documents in this collection, and it's growing.
Doing the filter or the sort in separate queries works perfectly:
db.getCollection('logs').find({session: ObjectId("5af3baa173faa851f8b0090c")})
db.getCollection('logs').find({}).sort({date: 1})
The result is returned in less than 1 second, but if I try to do both in a single query like so:
db.getCollection('logs').find({session: ObjectId("5af3baa173faa851f8b0090c")}).sort({date: 1})
Now it takes about 5 minutes to return the data. I thought it was an index problem, but as far as I can tell, the indexes seem fine:
> db.logs.getIndexes();
[
{
"v" : 2,
"key" : {
"_id" : 1
},
"name" : "_id_",
"ns" : "client1.logs"
},
{
"v" : 2,
"key" : {
"session" : 1
},
"name" : "session_1",
"ns" : "client1.logs",
"background" : true
},
{
"v" : 2,
"key" : {
"date" : 1
},
"name" : "date_1",
"ns" : "client1.logs",
"background" : true
},
{
"v" : 2,
"key" : {
"user" : 1
},
"name" : "user_1",
"ns" : "client1.logs",
"background" : true
}
]
I'm still new to Mongo. I tried those requests directly in the console, and I also tried the reIndex() method, but nothing really helped.
So I'm hoping there is a solution for this.
Thanks.
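One way to dig into this (a sketch only; the compound index below is an assumption about the workload, not something from the question) is to look at the winning plan for the combined query and, if it is scanning date_1 or sorting in memory, add a single index that covers both the equality filter and the sort:
db.logs.find({ session: ObjectId("5af3baa173faa851f8b0090c") }).sort({ date: 1 }).explain("executionStats")
// If the plan shows an in-memory SORT or a scan over date_1, a compound index
// with the filter field first and the sort field second usually lets the
// planner walk the index in date order for just that session:
db.logs.createIndex({ session: 1, date: 1 }, { background: true })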

Mongo Case Insensitive index (collation) not getting results

I'm trying to use a case-insensitive index with Mongoid 5. I even went down to the Mongo console to run the experiments, but the query won't pull any results at all:
> db.charges.getIndexes()
[
{
"v" : 1,
"key" : {
"_id" : 1
},
"name" : "_id_",
"ns" : "paymentsapi_development.charges"
}
]
> db.charges.createIndex({'details.email':1},{name:'emails_index',collation:{locale:'en',strength:1}})
{
"createdCollectionAutomatically" : false,
"numIndexesBefore" : 1,
"numIndexesAfter" : 2,
"ok" : 1
}
> db.charges.getIndexes()
[
{
"v" : 1,
"key" : {
"_id" : 1
},
"name" : "_id_",
"ns" : "paymentsapi_development.charges"
},
{
"v" : 1,
"key" : {
"details.email" : 1
},
"name" : "emails_index",
"ns" : "paymentsapi_development.charges",
"collation" : {
"locale" : "en",
"strength" : 1
}
}
]
> db.charges.find({'details.email':'lolo#lolo.com'}).count()
1
> db.charges.find({'details.email':'LoLo#LoLo.cOm'}).collation({locale:'en',strength:1}).count()
0
>
I'm basing this on the Collation (https://docs.mongodb.com/manual/reference/collation/) and Case Insensitive Index (https://docs.mongodb.com/manual/core/index-case-insensitive/) documentation.
I already checked the db version, ran db.adminCommand( { setFeatureCompatibilityVersion: <version> } ) as described at https://docs.mongodb.com/manual/reference/command/setFeatureCompatibilityVersion/, and tried everything else I could think of.
Any help is very appreciated.
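One thing worth checking in a situation like this (just a diagnostic sketch, not a confirmed fix) is whether the collation-aware query is actually using the new index at all:
> db.charges.find({ 'details.email': 'LoLo#LoLo.cOm' }).collation({ locale: 'en', strength: 1 }).explain()
// The winningPlan should contain an IXSCAN stage on "emails_index" with the same
// collation; a COLLSCAN here would mean the query is not hitting the collated index.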

MongoDB Migrate all existing indexes to new database

I have a MongoDB development cluster where I create indexes over time as part of development improvements. I want to maintain the same indexes on the Testing/Production MongoDB cluster as well.
So how do I get all the indexes of the existing collections and create the same indexes on a new database?
From the mongo shell, switch to the database from which you want to collect the indexes.
Step 1: Switch to the existing DB and run the script below
> use my_existing_db
The script below loops through all the collections and constructs a createIndexes runCommand for each collection.
var database = 'my_new_db' // SHOULD ALWAYS MATCH DESTINATION DB NAME
db.getCollectionNames().forEach(function(collection){
    var command = {}
    var indexes = []
    // Fetch every index defined on this collection
    var idxs = db.getCollection(collection).getIndexes()
    if(idxs.length > 1){
        idxs.forEach(function(idoc){
            // Skip the default _id index and rewrite the namespace to point at the destination DB
            if(idoc.name != '_id_'){
                var ns = database + "." + idoc.ns.substr(idoc.ns.indexOf('.') + 1)
                idoc.ns = ns
                indexes.push(idoc)
            }
        })
        command['createIndexes'] = collection
        command['indexes'] = indexes
        // Print a ready-to-run db.runCommand(...) for this collection
        print('db.runCommand(')
        printjson(command)
        print(')')
    }
})
The script prints a db.runCommand( ... ) call for each collection.
Step 2: Switch to the new DB and execute the runCommands. Done, cheers!
> use my_new_db
The runCommands will look something like this. You can run them all in one shot.
db.runCommand(
{
"createIndexes" : "foo",
"indexes" : [
{
"v" : 2,
"key" : {
"xy_point" : "2d"
},
"name" : "xy_point_2d",
"ns" : "my_new_db.foo",
"min" : -99999,
"max" : 99999
},
{
"v" : 2,
"key" : {
"last_seen" : 1
},
"name" : "last_seen_1",
"ns" : "my_new_db.foo",
"expireAfterSeconds" : 86400
},
{
"v" : 2,
"key" : {
"point" : "2dsphere"
},
"name" : "point_2dsphere",
"ns" : "my_new_db.foo",
"background" : false,
"2dsphereIndexVersion" : 3
}
]
}
)
db.runCommand(
{
"createIndexes" : "bar",
"indexes" : [
{
"v" : 2,
"unique" : true,
"key" : {
"date" : 1,
"name" : 1,
"age" : 1,
"gender" : 1
},
"name" : "date_1_name_1_age_1_gender_1",
"ns" : "my_new_db.bar"
}
]
}
)
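As a quick sanity check after running the generated commands (foo and bar here are just the sample collections from the output above), compare the index lists on the destination database:
> use my_new_db
> db.foo.getIndexes()
> db.bar.getIndexes()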

When renaming a MongoDB collection, are the indices intact?

When performing this operation, will indices remain intact?
db.collection('my-collection').rename('new-collection-name', {dropTarget:true});
Using the mongo CLI, it's easy to test:
$ mongo
> db.bob.ensureIndex({ name: 1 })
{
"createdCollectionAutomatically" : true,
"numIndexesBefore" : 1,
"numIndexesAfter" : 2,
"ok" : 1
}
> db.bob.renameCollection('robert', { dropTarget: true })
{ "ok" : 1 }
> db.robert.getIndices()
[
{
"v" : 1,
"key" : {
"_id" : 1
},
"name" : "_id_",
"ns" : "test.robert"
},
{
"v" : 1,
"key" : {
"name" : 1
},
"name" : "name_1",
"ns" : "test.robert"
}
]
So, yes, it looks like the indices do remain intact.

can't shard a collection on mongodb

I have a db ("mydb") on Mongo that contains 2 collections (c1 and c2). c1 is already hash sharded. I want to shard the second collection the same way, but I get the following error:
use mydb
sh.shardCollection("mydb.c2", {"LOG_DATE": "hashed"})
{
"proposedKey" : {
"LOG_DATE" : "hashed"
},
"curIndexes" : [
{
"v" : 1,
"key" : {
"_id" : 1
},
"ns" : "mydb.c1",
"name" : "_id_"
}
],
"ok" : 0,
"errmsg" : "please create an index that starts with the shard key before sharding."
So I did
db.c2.ensureIndex({LOG_DATE: 1})
sh.shardCollection("mydb.c2", {"LOG_DATE": "hashed"})
I got the same error, but it now also shows the new index:
"proposedKey" : {
"LOG_DATE" : "hashed"
},
"curIndexes" : [
{
"v" : 1,
"key" : {
"_id" : 1
},
"ns" : "mydb.c2",
"name" : "_id_"
},
{
"v" : 1,
"key" : {
"LOG_DATE" : 1
},
"ns" : "mydb.c2",
"name" : "LOG_DATE_1"
}
],
"ok" : 0,
"errmsg" : "please create an index that starts with the shard key before sharding."
Just to be sure, I ran:
db.system.indexes.find()
{ "v" : 1, "key" : { "_id" : 1 }, "ns" : "mydb.c1", "name" : "_id_" }
{ "v" : 1, "key" : { "timestamp" : "hashed" }, "ns" : "mydb.c1", "name" : "timestamp_hashed" }
{ "v" : 1, "key" : { "_id" : 1 }, "ns": "mydb.c2", "name" : "_id_" }
{ "v" : 1, "key" : { "LOG_DATE" : 1 }, "ns" : "mydb.c2", "name" : "LOG_DATE_1" }
I tried the same commands again on admin and they failed with the same error.
Then I tried on admin without "hashed" and it worked:
db.runCommand({shardCollection: "mydb.c2", key: {"LOG_DATE": 1}})
Problem: now my collection is sharded on a key that is not hashed, and I can't change it (error: "already sharded").
What was wrong with what I did?
How can I fix this?
Thanks in advance
Thomas
The initial problem was that you did not have a hashed index on the field you proposed to use for sharding; that is what the error message is about. After the first error message, you created this index:
{
"v" : 1,
"key" : {
"LOG_DATE" : 1
},
"ns" : "mydb.c2",
"name" : "LOG_DATE_1"
}
This is still just an ordinary index, not a hashed one. If you had done this:
db.c2.ensureIndex({LOG_DATE: "hashed"})
instead of this:
db.c2.ensureIndex({LOG_DATE: 1})
you would have a hashed index. As you can see in the output of db.system.indexes.find(), the other collection has a hashed index on timestamp; I assume that is the shard key for that collection.
So if you have no data in the c2 collection:
db.c2.drop()
db.createCollection('c2')
db.c2.ensureIndex({LOG_DATE: "hashed"})
sh.shardCollection("mydb.c2", {"LOG_DATE": "hashed"})
This will work properly.
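Once the collection has been sharded with the hashed key, a quick way to confirm it from a mongos is:
sh.status()
// Under mydb, the entry for mydb.c2 should now list the shard key as { "LOG_DATE" : "hashed" }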