How can I display which machine is the primary in a MongoDB replica set from a secondary member, without being forced to log in to every machine and check?
Running the ismaster command only shows that the current machine is a secondary
rs0:SECONDARY> db.runCommand("ismaster")
{
"hosts" : [
"dbRby1:27017",
"dbRby2:27017",
"dbKrstd1:27017"
],
"setName" : "rs0",
"setVersion" : 5,
"ismaster" : false,
"secondary" : true,
"me" : "dbRby1:27017",
"maxBsonObjectSize" : 16777216,
"maxMessageSizeBytes" : 48000000,
"maxWriteBatchSize" : 1000,
"localTime" : ISODate("2016-11-24T07:36:09.855Z"),
"maxWireVersion" : 4,
"minWireVersion" : 0,
"ok" : 1
}
or by using rs.conf(), I can't see that either
rs0:SECONDARY> rs.conf()
{
"_id" : "rs0",
"version" : 5,
"protocolVersion" : NumberLong(1),
"members" : [
{
"_id" : 0,
"host" : "dbRby1:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 2,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 1,
"host" : "dbRby2:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 2,
"host" : "dbKrstd1:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
}
],
"settings" : {
"chainingAllowed" : true,
"heartbeatIntervalMillis" : 2000,
"heartbeatTimeoutSecs" : 10,
"electionTimeoutMillis" : 10000,
"getLastErrorModes" : {
},
"getLastErrorDefaults" : {
"w" : "majority",
"wtimeout" : 5000
},
"replicaSetId" : ObjectId("5811ec4c70c224f06fba884b")
}
}
rs.status() will give the wanted information, as @Xenwar suggested.
Related
I configured a replica set correctly.
After having scaled down the MongoDB Kubernetes pods, the replica set turned out to be in an invalid status:
> rs.status();
{
"ok" : 0,
"errmsg" : "Our replica set config is invalid or we are not a member of it",
"code" : 93,
"codeName" : "InvalidReplicaSetConfig"
}
My configuration is:
> rs.config();
{
"_id" : "rs0",
"version" : 3,
"term" : 2,
"protocolVersion" : NumberLong(1),
"writeConcernMajorityJournalDefault" : true,
"members" : [
{
"_id" : 0,
"host" : "mongors-0.mongors-service.hes-all.svc:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 1,
"host" : "mongors-1.mongors-service.hes-all.svc:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
}
],
"settings" : {
"chainingAllowed" : true,
"heartbeatIntervalMillis" : 2000,
"heartbeatTimeoutSecs" : 10,
"electionTimeoutMillis" : 10000,
"catchUpTimeoutMillis" : -1,
"catchUpTakeoverDelayMillis" : 30000,
"getLastErrorModes" : {
},
"getLastErrorDefaults" : {
"w" : 1,
"wtimeout" : 0
},
"replicaSetId" : ObjectId("626fb63f211511c4dcf938ac")
}
}
configuration details seem right, but when I run rs.initiate, or rs.reconfig(cfg):
> rs.reconfig(config);
{
"topologyVersion" : {
"processId" : ObjectId("6347bdffe3c3303e6f325b9a"),
"counter" : NumberLong(1)
},
"ok" : 0,
"errmsg" : "New config is rejected :: caused by :: replSetReconfig should only be run on a writable PRIMARY. Current state REMOVED;",
"code" : 10107,
"codeName" : "NotWritablePrimary"
}
> rs.initiate();
{
"ok" : 0,
"errmsg" : "already initialized",
"code" : 23,
"codeName" : "AlreadyInitialized"
}
Any ideas?
We have a MongoDB cluster with 1 primary server and 2 secondary servers.
All three servers are using MongoDB version 3.6.11.
Recently, we found both secondary servers down, caused by the disk space being full. We found that the file WiredTigerLAS.wt had grown very big, to over 20GB. The whole MongoDB data folder is supposed to be below 4GB. We tried removing WiredTigerLAS.wt and restarting the secondary servers, but WiredTigerLAS.wt got created again and started growing until it filled the disk once more. The primary server has been OK; no impact was found there.
Can someone please help and advise us on what we should do now? If you know the reason behind the unexpected file growth, please let us know.
rs.conf:
rs0:PRIMARY> rs.conf()
{
"_id" : "rs0",
"version" : 7,
"protocolVersion" : NumberLong(1),
"members" : [
{
"_id" : 0,
"host" : "primary:50001",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 1,
"host" : "replica1:50001",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 0,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 0
},
{
"_id" : 2,
"host" : "replica2:50001",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 0,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 0
}
],
"settings" : {
"chainingAllowed" : true,
"heartbeatIntervalMillis" : 2000,
"heartbeatTimeoutSecs" : 10,
"electionTimeoutMillis" : 10000,
"catchUpTimeoutMillis" : -1,
"catchUpTakeoverDelayMillis" : 30000,
"getLastErrorModes" : {
},
"getLastErrorDefaults" : {
"w" : 1,
"wtimeout" : 0
},
"replicaSetId" : ObjectId("5b52ac682b4bd7ae7913b1cf")
}
}
I have a replica set with two secondaries. These secondaries are not meant for failover, they are backups of the master, one of them with a delay of 1 day. I've set them both to hidden=true and priority=0
rs.conf().members on the master yields
[
{
"_id" : 0,
"host" : "localhost:4000",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 1,
"host" : "localhost:4001",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : true,
"priority" : 0,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 2,
"host" : "localhost:4002",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : true,
"priority" : 0,
"tags" : {
},
"slaveDelay" : NumberLong(86400),
"votes" : 1
}
]
I want to check their content, mainly to issue .count() queries.
Is it safe to call rs.slaveOk() on those secondaries?
I've created my MongoDB replica set and everything is correct, except that it is not replicating to the remote host. I've tried to access the remote PC on port 27017 and it is working properly.
I created the database on the remote PC to see if this solved the problem, but nothing changed. I have also inserted new records, but still nothing. Any ideas?
rs.status()
{
"set" : "meteor",
"date" : ISODate("2016-03-08T16:14:24.181Z"),
"myState" : 1,
"term" : NumberLong(3),
"heartbeatIntervalMillis" : NumberLong(2000),
"members" : [
{
"_id" : 0,
"name" : "172.27.10.13:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 6920,
"optime" : {
"ts" : Timestamp(1457453535, 2),
"t" : NumberLong(3)
},
"optimeDate" : ISODate("2016-03-08T16:12:15Z"),
"electionTime" : Timestamp(1457446744, 1),
"electionDate" : ISODate("2016-03-08T14:19:04Z"),
"configVersion" : 1,
"self" : true
}
],
"ok" : 1
}
rs.conf() / rs.config():
meteor:PRIMARY> rs.config()
{
"_id" : "meteor",
"version" : 1,
"protocolVersion" : NumberLong(1),
"members" : [
{
"_id" : 0,
"host" : "172.27.10.13:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
}
],
"settings" : {
"chainingAllowed" : true,
"heartbeatIntervalMillis" : 2000,
"heartbeatTimeoutSecs" : 10,
"electionTimeoutMillis" : 10000,
"getLastErrorModes" : {
},
"getLastErrorDefaults" : {
"w" : 1,
"wtimeout" : 0
}
}
}
According to your configuration settings, you didn't add any other members (secondaries, arbiters, etc.) to your replica set configuration. Because of this, MongoDB has no way of knowing where to replicate to.
Try adding your remote host to the replica set configuration like this:
rs.add("your-remote-host:port")
See: https://docs.mongodb.org/manual/tutorial/expand-replica-set/
We are using MongoDB for our collections.
These are the stats of my development server.
My concerns are that there is a high lockTime on my development server, and there is also a high page_faults value.
Please tell me how we can resolve this.
PRIMARY> db.serverStatus()
{
"host" : "ubsc-aio:27018",
"version" : "2.0.4",
"process" : "mongod",
"uptime" : 3278692,
"uptimeEstimate" : 3098346,
"localTime" : ISODate("2013-08-30T10:55:06.997Z"),
"globalLock" : {
"totalTime" : 3278692551767,
"lockTime" : 139516930214,
"ratio" : 0.0425526114483682,
"currentQueue" : {
"total" : 0,
"readers" : 0,
"writers" : 0
},
"activeClients" : {
"total" : 1,
"readers" : 1,
"writers" : 0
}
},
"mem" : {
"bits" : 64,
"resident" : 2009,
"virtual" : 23455,
"supported" : true,
"mapped" : 11420,
"mappedWithJournal" : 22840
},
"connections" : {
"current" : 162,
"available" : 7838
},
"extra_info" : {
"note" : "fields vary by platform",
"heap_usage_bytes" : 3645040,
"page_faults" : 4147570
},
"indexCounters" : {
"btree" : {
"accesses" : 902898,
"hits" : 901095,
"misses" : 1803,
"resets" : 0,
"missRatio" : 0.0019969033046922245
}
},
"backgroundFlushing" : {
"flushes" : 54639,
"total_ms" : 36709498,
"average_ms" : 671.8552316111203,
"last_ms" : 81,
"last_finished" : ISODate("2013-08-30T10:54:43.013Z")
},
"cursors" : {
"totalOpen" : 1,
"clientCursors_size" : 1,
"timedOut" : 7
},
"network" : {
"bytesIn" : 77779294441,
"bytesOut" : 314231714161,
"numRequests" : 189861092
},
"repl" : {
"setName" : "at",
"ismaster" : true,
"secondary" : false,
"hosts" : [
"localhost:27018",
"localhost:27017"
],
"arbiters" : [
"localhost:27019"
],
"primary" : "localhost:27018",
"me" : "localhost:27018"
},
"opcounters" : {
"insert" : 303294,
"query" : 133717078,
"update" : 59123588,
"delete" : 234256,
"getmore" : 48037783,
"command" : 125805489
},
"asserts" : {
"regular" : 0,
"warning" : 0,
"msg" : 0,
"user" : 16576,
"rollovers" : 0
},
"writeBacksQueued" : false,
"dur" : {
"commits" : 28,
"journaledMB" : 0.08192,
"writeToDataFilesMB" : 0.116123,
"compression" : 0.6743163821345669,
"commitsInWriteLock" : 0,
"earlyCommits" : 0,
"timeMs" : {
"dt" : 3000,
"prepLogBuffer" : 0,
"writeToJournal" : 25,
"writeToDataFiles" : 2,
"remapPrivateView" : 1
}
},
"ok" : 1
}