alive replicas are not available if any replica is down - mongodb

I cannot get any data from 192.168.14.7.
I want to get data from 192.168.14.7 regardless of
the status of the other replicas.
rs.status()
rs.status()
{
"set" : "LALALA",
"date" : ISODate("2015-02-12T02:49:19Z"),
"myState" : 1,
"members" : [
{
"_id" : 1,
"name" : "172.19.16.109:27017",
"health" : 0,
"state" : 8,
"stateStr" : "(not reachable/healthy)",
"uptime" : 0,
"optime" : Timestamp(1423649845, 1),
"optimeDate" : ISODate("2015-02-11T10:17:25Z"),
"lastHeartbeat" : ISODate("2015-02-12T02:49:04Z"),
"lastHeartbeatRecv" : ISODate("2015-02-11T10:31:04Z"),
"pingMs" : 0,
"syncingTo" : "192.168.14.7:27017"
},
{
"_id" : 2,
"name" : "172.19.16.104:27017",
"health" : 1,
"state" : 4,
"stateStr" : "FATAL",
"uptime" : 68294,
"optime" : Timestamp(1423545748, 1),
"optimeDate" : ISODate("2015-02-10T05:22:28Z"),
"lastHeartbeat" : ISODate("2015-02-12T02:49:18Z"),
"lastHeartbeatRecv" : ISODate("2015-02-12T02:49:19Z"),
"pingMs" : 0
},
{
"_id" : 3,
"name" : "192.168.14.7:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 68297,
"optime" : Timestamp(1423704914, 1),
"optimeDate" : ISODate("2015-02-12T01:35:14Z"),
"self" : true
}
],
"ok" : 1
}

Related

I want to know how to connect my mongo client to my replication set

I realized that the data in oplog.rs is different from my client's oplog.rs. Is the connection supposed to be automatic once I initiate the replication set? Or is there a step I missed in the process?
I was following this tutorial http://www.acemyskills.com/replica-sets-in-mongodb/
config = {
_id:"acemyskillsrepsets",
members: [
{_id: 0, host: "localhost:27017"},
{_id: 1, host: "localhost:27018"},
{_id: 2, host: "localhost:27019"}
]
};acemyskillsrepsets:PRIMARY> rs.status()
{
"set" : "acemyskillsrepsets",
"date" : ISODate("2016-07-24T01:35:16.869Z"),
"myState" : 1,
"term" : NumberLong(19),
"heartbeatIntervalMillis" : NumberLong(2000),
"members" : [
{
"_id" : 0,
"name" : "localhost:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 63,
"optime" : {
"ts" : Timestamp(1469324080, 1),
"t" : NumberLong(19)
},
"optimeDate" : ISODate("2016-07-24T01:34:40Z"),
"electionTime" : Timestamp(1469324079, 1),
"electionDate" : ISODate("2016-07-24T01:34:39Z"),
"configVersion" : 2,
"self" : true
},
{
"_id" : 1,
"name" : "localhost:27018",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 25,
"optime" : {
"ts" : Timestamp(1469324080, 1),
"t" : NumberLong(19)
},
"optimeDate" : ISODate("2016-07-24T01:34:40Z"),
"lastHeartbeat" : ISODate("2016-07-24T01:35:15.260Z"),
"lastHeartbeatRecv" : ISODate("2016-07-24T01:35:16.014Z"
),
"pingMs" : NumberLong(0),
"syncingTo" : "localhost:27019",
"configVersion" : 2
},
{
"_id" : 2,
"name" : "localhost:27019",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 42,
"optime" : {
"ts" : Timestamp(1469324080, 1),
"t" : NumberLong(19)
},
"optimeDate" : ISODate("2016-07-24T01:34:40Z"),
"lastHeartbeat" : ISODate("2016-07-24T01:35:16.020Z"),
"lastHeartbeatRecv" : ISODate("2016-07-24T01:35:15.622Z"
),
"pingMs" : NumberLong(0),
"syncingTo" : "localhost:27017",
"configVersion" : 2
}
],
"ok" : 1
}

MongoDb primary replica becomes secondary if secondary fails

I have 2 mongo replicas. One is primary and the second one is secondary. If I stop the secondary replica, the primary one becomes secondary and I lose write permissions.
Here are my configs:
replica:PRIMARY> rs.status()
{
"set" : "replica",
"date" : ISODate("2016-02-26T11:27:50.140Z"),
"myState" : 1,
"members" : [
{
"_id" : 1,
"name" : "192.168.5.44:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 80,
"optime" : Timestamp(1456486069, 1),
"optimeDate" : ISODate("2016-02-26T11:27:49Z"),
"electionTime" : Timestamp(1456485992, 1),
"electionDate" : ISODate("2016-02-26T11:26:32Z"),
"configVersion" : 82935,
"self" : true
},
{
"_id" : 2,
"name" : "192.168.5.34:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 79,
"optime" : Timestamp(1456486067, 2),
"optimeDate" : ISODate("2016-02-26T11:27:47Z"),
"lastHeartbeat" : ISODate("2016-02-26T11:27:48.874Z"),
"lastHeartbeatRecv" : ISODate("2016-02-26T11:27:48.930Z"),
"pingMs" : 1,
"syncingTo" : "192.168.5.44:27017",
"configVersion" : 82935
}
],
"ok" : 1
}
replica:SECONDARY> rs.status()
{
"set" : "replica",
"date" : ISODate("2016-02-26T11:21:38.574Z"),
"myState" : 2,
"syncingTo" : "192.168.5.44:27017",
"members" : [
{
"_id" : 1,
"name" : "192.168.5.44:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 1306,
"optime" : Timestamp(1456485695, 3),
"optimeDate" : ISODate("2016-02-26T11:21:35Z"),
"lastHeartbeat" : ISODate("2016-02-26T11:21:36.602Z"),
"lastHeartbeatRecv" : ISODate("2016-02-26T11:21:37.412Z"),
"pingMs" : 0,
"electionTime" : Timestamp(1456484393, 1),
"electionDate" : ISODate("2016-02-26T10:59:53Z"),
"configVersion" : 82935
},
{
"_id" : 2,
"name" : "192.168.5.34:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 1306,
"optime" : Timestamp(1456485697, 1),
"optimeDate" : ISODate("2016-02-26T11:21:37Z"),
"syncingTo" : "192.168.5.44:27017",
"configVersion" : 82935,
"self" : true
}
],
"ok" : 1
}
replica:SECONDARY> cfg = rs.conf()
{
"_id" : "replica",
"version" : 82935,
"members" : [
{
"_id" : 1,
"host" : "192.168.5.44:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 2,
"tags" : {
},
"slaveDelay" : 0,
"votes" : 1
},
{
"_id" : 2,
"host" : "192.168.5.34:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : 0,
"votes" : 1
}
],
"settings" : {
"chainingAllowed" : true,
"heartbeatTimeoutSecs" : 10,
"getLastErrorModes" : {
},
"getLastErrorDefaults" : {
"w" : 1,
"wtimeout" : 0
}
}
}
Even if I shutdown the primary replica, the secondary won't become primary.
If you need any other details notify me.
Thank you for help!
MongoDB needs a majority of members to accomplish an election. In a 2-member replica set, both members must be available to reach a majority. If one is down, the other cannot be elected primary.

How to know when replica set initial sync completed

From the MongoDB documentation:
At this point, the mongod will perform an initial sync. The length of the initial sync process depends on the size of the database and network connection between members of the replica set.
Source
My question is very simple: how can I know when it's safe to stepDown the PRIMARY member of my replica set? I just upgraded my secondary to use WiredTiger.
Output of rs.status():
{
"set" : "m0",
"date" : ISODate("2015-03-18T09:59:21.486Z"),
"myState" : 1,
"members" : [
{
"_id" : 0,
"name" : "example.com",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 4642,
"optime" : Timestamp(1426672500, 1),
"optimeDate" : ISODate("2015-03-18T09:55:00Z"),
"electionTime" : Timestamp(1426668268, 1),
"electionDate" : ISODate("2015-03-18T08:44:28Z"),
"configVersion" : 7,
"self" : true
},
{
"_id" : 1,
"name" : "example.com",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 1309,
"optime" : Timestamp(1426672500, 1),
"optimeDate" : ISODate("2015-03-18T09:55:00Z"),
"lastHeartbeat" : ISODate("2015-03-18T09:59:20.968Z"),
"lastHeartbeatRecv" : ISODate("2015-03-18T09:59:20.762Z"),
"pingMs" : 0,
"syncingTo" : "example.com",
"configVersion" : 7
},
{
"_id" : 2,
"name" : "example.com",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER",
"uptime" : 4640,
"lastHeartbeat" : ISODate("2015-03-18T09:59:21.009Z"),
"lastHeartbeatRecv" : ISODate("2015-03-18T09:59:21.238Z"),
"pingMs" : 59,
"configVersion" : 7
}
],
"ok" : 1
}
Found the solution:
While performing the initial sync, the status is RECOVERING

How come that primary member behind secondary in one mongodb replica set

On our production environment we have strange behavior of a mongo replica, our primary is always behind secondaries.
rs.status():
{
"set" : "repl01",
"date" : ISODate("2014-02-20T11:11:28.000Z"),
"myState" : 2,
"syncingTo" : "prodsrv04:27018",
"members" : [
{
"_id" : 0,
"name" : "prodsrv02:27018",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 6271,
"optime" : Timestamp(1392894670, 97),
"optimeDate" : ISODate("2014-02-20T11:11:10.000Z"),
"self" : true
},
{
"_id" : 1,
"name" : "prodsrv03:27018",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 6270,
"optime" : Timestamp(1392894670, 68),
"optimeDate" : ISODate("2014-02-20T11:11:10.000Z"),
"lastHeartbeat" : ISODate("2014-02-20T11:11:28.000Z"),
"lastHeartbeatRecv" : ISODate("1970-01-01T00:00:00.000Z"),
"pingMs" : 2
},
{
"_id" : 2,
"name" : "prodsrv04:27018",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 6270,
"optime" : Timestamp(1392894669, 113),
"optimeDate" : ISODate("2014-02-20T11:11:09.000Z"),
"lastHeartbeat" : ISODate("2014-02-20T11:11:27.000Z"),
"lastHeartbeatRecv" : ISODate("2014-02-20T11:11:28.000Z"),
"pingMs" : 6
}
],
"ok" : 1
}
Master optime: Timestamp(1392894669, 113);
Slave optime : Timestamp(1392894670, 68);
How come?

MongoDB Primary replica set member syncing to secondary

I have a replica set having three members, with host0:27100 as a primary member. Recently i changed the configuration and made the host2:27102 as primary member. Followed these docs.
After changing the configuration, the rs.status() output says that host1:27101 is "syncingTo" : "host2:27102", which is intended.
But the output for new primary host2:27102 shows it is "syncingTo" : "host0:27100" which is the previous primary member, and changed into secondary.
I cannot understand why it's syncing to the secondary member. Is this normal behavior?
s0:SECONDARY> rs.status()
{
"set" : "s0",
"date" : ISODate("2013-09-25T12:31:42Z"),
"myState" : 2,
"syncingTo" : "host2:27102",
"members" : [
{
"_id" : 0,
"name" : "host0:27100",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 428068,
"optime" : Timestamp(1380112272, 1),
"optimeDate" : ISODate("2013-09-25T12:31:12Z"),
"self" : true
},
{
"_id" : 1,
"name" : "host1:27101",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 397,
"optime" : Timestamp(1380112272, 1),
"optimeDate" : ISODate("2013-09-25T12:31:12Z"),
"lastHeartbeat" : ISODate("2013-09-25T12:31:42Z"),
"lastHeartbeatRecv" : ISODate("2013-09-25T12:31:41Z"),
"pingMs" : 10,
"syncingTo" : "host2:27102"
},
{
"_id" : 2,
"name" : "host2:27102",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 397,
"optime" : Timestamp(1380112272, 1),
"optimeDate" : ISODate("2013-09-25T12:31:12Z"),
"lastHeartbeat" : ISODate("2013-09-25T12:31:42Z"),
"lastHeartbeatRecv" : ISODate("2013-09-25T12:31:41Z"),
"pingMs" : 2,
"syncingTo" : "host0:27100"
}
],
"ok" : 1
}
This is a known issue. There is an open ticket about rs.status() showing the primary as syncingTo when run from a secondary if the current primary was a secondary in the past ( SERVER-9989 ). The fix version is 2.5.1.