Mongo replica set - data not replicating

I set up a test replica set using mongo v3.2 on a single node, and the rs.status() command in the mongo shell seems to indicate that the replica set is established (output below).
However, when I insert data into the primary, I can see that it gets inserted into the primary, but I cannot run a find operation on any of the secondary processes (message given below).
Message on the secondary:
Error: error: { "ok" : 0, "errmsg" : "not master and slaveOk=false", "code" : 13435 }
rs.status()
{
    "set" : "set0",
    "date" : ISODate("2016-07-21T19:53:41.882Z"),
    "myState" : 1,
    "term" : NumberLong(1),
    "heartbeatIntervalMillis" : NumberLong(2000),
    "members" : [
        {
            "_id" : 0,
            "name" : "127.0.0.1:27049",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 2388,
            "optime" : {
                "ts" : Timestamp(1469130790, 2),
                "t" : NumberLong(1)
            },
            "optimeDate" : ISODate("2016-07-21T19:53:10Z"),
            "electionTime" : Timestamp(1469128700, 1),
            "electionDate" : ISODate("2016-07-21T19:18:20Z"),
            "configVersion" : 1,
            "self" : true
        },
        {
            "_id" : 1,
            "name" : "127.0.0.1:27050",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 2132,
            "optime" : {
                "ts" : Timestamp(1469130790, 2),
                "t" : NumberLong(1)
            },
            "optimeDate" : ISODate("2016-07-21T19:53:10Z"),
            "lastHeartbeat" : ISODate("2016-07-21T19:53:40.964Z"),
            "lastHeartbeatRecv" : ISODate("2016-07-21T19:53:40.823Z"),
            "pingMs" : NumberLong(0),
            "syncingTo" : "127.0.0.1:27049",
            "configVersion" : 1
        },
        {
            "_id" : 2,
            "name" : "127.0.0.1:27051",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 2132,
            "optime" : {
                "ts" : Timestamp(1469130790, 2),
                "t" : NumberLong(1)
            },
            "optimeDate" : ISODate("2016-07-21T19:53:10Z"),
            "lastHeartbeat" : ISODate("2016-07-21T19:53:40.963Z"),
            "lastHeartbeatRecv" : ISODate("2016-07-21T19:53:40.823Z"),
            "pingMs" : NumberLong(0),
            "syncingTo" : "127.0.0.1:27049",
            "configVersion" : 1
        }
    ],
    "ok" : 1
}
Output from rs.conf() on the primary:
{
    "_id" : "set0",
    "version" : 1,
    "protocolVersion" : NumberLong(1),
    "members" : [
        {
            "_id" : 0,
            "host" : "127.0.0.1:27049",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : {
            },
            "slaveDelay" : NumberLong(0),
            "votes" : 1
        },
        {
            "_id" : 1,
            "host" : "127.0.0.1:27050",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : {
            },
            "slaveDelay" : NumberLong(0),
            "votes" : 1
        },
        {
            "_id" : 2,
            "host" : "127.0.0.1:27051",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : {
            },
            "slaveDelay" : NumberLong(0),
            "votes" : 1
        }
    ],
    "settings" : {
        "chainingAllowed" : true,
        "heartbeatIntervalMillis" : 2000,
        "heartbeatTimeoutSecs" : 10,
        "electionTimeoutMillis" : 10000,
        "getLastErrorModes" : {
        },
        "getLastErrorDefaults" : {
            "w" : 1,
            "wtimeout" : 0
        },
        "replicaSetId" : ObjectId("57911ff0bd131f1aeb2ef878")
    }
}

After I ran one of the following commands on the secondary process, I was able to read the data from the secondary:
rs.slaveOk()
or
db.getMongo().setSlaveOk()
Once either command had been run in the Mongo shell, find() on the secondary returned the required documents.
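A minimal end-to-end sketch of the read path (the database and collection names below are hypothetical placeholders, not taken from the question):

$ mongo --port 27050                # one of the secondaries listed in rs.status()
set0:SECONDARY> rs.slaveOk()        // allow reads on this connection
set0:SECONDARY> use testdb          // hypothetical database name
set0:SECONDARY> db.mycoll.find()    // hypothetical collection; now returns the replicated documents

Note that slaveOk applies per connection: every new shell session opened against a secondary has to run it again before reads are permitted.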


Mongo ReplicaSet connection timeout in compass after enabling authentication

SUMMARY:
I recently set up the replica set and could connect successfully from Compass using the connection string below:
mongodb://10.47.72.28:27017,10.47.72.38:27017,10.47.72.39:27017?replicaSet=mongo-cluster
ISSUE:
After enabling authentication, I cannot connect as a cluster (connect ETIMEDOUT), but I can connect to each node individually. Below are the connection string and the authentication settings:
mongodb://admin:Admin%40123@10.47.72.28:27017,10.47.72.38:27017,10.47.72.39:27017/?authSource=admin&replicaSet=mongo-cluster&readPreference=primaryPreferred&appname=MongoDB%20Compass&ssl=false
Strangely, I can connect to all the nodes individually when I do not select the replicaSet option in Compass.
Can someone please advise on what could be happening here?
mongod.conf values:
net:
  port: 27017
  bindIp: 0.0.0.0
security:
  authorization: enabled
  keyFile: /etc/mongo/mongodb.key
replication:
  replSetName: mongo-cluster
  enableMajorityReadConcern: true
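Since the timeouts appeared right after keyFile authentication was enabled, one sanity check worth running on each member is on the key file itself (the path is taken from the config above; the permission requirement is mongod's documented rule that the key must not be readable by group or others):

$ ls -l /etc/mongo/mongodb.key    # expect mode 400 or 600, owned by the mongod user
$ md5sum /etc/mongo/mongodb.key   # checksum must be identical on all three members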
Below are the logs:
{"t":{"$date":"2021-12-05T18:27:19.258+00:00"},"s":"I", "c":"NETWORK", "id":22943, "ctx":"listener","msg":"Connection accepted","attr":{"remote":"13.126.136.66:64175","connectionId":202,"connectionCount":15}}
{"t":{"$date":"2021-12-05T18:27:19.260+00:00"},"s":"I", "c":"NETWORK", "id":51800, "ctx":"conn202","msg":"client metadata","attr":{"remote":"13.126.136.66:64175","client":"conn202","doc":{"driver":{"name":"nodejs","version":"4.0.0-beta.6"},"os":{"type":"Windows_NT","name":"win32","architecture":"x64","version":"10.0.19044"},"platform":"Node.js v12.4.0, LE (unified)|Node.js v12.4.0, LE (unified)","application":{"name":"MongoDB Compass"}}}}
{"t":{"$date":"2021-12-05T18:27:19.306+00:00"},"s":"I", "c":"NETWORK", "id":22944, "ctx":"conn202","msg":"Connection ended","attr":{"remote":"13.126.136.66:64175","connectionId":202,"connectionCount":14}}
ReplicaSet Config.
rs.config()
{
    "_id" : "mongo-cluster",
    "version" : 3,
    "term" : 19,
    "protocolVersion" : NumberLong(1),
    "writeConcernMajorityJournalDefault" : true,
    "members" : [
        {
            "_id" : 0,
            "host" : "174.1.0.6:27017",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : {
            },
            "slaveDelay" : NumberLong(0),
            "votes" : 1
        },
        {
            "_id" : 1,
            "host" : "174.1.0.7:27017",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : {
            },
            "slaveDelay" : NumberLong(0),
            "votes" : 1
        },
        {
            "_id" : 2,
            "host" : "174.1.0.8:27017",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : {
            },
            "slaveDelay" : NumberLong(0),
            "votes" : 1
        }
    ],
    "settings" : {
        "chainingAllowed" : true,
        "heartbeatIntervalMillis" : 2000,
        "heartbeatTimeoutSecs" : 10,
        "electionTimeoutMillis" : 10000,
        "catchUpTimeoutMillis" : -1,
        "catchUpTakeoverDelayMillis" : 30000,
        "getLastErrorModes" : {
        },
        "getLastErrorDefaults" : {
            "w" : 1,
            "wtimeout" : 0
        },
        "replicaSetId" : ObjectId("618a54b988498045e01e33ee")
    }
}
mongo-cluster:PRIMARY>
ReplicaSet Status
mongo-cluster:PRIMARY> rs.status()
{
    "set" : "mongo-cluster",
    "date" : ISODate("2021-12-06T11:00:23.263Z"),
    "myState" : 1,
    "term" : NumberLong(19),
    "syncSourceHost" : "",
    "syncSourceId" : -1,
    "heartbeatIntervalMillis" : NumberLong(2000),
    "majorityVoteCount" : 2,
    "writeMajorityCount" : 2,
    "votingMembersCount" : 3,
    "writableVotingMembersCount" : 3,
    "optimes" : {
        "lastCommittedOpTime" : {
            "ts" : Timestamp(1638788417, 1),
            "t" : NumberLong(19)
        },
        "lastCommittedWallTime" : ISODate("2021-12-06T11:00:17.347Z"),
        "readConcernMajorityOpTime" : {
            "ts" : Timestamp(1638788417, 1),
            "t" : NumberLong(19)
        },
        "readConcernMajorityWallTime" : ISODate("2021-12-06T11:00:17.347Z"),
        "appliedOpTime" : {
            "ts" : Timestamp(1638788417, 1),
            "t" : NumberLong(19)
        },
        "durableOpTime" : {
            "ts" : Timestamp(1638788417, 1),
            "t" : NumberLong(19)
        },
        "lastAppliedWallTime" : ISODate("2021-12-06T11:00:17.347Z"),
        "lastDurableWallTime" : ISODate("2021-12-06T11:00:17.347Z")
    },
    "lastStableRecoveryTimestamp" : Timestamp(1638788417, 1),
    "electionCandidateMetrics" : {
        "lastElectionReason" : "stepUpRequestSkipDryRun",
        "lastElectionDate" : ISODate("2021-12-02T07:02:16.093Z"),
        "electionTerm" : NumberLong(19),
        "lastCommittedOpTimeAtElection" : {
            "ts" : Timestamp(1638428533, 1),
            "t" : NumberLong(18)
        },
        "lastSeenOpTimeAtElection" : {
            "ts" : Timestamp(1638428533, 1),
            "t" : NumberLong(18)
        },
        "numVotesNeeded" : 2,
        "priorityAtElection" : 1,
        "electionTimeoutMillis" : NumberLong(10000),
        "priorPrimaryMemberId" : 1,
        "numCatchUpOps" : NumberLong(0),
        "newTermStartDate" : ISODate("2021-12-02T07:02:16.106Z"),
        "wMajorityWriteAvailabilityDate" : ISODate("2021-12-02T07:02:18.107Z")
    },
    "members" : [
        {
            "_id" : 0,
            "name" : "174.1.0.6:27017",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 427920,
            "optime" : {
                "ts" : Timestamp(1638788417, 1),
                "t" : NumberLong(19)
            },
            "optimeDate" : ISODate("2021-12-06T11:00:17Z"),
            "syncSourceHost" : "",
            "syncSourceId" : -1,
            "infoMessage" : "",
            "electionTime" : Timestamp(1638428536, 1),
            "electionDate" : ISODate("2021-12-02T07:02:16Z"),
            "configVersion" : 3,
            "configTerm" : 19,
            "self" : true,
            "lastHeartbeatMessage" : ""
        },
        {
            "_id" : 1,
            "name" : "174.1.0.7:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 359881,
            "optime" : {
                "ts" : Timestamp(1638788417, 1),
                "t" : NumberLong(19)
            },
            "optimeDurable" : {
                "ts" : Timestamp(1638788417, 1),
                "t" : NumberLong(19)
            },
            "optimeDate" : ISODate("2021-12-06T11:00:17Z"),
            "optimeDurableDate" : ISODate("2021-12-06T11:00:17Z"),
            "lastHeartbeat" : ISODate("2021-12-06T11:00:21.530Z"),
            "lastHeartbeatRecv" : ISODate("2021-12-06T11:00:21.530Z"),
            "pingMs" : NumberLong(0),
            "lastHeartbeatMessage" : "",
            "syncSourceHost" : "174.1.0.6:27017",
            "syncSourceId" : 0,
            "infoMessage" : "",
            "configVersion" : 3,
            "configTerm" : 19
        },
        {
            "_id" : 2,
            "name" : "174.1.0.8:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 359879,
            "optime" : {
                "ts" : Timestamp(1638788417, 1),
                "t" : NumberLong(19)
            },
            "optimeDurable" : {
                "ts" : Timestamp(1638788417, 1),
                "t" : NumberLong(19)
            },
            "optimeDate" : ISODate("2021-12-06T11:00:17Z"),
            "optimeDurableDate" : ISODate("2021-12-06T11:00:17Z"),
            "lastHeartbeat" : ISODate("2021-12-06T11:00:21.534Z"),
            "lastHeartbeatRecv" : ISODate("2021-12-06T11:00:21.530Z"),
            "pingMs" : NumberLong(0),
            "lastHeartbeatMessage" : "",
            "syncSourceHost" : "174.1.0.7:27017",
            "syncSourceId" : 1,
            "infoMessage" : "",
            "configVersion" : 3,
            "configTerm" : 19
        }
    ],
    "ok" : 1,
    "$clusterTime" : {
        "clusterTime" : Timestamp(1638788417, 1),
        "signature" : {
            "hash" : BinData(0,"UeCHJX2kYRT5awReraDGQBMt13E="),
            "keyId" : NumberLong("7028523322010763269")
        }
    },
    "operationTime" : Timestamp(1638788417, 1)
}
mongo-cluster:PRIMARY>
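Two things stand out when comparing the pasted output with the connection strings above, offered as hedged leads rather than a definitive diagnosis. First, the pre-auth connection string has no / before the ?, which makes it an invalid MongoDB URI; some clients ignore the options in that case, so the earlier "working" cluster connection may never have applied replicaSet at all. Second, rs.conf() lists the members by their internal 174.1.0.x addresses while Compass is pointed at 10.47.72.x: when replicaSet= is given, drivers rediscover the topology from the hostnames in the replica set config and then connect to those addresses directly, so the client machine must be able to reach them. You can print exactly what a client will be told to use from the mongo shell:

mongo-cluster:PRIMARY> rs.conf().members.map(function (m) { return m.host; })
[ "174.1.0.6:27017", "174.1.0.7:27017", "174.1.0.8:27017" ]

If 174.1.0.x is not routable from the Compass machine, that would produce exactly this pattern: individual (direct) connections succeed while replica-set connections time out.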

Multiple primary nodes in a MongoDB replica set

Sometimes our MongoDB replica set has multiple primary nodes!
In this case the mongo-b node is our real primary, while the mongo-c node is not responding and only claims the primary role.
Number of nodes in the replica set: 4 (3 normal + 1 delayed node without voting rights).
We checked for a network partition and verified access; there is no partition.
We have had this problem several times since upgrading the cluster from version 4.2.5 to version 4.4.5.
Each time we fix the problem by cleaning the data directory on the fake node and restarting it, as sketched below.
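For reference, the manual fix described above looks like this as a sketch (the service name and dbPath are assumptions for a stock Linux install; wiping the data directory forces a full initial sync from the remaining members when the node restarts):

$ systemctl stop mongod            # on the stuck "fake primary" node
$ rm -rf /var/lib/mongodb/*        # hypothetical dbPath; check storage.dbPath in your config first
$ systemctl start mongod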
replica conf:
{
    "_id" : 0,
    "host" : "mongo-a:27017",
    "arbiterOnly" : false,
    "buildIndexes" : true,
    "hidden" : false,
    "priority" : 1,
    "tags" : {
    },
    "slaveDelay" : NumberLong(0),
    "votes" : 1
},
{
    "_id" : 3,
    "host" : "mongo-h:27017",
    "arbiterOnly" : false,
    "buildIndexes" : true,
    "hidden" : true,
    "priority" : 0,
    "tags" : {
    },
    "slaveDelay" : NumberLong(900),
    "votes" : 0
},
{
    "_id" : 5,
    "host" : "mongo-c:27017",
    "arbiterOnly" : false,
    "buildIndexes" : true,
    "hidden" : false,
    "priority" : 1,
    "tags" : {
    },
    "slaveDelay" : NumberLong(0),
    "votes" : 1
},
{
    "_id" : 6,
    "host" : "mongo-b:27017",
    "arbiterOnly" : false,
    "buildIndexes" : true,
    "hidden" : false,
    "priority" : 1,
    "tags" : {
    },
    "slaveDelay" : NumberLong(0),
    "votes" : 1
}
rs.status() result (partial):
{
    "_id" : 5,
    "name" : "mongo-c:27017",
    "health" : 1,
    "state" : 1,
    "stateStr" : "PRIMARY",
    "uptime" : 3435,
    "optime" : {
        "ts" : Timestamp(1637525581, 2),
        "t" : NumberLong(87)
    },
    "optimeDurable" : {
        "ts" : Timestamp(1637525443, 1),
        "t" : NumberLong(87)
    },
    "optimeDate" : ISODate("2021-11-21T20:13:01Z"),
    "optimeDurableDate" : ISODate("2021-11-21T20:10:43Z"),
    "lastHeartbeat" : ISODate("2021-11-24T07:08:32.108Z"),
    "lastHeartbeatRecv" : ISODate("2021-11-24T07:08:34.867Z"),
    "pingMs" : NumberLong(2355),
    "lastHeartbeatMessage" : "",
    "syncSourceHost" : "",
    "syncSourceId" : -1,
    "infoMessage" : "",
    "electionTime" : Timestamp(1637524307, 1),
    "electionDate" : ISODate("2021-11-21T19:51:47Z"),
    "configVersion" : 23,
    "configTerm" : 87
},
{
    "_id" : 6,
    "name" : "mongo-b:27017",
    "health" : 1,
    "state" : 1,
    "stateStr" : "PRIMARY",
    "uptime" : 7995003,
    "optime" : {
        "ts" : Timestamp(1637737714, 24),
        "t" : NumberLong(88)
    },
    "optimeDate" : ISODate("2021-11-24T07:08:34Z"),
    "syncSourceHost" : "",
    "syncSourceId" : -1,
    "infoMessage" : "",
    "electionTime" : Timestamp(1637525581, 1),
    "electionDate" : ISODate("2021-11-21T20:13:01Z"),
    "configVersion" : 23,
    "configTerm" : 88,
    "self" : true,
    "lastHeartbeatMessage" : ""
}
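Note how stale the first entry is: this member's view of mongo-c has an optimeDate more than two days behind mongo-b and a pingMs of 2355, so the "PRIMARY" it reports is cached heartbeat state. When members disagree like this, it can help to ask each node directly for its own view instead of trusting one member's rs.status(). A sketch for the legacy mongo shell (assumes the shell can reach and, if auth is enabled, authenticate to each host):

> ["mongo-a", "mongo-b", "mongo-c"].forEach(function (h) {
      var conn = new Mongo(h + ":27017");                       // direct connection to one member
      printjson({ host: h, ismaster: conn.getDB("admin").isMaster().ismaster });
  })

Only one node should answer ismaster: true; if a node cannot be reached at all, that by itself tells you its PRIMARY entry elsewhere is just stale heartbeat data.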

pymongo unable to connect to primary

I'm trying to run find_one by connecting to my replica set's primary node.
MongoClient(hostname, replicaSet="rs0", read_preference=ReadPreference.PRIMARY)
But it results in an error:
ServerSelectionTimeoutError: No replica set members match selector "Primary()"
I'm able to read successfully using SECONDARY_PREFERRED. I also tried connecting using MongoReplicaSetClient, with no success. I'm guessing this is due to a bad configuration, but what should I be looking for?
rs.conf() and rs.status() output:
rs0:PRIMARY> rs.conf()
{
    "_id" : "rs0",
    "version" : 111313,
    "protocolVersion" : NumberLong(1),
    "members" : [
        {
            "_id" : 1,
            "host" : "ANDROMEDA:27017",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : {
            },
            "slaveDelay" : NumberLong(0),
            "votes" : 1
        },
        {
            "_id" : 2,
            "host" : "mongo02.db.com:27017",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 0.5,
            "tags" : {
            },
            "slaveDelay" : NumberLong(0),
            "votes" : 1
        },
        {
            "_id" : 3,
            "host" : "mongo03.db.com:27017",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 0.5,
            "tags" : {
            },
            "slaveDelay" : NumberLong(0),
            "votes" : 1
        }
    ],
    "settings" : {
        "chainingAllowed" : true,
        "heartbeatIntervalMillis" : 2000,
        "heartbeatTimeoutSecs" : 10,
        "electionTimeoutMillis" : 10000,
        "getLastErrorModes" : {
        },
        "getLastErrorDefaults" : {
            "w" : 1,
            "wtimeout" : 0
        }
    }
}
rs0:SECONDARY> rs.status()
{
    "set" : "rs0",
    "date" : ISODate("2016-08-04T08:58:02.293Z"),
    "myState" : 2,
    "term" : NumberLong(90),
    "syncingTo" : "mongo03.db.com:27017",
    "heartbeatIntervalMillis" : NumberLong(2000),
    "members" : [
        {
            "_id" : 1,
            "name" : "ANDROMEDA:27017",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 2503,
            "optime" : {
                "ts" : Timestamp(1470299746, 1),
                "t" : NumberLong(90)
            },
            "optimeDate" : ISODate("2016-08-04T08:35:46Z"),
            "lastHeartbeat" : ISODate("2016-08-04T08:58:01.109Z"),
            "lastHeartbeatRecv" : ISODate("2016-08-04T08:58:01.803Z"),
            "pingMs" : NumberLong(28),
            "electionTime" : Timestamp(1469600522, 1),
            "electionDate" : ISODate("2016-07-27T06:22:02Z"),
            "configVersion" : 111313
        },
        {
            "_id" : 2,
            "name" : "mongo02.db.com:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 7604104,
            "optime" : {
                "ts" : Timestamp(1470299746, 1),
                "t" : NumberLong(90)
            },
            "optimeDate" : ISODate("2016-08-04T08:35:46Z"),
            "syncingTo" : "mongo03.db.com:27017",
            "configVersion" : 111313,
            "self" : true
        },
        {
            "_id" : 3,
            "name" : "mongo03.db.com:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 2503,
            "optime" : {
                "ts" : Timestamp(1470299746, 1),
                "t" : NumberLong(90)
            },
            "optimeDate" : ISODate("2016-08-04T08:35:46Z"),
            "lastHeartbeat" : ISODate("2016-08-04T08:58:01.948Z"),
            "lastHeartbeatRecv" : ISODate("2016-08-04T08:58:01.802Z"),
            "pingMs" : NumberLong(28),
            "syncingTo" : "ANDROMEDA:27017",
            "configVersion" : 111313
        }
    ],
    "ok" : 1
}
If you get an error message similar to ServerSelectionTimeoutError: No replica set members match selector "Primary()", and the rs.status() output shows the primary member under a non-FQDN name (ANDROMEDA:27017 in this case), then it is highly likely that the application host cannot resolve the primary's hostname on the network.
You can easily check this using the following commands from the command line of the host running your application:
$ dig ANDROMEDA
$ ping ANDROMEDA
$ mongo --host ANDROMEDA:27017
If you don't have the Mongo Shell installed on the host running your application, you can use Telnet instead.
$ telnet ANDROMEDA 27017
These outputs will allow you to check connectivity between your application host and your mongod host to determine if this is causing the problem.
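If fixing DNS on the application host is not an option, a hedged alternative is to reconfigure the replica set so the member advertises a name the application can resolve (mongo01.db.com below is an assumption, mirroring the naming of the other two members and the seed list in the connection string further down):

rs0:PRIMARY> cfg = rs.conf()
rs0:PRIMARY> cfg.members[0].host = "mongo01.db.com:27017"   // the ANDROMEDA entry is the first array element
rs0:PRIMARY> rs.reconfig(cfg)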
I use this connection string:
MongoClient('mongodb://mongo01.db.com:27017,mongo02.db.com:27017,mongo03.db.com:27017/mydb',replicaSet="rs0", read_preference=ReadPreference.PRIMARY)

MongoDB primary replica becomes secondary if the secondary fails

I have a 2-member mongo replica set: one primary and one secondary. If I stop the secondary, the primary steps down to secondary and I lose write access.
Here are my configs:
replica:PRIMARY> rs.status()
{
    "set" : "replica",
    "date" : ISODate("2016-02-26T11:27:50.140Z"),
    "myState" : 1,
    "members" : [
        {
            "_id" : 1,
            "name" : "192.168.5.44:27017",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 80,
            "optime" : Timestamp(1456486069, 1),
            "optimeDate" : ISODate("2016-02-26T11:27:49Z"),
            "electionTime" : Timestamp(1456485992, 1),
            "electionDate" : ISODate("2016-02-26T11:26:32Z"),
            "configVersion" : 82935,
            "self" : true
        },
        {
            "_id" : 2,
            "name" : "192.168.5.34:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 79,
            "optime" : Timestamp(1456486067, 2),
            "optimeDate" : ISODate("2016-02-26T11:27:47Z"),
            "lastHeartbeat" : ISODate("2016-02-26T11:27:48.874Z"),
            "lastHeartbeatRecv" : ISODate("2016-02-26T11:27:48.930Z"),
            "pingMs" : 1,
            "syncingTo" : "192.168.5.44:27017",
            "configVersion" : 82935
        }
    ],
    "ok" : 1
}
replica:SECONDARY> rs.status()
{
    "set" : "replica",
    "date" : ISODate("2016-02-26T11:21:38.574Z"),
    "myState" : 2,
    "syncingTo" : "192.168.5.44:27017",
    "members" : [
        {
            "_id" : 1,
            "name" : "192.168.5.44:27017",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 1306,
            "optime" : Timestamp(1456485695, 3),
            "optimeDate" : ISODate("2016-02-26T11:21:35Z"),
            "lastHeartbeat" : ISODate("2016-02-26T11:21:36.602Z"),
            "lastHeartbeatRecv" : ISODate("2016-02-26T11:21:37.412Z"),
            "pingMs" : 0,
            "electionTime" : Timestamp(1456484393, 1),
            "electionDate" : ISODate("2016-02-26T10:59:53Z"),
            "configVersion" : 82935
        },
        {
            "_id" : 2,
            "name" : "192.168.5.34:27017",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 1306,
            "optime" : Timestamp(1456485697, 1),
            "optimeDate" : ISODate("2016-02-26T11:21:37Z"),
            "syncingTo" : "192.168.5.44:27017",
            "configVersion" : 82935,
            "self" : true
        }
    ],
    "ok" : 1
}
replica:SECONDARY> cfg = rs.conf()
{
    "_id" : "replica",
    "version" : 82935,
    "members" : [
        {
            "_id" : 1,
            "host" : "192.168.5.44:27017",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 2,
            "tags" : {
            },
            "slaveDelay" : 0,
            "votes" : 1
        },
        {
            "_id" : 2,
            "host" : "192.168.5.34:27017",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : {
            },
            "slaveDelay" : 0,
            "votes" : 1
        }
    ],
    "settings" : {
        "chainingAllowed" : true,
        "heartbeatTimeoutSecs" : 10,
        "getLastErrorModes" : {
        },
        "getLastErrorDefaults" : {
            "w" : 1,
            "wtimeout" : 0
        }
    }
}
Even if I shut down the primary, the secondary won't become primary.
If you need any other details, let me know.
Thank you for your help!
MongoDB needs a majority of the members to complete an election. In a 2-member replica set, both members must be available to reach a majority; if one is down, the other cannot be elected primary.
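A common fix is to add a third voting member so that a 2-of-3 majority survives the loss of any single node; an arbiter is the cheapest option because it votes but holds no data. A minimal sketch (the third host's address is hypothetical):

replica:PRIMARY> rs.addArb("192.168.5.45:27017")   // hypothetical host running mongod --replSet replica

With three voting members, stopping either data-bearing node still leaves two voters up, so the surviving data node can stay (or become) primary.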

MongoDB 3.0.1 Replication Setup

I am using MongoDB version 3.0.1. I am trying to set up MongoDB replication, using three MongoDB instances on separate machines:
machine1 - master
machine2 - slave
machine3 - slave
I followed this URL: http://docs.mongodb.org/manual/tutorial/deploy-replica-set/
I configured every machine's mongodb.conf file with:
replSet = rs1
fork = true
and I added the two members on the master machine using these commands:
rs.add("192.168.1.2")
rs.add("192.168.1.3")
But when I insert a document on the master machine, it does not replicate to the other two slave machines. When I checked a slave machine, it threw the following error:
> show dbs
2015-05-18T12:43:22.020+0530 E QUERY Error: listDatabases failed:{ "note" : "from execCommand", "ok" : 0, "errmsg" : "not master" }
at Error (<anonymous>)
at Mongo.getDBs (src/mongo/shell/mongo.js:47:15)
at shellHelper.show (src/mongo/shell/utils.js:630:33)
at shellHelper (src/mongo/shell/utils.js:524:36)
at (shellhelp2):1:1 at src/mongo/shell/mongo.js:47
>
> rs.conf()
2015-05-18T12:43:38.692+0530 E QUERY Error: Could not retrieve replica set config: {
"info" : "run rs.initiate(...) if not yet done for the set",
"ok" : 0,
"errmsg" : "no replset config has been received",
"code" : 94
}
at Function.rs.conf (src/mongo/shell/utils.js:1011:11)
at (shell):1:4 at src/mongo/shell/utils.js:1011
>
Please help me solve the problem. Thanks in advance.
EDIT:
rs1:PRIMARY> rs.conf()
{
    "_id" : "rs1",
    "version" : 4,
    "members" : [
        {
            "_id" : 0,
            "host" : "analyzer-xubuntu:27017",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 1,
            "tags" : {
            },
            "slaveDelay" : 0,
            "votes" : 1
        },
        {
            "_id" : 1,
            "host" : "192.168.1.31:27017",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 0.75,
            "tags" : {
            },
            "slaveDelay" : 0,
            "votes" : 1
        },
        {
            "_id" : 2,
            "host" : "192.168.1.33:27017",
            "arbiterOnly" : false,
            "buildIndexes" : true,
            "hidden" : false,
            "priority" : 0.5,
            "tags" : {
            },
            "slaveDelay" : 0,
            "votes" : 1
        }
    ],
    "settings" : {
        "chainingAllowed" : true,
        "heartbeatTimeoutSecs" : 10,
        "getLastErrorModes" : {
        },
        "getLastErrorDefaults" : {
            "w" : 1,
            "wtimeout" : 0
        }
    }
}
rs1:PRIMARY> rs.status()
{
    "set" : "rs1",
    "date" : ISODate("2015-05-18T09:07:31.767Z"),
    "myState" : 1,
    "members" : [
        {
            "_id" : 0,
            "name" : "analyzer-xubuntu:27017",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 9236,
            "optime" : Timestamp(1431939509, 2),
            "optimeDate" : ISODate("2015-05-18T08:58:29Z"),
            "electionTime" : Timestamp(1431931054, 2),
            "electionDate" : ISODate("2015-05-18T06:37:34Z"),
            "configVersion" : 4,
            "self" : true
        },
        {
            "_id" : 1,
            "name" : "192.168.1.31:27017",
            "health" : 1,
            "state" : 0,
            "stateStr" : "STARTUP",
            "uptime" : 8953,
            "optime" : Timestamp(0, 0),
            "optimeDate" : ISODate("1970-01-01T00:00:00Z"),
            "lastHeartbeat" : ISODate("2015-05-18T09:07:29.831Z"),
            "lastHeartbeatRecv" : ISODate("1970-01-01T00:00:00Z"),
            "pingMs" : 1,
            "configVersion" : -2
        },
        {
            "_id" : 2,
            "name" : "192.168.1.33:27017",
            "health" : 1,
            "state" : 0,
            "stateStr" : "STARTUP",
            "uptime" : 8946,
            "optime" : Timestamp(0, 0),
            "optimeDate" : ISODate("1970-01-01T00:00:00Z"),
            "lastHeartbeat" : ISODate("2015-05-18T09:07:30.533Z"),
            "lastHeartbeatRecv" : ISODate("1970-01-01T00:00:00Z"),
            "pingMs" : 1,
            "configVersion" : -2
        }
    ],
    "ok" : 1
}
Let's go step by step.
Your MongoDB config is OK; the rest of the work can be done in the shell. I will use machine names instead of IPs.
First connect to machine1, and run the following there:
> conf = {
      _id: "rs1",
      members: [
          {_id : 0, host : "machine1:27017"},
          {_id : 1, host : "machine2:27017"},
          {_id : 2, host : "machine3:27017"}
      ]
  }
> rs.initiate(conf)
Then run rs.slaveOk() on the secondaries so that you can query them. Replication itself starts as soon as the members receive the config, and you will then be able to query the secondaries and see the data inserted on the primary, as in the check below.
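To confirm the set has converged after the initiate, a quick check from the primary (this merely summarizes the same fields shown in the rs.status() output above):

rs1:PRIMARY> rs.status().members.map(function (m) { return m.name + " : " + m.stateStr; })

Every member should report PRIMARY or SECONDARY. A member stuck in STARTUP with "configVersion" : -2, as in the rs.status() pasted earlier, has never received the replica set config; a common cause is that the primary's hostname (analyzer-xubuntu here) is not resolvable from the other machines, so prefer host names or addresses that every member can resolve.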