I've recently followed the AWS Quick Start (CloudFormation) deployment for a 3-node MongoDB replica set.
I've set up an NLB so the nodes are reachable from my local network, and I'm trying to connect with the mongo shell.
Trying
mongo "mongodb://<username:<password>#<url1>,<url2>,<url3>"
connects me to the database but doesn't always connect to primary so must be the incorrect way of connecting
Trying
mongo "mongodb://<username:<password>#<url1>,<url2>,<url3>/?replicaSet=s0"
I get the error
Error: Could not find host matching read preference { mode: "nearest" } for set s0
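(For context: when ?replicaSet= is given, the shell discovers the member list from the replica set's own configuration and then applies the read preference to those advertised hosts, so the client must be able to reach them. A rough sketch of how to see what the shell will actually dial — connect to a single member without the replicaSet option, using the same placeholders as above, and inspect the advertised hosts:)
mongo "mongodb://<username>:<password>@<url1>"
// inside the shell: the addresses the set advertises to clients; a
// replica-set-aware connection will dial these, not the NLB
rs.conf().members.map(function (m) { return m.host; })
db.isMaster().primary    // the member currently reported as primary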
The /etc/mongo.conf is
net:
  port: 27017
  bindIpAll: true
systemLog:
  destination: file
  logAppend: true
  path: /log/mongod.log
storage:
  dbPath: /data
  journal:
    enabled: true
security:
  authorization: enabled
  keyFile: /mongo_auth/mongodb.key
processManagement:
  fork: true
  pidFilePath: /var/run/mongodb/mongod.pid
replication:
  replSetName: s0
And the rs.conf() is
{
"_id" : "s0",
"version" : 1,
"protocolVersion" : NumberLong(1),
"writeConcernMajorityJournalDefault" : true,
"members" : [
{
"_id" : 1,
"host" : "10.0.7.79:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 10,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 2,
"host" : "10.0.38.224:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 5,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 3,
"host" : "10.0.80.111:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 5,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
}
],
"settings" : {
"chainingAllowed" : true,
"heartbeatIntervalMillis" : 2000,
"heartbeatTimeoutSecs" : 10,
"electionTimeoutMillis" : 10000,
"catchUpTimeoutMillis" : -1,
"catchUpTakeoverDelayMillis" : 30000,
"getLastErrorModes" : {
},
"getLastErrorDefaults" : {
"w" : 1,
"wtimeout" : 0
},
"replicaSetId" : ObjectId("myid")
}
}
The link to the quick start guide is here.
Any advice on what I might be doing wrong would be appreciated.
rs.status() is
{
"set" : "s0",
"date" : ISODate("2022-01-12T19:49:16.066Z"),
"myState" : 1,
"term" : NumberLong(6),
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"majorityVoteCount" : 2,
"writeMajorityCount" : 2,
"votingMembersCount" : 3,
"writableVotingMembersCount" : 3,
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1642016948, 1),
"t" : NumberLong(6)
},
"lastCommittedWallTime" : ISODate("2022-01-12T19:49:08.522Z"),
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1642016948, 1),
"t" : NumberLong(6)
},
"readConcernMajorityWallTime" : ISODate("2022-01-12T19:49:08.522Z"),
"appliedOpTime" : {
"ts" : Timestamp(1642016948, 1),
"t" : NumberLong(6)
},
"durableOpTime" : {
"ts" : Timestamp(1642016948, 1),
"t" : NumberLong(6)
},
"lastAppliedWallTime" : ISODate("2022-01-12T19:49:08.522Z"),
"lastDurableWallTime" : ISODate("2022-01-12T19:49:08.522Z")
},
"lastStableRecoveryTimestamp" : Timestamp(1642016908, 1),
"electionCandidateMetrics" : {
"lastElectionReason" : "priorityTakeover",
"lastElectionDate" : ISODate("2022-01-12T13:46:38.130Z"),
"electionTerm" : NumberLong(6),
"lastCommittedOpTimeAtElection" : {
"ts" : Timestamp(1641995191, 1),
"t" : NumberLong(5)
},
"lastSeenOpTimeAtElection" : {
"ts" : Timestamp(1641995191, 1),
"t" : NumberLong(5)
},
"numVotesNeeded" : 2,
"priorityAtElection" : 10,
"electionTimeoutMillis" : NumberLong(10000),
"priorPrimaryMemberId" : 2,
"numCatchUpOps" : NumberLong(0),
"newTermStartDate" : ISODate("2022-01-12T13:46:38.137Z"),
"wMajorityWriteAvailabilityDate" : ISODate("2022-01-12T13:46:40.143Z")
},
"members" : [
{
"_id" : 1,
"name" : "10.0.7.79:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 21770,
"optime" : {
"ts" : Timestamp(1642016948, 1),
"t" : NumberLong(6)
},
"optimeDate" : ISODate("2022-01-12T19:49:08Z"),
"lastAppliedWallTime" : ISODate("2022-01-12T19:49:08.522Z"),
"lastDurableWallTime" : ISODate("2022-01-12T19:49:08.522Z"),
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1641995198, 1),
"electionDate" : ISODate("2022-01-12T13:46:38Z"),
"configVersion" : 1,
"configTerm" : -1,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 2,
"name" : "10.0.38.224:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 21768,
"optime" : {
"ts" : Timestamp(1642016948, 1),
"t" : NumberLong(6)
},
"optimeDurable" : {
"ts" : Timestamp(1642016948, 1),
"t" : NumberLong(6)
},
"optimeDate" : ISODate("2022-01-12T19:49:08Z"),
"optimeDurableDate" : ISODate("2022-01-12T19:49:08Z"),
"lastAppliedWallTime" : ISODate("2022-01-12T19:49:08.522Z"),
"lastDurableWallTime" : ISODate("2022-01-12T19:49:08.522Z"),
"lastHeartbeat" : ISODate("2022-01-12T19:49:14.148Z"),
"lastHeartbeatRecv" : ISODate("2022-01-12T19:49:14.507Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "10.0.7.79:27017",
"syncSourceId" : 1,
"infoMessage" : "",
"configVersion" : 1,
"configTerm" : -1
},
{
"_id" : 3,
"name" : "10.0.80.111:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 21768,
"optime" : {
"ts" : Timestamp(1642016948, 1),
"t" : NumberLong(6)
},
"optimeDurable" : {
"ts" : Timestamp(1642016948, 1),
"t" : NumberLong(6)
},
"optimeDate" : ISODate("2022-01-12T19:49:08Z"),
"optimeDurableDate" : ISODate("2022-01-12T19:49:08Z"),
"lastAppliedWallTime" : ISODate("2022-01-12T19:49:08.522Z"),
"lastDurableWallTime" : ISODate("2022-01-12T19:49:08.522Z"),
"lastHeartbeat" : ISODate("2022-01-12T19:49:14.145Z"),
"lastHeartbeatRecv" : ISODate("2022-01-12T19:49:14.952Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "10.0.38.224:27017",
"syncSourceId" : 2,
"infoMessage" : "",
"configVersion" : 1,
"configTerm" : -1
}
],
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1642016948, 1),
"signature" : {
"hash" : BinData(0,"<signature>"),
"keyId" : NumberLong("<long>")
}
},
"operationTime" : Timestamp(1642016948, 1)
}
Related
I have a 3-node MongoDB replica set with 1 primary and 2 secondaries. One of my secondary nodes is down, but the primary and the other secondary are still up. The problem is that I now cannot write to the primary. What could the reason be, given that the primary is still up and read operations are working? I am using a connection string like
"mongodb://custom-pvc-mongodb-0.custom-pvc-mongodb-svc.mongo-3033.svc.cluster.local:27017/admin"
the below is my rs.status()
{
"set" : "custom-pvc-mongodb",
"date" : ISODate("2022-12-07T12:04:12.543Z"),
"myState" : 1,
"term" : NumberLong(3),
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"majorityVoteCount" : 2,
"writeMajorityCount" : 2,
"votingMembersCount" : 3,
"writableVotingMembersCount" : 3,
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1670301216, 1),
"t" : NumberLong(3)
},
"lastCommittedWallTime" : ISODate("2022-12-06T04:33:36.252Z"),
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1670301216, 1),
"t" : NumberLong(3)
},
"appliedOpTime" : {
"ts" : Timestamp(1670414650, 1),
"t" : NumberLong(3)
},
"durableOpTime" : {
"ts" : Timestamp(1670414650, 1),
"t" : NumberLong(3)
},
"lastAppliedWallTime" : ISODate("2022-12-07T12:04:10.025Z"),
"lastDurableWallTime" : ISODate("2022-12-07T12:04:10.025Z")
},
"lastStableRecoveryTimestamp" : Timestamp(1670301216, 1),
"electionCandidateMetrics" : {
"lastElectionReason" : "electionTimeout",
"lastElectionDate" : ISODate("2022-11-25T07:35:27.387Z"),
"electionTerm" : NumberLong(3),
"lastCommittedOpTimeAtElection" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"lastSeenOpTimeAtElection" : {
"ts" : Timestamp(1669361650, 1),
"t" : NumberLong(1)
},
"numVotesNeeded" : 2,
"priorityAtElection" : 1,
"electionTimeoutMillis" : NumberLong(10000),
"numCatchUpOps" : NumberLong(0),
"newTermStartDate" : ISODate("2022-11-25T07:35:27.410Z"),
"wMajorityWriteAvailabilityDate" : ISODate("2022-11-25T07:35:28.101Z")
},
"members" : [
{
"_id" : 0,
"name" : "custom-pvc-mongodb-0.custom-pvc-mongodb-svc.mongo-3033.svc.cluster.local:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 1052969,
"optime" : {
"ts" : Timestamp(1670414650, 1),
"t" : NumberLong(3)
},
"optimeDate" : ISODate("2022-12-07T12:04:10Z"),
"lastAppliedWallTime" : ISODate("2022-12-07T12:04:10.025Z"),
"lastDurableWallTime" : ISODate("2022-12-07T12:04:10.025Z"),
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1669361727, 1),
"electionDate" : ISODate("2022-11-25T07:35:27Z"),
"configVersion" : 1,
"configTerm" : 3,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 1,
"name" : "custom-pvc-mongodb-1.custom-pvc-mongodb-svc.mongo-3033.svc.cluster.local:27017",
"health" : 0,
"state" : 8,
"stateStr" : "(not reachable/healthy)",
"uptime" : 0,
"optime" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"optimeDurable" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"optimeDate" : ISODate("1970-01-01T00:00:00Z"),
"optimeDurableDate" : ISODate("1970-01-01T00:00:00Z"),
"lastAppliedWallTime" : ISODate("2022-12-06T04:33:36.252Z"),
"lastDurableWallTime" : ISODate("2022-12-06T04:33:36.252Z"),
"lastHeartbeat" : ISODate("2022-12-07T12:04:11.496Z"),
"lastHeartbeatRecv" : ISODate("2022-12-06T04:33:41.424Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "Error connecting to custom-pvc-mongodb-1.custom-pvc-mongodb-svc.mongo-3033.svc.cluster.local:27017 :: caused by :: Could not find address for custom-pvc-mongodb-1.custom-pvc-mongodb-svc.mongo-3033.svc.cluster.local:27017: SocketException: Host not found (authoritative)",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"configVersion" : 1,
"configTerm" : 3
},
{
"_id" : 2,
"name" : "custom-pvc-mongodb-2.custom-pvc-mongodb-svc.mongo-3033.svc.cluster.local:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 1052905,
"optime" : {
"ts" : Timestamp(1670301216, 1),
"t" : NumberLong(3)
},
"optimeDurable" : {
"ts" : Timestamp(1670301216, 1),
"t" : NumberLong(3)
},
"optimeDate" : ISODate("2022-12-06T04:33:36Z"),
"optimeDurableDate" : ISODate("2022-12-06T04:33:36Z"),
"lastAppliedWallTime" : ISODate("2022-12-06T04:33:36.252Z"),
"lastDurableWallTime" : ISODate("2022-12-06T04:33:36.252Z"),
"lastHeartbeat" : ISODate("2022-12-07T12:04:12.099Z"),
"lastHeartbeatRecv" : ISODate("2022-12-06T17:34:23.114Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"configVersion" : 1,
"configTerm" : 3
}
],
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1670414650, 1),
"signature" : {
"hash" : BinData(0,"kmVIlznkue1ffD6Se8Ztbzc22j0="),
"keyId" : NumberLong("7169853219500195844")
}
},
"operationTime" : Timestamp(1670414650, 1)
}
You need to connect to the entire replica set, not to a single node. The connection string would be:
mongodb://custom-pvc-mongodb-0.custom-pvc-mongodb-svc.mongo-3033.svc.cluster.local:27017,custom-pvc-mongodb-1.custom-pvc-mongodb-svc.mongo-3033.svc.cluster.local:27017,custom-pvc-mongodb-2.custom-pvc-mongodb-svc.mongo-3033.svc.cluster.local:27017/admin?replicaSet=custom-pvc-mongodb
On connection it will automatically connect to the PRIMARY member, and switchover to a new PRIMARY is handled automatically. You may also add &readPreference=primaryPreferred.
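For example, once connected with that URI from the mongo shell, you can confirm you have landed on the primary (a minimal sketch; db.isMaster() is the standard shell helper):
mongo "mongodb://custom-pvc-mongodb-0.custom-pvc-mongodb-svc.mongo-3033.svc.cluster.local:27017,custom-pvc-mongodb-1.custom-pvc-mongodb-svc.mongo-3033.svc.cluster.local:27017,custom-pvc-mongodb-2.custom-pvc-mongodb-svc.mongo-3033.svc.cluster.local:27017/admin?replicaSet=custom-pvc-mongodb&readPreference=primaryPreferred"
// inside the shell, check which member you are actually talking to:
db.isMaster().ismaster   // true only on the primary
db.isMaster().primary    // host:port of the current primary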
I have a MongoDB replica set installed on my original host (k8s cluster 1), and I have just installed a new MongoDB replica set on the new host (k8s cluster 2).
My goal is to replicate the data from the original host to the new host, so that I can shut down the old host.
There is a blog post that I'm trying to follow to achieve this, but I'm having some trouble (reference: https://mschmitt.org/blog/mongodb-migration-replicaset/).
original_host: aadfad22ca65e4ff09de37179f961d5b-.us-east-2.elb.amazonaws.com:27017
new_host: ab775d626d81742478af2744923e2ec6-.us-east-2.elb.amazonaws.com:27017
ATTEMPT
I tried to step down the new host with rs.stepDown({force:true}). However, it fails:
rs.stepDown({force:true})
{
"ok" : 0,
"errmsg" : "No electable secondaries caught up as of 2022-05-24T08:50:24.809+00:00. Please use the replSetStepDown command with the argument {force: true} to force node to step down.",
"code" : 262,
"codeName" : "ExceededTimeLimit",
"$clusterTime" : {
"clusterTime" : Timestamp(1653382215, 1),
"signature" : {
"hash" : BinData(0,"qugjVF4xVS8+MNYlCgkK+0/Jt1o="),
"keyId" : NumberLong("7100922576402120709")
}
},
"operationTime" : Timestamp(1653382211, 1)
}
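(As an aside, not a confirmed diagnosis of the failure above: rs.stepDown() takes numeric arguments, stepDownSecs and optionally secondaryCatchUpPeriodSecs, rather than an options document; the force variant goes through the underlying command, as the error message itself suggests. A minimal sketch:)
// step down for 60 seconds, waiting the default catch-up period first
rs.stepDown(60)
// force the step-down via the raw command when no electable secondary has caught up
db.adminCommand({ replSetStepDown: 60, force: true })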
I tried to add the new_host as a replica set member to the original replica set using rs.add( { host: "ab775d626d81742478af2744923e2ec6-<redacted>.us-east-2.elb.amazonaws.com:27017", priority: 0, votes: 0 } ). However, it fails with "replica set IDs do not match":
{
"_id" : 2,
"name" : "ab775d626d81742478af2744923e2ec6-<redacted>.us-east-2.elb.amazonaws.com:27017",
"health" : 0,
"state" : 8,
"stateStr" : "(not reachable/healthy)",
"uptime" : 0,
"optime" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"optimeDurable" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"optimeDate" : ISODate("1970-01-01T00:00:00Z"),
"optimeDurableDate" : ISODate("1970-01-01T00:00:00Z"),
"lastAppliedWallTime" : ISODate("1970-01-01T00:00:00Z"),
"lastDurableWallTime" : ISODate("1970-01-01T00:00:00Z"),
"lastHeartbeat" : ISODate("2022-05-24T08:52:21.889Z"),
"lastHeartbeatRecv" : ISODate("1970-01-01T00:00:00Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "replica set IDs do not match, ours: 628c90e7c07f5017faff8b75; remote node's: 628b8b76ab0cdc7f9158b23b",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"configVersion" : -1,
"configTerm" : -1
}
original replica set status (rs.status())
rs0:PRIMARY> rs.status()
{
"set" : "rs0",
"date" : ISODate("2022-05-24T08:45:48.912Z"),
"myState" : 1,
"term" : NumberLong(2),
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"majorityVoteCount" : 2,
"writeMajorityCount" : 1,
"votingMembersCount" : 2,
"writableVotingMembersCount" : 1,
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1653381945, 1),
"t" : NumberLong(2)
},
"lastCommittedWallTime" : ISODate("2022-05-24T08:45:45.660Z"),
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1653381945, 1),
"t" : NumberLong(2)
},
"appliedOpTime" : {
"ts" : Timestamp(1653381945, 1),
"t" : NumberLong(2)
},
"durableOpTime" : {
"ts" : Timestamp(1653381945, 1),
"t" : NumberLong(2)
},
"lastAppliedWallTime" : ISODate("2022-05-24T08:45:45.660Z"),
"lastDurableWallTime" : ISODate("2022-05-24T08:45:45.660Z")
},
"lastStableRecoveryTimestamp" : Timestamp(1653381945, 1),
"electionCandidateMetrics" : {
"lastElectionReason" : "electionTimeout",
"lastElectionDate" : ISODate("2022-05-24T08:01:45.606Z"),
"electionTerm" : NumberLong(2),
"lastCommittedOpTimeAtElection" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"lastSeenOpTimeAtElection" : {
"ts" : Timestamp(1653379303, 15),
"t" : NumberLong(1)
},
"numVotesNeeded" : 1,
"priorityAtElection" : 5,
"electionTimeoutMillis" : NumberLong(10000),
"newTermStartDate" : ISODate("2022-05-24T08:01:45.609Z"),
"wMajorityWriteAvailabilityDate" : ISODate("2022-05-24T08:01:45.611Z")
},
"members" : [
{
"_id" : 0,
"name" : "aadfad22ca65e4ff09de37179f961d5b-<redacted>.us-east-2.elb.amazonaws.com:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 2644,
"optime" : {
"ts" : Timestamp(1653381945, 1),
"t" : NumberLong(2)
},
"optimeDate" : ISODate("2022-05-24T08:45:45Z"),
"lastAppliedWallTime" : ISODate("2022-05-24T08:45:45.660Z"),
"lastDurableWallTime" : ISODate("2022-05-24T08:45:45.660Z"),
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1653379305, 1),
"electionDate" : ISODate("2022-05-24T08:01:45Z"),
"configVersion" : 7,
"configTerm" : 2,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 1,
"name" : "mongodb-staging-arbiter-0.mongodb-staging-arbiter-headless.staging.svc.cluster.local:27017",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER",
"uptime" : 2627,
"lastHeartbeat" : ISODate("2022-05-24T08:45:47.161Z"),
"lastHeartbeatRecv" : ISODate("2022-05-24T08:45:47.763Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"configVersion" : 7,
"configTerm" : 2
},
{
"_id" : 2,
"name" : "ab775d626d81742478af2744923e2ec6-<redacted>.us-east-2.elb.amazonaws.com:27017",
"health" : 0,
"state" : 8,
"stateStr" : "(not reachable/healthy)",
"uptime" : 0,
"optime" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"optimeDurable" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"optimeDate" : ISODate("1970-01-01T00:00:00Z"),
"optimeDurableDate" : ISODate("1970-01-01T00:00:00Z"),
"lastAppliedWallTime" : ISODate("1970-01-01T00:00:00Z"),
"lastDurableWallTime" : ISODate("1970-01-01T00:00:00Z"),
"lastHeartbeat" : ISODate("2022-05-24T08:45:47.280Z"),
"lastHeartbeatRecv" : ISODate("1970-01-01T00:00:00Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "replica set IDs do not match, ours: 628c90e7c07f5017faff8b75; remote node's: 628b8b76ab0cdc7f9158b23b",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"configVersion" : -1,
"configTerm" : -1
}
],
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1653381945, 1),
"signature" : {
"hash" : BinData(0,"1GVq5OEAV3YzQmhPHuzQPVwC7+c="),
"keyId" : NumberLong("7101210034268274693")
}
},
"operationTime" : Timestamp(1653381945, 1)
}
new replica set status (rs.status() on the new host)
{
"set" : "rs0",
"date" : ISODate("2022-05-24T08:45:52.621Z"),
"myState" : 1,
"term" : NumberLong(6),
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"majorityVoteCount" : 2,
"writeMajorityCount" : 1,
"votingMembersCount" : 2,
"writableVotingMembersCount" : 1,
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1653381951, 1),
"t" : NumberLong(6)
},
"lastCommittedWallTime" : ISODate("2022-05-24T08:45:51.410Z"),
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1653381951, 1),
"t" : NumberLong(6)
},
"appliedOpTime" : {
"ts" : Timestamp(1653381951, 1),
"t" : NumberLong(6)
},
"durableOpTime" : {
"ts" : Timestamp(1653381951, 1),
"t" : NumberLong(6)
},
"lastAppliedWallTime" : ISODate("2022-05-24T08:45:51.410Z"),
"lastDurableWallTime" : ISODate("2022-05-24T08:45:51.410Z")
},
"lastStableRecoveryTimestamp" : Timestamp(1653381921, 1),
"electionCandidateMetrics" : {
"lastElectionReason" : "electionTimeout",
"lastElectionDate" : ISODate("2022-05-24T08:42:31.402Z"),
"electionTerm" : NumberLong(6),
"lastCommittedOpTimeAtElection" : {
"ts" : Timestamp(0, 0),
"t" : NumberLong(-1)
},
"lastSeenOpTimeAtElection" : {
"ts" : Timestamp(1653381732, 1),
"t" : NumberLong(5)
},
"numVotesNeeded" : 1,
"priorityAtElection" : 5,
"electionTimeoutMillis" : NumberLong(10000),
"newTermStartDate" : ISODate("2022-05-24T08:42:31.405Z"),
"wMajorityWriteAvailabilityDate" : ISODate("2022-05-24T08:42:31.407Z")
},
"members" : [
{
"_id" : 0,
"name" : "ab775d626d81742478af2744923e2ec6-<redacted>.us-east-2.elb.amazonaws.com:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 204,
"optime" : {
"ts" : Timestamp(1653381951, 1),
"t" : NumberLong(6)
},
"optimeDate" : ISODate("2022-05-24T08:45:51Z"),
"lastAppliedWallTime" : ISODate("2022-05-24T08:45:51.410Z"),
"lastDurableWallTime" : ISODate("2022-05-24T08:45:51.410Z"),
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1653381751, 1),
"electionDate" : ISODate("2022-05-24T08:42:31Z"),
"configVersion" : 4,
"configTerm" : 6,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 1,
"name" : "mongodb-prod-arbiter-0.mongodb-prod-arbiter-headless.mongodb.svc.cluster.local:27017",
"health" : 1,
"state" : 7,
"stateStr" : "ARBITER",
"uptime" : 187,
"lastHeartbeat" : ISODate("2022-05-24T08:45:50.898Z"),
"lastHeartbeatRecv" : ISODate("2022-05-24T08:45:52.493Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"configVersion" : 4,
"configTerm" : 6
}
],
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1653381951, 1),
"signature" : {
"hash" : BinData(0,"XYfuUbFoIwFfLZx3FqPOAD+CU44="),
"keyId" : NumberLong("7100922576402120709")
}
},
"operationTime" : Timestamp(1653381951, 1)
}
Drop all data from the new hosts, i.e. stop mongod, delete all files from dbPath and start again.
Then simply add the new hosts as new members, i.e. rs.add("ab775d626d81742478af2744923e2ec6-.us-east-2.elb.amazonaws.com:27017"). Once you have added them, an initial sync runs. Check the sync with rs.status(); it may take some time.
When the new members are in state SECONDARY and rs.printSecondaryReplicationInfo() shows
rs.printSecondaryReplicationInfo()
source: "aadfad22ca65e4ff09de37179f961d5b-<redacted>.us-east-2.elb.amazonaws.com:27017"
syncedTo: Tue May 24 2022 11:23:23 GMT+0200 (CEST)
0 secs (0 hrs) behind the primary
(1-2 seconds behind the primary is also normal), then you can remove the old host from the replica set with rs.remove('aadfad22ca65e4ff09de37179f961d5b-.us-east-2.elb.amazonaws.com:27017')
Finally you can stop mongod on the old hosts and remove everything from there.
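Put together as a rough shell sketch (hostnames as in the question; the state listing is just one way to watch the sync progress):
// on the ORIGINAL primary, after the new host's dbPath has been wiped:
rs.add("ab775d626d81742478af2744923e2ec6-<redacted>.us-east-2.elb.amazonaws.com:27017")
// watch the initial sync; the new member should go STARTUP2 -> SECONDARY
rs.status().members.map(function (m) { return m.name + " " + m.stateStr; })
rs.printSecondaryReplicationInfo()
// once it is SECONDARY and roughly 0-2 seconds behind, drop the old member:
rs.remove("aadfad22ca65e4ff09de37179f961d5b-<redacted>.us-east-2.elb.amazonaws.com:27017")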
SUMMARY:
I recently worked on a replica set and could successfully connect from Compass using the connection string below:
mongodb://10.47.72.28:27017,10.47.72.38:27017,10.47.72.39:27017?replicaSet=mongo-cluster
ISSUE:
After enabling authentication, I cannot connect as a cluster (connect ETIMEDOUT), but I can connect to each node individually. Below are the connection string and the authentication entries.
mongodb://admin:Admin%40123@10.47.72.28:27017,10.47.72.38:27017,10.47.72.39:27017/?authSource=admin&replicaSet=mongo-cluster&readPreference=primaryPreferred&appname=MongoDB%20Compass&ssl=false
Strangely, I can connect to all the nodes individually without specifying replicaSet in Compass.
Can someone please advise on what could be happening here?
mongod.conf values
net:
  port: 27017
  bindIp: 0.0.0.0
security:
  authorization: enabled
  keyFile: /etc/mongo/mongodb.key
replication:
  replSetName: mongo-cluster
  enableMajorityReadConcern: true
Below are the logs
{"t":{"$date":"2021-12-05T18:27:19.258+00:00"},"s":"I", "c":"NETWORK", "id":22943, "ctx":"listener","msg":"Connection accepted","attr":{"remote":"13.126.136.66:64175","connectionId":202,"connectionCount":15}}
{"t":{"$date":"2021-12-05T18:27:19.260+00:00"},"s":"I", "c":"NETWORK", "id":51800, "ctx":"conn202","msg":"client metadata","attr":{"remote":"13.126.136.66:64175","client":"conn202","doc":{"driver":{"name":"nodejs","version":"4.0.0-beta.6"},"os":{"type":"Windows_NT","name":"win32","architecture":"x64","version":"10.0.19044"},"platform":"Node.js v12.4.0, LE (unified)|Node.js v12.4.0, LE (unified)","application":{"name":"MongoDB Compass"}}}}
{"t":{"$date":"2021-12-05T18:27:19.306+00:00"},"s":"I", "c":"NETWORK", "id":22944, "ctx":"conn202","msg":"Connection ended","attr":{"remote":"13.126.136.66:64175","connectionId":202,"connectionCount":14}}
ReplicaSet Config.
rs.config()
{
"_id" : "mongo-cluster",
"version" : 3,
"term" : 19,
"protocolVersion" : NumberLong(1),
"writeConcernMajorityJournalDefault" : true,
"members" : [
{
"_id" : 0,
"host" : "174.1.0.6:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 1,
"host" : "174.1.0.7:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 2,
"host" : "174.1.0.8:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
}
],
"settings" : {
"chainingAllowed" : true,
"heartbeatIntervalMillis" : 2000,
"heartbeatTimeoutSecs" : 10,
"electionTimeoutMillis" : 10000,
"catchUpTimeoutMillis" : -1,
"catchUpTakeoverDelayMillis" : 30000,
"getLastErrorModes" : {
},
"getLastErrorDefaults" : {
"w" : 1,
"wtimeout" : 0
},
"replicaSetId" : ObjectId("618a54b988498045e01e33ee")
}
}
mongo-cluster:PRIMARY>
ReplicaSet Status
mongo-cluster:PRIMARY> rs.status()
{
"set" : "mongo-cluster",
"date" : ISODate("2021-12-06T11:00:23.263Z"),
"myState" : 1,
"term" : NumberLong(19),
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"majorityVoteCount" : 2,
"writeMajorityCount" : 2,
"votingMembersCount" : 3,
"writableVotingMembersCount" : 3,
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1638788417, 1),
"t" : NumberLong(19)
},
"lastCommittedWallTime" : ISODate("2021-12-06T11:00:17.347Z"),
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1638788417, 1),
"t" : NumberLong(19)
},
"readConcernMajorityWallTime" : ISODate("2021-12-06T11:00:17.347Z"),
"appliedOpTime" : {
"ts" : Timestamp(1638788417, 1),
"t" : NumberLong(19)
},
"durableOpTime" : {
"ts" : Timestamp(1638788417, 1),
"t" : NumberLong(19)
},
"lastAppliedWallTime" : ISODate("2021-12-06T11:00:17.347Z"),
"lastDurableWallTime" : ISODate("2021-12-06T11:00:17.347Z")
},
"lastStableRecoveryTimestamp" : Timestamp(1638788417, 1),
"electionCandidateMetrics" : {
"lastElectionReason" : "stepUpRequestSkipDryRun",
"lastElectionDate" : ISODate("2021-12-02T07:02:16.093Z"),
"electionTerm" : NumberLong(19),
"lastCommittedOpTimeAtElection" : {
"ts" : Timestamp(1638428533, 1),
"t" : NumberLong(18)
},
"lastSeenOpTimeAtElection" : {
"ts" : Timestamp(1638428533, 1),
"t" : NumberLong(18)
},
"numVotesNeeded" : 2,
"priorityAtElection" : 1,
"electionTimeoutMillis" : NumberLong(10000),
"priorPrimaryMemberId" : 1,
"numCatchUpOps" : NumberLong(0),
"newTermStartDate" : ISODate("2021-12-02T07:02:16.106Z"),
"wMajorityWriteAvailabilityDate" : ISODate("2021-12-02T07:02:18.107Z")
},
"members" : [
{
"_id" : 0,
"name" : "174.1.0.6:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 427920,
"optime" : {
"ts" : Timestamp(1638788417, 1),
"t" : NumberLong(19)
},
"optimeDate" : ISODate("2021-12-06T11:00:17Z"),
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1638428536, 1),
"electionDate" : ISODate("2021-12-02T07:02:16Z"),
"configVersion" : 3,
"configTerm" : 19,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 1,
"name" : "174.1.0.7:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 359881,
"optime" : {
"ts" : Timestamp(1638788417, 1),
"t" : NumberLong(19)
},
"optimeDurable" : {
"ts" : Timestamp(1638788417, 1),
"t" : NumberLong(19)
},
"optimeDate" : ISODate("2021-12-06T11:00:17Z"),
"optimeDurableDate" : ISODate("2021-12-06T11:00:17Z"),
"lastHeartbeat" : ISODate("2021-12-06T11:00:21.530Z"),
"lastHeartbeatRecv" : ISODate("2021-12-06T11:00:21.530Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "174.1.0.6:27017",
"syncSourceId" : 0,
"infoMessage" : "",
"configVersion" : 3,
"configTerm" : 19
},
{
"_id" : 2,
"name" : "174.1.0.8:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 359879,
"optime" : {
"ts" : Timestamp(1638788417, 1),
"t" : NumberLong(19)
},
"optimeDurable" : {
"ts" : Timestamp(1638788417, 1),
"t" : NumberLong(19)
},
"optimeDate" : ISODate("2021-12-06T11:00:17Z"),
"optimeDurableDate" : ISODate("2021-12-06T11:00:17Z"),
"lastHeartbeat" : ISODate("2021-12-06T11:00:21.534Z"),
"lastHeartbeatRecv" : ISODate("2021-12-06T11:00:21.530Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "174.1.0.7:27017",
"syncSourceId" : 1,
"infoMessage" : "",
"configVersion" : 3,
"configTerm" : 19
}
],
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1638788417, 1),
"signature" : {
"hash" : BinData(0,"UeCHJX2kYRT5awReraDGQBMt13E="),
"keyId" : NumberLong("7028523322010763269")
}
},
"operationTime" : Timestamp(1638788417, 1)
}
mongo-cluster:PRIMARY>
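One thing worth checking here (an aside, not a confirmed diagnosis): with ?replicaSet= in the URI, Compass rediscovers the members from the set's own configuration, which in the rs.config() above advertises the 174.1.0.x addresses rather than the 10.47.72.x addresses used in the connection string, and those advertised addresses must be reachable from the Compass host. A quick way to see what a replica-set-aware client will try to dial, from any one member:
mongo "mongodb://admin:Admin%40123@10.47.72.28:27017/?authSource=admin"
// inside the shell: the addresses advertised to replica-set-aware clients
db.isMaster().hosts
rs.conf().members.map(function (m) { return m.host; })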
Sometimes our MongoDB replica set has multiple primary nodes!
In this case mongo-b is our real primary, while mongo-c is not responding and only reports the PRIMARY role spuriously.
Number of nodes in the replica set: 4 (3 normal + 1 delayed node without voting rights).
We checked for a network partition and access problems; there is no partition.
We have had this problem several times since upgrading the cluster from version 4.2.5 to version 4.4.5.
Each time we fix the problem by cleaning the data directory on the stale node and restarting it.
replica conf:
{
"_id" : 0,
"host" : "mongo-a:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 3,
"host" : "mongo-h:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : true,
"priority" : 0,
"tags" : {
},
"slaveDelay" : NumberLong(900),
"votes" : 0
},
{
"_id" : 5,
"host" : "mongo-c:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
},
{
"_id" : 6,
"host" : "mongo-b:27017",
"arbiterOnly" : false,
"buildIndexes" : true,
"hidden" : false,
"priority" : 1,
"tags" : {
},
"slaveDelay" : NumberLong(0),
"votes" : 1
}
rs.status() result (partial):
{
"_id" : 5,
"name" : "mongo-c:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 3435,
"optime" : {
"ts" : Timestamp(1637525581, 2),
"t" : NumberLong(87)
},
"optimeDurable" : {
"ts" : Timestamp(1637525443, 1),
"t" : NumberLong(87)
},
"optimeDate" : ISODate("2021-11-21T20:13:01Z"),
"optimeDurableDate" : ISODate("2021-11-21T20:10:43Z"),
"lastHeartbeat" : ISODate("2021-11-24T07:08:32.108Z"),
"lastHeartbeatRecv" : ISODate("2021-11-24T07:08:34.867Z"),
"pingMs" : NumberLong(2355),
"lastHeartbeatMessage" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1637524307, 1),
"electionDate" : ISODate("2021-11-21T19:51:47Z"),
"configVersion" : 23,
"configTerm" : 87
},
{
"_id" : 6,
"name" : "mongo-b:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 7995003,
"optime" : {
"ts" : Timestamp(1637737714, 24),
"t" : NumberLong(88)
},
"optimeDate" : ISODate("2021-11-24T07:08:34Z"),
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1637525581, 1),
"electionDate" : ISODate("2021-11-21T20:13:01Z"),
"configVersion" : 23,
"configTerm" : 88,
"self" : true,
"lastHeartbeatMessage" : ""
}
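A rough way to compare what each of the two "primaries" is reporting, run against each node in turn (fields as in the rs.status() excerpt above):
// list every member this node believes is PRIMARY, with its term and election time;
// in the excerpt above mongo-c is on term 87 while mongo-b is on term 88,
// i.e. mongo-c is reporting a stale view rather than acting as a second primary
rs.status().members
  .filter(function (m) { return m.stateStr === "PRIMARY"; })
  .map(function (m) { return { host: m.name, term: m.configTerm, electionDate: m.electionDate }; })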
I have set up a MongoDB replica set with one primary and two secondaries. The problem I am facing is that reads from the application servers, which connect with the replica-set connection URL, invariably go to only one secondary, causing a huge skew in read load between the two secondaries.
Because of this skew, I am constrained for resources on one server while the resources on the other are wasted.
rs.status()
{
"set" : "rs0",
"date" : ISODate("2020-09-08T19:39:20.394Z"),
"myState" : 1,
"term" : NumberLong(16),
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"heartbeatIntervalMillis" : NumberLong(2000),
"majorityVoteCount" : 2,
"writeMajorityCount" : 2,
"optimes" : {
"lastCommittedOpTime" : {
"ts" : Timestamp(1599593958, 2042),
"t" : NumberLong(16)
},
"lastCommittedWallTime" : ISODate("2020-09-08T19:39:18.908Z"),
"readConcernMajorityOpTime" : {
"ts" : Timestamp(1599593958, 2042),
"t" : NumberLong(16)
},
"readConcernMajorityWallTime" : ISODate("2020-09-08T19:39:18.908Z"),
"appliedOpTime" : {
"ts" : Timestamp(1599593959, 1176),
"t" : NumberLong(16)
},
"durableOpTime" : {
"ts" : Timestamp(1599593958, 2042),
"t" : NumberLong(16)
},
"lastAppliedWallTime" : ISODate("2020-09-08T19:39:19.138Z"),
"lastDurableWallTime" : ISODate("2020-09-08T19:39:18.908Z")
},
"lastStableRecoveryTimestamp" : Timestamp(1599593936, 300),
"lastStableCheckpointTimestamp" : Timestamp(1599593936, 300),
"electionCandidateMetrics" : {
"lastElectionReason" : "priorityTakeover",
"lastElectionDate" : ISODate("2020-08-11T17:18:08.040Z"),
"electionTerm" : NumberLong(16),
"lastCommittedOpTimeAtElection" : {
"ts" : Timestamp(1597166288, 246),
"t" : NumberLong(15)
},
"lastSeenOpTimeAtElection" : {
"ts" : Timestamp(1597166288, 246),
"t" : NumberLong(15)
},
"numVotesNeeded" : 2,
"priorityAtElection" : 2,
"electionTimeoutMillis" : NumberLong(10000),
"priorPrimaryMemberId" : 5,
"targetCatchupOpTime" : {
"ts" : Timestamp(1597166288, 394),
"t" : NumberLong(15)
},
"numCatchUpOps" : NumberLong(148),
"newTermStartDate" : ISODate("2020-08-11T17:18:08.074Z"),
"wMajorityWriteAvailabilityDate" : ISODate("2020-08-11T17:18:10.782Z")
},
"members" : [
{
"_id" : 3,
"name" : "1.1.1.1:27017",
"health" : 1,
"state" : 1,
"stateStr" : "PRIMARY",
"uptime" : 2427845,
"optime" : {
"ts" : Timestamp(1599593959, 1176),
"t" : NumberLong(16)
},
"optimeDate" : ISODate("2020-09-08T19:39:19Z"),
"syncingTo" : "",
"syncSourceHost" : "",
"syncSourceId" : -1,
"infoMessage" : "",
"electionTime" : Timestamp(1597166288, 383),
"electionDate" : ISODate("2020-08-11T17:18:08Z"),
"configVersion" : 32,
"self" : true,
"lastHeartbeatMessage" : ""
},
{
"_id" : 5,
"name" : "3.3.3.3:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 3672,
"optime" : {
"ts" : Timestamp(1599593954, 3378),
"t" : NumberLong(16)
},
"optimeDurable" : {
"ts" : Timestamp(1599593954, 3378),
"t" : NumberLong(16)
},
"optimeDate" : ISODate("2020-09-08T19:39:14Z"),
"optimeDurableDate" : ISODate("2020-09-08T19:39:14Z"),
"lastHeartbeat" : ISODate("2020-09-08T19:39:19.238Z"),
"lastHeartbeatRecv" : ISODate("2020-09-08T19:39:20.261Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncingTo" : "1.1.1.1:27017",
"syncSourceHost" : "1.1.1.1:27017",
"syncSourceId" : 3,
"infoMessage" : "",
"configVersion" : 32
},
{
"_id" : 6,
"name" : "2.2.2.2:27017",
"health" : 1,
"state" : 2,
"stateStr" : "SECONDARY",
"uptime" : 3341,
"optime" : {
"ts" : Timestamp(1599593957, 2190),
"t" : NumberLong(16)
},
"optimeDurable" : {
"ts" : Timestamp(1599593957, 2190),
"t" : NumberLong(16)
},
"optimeDate" : ISODate("2020-09-08T19:39:17Z"),
"optimeDurableDate" : ISODate("2020-09-08T19:39:17Z"),
"lastHeartbeat" : ISODate("2020-09-08T19:39:18.751Z"),
"lastHeartbeatRecv" : ISODate("2020-09-08T19:39:20.078Z"),
"pingMs" : NumberLong(0),
"lastHeartbeatMessage" : "",
"syncingTo" : "1.1.1.1:27017",
"syncSourceHost" : "1.1.1.1:27017",
"syncSourceId" : 3,
"infoMessage" : "",
"configVersion" : 32
}
],
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1599593959, 1329),
"signature" : {
"hash" : BinData(0,"dfdfdggjhkljoj+mvY8="),
"keyId" : NumberLong("897987897897987")
}
},
"operationTime" : Timestamp(1599593959, 1176)
}
Please help me here. Is this something that is normally expected from a MongoDB replica set?
Many thanks in advance :)
The server selection algorithm for secondaries in a replica set is described here.
You can tune the local threshold (localThresholdMS) and max staleness (maxStalenessSeconds) parameters; if you increase both sufficiently, you should see a roughly even load distribution between the secondaries, assuming a compliant driver.
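For example, as a sketch of a connection string carrying those two options (hosts and replica-set name taken from the rs.status() above; the numeric values are only illustrative, and the read preference shown is an assumption about how the application reads from secondaries):
mongodb://1.1.1.1:27017,2.2.2.2:27017,3.3.3.3:27017/?replicaSet=rs0&readPreference=secondaryPreferred&localThresholdMS=200&maxStalenessSeconds=120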