I am currently trying to set up a MongoDB sharded cluster on Docker.
We already have such a cluster running Mongo 4.2, but for the new installation we wanted to use the latest version of the Docker image.
I used the same docker-compose file, but the data and config servers won't start.
Looking at the Docker logs, the errors are:
BadValue: Cannot start a shardsvr as a standalone server. Please use the option --replSet to start the node as a replica set.
BadValue: Cannot start a configsvr as a standalone server. Please use the option --replSet to start the node as a replica set.
But I do have --replSet in my commands.
After some trial and error, I found that the error occurs when I add the init DB environment variables that initialize the admin user:
environment:
MONGO_INITDB_ROOT_USERNAME: ${MONGO_ADMIN_USER}
MONGO_INITDB_ROOT_PASSWORD: ${MONGO_ADMIN_PASSWORD}
I also did the test with mongo image version 5 and got the same behavior.
It works fine with mongo image 4.4.18.
Here is my docker-compose file:
version: '3.5'
services:
# Router
mongo-router-01:
command: mongos --port 27017 --configdb ${MONGO_RS_CONFIG_NAME}/mongo-config-01:27017,mongo-config-02:27017,mongo-config-03:27017 --bind_ip_all --keyFile /etc/mongo-cluster.key
container_name: ${MONGO_ROUTER_SERVER}-01-${ENVIRONMENT_NAME}
environment:
MONGO_INITDB_ROOT_USERNAME: ${MONGO_ADMIN_USER}
MONGO_INITDB_ROOT_PASSWORD: ${MONGO_ADMIN_PASSWORD}
image: mongo:${MONGO_VERSION}
networks:
- mongo-network
restart: always
volumes:
- ./keys/${ENVIRONMENT_NAME}/mongo-cluster.key:/etc/mongo-cluster.key
- ./volumes/${ENVIRONMENT_NAME}/${MONGO_ROUTER_SERVER}-01/db:/data/db
- ./volumes/${ENVIRONMENT_NAME}/${MONGO_ROUTER_SERVER}-01/configdb:/data/configdb
mongo-router-02:
command: mongos --port 27017 --configdb ${MONGO_RS_CONFIG_NAME}/mongo-config-01:27017,mongo-config-02:27017,mongo-config-03:27017 --bind_ip_all --keyFile /etc/mongo-cluster.key
container_name: ${MONGO_ROUTER_SERVER}-02-${ENVIRONMENT_NAME}
environment:
MONGO_INITDB_ROOT_USERNAME: ${MONGO_ADMIN_USER}
MONGO_INITDB_ROOT_PASSWORD: ${MONGO_ADMIN_PASSWORD}
image: mongo:${MONGO_VERSION}
networks:
- mongo-network
restart: always
volumes:
- ./keys/${ENVIRONMENT_NAME}/mongo-cluster.key:/etc/mongo-cluster.key
- ./volumes/${ENVIRONMENT_NAME}/${MONGO_ROUTER_SERVER}-02/db:/data/db
- ./volumes/${ENVIRONMENT_NAME}/${MONGO_ROUTER_SERVER}-02/configdb:/data/configdb
# Config Servers
mongo-config-01:
command: mongod --port 27017 --configsvr --replSet ${MONGO_RS_CONFIG_NAME} --keyFile /etc/mongo-cluster.key
container_name: ${MONGO_CONFIG_SERVER}-01-${ENVIRONMENT_NAME}
environment:
MONGO_INITDB_ROOT_USERNAME: ${MONGO_ADMIN_USER}
MONGO_INITDB_ROOT_PASSWORD: ${MONGO_ADMIN_PASSWORD}
image: mongo:${MONGO_VERSION}
networks:
- mongo-network
restart: always
volumes:
- ./keys/preprod/mongo-cluster.key:/etc/mongo-cluster.key
- ./volumes/${ENVIRONMENT_NAME}/${MONGO_CONFIG_SERVER}-01/db:/data/db
- ./volumes/${ENVIRONMENT_NAME}/${MONGO_CONFIG_SERVER}-01/configdb:/data/configdb
mongo-config-02:
command: mongod --port 27017 --configsvr --replSet ${MONGO_RS_CONFIG_NAME} --keyFile /etc/mongo-cluster.key
container_name: ${MONGO_CONFIG_SERVER}-02-${ENVIRONMENT_NAME}
environment:
MONGO_INITDB_ROOT_USERNAME: ${MONGO_ADMIN_USER}
MONGO_INITDB_ROOT_PASSWORD: ${MONGO_ADMIN_PASSWORD}
image: mongo:${MONGO_VERSION}
networks:
- mongo-network
restart: always
volumes:
- ./keys/preprod/mongo-cluster.key:/etc/mongo-cluster.key
- ./volumes/${ENVIRONMENT_NAME}/${MONGO_CONFIG_SERVER}-02/db:/data/db
- ./volumes/${ENVIRONMENT_NAME}/${MONGO_CONFIG_SERVER}-02/configdb:/data/configdb
mongo-config-03:
command: mongod --port 27017 --configsvr --replSet ${MONGO_RS_CONFIG_NAME} --keyFile /etc/mongo-cluster.key
container_name: ${MONGO_CONFIG_SERVER}-03-${ENVIRONMENT_NAME}
environment:
MONGO_INITDB_ROOT_USERNAME: ${MONGO_ADMIN_USER}
MONGO_INITDB_ROOT_PASSWORD: ${MONGO_ADMIN_PASSWORD}
image: mongo:${MONGO_VERSION}
networks:
- mongo-network
restart: always
volumes:
- ./keys/${ENVIRONMENT_NAME}/mongo-cluster.key:/etc/mongo-cluster.key
- ./volumes/${ENVIRONMENT_NAME}/${MONGO_CONFIG_SERVER}-03/db:/data/db
- ./volumes/${ENVIRONMENT_NAME}/${MONGO_CONFIG_SERVER}-03/configdb:/data/configdb
# Data Servers
mongo-arbiter-01:
command: mongod --port 27017 --shardsvr --replSet ${MONGO_RS_DATA_NAME} --keyFile /etc/mongo-cluster.key
container_name: ${MONGO_ARBITER_SERVER}-01-${ENVIRONMENT_NAME}
environment:
MONGO_INITDB_ROOT_USERNAME: ${MONGO_ADMIN_USER}
MONGO_INITDB_ROOT_PASSWORD: ${MONGO_ADMIN_PASSWORD}
image: mongo:${MONGO_VERSION}
networks:
- mongo-network
restart: always
volumes:
- ./keys/${ENVIRONMENT_NAME}/mongo-cluster.key:/etc/mongo-cluster.key
- ./volumes/${ENVIRONMENT_NAME}/${MONGO_ARBITER_SERVER}-01/db:/data/db
- ./volumes/${ENVIRONMENT_NAME}/${MONGO_ARBITER_SERVER}-01/configdb:/data/configdb
mongo-data-01:
command: mongod --port 27017 --shardsvr --replSet ${MONGO_RS_DATA_NAME} --keyFile /etc/mongo-cluster.key
container_name: ${MONGO_DATA_SERVER}-01-${ENVIRONMENT_NAME}
environment:
MONGO_INITDB_ROOT_USERNAME: ${MONGO_ADMIN_USER}
MONGO_INITDB_ROOT_PASSWORD: ${MONGO_ADMIN_PASSWORD}
image: mongo:${MONGO_VERSION}
networks:
- mongo-network
restart: always
volumes:
- ./keys/${ENVIRONMENT_NAME}/mongo-cluster.key:/etc/mongo-cluster.key
- ./volumes/${ENVIRONMENT_NAME}/${MONGO_DATA_SERVER}-01/db:/data/db
- ./volumes/${ENVIRONMENT_NAME}/${MONGO_DATA_SERVER}-01/configdb:/data/configdb
mongo-data-02:
command: mongod --port 27017 --shardsvr --replSet ${MONGO_RS_DATA_NAME} --keyFile /etc/mongo-cluster.key
container_name: ${MONGO_DATA_SERVER}-02-${ENVIRONMENT_NAME}
environment:
MONGO_INITDB_ROOT_USERNAME: ${MONGO_ADMIN_USER}
MONGO_INITDB_ROOT_PASSWORD: ${MONGO_ADMIN_PASSWORD}
image: mongo:${MONGO_VERSION}
networks:
- mongo-network
restart: always
volumes:
- ./keys/${ENVIRONMENT_NAME}/mongo-cluster.key:/etc/mongo-cluster.key
- ./volumes/${ENVIRONMENT_NAME}/${MONGO_DATA_SERVER}-02/db:/data/db
- ./volumes/${ENVIRONMENT_NAME}/${MONGO_DATA_SERVER}-02/configdb:/data/configdb
networks:
mongo-network:
external:
name: _preprod
EDIT 2023-02-08
I may finally have found something: https://github.com/docker-library/mongo/issues/509
It seems it is expected that this fails on a shard server.
For the config server there is a PR: https://github.com/docker-library/mongo/pull/600
But it has not been merged yet.
So I guess that until the PR is merged and a new version of the image is published, there is no way to use these environment variables at all.
The root user therefore has to be created via a script after the replica sets and routers are initialized.
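As a rough sketch (not a tested recipe), that script step can boil down to something like the following, using the mongo-router-01 service from the compose file above and placeholders for the ${MONGO_ADMIN_USER}/${MONGO_ADMIN_PASSWORD} values; it has to run after the config and data replica sets have elected a PRIMARY:

# create the cluster admin user through a router, from inside the container
# (relies on the localhost exception, so it must be the first user created)
docker-compose exec mongo-router-01 mongosh --port 27017 --eval '
  db.getSiblingDB("admin").createUser({
    user: "<MONGO_ADMIN_USER>",
    pwd: "<MONGO_ADMIN_PASSWORD>",
    roles: [ { role: "root", db: "admin" } ]
  })'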
I am not familiar with Docker, so I can only guess. When I deployed my sharded cluster, I had to add readPreference=primaryPreferred to the connection string.
Otherwise, when the replica set is initiated, the current host may become a SECONDARY and the shell does not switch over automatically to the new PRIMARY.
Another common issue: when the replica set is initiated, you must wait until the initiation has finished before you run other actions. When I initiate a replica set, I usually do it like this:
rs.initiate(...)
while (! db.hello().isWritablePrimary ) { sleep(1000) }
And last but not least, in version 6.0 you must set the default read/write concern with setDefaultRWConcern before you can run many other operations; see Compatibility Changes in MongoDB 6.0, section Replica Sets.
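A minimal example of what that can look like, run against the PRIMARY once the replica set is up (the concern values are just an illustration):

# set a cluster-wide default write concern
mongosh --eval 'db.adminCommand({ setDefaultRWConcern: 1, defaultWriteConcern: { w: "majority" } })'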
I found this repository:
https://github.com/minhhungit/mongodb-cluster-docker-compose
It contains several docker-compose files (with or without a cluster key).
I tested it and it seems to work.
EDIT 13/02/2023
It took a bit more time than expected, since my setup is not the same as the example found in the repository mentioned above, but now everything is working fine, with the replica sets and users initialized correctly. A condensed sketch of the steps is below.
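Heavily condensed, the order that worked for me looks roughly like this (the service and host names come from my compose file, the replica-set names are the ${MONGO_RS_CONFIG_NAME}/${MONGO_RS_DATA_NAME} values, and in reality this is driven by an init script rather than typed by hand):

# 1. initiate the config server replica set
docker-compose exec mongo-config-01 mongosh --eval 'rs.initiate({
  _id: "<MONGO_RS_CONFIG_NAME>", configsvr: true,
  members: [
    { _id: 0, host: "mongo-config-01:27017" },
    { _id: 1, host: "mongo-config-02:27017" },
    { _id: 2, host: "mongo-config-03:27017" }
  ]})'
# 2. initiate the data (shard) replica set, including the arbiter
docker-compose exec mongo-data-01 mongosh --eval 'rs.initiate({
  _id: "<MONGO_RS_DATA_NAME>",
  members: [
    { _id: 0, host: "mongo-data-01:27017" },
    { _id: 1, host: "mongo-data-02:27017" },
    { _id: 2, host: "mongo-arbiter-01:27017", arbiterOnly: true }
  ]})'
# 3. create the admin user through a router (see the snippet in the edit above),
#    then register the shard while authenticated
docker-compose exec mongo-router-01 mongosh -u "<MONGO_ADMIN_USER>" -p "<MONGO_ADMIN_PASSWORD>" --authenticationDatabase admin \
  --eval 'sh.addShard("<MONGO_RS_DATA_NAME>/mongo-data-01:27017,mongo-data-02:27017")'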
Related
I want to connect to a MongoDB cluster using
mongodb://localhost:27017
It shows me an error:
getaddrinfo ENOTFOUND messenger-mongodb-1
This is my docker-compose.yml file:
version: '3'
services:
messenger-mongodb-1:
container_name: messenger-mongodb-1
image: mongo:6.0.3
command: mongod --replSet messenger-mongodb-replica-set --bind_ip_all
ports:
- 27017:27017
networks:
- messenger-mongodb-cluster
volumes:
- messenger-mongodb-1-data:/data/db
depends_on:
- messenger-mongodb-2
- messenger-mongodb-3
healthcheck:
test: test $$(echo "rs.initiate({_id:\"messenger-mongodb-replica-set\",members:[{_id:0,host:\"messenger-mongodb-1\"},{_id:1,host:\"messenger-mongodb-2\"},{_id:2,host:\"messenger-mongodb-3\"}]}).ok || rs.status().ok" | mongo --quiet) -eq 1
interval: 10s
start_period: 30s
messenger-mongodb-2:
container_name: messenger-mongodb-2
image: mongo:6.0.3
command: mongod --replSet messenger-mongodb-replica-set --bind_ip_all
ports:
- 27018:27017
networks:
- messenger-mongodb-cluster
environment:
- MONGO_INITDB_DATABASE=messenger-db
volumes:
- messenger-mongodb-2-data:/data/db
messenger-mongodb-3:
container_name: messenger-mongodb-3
image: mongo:6.0.3
command: mongod --replSet messenger-mongodb-replica-set --bind_ip_all
ports:
- 27019:27017
networks:
- messenger-mongodb-cluster
environment:
- MONGO_INITDB_DATABASE=messenger-db
volumes:
- messenger-mongodb-3-data:/data/db
networks:
messenger-mongodb-cluster:
volumes:
messenger-mongodb-1-data:
messenger-mongodb-2-data:
messenger-mongodb-3-data:
I run it with
docker-compose up -d
How can I connect to my replica set?
I want to use it for local development of my Node.js application.
My operating system is Windows 11 Pro.
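As an illustration of what is going on (not a verified fix): the driver first connects to the seed address, then rediscovers the members under the host names stored in the replica-set config (messenger-mongodb-1 and so on), which are not resolvable from the Windows host and produce the ENOTFOUND error. A single member can still be reached directly, bypassing replica-set discovery:

# connect to one member directly (works the same with the Node.js driver's directConnection option)
mongosh "mongodb://localhost:27017/?directConnection=true"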
I have tried to run a MongoDB replica set locally with mongodb-community on my Mac. I followed the MongoDB docs and I can run it with this command:
mongod --port 27017 --dbpath /usr/local/var/mongodb --replSet rs0 --bind_ip localhost,127.0.0.1
but it doesn't run in the background, so every time I want to start the replica set I have to run that command. Before running it I have to stop mongod first, and then in the next console tab I have to run mongo --eval "rs.initiate()" to create the replica set again.
Here is my docker-compose file:
version: "3.7"
services:
mongodb_container:
image: mongo:latest
ports:
- 27017:27017
volumes:
- mongodb_data_container:/data/db
volumes:
mongodb_data_container:
How can I convert that into docker-compose? Is it possible?
Can I run docker exec CONTAINER_ID [commands] to execute the mongo commands like the ones above, or do I have to stop the mongod running in that container first?
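As a sketch of what that last idea could look like (assuming the mongod in the container is started with a --replSet flag, which the compose file above does not yet pass, and that the image ships mongosh):

# run the shell inside the already-running container; no need to stop mongod first
docker exec -it CONTAINER_ID mongosh --eval "rs.initiate()"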
You can have a MongoDB replica set with this docker-compose file:
services:
mongodb-primary:
image: "bitnami/mongodb:4.2"
user: root
volumes:
- ./mongodb-persistence/bitnami:/bitnami
networks:
- parse_network
environment:
- MONGODB_REPLICA_SET_MODE=primary
- MONGODB_REPLICA_SET_KEY=123456789
- MONGODB_ROOT_USERNAME=admin-123
- MONGODB_ROOT_PASSWORD=password-123
- MONGODB_USERNAME=admin-123
- MONGODB_PASSWORD=password-123
- MONGODB_DATABASE=my_database
ports:
- 27017:27017
mongodb-secondary:
image: "bitnami/mongodb:4.2"
depends_on:
- mongodb-primary
environment:
- MONGODB_REPLICA_SET_MODE=secondary
- MONGODB_REPLICA_SET_KEY=123456789
- MONGODB_PRIMARY_HOST=mongodb-primary
- MONGODB_PRIMARY_PORT_NUMBER=27017
- MONGODB_PRIMARY_ROOT_USERNAME=admin-123
- MONGODB_PRIMARY_ROOT_PASSWORD=password-123
networks:
- parse_network
ports:
- 27027:27017
mongodb-arbiter:
image: "bitnami/mongodb:4.2"
depends_on:
- mongodb-primary
environment:
- MONGODB_ADVERTISED_HOSTNAME=mongodb-arbiter
- MONGODB_REPLICA_SET_MODE=arbiter
- MONGODB_PRIMARY_HOST=mongodb-primary
- MONGODB_PRIMARY_PORT_NUMBER=27017
- MONGODB_PRIMARY_ROOT_PASSWORD=password-123
- MONGODB_REPLICA_SET_KEY=123456789
networks:
- parse_network
ports:
- 27037:27017
networks:
parse_network:
driver: bridge
ipam:
driver: default
volumes:
mongodb_master_data:
driver: local
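A quick way to verify that the replica set actually formed, assuming the credentials from the compose file above (the 4.2 image ships the legacy mongo shell rather than mongosh):

docker-compose exec mongodb-primary mongo -u admin-123 -p password-123 --authenticationDatabase admin --eval "rs.status()"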
I have the following docker-compose.yml file:
version: "3"
services:
pokerstats:
image: pokerstats
container_name: pokerstats
ports:
- 8080:8080
depends_on:
- db
db:
image: mongo
container_name: mongo
volumes:
- ./database:/data
ports:
- "27018:27017"
environment:
MONGO_INITDB_ROOT_USERNAME: admin
MONGO_INITDB_ROOT_PASSWORD: admin
MONGO_INITDB_DATABASE: pokerstats
My issue is that when I run docker-compose down, all the data in my Mongo database is lost.
How can I create a Mongo volume that persists even when the Mongo container goes down?
Per the image documentation, the database volume needs to be /data/db. This is also seen in the Dockerfile's VOLUME declaration.
Since the volume is defined in the Dockerfile, if you do not mount a volume at exactly that directory, then even if you mounted one at the parent path like /data, Docker will create an anonymous volume at /data/db, which shows up as a long GUID volume name in docker volume ls. Depending on how the container is run, those anonymous volumes may be left behind.
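To see (and, if you want, clean up) those leftover anonymous volumes, something like this works:

# anonymous volumes show up with long generated names
docker volume ls
# removes every volume not referenced by any container, so review the list first
docker volume prune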
Therefore the fix is to adjust your volume mount to that path:
version: "3"
services:
pokerstats:
image: pokerstats
container_name: pokerstats
ports:
- 8080:8080
depends_on:
- db
db:
image: mongo
container_name: mongo
volumes:
- ./database:/data/db
ports:
- "27018:27017"
environment:
MONGO_INITDB_ROOT_USERNAME: admin
MONGO_INITDB_ROOT_PASSWORD: admin
MONGO_INITDB_DATABASE: pokerstats
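With the mount pointing at /data/db, the data survives a full restart cycle; a quick check:

docker-compose down
docker-compose up -d
# the database files are now kept on the host under ./database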
Note that unless you need direct access to this data on the host, I'd recommend using a named volume instead. Named volumes include initialization steps that help with permission issues you may encounter with host volumes, particularly when running directly on a Linux host.
To use a named volume, that would look like:
version: "3"
services:
pokerstats:
image: pokerstats
container_name: pokerstats
ports:
- 8080:8080
depends_on:
- db
db:
image: mongo
container_name: mongo
volumes:
- dbdata:/data/db
ports:
- "27018:27017"
environment:
MONGO_INITDB_ROOT_USERNAME: admin
MONGO_INITDB_ROOT_PASSWORD: admin
MONGO_INITDB_DATABASE: pokerstats
volumes:
dbdata:
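When using the named volume, the data lives under Docker's own storage area rather than next to the compose file; you can locate it with the commands below (note that Compose prefixes the volume name with the project name, shown here as a placeholder):

docker volume ls
docker volume inspect <project_name>_dbdata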
Try changing the volumes for the MongoDB container:
volumes:
- "./database:/data/db"
I'm trying to deploy a minimal MongoDB sharded cluster into my development Swarm environment.
This is what I have up to now:
version: "3.3"
networks:
net:
driver: overlay
services:
data1:
image: mongo:3.6
container_name: data1
command: mongod --shardsvr --replSet datars --smallfiles --port 27017
expose:
- 27017
networks:
- net
cfg1:
image: mongo:3.6
container_name: cfg1
command: mongod --configsvr --replSet cfgrs --smallfiles --port 27017
expose:
- 27017
networks:
- net
mongos1:
image: mongo:3.4
container_name: mongos1
command: mongos --configdb cfgrs/cfg1:27017
expose:
- 27017
networks:
- net
In my config server's logs, I'm getting this message:
2019-09-06T09:22:15.693+0000 I SHARDING [shard registry reload] Periodic reload of shard registry failed :: caused by :: 134 could not get updated shard list from config server due to Read concern majority reads are currently not possible.; will retry after 30s,
Any ideas?
I'm running this docker-compose.yml on my Mac, on a fresh Docker for Mac environment.
Gist here!
version: '2'
services:
replica1:
image: mongo:3.0
container_name: mongo1
ports:
- "27017:27017"
volumes:
- ./mongodata/replica1:/data/db
command: mongod --smallfiles --replSet "mrmtx"
networks:
- mongo_cluster
replica2:
image: mongo:3.0
container_name: mongo2
ports:
- "27017:27017"
volumes:
- ./mongodata/replica2:/data/db
command: mongod --smallfiles --replSet "mrmtx"
networks:
- mongo_cluster
replica3:
image: mongo:3.0
container_name: mongo3
ports:
- "27017:27017"
volumes:
- ./mongodata/replica3:/data/db
command: mongod --smallfiles --replSet "mrmtx"
networks:
- mongo_cluster
networks:
mongo_cluster:
driver: overlay
I'm getting this error:
Cannot start service replica2: b'Could not attach to network mongo-rplset_mongo_cluster: rpc error: code = PermissionDenied desc = network mongo-rplset_mongo_cluster not manually attachable'