I have the following docker-compose.yaml file:
version: '3.8'

services:
  # One-shot helper container: runs mongo-setup.sh to initialise the
  # "dbrs" replica set, restarting until the script succeeds.
  mongo_launcher:
    container_name: mongo_launcher
    image: mongo:5.0.8
    restart: on-failure
    networks:
      - mongo_network
    volumes:
      - ./docker/mongo-setup.sh:/scripts/mongo-setup.sh
    entrypoint: ['sh', '/scripts/mongo-setup.sh']

  mongo_replica_1:
    container_name: mongo_replica_1
    image: mongo:5.0.8
    ports:
      # Quoted: unquoted digit:digit pairs can hit YAML 1.1 sexagesimal parsing.
      - '27017:27017'
    restart: always
    entrypoint:
      ['/usr/bin/mongod', '--bind_ip_all', '--replSet', 'dbrs', '--dbpath', '/data/db', '--port', '27017']
    volumes:
      - ./.volumes/mongo/replica1:/data/db
      - ./.volumes/mongo/replica1/configdb:/data/configdb
    networks:
      - mongo_network

  mongo_replica_2:
    container_name: mongo_replica_2
    image: mongo:5.0.8
    ports:
      - '27018:27018'
    restart: always
    entrypoint:
      ['/usr/bin/mongod', '--bind_ip_all', '--replSet', 'dbrs', '--dbpath', '/data/db', '--port', '27018']
    volumes:
      - ./.volumes/mongo/replica2:/data/db
      - ./.volumes/mongo/replica2/configdb:/data/configdb
    networks:
      - mongo_network

  mongo_replica_3:
    container_name: mongo_replica_3
    image: mongo:5.0.8
    ports:
      - '27019:27019'
    restart: always
    entrypoint:
      ['/usr/bin/mongod', '--bind_ip_all', '--replSet', 'dbrs', '--dbpath', '/data/db', '--port', '27019']
    volumes:
      - ./.volumes/mongo/replica3:/data/db
      - ./.volumes/mongo/replica3/configdb:/data/configdb
    networks:
      - mongo_network

networks:
  mongo_network:
    driver: bridge
Note that the first service will use the mongo-setup.sh script:
#!/bin/bash
# Initialise the "dbrs" replica set once mongo_replica_1 accepts connections.

MONGODB_REPLICA_1=mongo_replica_1
MONGODB_REPLICA_2=mongo_replica_2
MONGODB_REPLICA_3=mongo_replica_3

echo "************ [ Waiting for startup ] **************" ${MONGODB_REPLICA_1}
# FIX: the original `curl ... | grep uptime | head -1` wait was a no-op:
#  * a pipeline's exit status is the LAST command's, and `head -1` exits 0
#    even on empty input, so the `until` loop always exited immediately;
#  * modern mongod serves no HTTP status page, so curl never saw "uptime".
# Ask mongod directly whether it answers commands instead.
until mongosh --host ${MONGODB_REPLICA_1}:27017 --quiet --eval 'db.adminCommand({ ping: 1 })' > /dev/null 2>&1; do
  printf '.'
  sleep 1
done
echo "************ [ Startup completed ] **************" ${MONGODB_REPLICA_1}

# Push the replica-set configuration through the primary candidate.
mongosh --host ${MONGODB_REPLICA_1}:27017 <<EOF
var cfg = {
  "_id": "dbrs",
  "protocolVersion": 1,
  "version": 1,
  "members": [
    { "_id": 1, "host": "${MONGODB_REPLICA_1}:27017", "priority": 3 },
    { "_id": 2, "host": "${MONGODB_REPLICA_2}:27018", "priority": 2 },
    { "_id": 3, "host": "${MONGODB_REPLICA_3}:27019", "priority": 1 }
  ],
  settings: { chainingAllowed: true }
};
rs.initiate(cfg, { force: true });
rs.reconfig(cfg, { force: true });
rs.secondaryOk();
db.getMongo().setReadPref('nearest');
db.getMongo().setSecondaryOk();
EOF
When I run docker-compose up -d, it succeeds and when I run docker ps I get:
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
0c8f6d916e4a mongo:5.0.8 "/usr/bin/mongod --b…" 11 seconds ago Up 10 seconds 27017/tcp, 0.0.0.0:27018->27018/tcp mongo_replica_2
c59cb625362e mongo:5.0.8 "/usr/bin/mongod --b…" 11 seconds ago Up 10 seconds 0.0.0.0:27017->27017/tcp mongo_replica_1
cb61093e4dd0 mongo:5.0.8 "/usr/bin/mongod --b…" 11 seconds ago Up 10 seconds 27017/tcp, 0.0.0.0:27019->27019/tcp mongo_replica_3
But when I try to connect one of the replica sets with Mongo Compass I get the following error:
So it seems the containers are all running fine — why do I get this issue? Please advise.
I could solve the issue by appending the following content to my /etc/hosts file:
127.0.0.1 mongo_replica_1
127.0.0.1 mongo_replica_2
127.0.0.1 mongo_replica_3
Could anyone come up with more elegant way?
I tried to bypass the /etc/hosts solution, so I edited docker-compose.yaml file and I applied extra_hosts:
version: '3.8'

services:
  # One-shot helper container that runs the replica-set init script.
  mongo_launcher:
    container_name: mongo_launcher
    image: mongo:5.0.8
    restart: on-failure
    networks:
      - mongo_network
    volumes:
      - ./docker/mongo-setup.sh:/scripts/mongo-setup.sh
    entrypoint: ['sh', '/scripts/mongo-setup.sh']

  mongo_replica_1:
    container_name: mongo_replica_1
    image: mongo:5.0.8
    ports:
      # Quoted: unquoted digit:digit pairs can hit YAML 1.1 sexagesimal parsing.
      - '27017:27017'
    # NOTE(review): this maps the NAME "localhost" to 0.0.0.0 inside the
    # container — unusual; confirm it is intended rather than a host-side fix.
    extra_hosts:
      - 'localhost:0.0.0.0'
    restart: always
    entrypoint:
      ['/usr/bin/mongod', '--bind_ip_all', '--replSet', 'dbrs', '--dbpath', '/data/db', '--port', '27017']
    volumes:
      - ./.volumes/mongo/replica1:/data/db
      - ./.volumes/mongo/replica1/configdb:/data/configdb
    networks:
      - mongo_network

  mongo_replica_2:
    container_name: mongo_replica_2
    image: mongo:5.0.8
    ports:
      - '27018:27018'
    extra_hosts:
      - 'localhost:0.0.0.0'
    restart: always
    entrypoint:
      ['/usr/bin/mongod', '--bind_ip_all', '--replSet', 'dbrs', '--dbpath', '/data/db', '--port', '27018']
    volumes:
      - ./.volumes/mongo/replica2:/data/db
      - ./.volumes/mongo/replica2/configdb:/data/configdb
    networks:
      - mongo_network

  mongo_replica_3:
    container_name: mongo_replica_3
    image: mongo:5.0.8
    ports:
      - '27019:27019'
    extra_hosts:
      - 'localhost:0.0.0.0'
    restart: always
    entrypoint:
      ['/usr/bin/mongod', '--bind_ip_all', '--replSet', 'dbrs', '--dbpath', '/data/db', '--port', '27019']
    volumes:
      - ./.volumes/mongo/replica3:/data/db
      - ./.volumes/mongo/replica3/configdb:/data/configdb
    networks:
      - mongo_network

networks:
  mongo_network:
    driver: bridge
Then I edited the mongo-setup.sh to:
#!/bin/bash
# Initialise the "dbrs" replica set, addressing members as localhost:2701x.
# NOTE(review): inside the mongo_launcher container "localhost" is the
# launcher itself, NOT the replica containers — this localhost-based setup
# likely needs the Docker service names instead; confirm before relying on it.

echo "************ [ Waiting for startup ] **************"
# FIX: the original probed http://${MONGODB_REPLICA_1}:27017 but that
# variable is never defined in this script (curl hit "http://:27017"),
# and `| head -1` made the until-loop exit immediately regardless.
# Probe the same address mongosh connects to below.
until mongosh --host localhost:27017 --quiet --eval 'db.adminCommand({ ping: 1 })' > /dev/null 2>&1; do
  printf '.'
  sleep 1
done
echo "************ [ Startup completed ] **************"

mongosh --host localhost:27017 <<EOF
var cfg = {
  "_id": "dbrs",
  "protocolVersion": 1,
  "version": 1,
  "members": [
    { "_id": 1, "host": "localhost:27017", "priority": 3 },
    { "_id": 2, "host": "localhost:27018", "priority": 2 },
    { "_id": 3, "host": "localhost:27019", "priority": 1 }
  ],
  settings: { chainingAllowed: true }
};
rs.initiate(cfg, { force: true });
rs.reconfig(cfg, { force: true });
rs.secondaryOk();
db.getMongo().setReadPref('nearest');
db.getMongo().setSecondaryOk();
EOF
And I removed the edits I made in /etc/hosts file. I don't get an error like before, I do get timeout:
Related
I want to setup a replica set with mongodb, and I want to detect all the servers are ready to use.
I have configured the following docker-compose file:
version: '3.8'

services:
  # One-shot helper container: runs the replica-set init script.
  mongo_launcher:
    container_name: mongo_launcher
    image: mongo:6.0.2
    restart: on-failure
    networks:
      - dashboard_network
    volumes:
      - ./docker/scripts/mongo-setup.sh:/scripts/mongo-setup.sh
    entrypoint: ['sh', '/scripts/mongo-setup.sh']

  mongo_replica_1:
    container_name: mongo_replica_1
    image: mongo:6.0.2
    ports:
      # Quoted: unquoted digit:digit pairs can hit YAML 1.1 sexagesimal parsing.
      - '27017:27017'
    restart: always
    entrypoint:
      ['/usr/bin/mongod', '--bind_ip_all', '--replSet', 'dbrs', '--dbpath', '/data/db', '--port', '27017']
    volumes:
      - ./.volumes/mongo/replica1:/data/db
      - ./.volumes/mongo/replica1/configdb:/data/configdb
    networks:
      - dashboard_network

  mongo_replica_2:
    container_name: mongo_replica_2
    image: mongo:6.0.2
    ports:
      - '27018:27018'
    restart: always
    entrypoint:
      ['/usr/bin/mongod', '--bind_ip_all', '--replSet', 'dbrs', '--dbpath', '/data/db', '--port', '27018']
    volumes:
      - ./.volumes/mongo/replica2:/data/db
      - ./.volumes/mongo/replica2/configdb:/data/configdb
    networks:
      - dashboard_network

  mongo_replica_3:
    container_name: mongo_replica_3
    image: mongo:6.0.2
    ports:
      - '27019:27019'
    restart: always
    entrypoint:
      ['/usr/bin/mongod', '--bind_ip_all', '--replSet', 'dbrs', '--dbpath', '/data/db', '--port', '27019']
    volumes:
      - ./.volumes/mongo/replica3:/data/db
      - ./.volumes/mongo/replica3/configdb:/data/configdb
    networks:
      - dashboard_network

# FIX: every service references dashboard_network but the file never declared
# it; Compose rejects references to undefined networks.
networks:
  dashboard_network:
    driver: bridge
My mongo-setup.sh file is:
#!/bin/bash
# Initialise the "dbrs" replica set once mongo_replica_1 is actually ready.

MONGODB_REPLICA_1=mongo_replica_1
MONGODB_REPLICA_2=mongo_replica_2
MONGODB_REPLICA_3=mongo_replica_3

echo "************ [ Waiting for startup ] **************" ${MONGODB_REPLICA_1}
# FIX: the original wait was `until ncc -zvv ... | grep uptime | head -1`:
#  * `ncc` is a typo for `nc`, so the probe command itself always failed;
#  * a pipeline's exit status is the LAST command's, and `head -1` exits 0
#    even on empty input — so the until-loop exited immediately and mongosh
#    ran before mongod was listening (the ECONNREFUSED loop in the logs).
# Ask mongod directly whether it answers commands; a bare TCP check
# (`nc -z host 27017`) would also work but proves less.
until mongosh --host ${MONGODB_REPLICA_1}:27017 --quiet --eval 'db.adminCommand({ ping: 1 })' > /dev/null 2>&1; do
  printf '.'
  sleep 1
done
echo "************ [ Startup completed ] **************" ${MONGODB_REPLICA_1}

mongosh --host ${MONGODB_REPLICA_1}:27017 <<EOF
var cfg = {
  "_id": "dbrs",
  "protocolVersion": 1,
  "version": 1,
  "members": [
    { "_id": 1, "host": "${MONGODB_REPLICA_1}:27017", "priority": 3 },
    { "_id": 2, "host": "${MONGODB_REPLICA_2}:27018", "priority": 2 },
    { "_id": 3, "host": "${MONGODB_REPLICA_3}:27019", "priority": 1 }
  ],
  settings: { chainingAllowed: true }
};
rs.initiate(cfg, { force: true });
rs.reconfig(cfg, { force: true });
rs.secondaryOk();
db.getMongo().setReadPref('nearest');
db.getMongo().setSecondaryOk();
EOF
If I check the logs of mongo_launcher, using docker logs mongo_launcher I get:
************ [ Waiting for startup ] ************** mongo_replica_1
************ [ Startup completed ] ************** mongo_replica_1
Current Mongosh Log ID: 6367f96a8273830a6762893c
Connecting to: mongodb://mongo_replica_1:27017/?directConnection=true&appName=mongosh+1.6.0
MongoNetworkError: connect ECONNREFUSED 172.21.0.3:27017
************ [ Waiting for startup ] ************** mongo_replica_1
************ [ Startup completed ] ************** mongo_replica_1
Current Mongosh Log ID: 6367f96c7b96e8524fb103e7
Connecting to: mongodb://mongo_replica_1:27017/?directConnection=true&appName=mongosh+1.6.0
MongoNetworkError: connect ECONNREFUSED 172.21.0.3:27017
************ [ Waiting for startup ] ************** mongo_replica_1
************ [ Startup completed ] ************** mongo_replica_1
Current Mongosh Log ID: 6367f96f9fd15a9ae8bc32d6
Connecting to: mongodb://mongo_replica_1:27017/?directConnection=true&appName=mongosh+1.6.0
MongoNetworkError: connect ECONNREFUSED 172.21.0.3:27017
************ [ Waiting for startup ] ************** mongo_replica_1
************ [ Startup completed ] ************** mongo_replica_1
Current Mongosh Log ID: 6367f972d8f575d837789d62
Connecting to: mongodb://mongo_replica_1:27017/?directConnection=true&appName=mongosh+1.6.0
MongoNetworkError: connect ECONNREFUSED 172.21.0.3:27017
************ [ Waiting for startup ] ************** mongo_replica_1
************ [ Startup completed ] ************** mongo_replica_1
Current Mongosh Log ID: 6367f9746bfa236397cf67a5
Connecting to: mongodb://mongo_replica_1:27017/?directConnection=true&appName=mongosh+1.6.0
MongoNetworkError: connect ECONNREFUSED 172.21.0.3:27017
************ [ Waiting for startup ] ************** mongo_replica_1
************ [ Startup completed ] ************** mongo_replica_1
Current Mongosh Log ID: 6367f97842ac739549b27e4a
Connecting to: mongodb://mongo_replica_1:27017/?directConnection=true&appName=mongosh+1.6.0
MongoNetworkError: connect ECONNREFUSED 172.21.0.3:27017
************ [ Waiting for startup ] ************** mongo_replica_1
************ [ Startup completed ] ************** mongo_replica_1
Current Mongosh Log ID: 6367f97cb586d80909d5b2f8
Could anyone tell me why the script passes the:
until ncc -zvv ${MONGODB_REPLICA_1} 27017 2>&1 | grep uptime | head -1; do
printf '.'
sleep 1
done
check, but then fails to connect to the mongo server? I don't want the script to skip this condition if the server is not ready.
I have an ASP.NET Core application and I have kept the connection string in the appsettings.json file.
Should I also add the connection string in the Docker Profile as an environment variable?
This is my appsettings.json:
{
"PersistenceAccess": {
"ConnectionString": "Server=localhost;Port=5432;Database=DemoDatabase;User Id=postgres;Password=postgres;"
}
}
And this is the Docker Profile in the LaunchSettings.json:
{
  "iisSettings": {
    "windowsAuthentication": false,
    "anonymousAuthentication": true,
    "iisExpress": {
      "applicationUrl": "http://localhost:35847",
      "sslPort": 0
    }
  },
  "profiles": {
    "IIS Express": {
      ..
    },
    "Docker": {
      "commandName": "Docker",
      "launchBrowser": true,
      "launchUrl": "{Scheme}://{ServiceHost}:{ServicePort}",
      "environmentVariables": {
        "PersistenceAccess__ConnectionString": "Server=host.docker.internal;Port=5432;Database=DemoDatabase;User Id=postgres;Password=postgres;"
      },
      "DockerfileRunArguments": "--add-host host.docker.internal:host-gateway",
      "publishAllPorts": true,
      "useSSL": false
    }
  }
}
I also have a Docker-Compose.yml file:
version: '3.4'

services:
  Demo.api:
    image: ${DOCKER_REGISTRY-}demoapi
    build:
      context: .
      dockerfile: Sources/Code/Demo.Api/Dockerfile
    environment:
      - ASPNETCORE_ENVIRONMENT=Development
      # FIX: the original had a space after the '=' ("...ConnectionString= Server=db;...")
      # which becomes a leading space in the value the app receives.
      - PersistenceAccess__ConnectionString=Server=db;Port=5432;Database=DemoDatabase;User Id=postgres;Password=postgres;
    ports:
      - '8081:80'
    depends_on:
      - db

  db:
    image: postgres
    restart: always
    environment:
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=postgres
    logging:
      options:
        max-size: 10m
        max-file: "3"
    ports:
      - '5438:5432'
    volumes:
      - ./postgres-data:/var/lib/postgresql/data
      # copy the sql script to create tables
      - ./sql/create_tables.sql:/docker-entrypoint-initdb.d/create_tables.sql
      # copy the sql script to fill tables
      - ./sql/fill_tables.sql:/docker-entrypoint-initdb.d/fill_tables.sql
Even with containers connected to the same network, when executing the migration on sequelize, the error 'getaddrinfo ENOTFOUND' is returned.
If I remove the host address in the 'database.js' settings, the migration runs but the CRUD routines stop running returning the error "connect ECONNREFUSED 127.0.0.1:5432"
If I point to the 'postgresdb' container in the 'database.js' settings, the CRUD routines execute but the sequelize migration does not.
Help me please
Fragment of docker network inspect in bridge
"Containers": {
"35467ab419c3632c4c0cfe57e972bd94c7de0a5818e37fdae6eb82a25381ceab": {
"Name": "api",
"EndpointID": "d54d95b94bec543fa831526d1abb99346045efc4cc5f4425d5d59b200ece3e62",
"MacAddress": "02:42:ac:1a:00:05",
"IPv4Address": "172.26.0.5/16",
"IPv6Address": ""
},
"7e028bcd80948fd61d14ff87437b963af31291220b7adbc8861fb98a2171f04a": {
"Name": "postgresdb",
"EndpointID": "e1d55e0fe8658a142880da27eea12a47d73f6cce472eeff38322ed4d6a60e8ad",
"MacAddress": "02:42:ac:1a:00:03",
"IPv4Address": "172.26.0.3/16",
"IPv6Address": ""
},
My docker-compose.yml
version: '3'

networks:
  api:
    driver: bridge

services:
  api:
    container_name: api
    depends_on:
      - postgresdb
      - mongodb
      - redisdb
    # NOTE(review): `links` is legacy — services on the same user-defined
    # network already resolve each other by service name.
    links:
      - postgresdb
    environment:
      POSTGRES_HOST: postgresdb
    build: .
    volumes:
      - .:/home/node/api
    command: yarn dev
    networks:
      - api
    ports:
      # Quoted: unquoted digit:digit pairs can hit YAML 1.1 sexagesimal parsing.
      - '3333:3333'

  postgresdb:
    image: postgres:alpine
    container_name: postgresdb
    environment:
      - POSTGRES_USER=docker
      - POSTGRES_PASSWORD=docker
      - POSTGRES_DB=postdb
    networks:
      - api
    ports:
      - "5432:5432"

  mongodb:
    image: mongo
    container_name: mongodb
    networks:
      - api
    ports:
      - "27017:27017"

  redisdb:
    image: redis
    container_name: redisdb
    networks:
      - api
    ports:
      - "6379:6379"
My database.js file
// Sequelize connection settings, read from environment variables.
// dotenv/config loads variables from a .env file into process.env.
require('dotenv/config');
module.exports = {
dialect: process.env.DB_DIALECT,
// NOTE(review): when the app runs in a container on the compose network,
// this should resolve to the DB service name (e.g. "postgresdb") — confirm
// what POSTGRES_HOST is set to in each environment.
host: process.env.POSTGRES_HOST,
username: process.env.POSTGRES_USER,
password: process.env.POSTGRES_PASSWORD,
database: process.env.POSTGRES_DB,
define: {
// Add createdAt/updatedAt columns to every model automatically.
timestamps: true,
// Use snake_case for auto-generated attributes.
underscored: true,
underscoredAll: true,
},
};
My directory structure:
root
- mongo_seed
- Dockerfile
- init.json
- docker-compose.yml
- Dockerfile
Docker compose file
version: "3"

services:
  web:
    container_name: "hgbackend"
    build: .
    image: tahashin/hgbackend:v2
    ports:
      # Quoted: unquoted digit:digit pairs can hit YAML 1.1 sexagesimal parsing.
      - "3000:3000"
    links:
      - mongodb
    depends_on:
      - mongodb

  mongodb:
    image: mongo:latest
    container_name: "mongodb"
    ports:
      - "27017:27017"

  mongo_seeding:
    # FIX: was "build: ./mongo_seed ." — the trailing " ." is not part of a
    # valid build-context path; the context directory is ./mongo_seed.
    build: ./mongo_seed
    volumes:
      - ./config/db-seed:/data
    links:
      - mongodb
    depends_on:
      - mongodb
docker file under mongo_seed directory
# Seed image: based on the official mongo image so mongoimport is available.
FROM mongo:latest
# Bake the seed data into the image.
COPY init.json /init.json
# On container start, import the seed documents into the "mongodb" service.
# NOTE(review): this runs once at startup with no readiness wait — if the
# mongodb service is not yet accepting connections the import fails; confirm
# ordering is sufficient in practice.
CMD mongoimport --host mongodb --db alifhala --collection honcollection --type json --file /init.json --jsonArray
mongodb test data file init.json
[
{
"name": "Joe Smith",
"email": "jsmith@gmail.com",
"age": 40,
"admin": false
},
{
"name": "Jen Ford",
"email": "jford@gmail.com",
"age": 45,
"admin": true
}
]
After running docker-compose up in Windows PowerShell, the database and collection are not created and the data is not imported. After running a mongo query in Docker it shows only 3 databases: local, admin, config.
Check this Answer
https://stackoverflow.com/a/48179360/1124364
mongo_seeding:
build: ./mongo_seed .
Change it to
build: ./mongo_seed
I have set a MongoDB replica set which is running properly.
But I want to run the config settings inside the .yml, and not initiating inside a replica set node.
by config settings I mean:
1.
// Replica-set configuration document to be passed to rs.initiate():
// set id "comments" with three members, all on the default port 27017.
config = {
"_id": "comments",
"members": [
{
"_id": 0,
"host": "node1:27017"
},
{
"_id": 1,
"host": "node2:27017"
},
{
"_id": 2,
"host": "node3:27017"
}
]
}
and the below:
2.
rs.initiate(config)
So I figured out a way; hope it helps. Below I will post my .yml file, rsinit file and rs.sh file. All the files should be placed in the same location in order for this to work; the rest of the configuration is written out anyway.
.yml file:
version: '3.7'

services:
  mongo1:
    hostname: mongo1
    container_name: localmongo1
    image: mongo
    expose:
      - 27017
    ports:
      # Quoted: unquoted digit:digit pairs can hit YAML 1.1 sexagesimal parsing.
      - '27017:27017'
    restart: always
    entrypoint: ["/usr/bin/mongod", "--bind_ip_all", "--replSet", "rs0"]
    # volumes:
    #   - /data/db/mongotest:/data/db # This is where your volume will persist. e.g. VOLUME-DIR = ./volumes/mongodb

  mongo2:
    hostname: mongo2
    container_name: localmongo2
    image: mongo
    expose:
      - 27017
    ports:
      - '27018:27017'
    restart: always
    entrypoint: ["/usr/bin/mongod", "--bind_ip_all", "--replSet", "rs0"]

  mongo3:
    hostname: mongo3
    container_name: localmongo3
    image: mongo
    expose:
      - 27017
    ports:
      - '27019:27017'
    restart: always
    entrypoint: ["/usr/bin/mongod", "--bind_ip_all", "--replSet", "rs0"]

  # Helper service built from the "rsinit" Dockerfile; runs rs.sh to
  # initiate the rs0 replica set after the three nodes are started.
  rsinit:
    build:
      context: .
      dockerfile: rsinit
    depends_on:
      - mongo1
      - mongo2
      - mongo3
    entrypoint: ["sh", "-c", "rs.sh"]
rsinit(normal text file):
# Helper image whose only job is to run rs.sh (the replica-set initiator).
FROM mongo
# Install the script on the PATH so `sh -c rs.sh` can find it.
ADD rs.sh /usr/local/bin/
RUN chmod +x /usr/local/bin/rs.sh
rs.sh file:
#!/bin/bash
# FIX: the original first line was "!/bin/bash" — without the leading '#'
# it is not a shebang but a failing command.
echo "prepare rs initiating"

# Poll all three nodes until each answers db.stats(), then initiate rs0.
check_db_status() {
  mongo1=$(mongo --host mongo1 --port 27017 --eval "db.stats().ok" | tail -n1 | grep -E '(^|\s)1($|\s)')
  mongo2=$(mongo --host mongo2 --port 27017 --eval "db.stats().ok" | tail -n1 | grep -E '(^|\s)1($|\s)')
  mongo3=$(mongo --host mongo3 --port 27017 --eval "db.stats().ok" | tail -n1 | grep -E '(^|\s)1($|\s)')
  if [[ $mongo1 == 1 ]] && [[ $mongo2 == 1 ]] && [[ $mongo3 == 1 ]]; then
    init_rs
  else
    # FIX: the original recursed immediately with no delay, hammering the
    # servers and growing the call depth unboundedly; back off briefly.
    sleep 1
    check_db_status
  fi
}

# Initiate the rs0 replica set; output is deliberately discarded.
init_rs() {
  ret=$(mongo --host mongo1 --port 27017 --eval "rs.initiate({ _id: 'rs0', members: [{ _id: 0, host: 'mongo1:27017' }, { _id: 1, host: 'mongo2:27017' }, { _id: 2, host: 'mongo3:27017' } ] })" > /dev/null 2>&1)
}

check_db_status > /dev/null 2>&1
echo "rs initiating finished"
exit 0