MongoDB always crashes with docker-compose file

This is my docker-compose file
version: "3.1"
services:
mongo:
image: mongo:focal
user: 1000:1000
networks:
mynet:
ipv4_address: "172.19.0.15"
ports:
- 27017:27017
environment:
- MONGO_INITDB_ROOT_USERNAME="root"
- MONGO_INITDB_ROOT_PASSWORD="mongo#123"
command: mongod --verbose --dbpath=/data/db
volumes:
- /data/mongo:/data/db
mongo-admin:
image: mongo-express:0.54.0
networks:
mynet:
ipv4_address: "172.19.0.16"
ports:
- 8081:8081
environment:
- ME_CONFIG_MONGODB_ADMINUSERNAME="root"
- ME_CONFIG_MONGODB_ADMINPASSWORD="mongo#123"
- ME_CONFIG_MONGODB_URL="mongodb://root:mongo#123#mongo:27017/"
networks:
mynet:
driver: bridge
ipam:
config:
- subnet: "172.19.0.0/24"
gateway: "172.19.0.1"
Whenever I run it, it errors out:
Attaching to environment_mongo-admin_1, environment_mongo_1
mongo-admin_1 | Waiting for mongo:27017...
mongo-admin_1 | /docker-entrypoint.sh: connect: Connection refused
mongo-admin_1 | /docker-entrypoint.sh: line 14: /dev/tcp/mongo/27017: Connection refused
mongo_1 | about to fork child process, waiting until server is ready for connections.
mongo_1 | forked process: 20
mongo_1 |
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.185+00:00"},"s":"I", "c":"CONTROL", "id":20698, "ctx":"-","msg":"***** SERVER RESTARTED *****"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.187+00:00"},"s":"I", "c":"CONTROL", "id":23285, "ctx":"-","msg":"Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none'"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.200+00:00"},"s":"I", "c":"NETWORK", "id":4915701, "ctx":"main","msg":"Initialized wire specification","attr":{"spec":{"incomingExternalClient":{"minWireVersion":0,"maxWireVersion":13},"incomingInternalClient":{"minWireVersion":0,"maxWireVersion":13},"outgoing":{"minWireVersion":0,"maxWireVersion":13},"isInternalClient":true}}}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.201+00:00"},"s":"W", "c":"ASIO", "id":22601, "ctx":"main","msg":"No TransportLayer configured during NetworkInterface startup"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.202+00:00"},"s":"I", "c":"NETWORK", "id":4648601, "ctx":"main","msg":"Implicit TCP FastOpen unavailable. If TCP FastOpen is required, set tcpFastOpenServer, tcpFastOpenClient, and tcpFastOpenQueueSize."}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.204+00:00"},"s":"W", "c":"ASIO", "id":22601, "ctx":"main","msg":"No TransportLayer configured during NetworkInterface startup"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.204+00:00"},"s":"I", "c":"REPL", "id":5123008, "ctx":"main","msg":"Successfully registered PrimaryOnlyService","attr":{"service":"TenantMigrationDonorService","ns":"config.tenantMigrationDonors"}}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.205+00:00"},"s":"I", "c":"REPL", "id":5123008, "ctx":"main","msg":"Successfully registered PrimaryOnlyService","attr":{"service":"TenantMigrationRecipientService","ns":"config.tenantMigrationRecipients"}}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.205+00:00"},"s":"D1", "c":"NETWORK", "id":22940, "ctx":"main","msg":"file descriptor and connection resource limits","attr":{"hard":1048576,"soft":1048576,"conn":838860}}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.206+00:00"},"s":"I", "c":"CONTROL", "id":5945603, "ctx":"main","msg":"Multi threading initialized"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.207+00:00"},"s":"I", "c":"CONTROL", "id":4615611, "ctx":"initandlisten","msg":"MongoDB starting","attr":{"pid":20,"port":27017,"dbPath":"/data/db","architecture":"64-bit","host":"e2237d5770b6"}}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.207+00:00"},"s":"I", "c":"CONTROL", "id":23403, "ctx":"initandlisten","msg":"Build Info","attr":{"buildInfo":{"version":"5.0.8","gitVersion":"c87e1c23421bf79614baf500fda6622bd90f674e","openSSLVersion":"OpenSSL 1.1.1f 31 Mar 2020","modules":[],"allocator":"tcmalloc","environment":{"distmod":"ubuntu2004","distarch":"x86_64","target_arch":"x86_64"}}}}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.207+00:00"},"s":"I", "c":"CONTROL", "id":51765, "ctx":"initandlisten","msg":"Operating System","attr":{"os":{"name":"Ubuntu","version":"20.04"}}}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.208+00:00"},"s":"I", "c":"CONTROL", "id":21951, "ctx":"initandlisten","msg":"Options set by command line","attr":{"options":{"net":{"bindIp":"127.0.0.1","port":27017,"tls":{"mode":"disabled"}},"processManagement":{"fork":true,"pidFilePath":"/tmp/docker-entrypoint-temp-mongod.pid"},"storage":{"dbPath":"/data/db"},"systemLog":{"destination":"file","logAppend":true,"path":"/proc/1/fd/1","verbosity":1}}}}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.212+00:00"},"s":"D1", "c":"NETWORK", "id":22940, "ctx":"initandlisten","msg":"file descriptor and connection resource limits","attr":{"hard":1048576,"soft":1048576,"conn":838860}}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.214+00:00"},"s":"D1", "c":"EXECUTOR", "id":23104, "ctx":"OCSPManagerHTTP-0","msg":"Starting thread","attr":{"threadName":"OCSPManagerHTTP-0","poolName":"OCSPManagerHTTP"}}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.214+00:00"},"s":"D1", "c":"-", "id":23074, "ctx":"initandlisten","msg":"User assertion","attr":{"error":"IllegalOperation: Attempted to create a lock file on a read-only directory: /data/db","file":"src/mongo/db/storage/storage_engine_init.cpp","line":214}}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.215+00:00"},"s":"E", "c":"CONTROL", "id":20557, "ctx":"initandlisten","msg":"DBException in initAndListen, terminating","attr":{"error":"IllegalOperation: Attempted to create a lock file on a read-only directory: /data/db"}}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.215+00:00"},"s":"I", "c":"REPL", "id":4784900, "ctx":"initandlisten","msg":"Stepping down the ReplicationCoordinator for shutdown","attr":{"waitTimeMillis":15000}}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.216+00:00"},"s":"D1", "c":"-", "id":23074, "ctx":"initandlisten","msg":"User assertion","attr":{"error":"NotWritablePrimary: not primary so can't step down","file":"src/mongo/db/repl/replication_coordinator_impl.cpp","line":2589}}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.216+00:00"},"s":"I", "c":"COMMAND", "id":4784901, "ctx":"initandlisten","msg":"Shutting down the MirrorMaestro"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.216+00:00"},"s":"I", "c":"SHARDING", "id":4784902, "ctx":"initandlisten","msg":"Shutting down the WaitForMajorityService"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.217+00:00"},"s":"I", "c":"NETWORK", "id":20562, "ctx":"initandlisten","msg":"Shutdown: going to close listening sockets"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.217+00:00"},"s":"I", "c":"NETWORK", "id":4784905, "ctx":"initandlisten","msg":"Shutting down the global connection pool"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.219+00:00"},"s":"I", "c":"CONTROL", "id":4784906, "ctx":"initandlisten","msg":"Shutting down the FlowControlTicketholder"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.219+00:00"},"s":"I", "c":"-", "id":20520, "ctx":"initandlisten","msg":"Stopping further Flow Control ticket acquisitions."}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.220+00:00"},"s":"I", "c":"NETWORK", "id":4784918, "ctx":"initandlisten","msg":"Shutting down the ReplicaSetMonitor"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.222+00:00"},"s":"I", "c":"SHARDING", "id":4784921, "ctx":"initandlisten","msg":"Shutting down the MigrationUtilExecutor"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.224+00:00"},"s":"I", "c":"ASIO", "id":22582, "ctx":"MigrationUtil-TaskExecutor","msg":"Killing all outstanding egress activity."}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.224+00:00"},"s":"I", "c":"COMMAND", "id":4784923, "ctx":"initandlisten","msg":"Shutting down the ServiceEntryPoint"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.224+00:00"},"s":"I", "c":"CONTROL", "id":4784925, "ctx":"initandlisten","msg":"Shutting down free monitoring"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.225+00:00"},"s":"I", "c":"CONTROL", "id":4784927, "ctx":"initandlisten","msg":"Shutting down the HealthLog"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.225+00:00"},"s":"I", "c":"CONTROL", "id":4784928, "ctx":"initandlisten","msg":"Shutting down the TTL monitor"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.225+00:00"},"s":"I", "c":"CONTROL", "id":4784929, "ctx":"initandlisten","msg":"Acquiring the global lock for shutdown"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.226+00:00"},"s":"I", "c":"-", "id":4784931, "ctx":"initandlisten","msg":"Dropping the scope cache for shutdown"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.226+00:00"},"s":"I", "c":"FTDC", "id":4784926, "ctx":"initandlisten","msg":"Shutting down full-time data capture"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.226+00:00"},"s":"I", "c":"CONTROL", "id":20565, "ctx":"initandlisten","msg":"Now exiting"}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.227+00:00"},"s":"D1", "c":"EXECUTOR", "id":23105, "ctx":"OCSPManagerHTTP-0","msg":"Shutting down thread","attr":{"threadName":"OCSPManagerHTTP-0","poolName":"OCSPManagerHTTP"}}
mongo_1 | {"t":{"$date":"2022-06-01T09:51:29.227+00:00"},"s":"I", "c":"CONTROL", "id":23138, "ctx":"initandlisten","msg":"Shutting down","attr":{"exitCode":100}}
mongo_1 | ERROR: child process failed, exited with 100
mongo_1 | To see additional information in this output, start without the "--fork" option.
environment_mongo_1 exited with code 100
mongo-admin_1 | Wed Jun 1 09:51:29 UTC 2022 retrying to connect to mongo:27017 (2/5)
mongo-admin_1 | /docker-entrypoint.sh: line 14: mongo: Try again
mongo-admin_1 | /docker-entrypoint.sh: line 14: /dev/tcp/mongo/27017: Invalid argument
mongo-admin_1 | Wed Jun 1 09:51:35 UTC 2022 retrying to connect to mongo:27017 (3/5)
mongo-admin_1 | /docker-entrypoint.sh: line 14: mongo: Try again
mongo-admin_1 | /docker-entrypoint.sh: line 14: /dev/tcp/mongo/27017: Invalid argument
mongo-admin_1 | Wed Jun 1 09:51:41 UTC 2022 retrying to connect to mongo:27017 (4/5)
^CGracefully stopping... (press Ctrl+C again to force)
What is the issue in my docker-compose file for MongoDB?
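The key line in the log is IllegalOperation: Attempted to create a lock file on a read-only directory: /data/db, which usually comes down to user: 1000:1000 combined with the /data/mongo bind mount: the host directory has to exist and be writable by UID 1000, otherwise mongod cannot create its lock file there. A minimal sketch of the mongo service under that assumption (host path pre-created and owned by 1000:1000, e.g. via chown -R 1000:1000 /data/mongo):
mongo:
  image: mongo:focal
  user: 1000:1000
  ports:
    - 27017:27017
  environment:
    # note: with list-style environment entries the quotes end up as part of the value,
    # so MONGO_INITDB_ROOT_USERNAME="root" would set the username to "root" including the quotes
    - MONGO_INITDB_ROOT_USERNAME=root
    - MONGO_INITDB_ROOT_PASSWORD=mongo#123
  volumes:
    # /data/mongo on the host must be writable by UID 1000
    - /data/mongo:/data/db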

Related

MongoDB on Docker keeps waiting for connection

I'm trying to bring up mongo and mongo-express on Docker, but the DB won't initialize.
I'm following an online course; I have only just started with Docker and Mongo.
The teacher runs the same code and it works normally.
Could it be my machine? I am using WSL and VS Code Remote for the course project.
Here is the docker-compose:
services:
  app:
    build: .
    ports:
      - 3000:3000
    volumes:
      - .:/home/node/app
    depends_on:
      - mongodb
  mongodb:
    image: mongo
    ports:
      - 27017:27017
    environment:
      - MONGO_INITDDB_ROOT_USERNAME=root
      - MONGO_INITDDB_ROOT_PASSWORD=root
    healthcheck:
      test: echo 'db.runCommand("ping").ok' | mongo localhost:27017/test
      retries: 5
      interval: 15s
      start_period: 30s
  mongo-express:
    image: mongo-express
    restart: unless-stopped
    ports:
      - 8081:8081
    environment:
      - ME_CONFIG_MONGODB_SERVER=mongodb
      - ME_CONFIG_MONGODB_AUTH_USERNAME=root
      - ME_CONFIG_MONGODB_AUTH_PASSWORD=root
      - ME_CONFIG_MONGODB_ADMINUSERNAME=root
      - ME_CONFIG_MONGODB_ADMINPASSWORD=root
    depends_on:
      mongodb:
        condition: service_healthy
And here is the log:
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:37.267+00:00"},"s":"I", "c":"NETWORK", "id":4915701, "ctx":"-","msg":"Initialized wire specification","attr":{"spec":{"incomingExternalClient":{"minWireVersion":0,"maxWireVersion":17},"incomingInternalClient":{"minWireVersion":0,"maxWireVersion":17},"outgoing":{"minWireVersion":6,"maxWireVersion":17},"isInternalClient":true}}}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:37.270+00:00"},"s":"I", "c":"CONTROL", "id":23285, "ctx":"main","msg":"Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none'"}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:37.270+00:00"},"s":"I", "c":"NETWORK", "id":4648601, "ctx":"main","msg":"Implicit TCP FastOpen unavailable. If TCP FastOpen is required, set tcpFastOpenServer, tcpFastOpenClient, and tcpFastOpenQueueSize."}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:37.272+00:00"},"s":"I", "c":"REPL", "id":5123008, "ctx":"main","msg":"Successfully registered PrimaryOnlyService","attr":{"service":"TenantMigrationDonorService","namespace":"config.tenantMigrationDonors"}}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:37.272+00:00"},"s":"I", "c":"REPL", "id":5123008, "ctx":"main","msg":"Successfully registered PrimaryOnlyService","attr":{"service":"TenantMigrationRecipientService","namespace":"config.tenantMigrationRecipients"}}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:37.272+00:00"},"s":"I", "c":"REPL", "id":5123008, "ctx":"main","msg":"Successfully registered PrimaryOnlyService","attr":{"service":"ShardSplitDonorService","namespace":"config.tenantSplitDonors"}}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:37.272+00:00"},"s":"I", "c":"CONTROL", "id":5945603, "ctx":"main","msg":"Multi threading initialized"}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:37.273+00:00"},"s":"I", "c":"CONTROL", "id":4615611, "ctx":"initandlisten","msg":"MongoDB starting","attr":{"pid":1,"port":27017,"dbPath":"/data/db","architecture":"64-bit","host":"434b144655c6"}}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:37.273+00:00"},"s":"I", "c":"CONTROL", "id":23403, "ctx":"initandlisten","msg":"Build Info","attr":{"buildInfo":{"version":"6.0.2","gitVersion":"94fb7dfc8b974f1f5343e7ea394d0d9deedba50e","openSSLVersion":"OpenSSL 1.1.1f 31 Mar 2020","modules":[],"allocator":"tcmalloc","environment":{"distmod":"ubuntu2004","distarch":"x86_64","target_arch":"x86_64"}}}}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:37.273+00:00"},"s":"I", "c":"CONTROL", "id":51765, "ctx":"initandlisten","msg":"Operating System","attr":{"os":{"name":"Ubuntu","version":"20.04"}}}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:37.273+00:00"},"s":"I", "c":"CONTROL", "id":21951, "ctx":"initandlisten","msg":"Options set by command line","attr":{"options":{"net":{"bindIp":"*"}}}}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:37.274+00:00"},"s":"I", "c":"STORAGE", "id":22270, "ctx":"initandlisten","msg":"Storage engine to use detected by data files","attr":{"dbpath":"/data/db","storageEngine":"wiredTiger"}}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:37.274+00:00"},"s":"I", "c":"STORAGE", "id":22297, "ctx":"initandlisten","msg":"Using the XFS filesystem is strongly recommended with the WiredTiger storage engine. See http://dochub.mongodb.org/core/prodnotes-filesystem","tags":["startupWarnings"]}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:37.275+00:00"},"s":"I", "c":"STORAGE", "id":22315, "ctx":"initandlisten","msg":"Opening WiredTiger","attr":{"config":"create,cache_size=1301M,session_max=33000,eviction=(threads_min=4,threads_max=4),config_base=false,statistics=(fast),log=(enabled=true,remove=true,path=journal,compressor=snappy),builtin_extension_config=(zstd=(compression_level=6)),file_manager=(close_idle_time=600,close_scan_interval=10,close_handle_minimum=2000),statistics_log=(wait=0),json_output=(error,message),verbose=[recovery_progress:1,checkpoint_progress:1,compact_progress:1,backup:0,checkpoint:0,compact:0,evict:0,history_store:0,recovery:0,rts:0,salvage:0,tiered:0,timestamp:0,transaction:0,verify:0,log:0],"}}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:38.070+00:00"},"s":"I", "c":"STORAGE", "id":4795906, "ctx":"initandlisten","msg":"WiredTiger opened","attr":{"durationMillis":795}}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:38.071+00:00"},"s":"I", "c":"RECOVERY", "id":23987, "ctx":"initandlisten","msg":"WiredTiger recoveryTimestamp","attr":{"recoveryTimestamp":{"$timestamp":{"t":0,"i":0}}}}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:38.078+00:00"},"s":"W", "c":"CONTROL", "id":22120, "ctx":"initandlisten","msg":"Access control is not enabled for the database. Read and write access to data and configuration is unrestricted","tags":["startupWarnings"]}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:38.078+00:00"},"s":"W", "c":"CONTROL", "id":22178, "ctx":"initandlisten","msg":"/sys/kernel/mm/transparent_hugepage/enabled is 'always'. We suggest setting it to 'never'","tags":["startupWarnings"]}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:38.078+00:00"},"s":"W", "c":"CONTROL", "id":5123300, "ctx":"initandlisten","msg":"vm.max_map_count is too low","attr":{"currentValue":65530,"recommendedMinimum":1677720,"maxConns":838860},"tags":["startupWarnings"]}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:38.081+00:00"},"s":"I", "c":"NETWORK", "id":4915702, "ctx":"initandlisten","msg":"Updated wire specification","attr":{"oldSpec":{"incomingExternalClient":{"minWireVersion":0,"maxWireVersion":17},"incomingInternalClient":{"minWireVersion":0,"maxWireVersion":17},"outgoing":{"minWireVersion":6,"maxWireVersion":17},"isInternalClient":true},"newSpec":{"incomingExternalClient":{"minWireVersion":0,"maxWireVersion":17},"incomingInternalClient":{"minWireVersion":17,"maxWireVersion":17},"outgoing":{"minWireVersion":17,"maxWireVersion":17},"isInternalClient":true}}}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:38.081+00:00"},"s":"I", "c":"REPL", "id":5853300, "ctx":"initandlisten","msg":"current featureCompatibilityVersion value","attr":{"featureCompatibilityVersion":"6.0","context":"startup"}}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:38.081+00:00"},"s":"I", "c":"STORAGE", "id":5071100, "ctx":"initandlisten","msg":"Clearing temp directory"}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:38.083+00:00"},"s":"I", "c":"CONTROL", "id":20536, "ctx":"initandlisten","msg":"Flow Control is enabled on this deployment"}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:38.083+00:00"},"s":"I", "c":"FTDC", "id":20625, "ctx":"initandlisten","msg":"Initializing full-time diagnostic data capture","attr":{"dataDirectory":"/data/db/diagnostic.data"}}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:38.089+00:00"},"s":"I", "c":"REPL", "id":6015317, "ctx":"initandlisten","msg":"Setting new configuration state","attr":{"newState":"ConfigReplicationDisabled","oldState":"ConfigPreStart"}}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:38.089+00:00"},"s":"I", "c":"STORAGE", "id":22262, "ctx":"initandlisten","msg":"Timestamp monitor starting"}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:38.093+00:00"},"s":"I", "c":"NETWORK", "id":23015, "ctx":"listener","msg":"Listening on","attr":{"address":"/tmp/mongodb-27017.sock"}}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:38.093+00:00"},"s":"I", "c":"NETWORK", "id":23015, "ctx":"listener","msg":"Listening on","attr":{"address":"0.0.0.0"}}
nestjs-api-testes-mongodb-1 | {"t":{"$date":"2022-10-21T19:01:38.093+00:00"},"s":"I", "c":"NETWORK", "id":23016, "ctx":"listener","msg":"Waiting for connections","attr":{"port":27017,"ssl":"off"}}
nestjs-api-testes-app-1 |
nestjs-api-testes-app-1 | up to date in 1m
nestjs-api-testes-app-1 |
nestjs-api-testes-app-1 | 92 packages are looking for funding
nestjs-api-testes-app-1 | run `npm fund` for details
container for service "mongodb" is unhealthy
Thanks in advance.
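One thing the log gives away: this is MongoDB 6.0.2, and the 6.x images no longer ship the legacy mongo shell (it was replaced by mongosh), so a healthcheck that pipes into mongo can never succeed, the container never becomes healthy, and the depends_on condition fails with exactly the "container for service "mongodb" is unhealthy" message above. A hedged sketch of the same healthcheck using mongosh instead (note, separately, that the MONGO_INITDDB_* variable names carry an extra D, so the root user would not actually be created):
healthcheck:
  test: echo 'db.runCommand("ping").ok' | mongosh localhost:27017/test --quiet
  retries: 5
  interval: 15s
  start_period: 30s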

Mongodb-express won't compose with docker-compose.yaml

Whenever I try to compose mongodb and mongodb-express, I get the error below:
C:\Users\ayan9\Documents\repository\ayanpal_dot_me [master +2 ~0 -0 !]> docker compose up
[+] Running 3/3
- Network ayanpal_dot_me_default Created 0.0s
- Container ayanpal_dot_me-mongo-express-1 Created 0.0s
- Container ayanpal_dot_me-mongodb-1 Created 0.0s
Attaching to ayanpal_dot_me-mongo-express-1, ayanpal_dot_me-mongodb-1
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.254+00:00"},"s":"I", "c":"CONTROL", "id":23285, "ctx":"-","msg":"Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none'"}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.255+00:00"},"s":"I", "c":"NETWORK", "id":4915701, "ctx":"main","msg":"Initialized wire specification","attr":{"spec":{"incomingExternalClient":{"minWireVersion":0,"maxWireVersion":13},"incomingInternalClient":{"minWireVersion":0,"maxWireVersion":13},"outgoing":{"minWireVersion":0,"maxWireVersion":13},"isInternalClient":true}}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.257+00:00"},"s":"W", "c":"ASIO", "id":22601, "ctx":"main","msg":"No TransportLayer configured during NetworkInterface startup"}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.257+00:00"},"s":"I", "c":"NETWORK", "id":4648601, "ctx":"main","msg":"Implicit TCP FastOpen unavailable. If TCP FastOpen is required, set tcpFastOpenServer, tcpFastOpenClient, and tcpFastOpenQueueSize."}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.258+00:00"},"s":"W", "c":"ASIO", "id":22601, "ctx":"main","msg":"No TransportLayer configured during NetworkInterface startup"}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.259+00:00"},"s":"I", "c":"REPL", "id":5123008, "ctx":"main","msg":"Successfully registered PrimaryOnlyService","attr":{"service":"TenantMigrationDonorService","ns":"config.tenantMigrationDonors"}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.259+00:00"},"s":"I", "c":"REPL", "id":5123008, "ctx":"main","msg":"Successfully registered PrimaryOnlyService","attr":{"service":"TenantMigrationRecipientService","ns":"config.tenantMigrationRecipients"}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.259+00:00"},"s":"I", "c":"CONTROL", "id":5945603, "ctx":"main","msg":"Multi threading initialized"}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.259+00:00"},"s":"I", "c":"CONTROL", "id":4615611, "ctx":"initandlisten","msg":"MongoDB starting","attr":{"pid":1,"port":27017,"dbPath":"/data/db","architecture":"64-bit","host":"5924e60df736"}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.259+00:00"},"s":"I", "c":"CONTROL", "id":23403, "ctx":"initandlisten","msg":"Build Info","attr":{"buildInfo":{"version":"5.0.8","gitVersion":"c87e1c23421bf79614baf500fda6622bd90f674e","openSSLVersion":"OpenSSL 1.1.1f 31 Mar 2020","modules":[],"allocator":"tcmalloc","environment":{"distmod":"ubuntu2004","distarch":"x86_64","target_arch":"x86_64"}}}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.259+00:00"},"s":"I", "c":"CONTROL", "id":51765, "ctx":"initandlisten","msg":"Operating System","attr":{"os":{"name":"Ubuntu","version":"20.04"}}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.259+00:00"},"s":"I", "c":"CONTROL", "id":21951, "ctx":"initandlisten","msg":"Options set by command line","attr":{"options":{"net":{"bindIp":"*"}}}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.261+00:00"},"s":"I", "c":"STORAGE", "id":22297, "ctx":"initandlisten","msg":"Using the XFS filesystem is strongly recommended with the WiredTiger storage engine. See http://dochub.mongodb.org/core/prodnotes-filesystem","tags":["startupWarnings"]}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.261+00:00"},"s":"I", "c":"STORAGE", "id":22315, "ctx":"initandlisten","msg":"Opening WiredTiger","attr":{"config":"create,cache_size=15474M,session_max=33000,eviction=(threads_min=4,threads_max=4),config_base=false,statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),builtin_extension_config=(zstd=(compression_level=6)),file_manager=(close_idle_time=600,close_scan_interval=10,close_handle_minimum=250),statistics_log=(wait=0),verbose=[recovery_progress,checkpoint_progress,compact_progress],"}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.681+00:00"},"s":"I", "c":"STORAGE", "id":22430, "ctx":"initandlisten","msg":"WiredTiger message","attr":{"message":"[1652017564:681852][1:0x7ff209735c80], txn-recover: [WT_VERB_RECOVERY_ALL] Set global recovery timestamp: (0, 0)"}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.681+00:00"},"s":"I", "c":"STORAGE", "id":22430, "ctx":"initandlisten","msg":"WiredTiger message","attr":{"message":"[1652017564:681919][1:0x7ff209735c80], txn-recover: [WT_VERB_RECOVERY_ALL] Set global oldest timestamp: (0, 0)"}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.690+00:00"},"s":"I", "c":"STORAGE", "id":4795906, "ctx":"initandlisten","msg":"WiredTiger opened","attr":{"durationMillis":429}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.690+00:00"},"s":"I", "c":"RECOVERY", "id":23987, "ctx":"initandlisten","msg":"WiredTiger recoveryTimestamp","attr":{"recoveryTimestamp":{"$timestamp":{"t":0,"i":0}}}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.704+00:00"},"s":"I", "c":"STORAGE", "id":4366408, "ctx":"initandlisten","msg":"No table logging settings modifications are required for existing WiredTiger tables","attr":{"loggingEnabled":true}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.704+00:00"},"s":"I", "c":"STORAGE", "id":22262, "ctx":"initandlisten","msg":"Timestamp monitor starting"}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.714+00:00"},"s":"W", "c":"CONTROL", "id":22120, "ctx":"initandlisten","msg":"Access control is not enabled for the database. Read and write access to data and configuration is unrestricted","tags":["startupWarnings"]}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.714+00:00"},"s":"W", "c":"CONTROL", "id":22178, "ctx":"initandlisten","msg":"/sys/kernel/mm/transparent_hugepage/enabled is 'always'. We suggest setting it to 'never'","tags":["startupWarnings"]}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.715+00:00"},"s":"I", "c":"STORAGE", "id":20320, "ctx":"initandlisten","msg":"createCollection","attr":{"namespace":"admin.system.version","uuidDisposition":"provided","uuid":{"uuid":{"$uuid":"1b6aefca-c53e-42d0-9c9b-d2e8eee30340"}},"options":{"uuid":{"$uuid":"1b6aefca-c53e-42d0-9c9b-d2e8eee30340"}}}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.728+00:00"},"s":"I", "c":"INDEX", "id":20345, "ctx":"initandlisten","msg":"Index build: done building","attr":{"buildUUID":null,"namespace":"admin.system.version","index":"_id_","commitTimestamp":null}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.729+00:00"},"s":"I", "c":"REPL", "id":20459, "ctx":"initandlisten","msg":"Setting featureCompatibilityVersion","attr":{"newVersion":"5.0"}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.729+00:00"},"s":"I", "c":"NETWORK", "id":4915702, "ctx":"initandlisten","msg":"Updated wire specification","attr":{"oldSpec":{"incomingExternalClient":{"minWireVersion":0,"maxWireVersion":13},"incomingInternalClient":{"minWireVersion":0,"maxWireVersion":13},"outgoing":{"minWireVersion":0,"maxWireVersion":13},"isInternalClient":true},"newSpec":{"incomingExternalClient":{"minWireVersion":0,"maxWireVersion":13},"incomingInternalClient":{"minWireVersion":13,"maxWireVersion":13},"outgoing":{"minWireVersion":13,"maxWireVersion":13},"isInternalClient":true}}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.729+00:00"},"s":"I", "c":"NETWORK", "id":4915702, "ctx":"initandlisten","msg":"Updated wire specification","attr":{"oldSpec":{"incomingExternalClient":{"minWireVersion":0,"maxWireVersion":13},"incomingInternalClient":{"minWireVersion":13,"maxWireVersion":13},"outgoing":{"minWireVersion":13,"maxWireVersion":13},"isInternalClient":true},"newSpec":{"incomingExternalClient":{"minWireVersion":0,"maxWireVersion":13},"incomingInternalClient":{"minWireVersion":13,"maxWireVersion":13},"outgoing":{"minWireVersion":13,"maxWireVersion":13},"isInternalClient":true}}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.729+00:00"},"s":"I", "c":"STORAGE", "id":5071100, "ctx":"initandlisten","msg":"Clearing temp directory"}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.729+00:00"},"s":"I", "c":"CONTROL", "id":20536, "ctx":"initandlisten","msg":"Flow Control is enabled on this deployment"}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.730+00:00"},"s":"I", "c":"FTDC", "id":20625, "ctx":"initandlisten","msg":"Initializing full-time diagnostic data capture","attr":{"dataDirectory":"/data/db/diagnostic.data"}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.730+00:00"},"s":"I", "c":"STORAGE", "id":20320, "ctx":"initandlisten","msg":"createCollection","attr":{"namespace":"local.startup_log","uuidDisposition":"generated","uuid":{"uuid":{"$uuid":"b0eb7b0e-af74-4183-8a67-ecd9e7de29cc"}},"options":{"capped":true,"size":10485760}}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.743+00:00"},"s":"I", "c":"INDEX", "id":20345, "ctx":"initandlisten","msg":"Index build: done building","attr":{"buildUUID":null,"namespace":"local.startup_log","index":"_id_","commitTimestamp":null}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.744+00:00"},"s":"I", "c":"REPL", "id":6015317, "ctx":"initandlisten","msg":"Setting new configuration state","attr":{"newState":"ConfigReplicationDisabled","oldState":"ConfigPreStart"}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.745+00:00"},"s":"I", "c":"CONTROL", "id":20712, "ctx":"LogicalSessionCacheReap","msg":"Sessions collection is not set up; waiting until next sessions reap interval","attr":{"error":"NamespaceNotFound: config.system.sessions does not exist"}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.745+00:00"},"s":"I", "c":"NETWORK", "id":23015, "ctx":"listener","msg":"Listening on","attr":{"address":"/tmp/mongodb-27017.sock"}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.745+00:00"},"s":"I", "c":"NETWORK", "id":23015, "ctx":"listener","msg":"Listening on","attr":{"address":"0.0.0.0"}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.745+00:00"},"s":"I", "c":"NETWORK", "id":23016, "ctx":"listener","msg":"Waiting for connections","attr":{"port":27017,"ssl":"off"}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.745+00:00"},"s":"I", "c":"STORAGE", "id":20320, "ctx":"LogicalSessionCacheRefresh","msg":"createCollection","attr":{"namespace":"config.system.sessions","uuidDisposition":"generated","uuid":{"uuid":{"$uuid":"12df9af9-6392-49ea-abe2-84b860973329"}},"options":{}}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.763+00:00"},"s":"I", "c":"INDEX", "id":20345, "ctx":"LogicalSessionCacheRefresh","msg":"Index build: done building","attr":{"buildUUID":null,"namespace":"config.system.sessions","index":"_id_","commitTimestamp":null}}
ayanpal_dot_me-mongodb-1 | {"t":{"$date":"2022-05-08T13:46:04.763+00:00"},"s":"I", "c":"INDEX", "id":20345, "ctx":"LogicalSessionCacheRefresh","msg":"Index build: done building","attr":{"buildUUID":null,"namespace":"config.system.sessions","index":"lsidTTLIndex","commitTimestamp":null}}
ayanpal_dot_me-mongo-express-1 | Welcome to mongo-express
ayanpal_dot_me-mongo-express-1 | ------------------------
ayanpal_dot_me-mongo-express-1 |
ayanpal_dot_me-mongo-express-1 |
ayanpal_dot_me-mongo-express-1 | (node:7) [MONGODB DRIVER] Warning: Current Server Discovery and Monitoring engine is deprecated, and will be removed in a future version. To use the new Server Discover and Monitoring engine, pass option { useUnifiedTopology: true } to the MongoClient constructor.
ayanpal_dot_me-mongo-express-1 | Could not connect to database using connectionString: mongodb://mongo:27017"
ayanpal_dot_me-mongo-express-1 | (node:7) UnhandledPromiseRejectionWarning: MongoNetworkError: failed to connect to server [mongo:27017] on first connect [Error: getaddrinfo EAI_AGAIN mongo
ayanpal_dot_me-mongo-express-1 | at GetAddrInfoReqWrap.onlookup [as oncomplete] (dns.js:66:26) {
ayanpal_dot_me-mongo-express-1 | name: 'MongoNetworkError'
ayanpal_dot_me-mongo-express-1 | }]
ayanpal_dot_me-mongo-express-1 | at Pool.<anonymous> (/node_modules/mongodb/lib/core/topologies/server.js:441:11)
ayanpal_dot_me-mongo-express-1 | at Pool.emit (events.js:314:20)
ayanpal_dot_me-mongo-express-1 | at /node_modules/mongodb/lib/core/connection/pool.js:564:14
ayanpal_dot_me-mongo-express-1 | at /node_modules/mongodb/lib/core/connection/pool.js:1000:11
ayanpal_dot_me-mongo-express-1 | at /node_modules/mongodb/lib/core/connection/connect.js:32:7
ayanpal_dot_me-mongo-express-1 | at callback (/node_modules/mongodb/lib/core/connection/connect.js:300:5)
ayanpal_dot_me-mongo-express-1 | at Socket.<anonymous> (/node_modules/mongodb/lib/core/connection/connect.js:330:7)
ayanpal_dot_me-mongo-express-1 | at Object.onceWrapper (events.js:421:26)
ayanpal_dot_me-mongo-express-1 | at Socket.emit (events.js:314:20)
ayanpal_dot_me-mongo-express-1 | at emitErrorNT (internal/streams/destroy.js:92:8)
ayanpal_dot_me-mongo-express-1 | at emitErrorAndCloseNT (internal/streams/destroy.js:60:3)
ayanpal_dot_me-mongo-express-1 | at processTicksAndRejections (internal/process/task_queues.js:84:21)
ayanpal_dot_me-mongo-express-1 | (node:7) UnhandledPromiseRejectionWarning: Unhandled promise rejection. This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch(). To terminate the node process on unhandled promise rejection, use the CLI flag `--unhandled-rejections=strict` (see https://nodejs.org/api/cli.html#cli_unhandled_rejections_mode). (rejection id: 1)
ayanpal_dot_me-mongo-express-1 | (node:7) [DEP0018] DeprecationWarning: Unhandled promise rejections are deprecated. In the future, promise rejections that are not handled will terminate the Node.js process with a non-zero exit code.
ayanpal_dot_me-mongo-express-1 exited with code 0
My docker-compose.yaml is as below
services:
  mongodb:
    image: mongo
    environment:
      - MONGO_INTIDB_ROOT_USERNAME=rootuser
      - MONGO_INTIDB_ROOT_PASSOWRD=rootpass
  mongo-express:
    image: mongo-express
    ports:
      - 8081:8081
    environment:
      - ME_CONFIG_MONGODB_ADMINUSERNAME=rootuser
      - ME_CONFIG_MONGODB_ADMINPASSWORD=rootpass
I'm just trying to run mongodb and mongodb-express via Docker.
I have tried adding ports to both the mongodb and mongodb-express service configs, and also adding volumes and networks, but this error is very confusing.
There are two issues. The first is that mongo-express tries to connect to the database using a default hostname of 'mongo'. Your service is called 'mongodb', so it cannot be found. The easiest way to fix this is to name the service mongo rather than mongodb.
Now mongo-express can find the mongo container, but a second issue arises: the mongo-express container tries to connect to the database immediately after startup, and the database is not yet ready to accept connections at that point. An easy way around that is to set the restart option to always, so the container keeps restarting until it succeeds.
Try this
version: '3.9'
services:
  mongo:
    image: mongo
    environment:
      - MONGO_INTIDB_ROOT_USERNAME=rootuser
      - MONGO_INTIDB_ROOT_PASSOWRD=rootpass
  mongo-express:
    image: mongo-express
    ports:
      - 8081:8081
    environment:
      - ME_CONFIG_MONGODB_ADMINUSERNAME=rootuser
      - ME_CONFIG_MONGODB_ADMINPASSWORD=rootpass
    restart: always
    depends_on:
      - mongo
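If you would rather keep the service name mongodb, mongo-express can instead be pointed at it explicitly rather than relying on its default host of mongo; a sketch of that variant of the mongo-express service, using the ME_CONFIG_MONGODB_SERVER variable together with the same restart/depends_on idea as above:
mongo-express:
  image: mongo-express
  ports:
    - 8081:8081
  environment:
    - ME_CONFIG_MONGODB_ADMINUSERNAME=rootuser
    - ME_CONFIG_MONGODB_ADMINPASSWORD=rootpass
    # point mongo-express at the mongodb service by its compose service name
    - ME_CONFIG_MONGODB_SERVER=mongodb
  restart: always
  depends_on:
    - mongodb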

MongoDB config file is ignored after setting replication options

I had MongoDB running fine on Ubuntu 20.04 but after adding replication options to mongod.conf, the config file seems to be being rejected. This results in errors because mongo attempts to write to the default storage path, which doesn't exist.
Here is my conf file:
# mongod.conf

# for documentation of all options, see:
#   http://docs.mongodb.org/manual/reference/configuration-options/

# Where and how to store data.
storage:
  dbPath: /var/lib/mongodb
  journal:
    enabled: true
#  engine:
#  wiredTiger:

# where to write logging data.
systemLog:
  destination: file
  logAppend: true
  path: /var/log/mongodb/mongod.log

# network interfaces
net:
  port: 27018
  bindIp: 127.0.0.1

# how the process runs
processManagement:
  timeZoneInfo: /usr/share/zoneinfo

#security:

#operationProfiling:

replication:
  replSetName: rs0

#sharding:

## Enterprise-Only Options:

#auditLog:

#snmp:
And here are the errors:
{"t":{"$date":"2022-03-12T16:05:56.861+00:00"},"s":"I", "c":"CONTROL", "id":23285, "ctx":"thread1","msg":"Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none'"}
{"t":{"$date":"2022-03-12T16:05:56.861+00:00"},"s":"I", "c":"NETWORK", "id":4915701, "ctx":"thread1","msg":"Initialized wire specification","attr":{"spec":{"incomingExternalClient":{"minWireVersion":0,"maxWireVersion":13},"incomingInternalClient":{"minWireVersion":0,"maxWireVersion":13},"outgoing":{"minWireVersion":0,"maxWireVersion":13},"isInternalClient":true}}}
{"t":{"$date":"2022-03-12T16:05:56.864+00:00"},"s":"W", "c":"ASIO", "id":22601, "ctx":"thread1","msg":"No TransportLayer configured during NetworkInterface startup"}
{"t":{"$date":"2022-03-12T16:05:56.864+00:00"},"s":"I", "c":"NETWORK", "id":4648601, "ctx":"thread1","msg":"Implicit TCP FastOpen unavailable. If TCP FastOpen is required, set tcpFastOpenServer, tcpFastOpenClient, and tcpFastOpenQueueSize."}
{"t":{"$date":"2022-03-12T16:05:56.865+00:00"},"s":"W", "c":"ASIO", "id":22601, "ctx":"thread1","msg":"No TransportLayer configured during NetworkInterface startup"}
{"t":{"$date":"2022-03-12T16:05:56.865+00:00"},"s":"I", "c":"REPL", "id":5123008, "ctx":"thread1","msg":"Successfully registered PrimaryOnlyService","attr":{"service":"TenantMigrationDonorService","ns":"config.tenantMigrationDonors"}}
{"t":{"$date":"2022-03-12T16:05:56.865+00:00"},"s":"I", "c":"REPL", "id":5123008, "ctx":"thread1","msg":"Successfully registered PrimaryOnlyService","attr":{"service":"TenantMigrationRecipientService","ns":"config.tenantMigrationRecipients"}}
{"t":{"$date":"2022-03-12T16:05:56.865+00:00"},"s":"I", "c":"CONTROL", "id":5945603, "ctx":"thread1","msg":"Multi threading initialized"}
{"t":{"$date":"2022-03-12T16:05:56.866+00:00"},"s":"I", "c":"CONTROL", "id":4615611, "ctx":"initandlisten","msg":"MongoDB starting","attr":{"pid":74756,"port":27017,"dbPath":"/data/db","architecture":"64-bit","host":"Gabriel-PC-Ubuntu"}}
{"t":{"$date":"2022-03-12T16:05:56.866+00:00"},"s":"I", "c":"CONTROL", "id":23403, "ctx":"initandlisten","msg":"Build Info","attr":{"buildInfo":{"version":"5.0.6","gitVersion":"212a8dbb47f07427dae194a9c75baec1d81d9259","openSSLVersion":"OpenSSL 1.1.1f 31 Mar 2020","modules":[],"allocator":"tcmalloc","environment":{"distmod":"ubuntu2004","distarch":"x86_64","target_arch":"x86_64"}}}}
{"t":{"$date":"2022-03-12T16:05:56.866+00:00"},"s":"I", "c":"CONTROL", "id":51765, "ctx":"initandlisten","msg":"Operating System","attr":{"os":{"name":"Ubuntu","version":"20.04"}}}
{"t":{"$date":"2022-03-12T16:05:56.866+00:00"},"s":"I", "c":"CONTROL", "id":21951, "ctx":"initandlisten","msg":"Options set by command line","attr":{"options":{}}}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"E", "c":"CONTROL", "id":20557, "ctx":"initandlisten","msg":"DBException in initAndListen, terminating","attr":{"error":"NonExistentPath: Data directory /data/db not found. Create the missing directory or specify another path using (1) the --dbpath command line option, or (2) by adding the 'storage.dbPath' option in the configuration file."}}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"I", "c":"REPL", "id":4784900, "ctx":"initandlisten","msg":"Stepping down the ReplicationCoordinator for shutdown","attr":{"waitTimeMillis":15000}}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"I", "c":"COMMAND", "id":4784901, "ctx":"initandlisten","msg":"Shutting down the MirrorMaestro"}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"I", "c":"SHARDING", "id":4784902, "ctx":"initandlisten","msg":"Shutting down the WaitForMajorityService"}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"I", "c":"NETWORK", "id":20562, "ctx":"initandlisten","msg":"Shutdown: going to close listening sockets"}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"I", "c":"NETWORK", "id":4784905, "ctx":"initandlisten","msg":"Shutting down the global connection pool"}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"I", "c":"CONTROL", "id":4784906, "ctx":"initandlisten","msg":"Shutting down the FlowControlTicketholder"}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"I", "c":"-", "id":20520, "ctx":"initandlisten","msg":"Stopping further Flow Control ticket acquisitions."}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"I", "c":"NETWORK", "id":4784918, "ctx":"initandlisten","msg":"Shutting down the ReplicaSetMonitor"}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"I", "c":"SHARDING", "id":4784921, "ctx":"initandlisten","msg":"Shutting down the MigrationUtilExecutor"}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"I", "c":"ASIO", "id":22582, "ctx":"MigrationUtil-TaskExecutor","msg":"Killing all outstanding egress activity."}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"I", "c":"COMMAND", "id":4784923, "ctx":"initandlisten","msg":"Shutting down the ServiceEntryPoint"}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"I", "c":"CONTROL", "id":4784925, "ctx":"initandlisten","msg":"Shutting down free monitoring"}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"I", "c":"CONTROL", "id":4784927, "ctx":"initandlisten","msg":"Shutting down the HealthLog"}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"I", "c":"CONTROL", "id":4784928, "ctx":"initandlisten","msg":"Shutting down the TTL monitor"}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"I", "c":"CONTROL", "id":4784929, "ctx":"initandlisten","msg":"Acquiring the global lock for shutdown"}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"I", "c":"-", "id":4784931, "ctx":"initandlisten","msg":"Dropping the scope cache for shutdown"}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"I", "c":"FTDC", "id":4784926, "ctx":"initandlisten","msg":"Shutting down full-time data capture"}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"I", "c":"CONTROL", "id":20565, "ctx":"initandlisten","msg":"Now exiting"}
{"t":{"$date":"2022-03-12T16:05:56.867+00:00"},"s":"I", "c":"CONTROL", "id":23138, "ctx":"initandlisten","msg":"Shutting down","attr":{"exitCode":100}}
Any help would be greatly appreciated!
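One clue is in the log itself: the "Options set by command line" entry reports an empty options object, and the reported dbPath is /data/db on port 27017 rather than the /var/lib/mongodb and 27018 configured above, so this particular mongod process was apparently started without the configuration file at all rather than having it rejected. When mongod is launched by hand (instead of through the packaged service, which passes the file for you), the config has to be pointed to explicitly, for example:
mongod --config /etc/mongod.conf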

mongo-express is not trying to reconnect after first attempt

I am trying to create two services, mongo and mongo-express, through a docker-compose file.
Let me explain the scenario: if I run those two containers separately on the same network using docker run commands in the terminal, one by one, they work fine.
Commands which I ran in the terminal:
For mongo
docker run -d \
-p27017:27017 \
-e MONGO_INITDB_ROOT_USERNAME=admin \
-e MONGO_INITDB_ROOT_PASSWORD=password \
--network 6-mar-learning-docker-with-mongo-image \
--name mongodb \
mongo
and for mongo-express
docker run -d \
-p 8081:8081 \
-e ME_CONFIG_MONGODB_ADMINUSERNAME=admin \
-e ME_CONFIG_MONGODB_ADMINPASSWORD=password \
--network 6-mar-learning-docker-with-mongo-image \
--name mongo-express \
-e ME_CONFIG_MONGODB_SERVER=mongodb \
mongo-express
But if I try using the docker-compose YAML file, the mongo-express service tries to connect to the mongo service only once and then exits with code 0.
Here's my YAML file:
version: "3"
services:
mongodb:
image: mongo
ports:
- 27017:27017
environment:
- MONGO_INITDB_ROOT_USERNAME=admin
- MONGO_INITDB_ROOT_PASSWORD=password
mongo-express:
image: mongo-express
ports:
- 8080:8081
environment:
- ME_CONFIG_MONGODB_ADMINUSERNAME=admin
- ME_CONFIG_MONGODB_ADMINPASSWORD=password
- ME_CONFIG_MONGODB_SERVER=mongodb
Command I'm using to execute this file
docker-compose -f docker-compose.yaml up
Log of this command:
Creating network "docker_default" with the default driver
Creating docker_mongo-express_1 ... done
Creating docker_mongodb_1 ... done
Attaching to docker_mongo-express_1, docker_mongodb_1
mongodb_1 | about to fork child process, waiting until server is ready for connections.
mongodb_1 | forked process: 32
mongodb_1 |
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:34.852+00:00"},"s":"I", "c":"CONTROL", "id":20698, "ctx":"-","msg":"***** SERVER RESTARTED *****"}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:34.856+00:00"},"s":"I", "c":"CONTROL", "id":23285, "ctx":"main","msg":"Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none'"}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:34.856+00:00"},"s":"I", "c":"NETWORK", "id":4915701, "ctx":"main","msg":"Initialized wire specification","attr":{"spec":{"incomingExternalClient":{"minWireVersion":0,"maxWireVersion":13},"incomingInternalClient":{"minWireVersion":0,"maxWireVersion":13},"outgoing":{"minWireVersion":0,"maxWireVersion":13},"isInternalClient":true}}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:34.857+00:00"},"s":"W", "c":"ASIO", "id":22601, "ctx":"main","msg":"No TransportLayer configured during NetworkInterface startup"}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:34.857+00:00"},"s":"I", "c":"NETWORK", "id":4648601, "ctx":"main","msg":"Implicit TCP FastOpen unavailable. If TCP FastOpen is required, set tcpFastOpenServer, tcpFastOpenClient, and tcpFastOpenQueueSize."}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:34.858+00:00"},"s":"W", "c":"ASIO", "id":22601, "ctx":"main","msg":"No TransportLayer configured during NetworkInterface startup"}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:34.859+00:00"},"s":"I", "c":"REPL", "id":5123008, "ctx":"main","msg":"Successfully registered PrimaryOnlyService","attr":{"service":"TenantMigrationDonorService","ns":"config.tenantMigrationDonors"}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:34.859+00:00"},"s":"I", "c":"REPL", "id":5123008, "ctx":"main","msg":"Successfully registered PrimaryOnlyService","attr":{"service":"TenantMigrationRecipientService","ns":"config.tenantMigrationRecipients"}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:34.859+00:00"},"s":"I", "c":"CONTROL", "id":5945603, "ctx":"main","msg":"Multi threading initialized"}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:34.859+00:00"},"s":"I", "c":"CONTROL", "id":4615611, "ctx":"initandlisten","msg":"MongoDB starting","attr":{"pid":32,"port":27017,"dbPath":"/data/db","architecture":"64-bit","host":"5a9ee157c392"}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:34.860+00:00"},"s":"I", "c":"CONTROL", "id":23403, "ctx":"initandlisten","msg":"Build Info","attr":{"buildInfo":{"version":"5.0.6","gitVersion":"212a8dbb47f07427dae194a9c75baec1d81d9259","openSSLVersion":"OpenSSL 1.1.1f 31 Mar 2020","modules":[],"allocator":"tcmalloc","environment":{"distmod":"ubuntu2004","distarch":"x86_64","target_arch":"x86_64"}}}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:34.860+00:00"},"s":"I", "c":"CONTROL", "id":51765, "ctx":"initandlisten","msg":"Operating System","attr":{"os":{"name":"Ubuntu","version":"20.04"}}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:34.860+00:00"},"s":"I", "c":"CONTROL", "id":21951, "ctx":"initandlisten","msg":"Options set by command line","attr":{"options":{"net":{"bindIp":"127.0.0.1","port":27017,"tls":{"mode":"disabled"}},"processManagement":{"fork":true,"pidFilePath":"/tmp/docker-entrypoint-temp-mongod.pid"},"systemLog":{"destination":"file","logAppend":true,"path":"/proc/1/fd/1"}}}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:34.861+00:00"},"s":"I", "c":"STORAGE", "id":22297, "ctx":"initandlisten","msg":"Using the XFS filesystem is strongly recommended with the WiredTiger storage engine. See http://dochub.mongodb.org/core/prodnotes-filesystem","tags":["startupWarnings"]}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:34.861+00:00"},"s":"I", "c":"STORAGE", "id":22315, "ctx":"initandlisten","msg":"Opening WiredTiger","attr":{"config":"create,cache_size=480M,session_max=33000,eviction=(threads_min=4,threads_max=4),config_base=false,statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),builtin_extension_config=(zstd=(compression_level=6)),file_manager=(close_idle_time=600,close_scan_interval=10,close_handle_minimum=250),statistics_log=(wait=0),verbose=[recovery_progress,checkpoint_progress,compact_progress],"}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.323+00:00"},"s":"I", "c":"STORAGE", "id":22430, "ctx":"initandlisten","msg":"WiredTiger message","attr":{"message":"[1646573975:323191][32:0x7ff1314c6c80], txn-recover: [WT_VERB_RECOVERY_ALL] Set global recovery timestamp: (0, 0)"}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.323+00:00"},"s":"I", "c":"STORAGE", "id":22430, "ctx":"initandlisten","msg":"WiredTiger message","attr":{"message":"[1646573975:323262][32:0x7ff1314c6c80], txn-recover: [WT_VERB_RECOVERY_ALL] Set global oldest timestamp: (0, 0)"}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.329+00:00"},"s":"I", "c":"STORAGE", "id":4795906, "ctx":"initandlisten","msg":"WiredTiger opened","attr":{"durationMillis":468}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.329+00:00"},"s":"I", "c":"RECOVERY", "id":23987, "ctx":"initandlisten","msg":"WiredTiger recoveryTimestamp","attr":{"recoveryTimestamp":{"$timestamp":{"t":0,"i":0}}}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.340+00:00"},"s":"I", "c":"STORAGE", "id":4366408, "ctx":"initandlisten","msg":"No table logging settings modifications are required for existing WiredTiger tables","attr":{"loggingEnabled":true}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.340+00:00"},"s":"I", "c":"STORAGE", "id":22262, "ctx":"initandlisten","msg":"Timestamp monitor starting"}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.345+00:00"},"s":"W", "c":"CONTROL", "id":22120, "ctx":"initandlisten","msg":"Access control is not enabled for the database. Read and write access to data and configuration is unrestricted","tags":["startupWarnings"]}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.346+00:00"},"s":"I", "c":"STORAGE", "id":20320, "ctx":"initandlisten","msg":"createCollection","attr":{"namespace":"admin.system.version","uuidDisposition":"provided","uuid":{"uuid":{"$uuid":"6cdf8a79-38eb-4cf7-a722-437bf35ff55f"}},"options":{"uuid":{"$uuid":"6cdf8a79-38eb-4cf7-a722-437bf35ff55f"}}}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.354+00:00"},"s":"I", "c":"INDEX", "id":20345, "ctx":"initandlisten","msg":"Index build: done building","attr":{"buildUUID":null,"namespace":"admin.system.version","index":"_id_","commitTimestamp":null}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.355+00:00"},"s":"I", "c":"REPL", "id":20459, "ctx":"initandlisten","msg":"Setting featureCompatibilityVersion","attr":{"newVersion":"5.0"}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.355+00:00"},"s":"I", "c":"NETWORK", "id":4915702, "ctx":"initandlisten","msg":"Updated wire specification","attr":{"oldSpec":{"incomingExternalClient":{"minWireVersion":0,"maxWireVersion":13},"incomingInternalClient":{"minWireVersion":0,"maxWireVersion":13},"outgoing":{"minWireVersion":0,"maxWireVersion":13},"isInternalClient":true},"newSpec":{"incomingExternalClient":{"minWireVersion":0,"maxWireVersion":13},"incomingInternalClient":{"minWireVersion":13,"maxWireVersion":13},"outgoing":{"minWireVersion":13,"maxWireVersion":13},"isInternalClient":true}}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.355+00:00"},"s":"I", "c":"NETWORK", "id":4915702, "ctx":"initandlisten","msg":"Updated wire specification","attr":{"oldSpec":{"incomingExternalClient":{"minWireVersion":0,"maxWireVersion":13},"incomingInternalClient":{"minWireVersion":13,"maxWireVersion":13},"outgoing":{"minWireVersion":13,"maxWireVersion":13},"isInternalClient":true},"newSpec":{"incomingExternalClient":{"minWireVersion":0,"maxWireVersion":13},"incomingInternalClient":{"minWireVersion":13,"maxWireVersion":13},"outgoing":{"minWireVersion":13,"maxWireVersion":13},"isInternalClient":true}}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.355+00:00"},"s":"I", "c":"STORAGE", "id":5071100, "ctx":"initandlisten","msg":"Clearing temp directory"}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.355+00:00"},"s":"I", "c":"CONTROL", "id":20536, "ctx":"initandlisten","msg":"Flow Control is enabled on this deployment"}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.356+00:00"},"s":"I", "c":"FTDC", "id":20625, "ctx":"initandlisten","msg":"Initializing full-time diagnostic data capture","attr":{"dataDirectory":"/data/db/diagnostic.data"}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.356+00:00"},"s":"I", "c":"STORAGE", "id":20320, "ctx":"initandlisten","msg":"createCollection","attr":{"namespace":"local.startup_log","uuidDisposition":"generated","uuid":{"uuid":{"$uuid":"c7bdfd70-ca9a-4a5a-8c7d-b498500fff1f"}},"options":{"capped":true,"size":10485760}}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.367+00:00"},"s":"I", "c":"INDEX", "id":20345, "ctx":"initandlisten","msg":"Index build: done building","attr":{"buildUUID":null,"namespace":"local.startup_log","index":"_id_","commitTimestamp":null}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.367+00:00"},"s":"I", "c":"REPL", "id":6015317, "ctx":"initandlisten","msg":"Setting new configuration state","attr":{"newState":"ConfigReplicationDisabled","oldState":"ConfigPreStart"}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.369+00:00"},"s":"I", "c":"CONTROL", "id":20712, "ctx":"LogicalSessionCacheReap","msg":"Sessions collection is not set up; waiting until next sessions reap interval","attr":{"error":"NamespaceNotFound: config.system.sessions does not exist"}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.369+00:00"},"s":"I", "c":"NETWORK", "id":23015, "ctx":"listener","msg":"Listening on","attr":{"address":"/tmp/mongodb-27017.sock"}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.369+00:00"},"s":"I", "c":"STORAGE", "id":20320, "ctx":"LogicalSessionCacheRefresh","msg":"createCollection","attr":{"namespace":"config.system.sessions","uuidDisposition":"generated","uuid":{"uuid":{"$uuid":"a361b644-06c4-4102-8abd-a78af1e4fb4d"}},"options":{}}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.369+00:00"},"s":"I", "c":"NETWORK", "id":23015, "ctx":"listener","msg":"Listening on","attr":{"address":"127.0.0.1"}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.369+00:00"},"s":"I", "c":"NETWORK", "id":23016, "ctx":"listener","msg":"Waiting for connections","attr":{"port":27017,"ssl":"off"}}
mongodb_1 | child process started successfully, parent exiting
mongo-express_1 | Welcome to mongo-express
mongo-express_1 | ------------------------
mongo-express_1 |
mongo-express_1 |
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.388+00:00"},"s":"I", "c":"INDEX", "id":20345, "ctx":"LogicalSessionCacheRefresh","msg":"Index build: done building","attr":{"buildUUID":null,"namespace":"config.system.sessions","index":"_id_","commitTimestamp":null}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.388+00:00"},"s":"I", "c":"INDEX", "id":20345, "ctx":"LogicalSessionCacheRefresh","msg":"Index build: done building","attr":{"buildUUID":null,"namespace":"config.system.sessions","index":"lsidTTLIndex","commitTimestamp":null}}
mongo-express_1 | (node:7) [MONGODB DRIVER] Warning: Current Server Discovery and Monitoring engine is deprecated, and will be removed in a future version. To use the new Server Discover and Monitoring engine, pass option { useUnifiedTopology: true } to the MongoClient constructor.
mongo-express_1 | Could not connect to database using connectionString: mongodb://admin:password#mongodb:27017/"
mongo-express_1 | (node:7) UnhandledPromiseRejectionWarning: MongoNetworkError: failed to connect to server [mongodb:27017] on first connect [Error: connect ECONNREFUSED 172.27.0.3:27017
mongo-express_1 | at TCPConnectWrap.afterConnect [as oncomplete] (net.js:1144:16) {
mongo-express_1 | name: 'MongoNetworkError'
mongo-express_1 | }]
mongo-express_1 | at Pool.<anonymous> (/node_modules/mongodb/lib/core/topologies/server.js:441:11)
mongo-express_1 | at Pool.emit (events.js:314:20)
mongo-express_1 | at /node_modules/mongodb/lib/core/connection/pool.js:564:14
mongo-express_1 | at /node_modules/mongodb/lib/core/connection/pool.js:1000:11
mongo-express_1 | at /node_modules/mongodb/lib/core/connection/connect.js:32:7
mongo-express_1 | at callback (/node_modules/mongodb/lib/core/connection/connect.js:300:5)
mongo-express_1 | at Socket.<anonymous> (/node_modules/mongodb/lib/core/connection/connect.js:330:7)
mongo-express_1 | at Object.onceWrapper (events.js:421:26)
mongo-express_1 | at Socket.emit (events.js:314:20)
mongo-express_1 | at emitErrorNT (internal/streams/destroy.js:92:8)
mongo-express_1 | at emitErrorAndCloseNT (internal/streams/destroy.js:60:3)
mongo-express_1 | at processTicksAndRejections (internal/process/task_queues.js:84:21)
mongo-express_1 | (node:7) UnhandledPromiseRejectionWarning: Unhandled promise rejection. This error originated either by throwing inside of an async function without a catch block, or by rejecting a promise which was not handled with .catch(). To terminate the node process on unhandled promise rejection, use the CLI flag `--unhandled-rejections=strict` (see https://nodejs.org/api/cli.html#cli_unhandled_rejections_mode). (rejection id: 1)
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.432+00:00"},"s":"I", "c":"NETWORK", "id":22943, "ctx":"listener","msg":"Connection accepted","attr":{"remote":"127.0.0.1:60038","uuid":"70bac03c-a30e-47b6-b6a5-bc6a551ab95e","connectionId":1,"connectionCount":1}}
mongo-express_1 | (node:7) [DEP0018] DeprecationWarning: Unhandled promise rejections are deprecated. In the future, promise rejections that are not handled will terminate the Node.js process with a non-zero exit code.
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.433+00:00"},"s":"I", "c":"NETWORK", "id":51800, "ctx":"conn1","msg":"client metadata","attr":{"remote":"127.0.0.1:60038","client":"conn1","doc":{"application":{"name":"MongoDB Shell"},"driver":{"name":"MongoDB Internal Client","version":"5.0.6"},"os":{"type":"Linux","name":"Ubuntu","architecture":"x86_64","version":"20.04"}}}}
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.443+00:00"},"s":"I", "c":"NETWORK", "id":22944, "ctx":"conn1","msg":"Connection ended","attr":{"remote":"127.0.0.1:60038","uuid":"70bac03c-a30e-47b6-b6a5-bc6a551ab95e","connectionId":1,"connectionCount":0}}
docker_mongo-express_1 exited with code 0
mongodb_1 | {"t":{"$date":"2022-03-06T13:39:35.598+00:00"},"s":"I", "c":"NETWORK", "id":22943, "ctx":"listener","msg":"Connection accepted","attr":{"remote":"127.0.0.1:60040","uuid":"2f5cd59e-5691-40c5-a31a-fd4fbb0e6a63","connectionId":2,"connectionCount":1}}
docker ps output at this point:
CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
5a9ee157c392 mongo "docker-entrypoint.s…" 3 minutes ago Up 3 minutes 0.0.0.0:27017->27017/tcp, :::27017->27017/tcp docker_mongodb_1
So basically the mongo container got created, but mongo-express did not, since it exited.
Now if I press CTRL+C (exiting the command on Mac) after running the docker-compose command and run it again, it works, because the mongo service is already up and mongo-express can establish a connection on the first attempt.
But I think it should try to reconnect; I also saw and read a couple of tutorials which confirm that the service should not exit, it should keep retrying. Maybe I missed something in my docker-compose.yaml file; forgive me for that, since I'm learning Docker.
Just a small note - I solved this by setting the "restart" option on the mongo-express service, but in people's tutorials and code I've seen it reconnect on its own, without being restarted over and over.
Tutorial reference which I used - https://youtu.be/3c-iBn73dDE?t=5705
The mongodb and the mongo-express services should not be started at the same time, because mongo-express depends on the mongodb container. You can ensure the database is running before the express server starts with the following changes to the compose file.
I have added a simple healthcheck, and a depends_on condition to ensure the mongo instance is healthy and ready before the express server starts.
version: '3'
services:
  mongodb:
    image: mongo
    ports:
      - 27017:27017
    environment:
      - MONGO_INITDB_ROOT_USERNAME=admin
      - MONGO_INITDB_ROOT_PASSWORD=password
    healthcheck:
      test: echo 'db.runCommand("ping").ok' | mongo localhost:27017/test --quiet
      retries: 5
      interval: 15s
      start_period: 30s
  mongo-express:
    image: mongo-express
    ports:
      - 8080:8081
    environment:
      - ME_CONFIG_MONGODB_ADMINUSERNAME=admin
      - ME_CONFIG_MONGODB_ADMINPASSWORD=password
      - ME_CONFIG_MONGODB_SERVER=mongodb
    depends_on:
      mongodb:
        condition: service_healthy
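One caveat: on newer mongo images (6.0 and later) the legacy mongo shell is no longer included, so the healthcheck would need to call mongosh instead - something like this (same options, only the shell binary changes):
healthcheck:
  test: echo 'db.runCommand("ping").ok' | mongosh localhost:27017/test --quiet
  retries: 5
  interval: 15s
  start_period: 30s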
You can simply use
mongo-express:
  image: mongo-express
  ports:
    - 8080:8081
  restart: unless-stopped
  environment:
    - ME_CONFIG_MONGODB_ADMINUSERNAME=admin
    - ME_CONFIG_MONGODB_ADMINPASSWORD=password
    - ME_CONFIG_MONGODB_SERVER=mongodb
or any of the other restart policies; see the Docker docs on restart for details.
By the way, if you want to control startup order, I'd recommend reading the DigitalOcean containerizing tutorial, especially step 4.
PS: I think version: "3" is OK, but check the Compose file version compatibility before use.
Happy coding:)

YAML provided on official Mongo Docker Image page doesn't work on CentOS

The YAML file below (originally provided here) works in Windows and macOS Docker environments. But when I run it in a CentOS environment, mongo-express can't connect to the MongoDB service and nothing shows up in the browser at localhost:8081. I suspect this is a DNS issue with resolving the mongo service name to the respective container IP address. How can I fix this?
# Use root/example as user/password credentials
version: '3.1'
services:
  mongo:
    image: mongo
    restart: always
    environment:
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: example
  mongo-express:
    image: mongo-express
    restart: always
    ports:
      - 8081:8081
    environment:
      ME_CONFIG_MONGODB_ADMINUSERNAME: root
      ME_CONFIG_MONGODB_ADMINPASSWORD: example
Log from the mongo service:
{"t":{"$date":"2021-01-01T00:15:56.059+00:00"},"s":"I", "c":"CONTROL", "id":23285, "ctx":"main","msg":"Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none'"}
{"t":{"$date":"2021-01-01T00:15:56.063+00:00"},"s":"W", "c":"ASIO", "id":22601, "ctx":"main","msg":"No TransportLayer configured during NetworkInterface startup"}
{"t":{"$date":"2021-01-01T00:15:56.063+00:00"},"s":"I", "c":"NETWORK", "id":4648601, "ctx":"main","msg":"Implicit TCP FastOpen unavailable. If TCP FastOpen is required, set tcpFastOpenServer, tcpFastOpenClient, and tcpFastOpenQueueSize."}
killing process with pid: 29
{"t":{"$date":"2021-01-01T00:15:56.063+00:00"},"s":"I", "c":"CONTROL", "id":23377, "ctx":"SignalHandler","msg":"Received signal","attr":{"signal":15,"error":"Terminated"}}
{"t":{"$date":"2021-01-01T00:15:56.063+00:00"},"s":"I", "c":"CONTROL", "id":23378, "ctx":"SignalHandler","msg":"Signal was sent by kill(2)","attr":{"pid":83,"uid":999}}
{"t":{"$date":"2021-01-01T00:15:56.063+00:00"},"s":"I", "c":"CONTROL", "id":23381, "ctx":"SignalHandler","msg":"will terminate after current cmd ends"}
{"t":{"$date":"2021-01-01T00:15:56.063+00:00"},"s":"I", "c":"REPL", "id":4784900, "ctx":"SignalHandler","msg":"Stepping down the ReplicationCoordinator for shutdown","attr":{"waitTimeMillis":10000}}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I", "c":"COMMAND", "id":4784901, "ctx":"SignalHandler","msg":"Shutting down the MirrorMaestro"}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I", "c":"SHARDING", "id":4784902, "ctx":"SignalHandler","msg":"Shutting down the WaitForMajorityService"}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I", "c":"CONTROL", "id":4784903, "ctx":"SignalHandler","msg":"Shutting down the LogicalSessionCache"}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I", "c":"NETWORK", "id":20562, "ctx":"SignalHandler","msg":"Shutdown: going to close listening sockets"}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I", "c":"NETWORK", "id":23017, "ctx":"listener","msg":"removing socket file","attr":{"path":"/tmp/mongodb-27017.sock"}}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I", "c":"NETWORK", "id":4784905, "ctx":"SignalHandler","msg":"Shutting down the global connection pool"}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I", "c":"STORAGE", "id":4784906, "ctx":"SignalHandler","msg":"Shutting down the FlowControlTicketholder"}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I", "c":"-", "id":20520, "ctx":"SignalHandler","msg":"Stopping further Flow Control ticket acquisitions."}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I", "c":"STORAGE", "id":4784908, "ctx":"SignalHandler","msg":"Shutting down the PeriodicThreadToAbortExpiredTransactions"}
{"t":{"$date":"2021-01-01T00:15:56.064+00:00"},"s":"I", "c":"STORAGE", "id":4784934, "ctx":"SignalHandler","msg":"Shutting down the PeriodicThreadToDecreaseSnapshotHistoryCachePressure"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I", "c":"REPL", "id":4784909, "ctx":"SignalHandler","msg":"Shutting down the ReplicationCoordinator"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I", "c":"SHARDING", "id":4784910, "ctx":"SignalHandler","msg":"Shutting down the ShardingInitializationMongoD"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I", "c":"REPL", "id":4784911, "ctx":"SignalHandler","msg":"Enqueuing the ReplicationStateTransitionLock for shutdown"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I", "c":"-", "id":4784912, "ctx":"SignalHandler","msg":"Killing all operations for shutdown"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I", "c":"-", "id":4695300, "ctx":"SignalHandler","msg":"Interrupted all currently running operations","attr":{"opsKilled":3}}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I", "c":"COMMAND", "id":4784913, "ctx":"SignalHandler","msg":"Shutting down all open transactions"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I", "c":"REPL", "id":4784914, "ctx":"SignalHandler","msg":"Acquiring the ReplicationStateTransitionLock for shutdown"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I", "c":"INDEX", "id":4784915, "ctx":"SignalHandler","msg":"Shutting down the IndexBuildsCoordinator"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I", "c":"REPL", "id":4784916, "ctx":"SignalHandler","msg":"Reacquiring the ReplicationStateTransitionLock for shutdown"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I", "c":"REPL", "id":4784917, "ctx":"SignalHandler","msg":"Attempting to mark clean shutdown"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I", "c":"NETWORK", "id":4784918, "ctx":"SignalHandler","msg":"Shutting down the ReplicaSetMonitor"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I", "c":"SHARDING", "id":4784921, "ctx":"SignalHandler","msg":"Shutting down the MigrationUtilExecutor"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I", "c":"CONTROL", "id":4784925, "ctx":"SignalHandler","msg":"Shutting down free monitoring"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I", "c":"CONTROL", "id":20609, "ctx":"SignalHandler","msg":"Shutting down free monitoring"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I", "c":"FTDC", "id":4784926, "ctx":"SignalHandler","msg":"Shutting down full-time data capture"}
{"t":{"$date":"2021-01-01T00:15:56.065+00:00"},"s":"I", "c":"FTDC", "id":20626, "ctx":"SignalHandler","msg":"Shutting down full-time diagnostic data capture"}
{"t":{"$date":"2021-01-01T00:15:56.067+00:00"},"s":"I", "c":"STORAGE", "id":4784927, "ctx":"SignalHandler","msg":"Shutting down the HealthLog"}
{"t":{"$date":"2021-01-01T00:15:56.067+00:00"},"s":"I", "c":"STORAGE", "id":4784929, "ctx":"SignalHandler","msg":"Acquiring the global lock for shutdown"}
{"t":{"$date":"2021-01-01T00:15:56.067+00:00"},"s":"I", "c":"STORAGE", "id":4784930, "ctx":"SignalHandler","msg":"Shutting down the storage engine"}
{"t":{"$date":"2021-01-01T00:15:56.067+00:00"},"s":"I", "c":"STORAGE", "id":20282, "ctx":"SignalHandler","msg":"Deregistering all the collections"}
{"t":{"$date":"2021-01-01T00:15:56.067+00:00"},"s":"I", "c":"STORAGE", "id":22261, "ctx":"SignalHandler","msg":"Timestamp monitor shutting down"}
{"t":{"$date":"2021-01-01T00:15:56.067+00:00"},"s":"I", "c":"STORAGE", "id":22317, "ctx":"SignalHandler","msg":"WiredTigerKVEngine shutting down"}
{"t":{"$date":"2021-01-01T00:15:56.067+00:00"},"s":"I", "c":"STORAGE", "id":22318, "ctx":"SignalHandler","msg":"Shutting down session sweeper thread"}
{"t":{"$date":"2021-01-01T00:15:56.067+00:00"},"s":"I", "c":"STORAGE", "id":22319, "ctx":"SignalHandler","msg":"Finished shutting down session sweeper thread"}
{"t":{"$date":"2021-01-01T00:15:56.067+00:00"},"s":"I", "c":"STORAGE", "id":22320, "ctx":"SignalHandler","msg":"Shutting down journal flusher thread"}
{"t":{"$date":"2021-01-01T00:15:56.068+00:00"},"s":"I", "c":"STORAGE", "id":22321, "ctx":"SignalHandler","msg":"Finished shutting down journal flusher thread"}
{"t":{"$date":"2021-01-01T00:15:56.068+00:00"},"s":"I", "c":"STORAGE", "id":22322, "ctx":"SignalHandler","msg":"Shutting down checkpoint thread"}
{"t":{"$date":"2021-01-01T00:15:56.068+00:00"},"s":"I", "c":"STORAGE", "id":22323, "ctx":"SignalHandler","msg":"Finished shutting down checkpoint thread"}
{"t":{"$date":"2021-01-01T00:15:56.068+00:00"},"s":"I", "c":"STORAGE", "id":4795902, "ctx":"SignalHandler","msg":"Closing WiredTiger","attr":{"closeConfig":"leak_memory=true,"}}
{"t":{"$date":"2021-01-01T00:15:56.074+00:00"},"s":"I", "c":"STORAGE", "id":4795901, "ctx":"SignalHandler","msg":"WiredTiger closed","attr":{"durationMillis":6}}
{"t":{"$date":"2021-01-01T00:15:56.074+00:00"},"s":"I", "c":"STORAGE", "id":22279, "ctx":"SignalHandler","msg":"shutdown: removing fs lock..."}
{"t":{"$date":"2021-01-01T00:15:56.074+00:00"},"s":"I", "c":"-", "id":4784931, "ctx":"SignalHandler","msg":"Dropping the scope cache for shutdown"}
{"t":{"$date":"2021-01-01T00:15:56.074+00:00"},"s":"I", "c":"CONTROL", "id":20565, "ctx":"SignalHandler","msg":"Now exiting"}
{"t":{"$date":"2021-01-01T00:15:56.074+00:00"},"s":"I", "c":"CONTROL", "id":23138, "ctx":"SignalHandler","msg":"Shutting down","attr":{"exitCode":0}}
MongoDB init process complete; ready for start up.
{"t":{"$date":"2021-01-01T00:15:57.181+00:00"},"s":"I", "c":"CONTROL", "id":23285, "ctx":"main","msg":"Automatically disabling TLS 1.0, to force-enable TLS 1.0 specify --sslDisabledProtocols 'none'"}
{"t":{"$date":"2021-01-01T00:15:57.185+00:00"},"s":"W", "c":"ASIO", "id":22601, "ctx":"main","msg":"No TransportLayer configured during NetworkInterface startup"}
{"t":{"$date":"2021-01-01T00:15:57.185+00:00"},"s":"I", "c":"NETWORK", "id":4648601, "ctx":"main","msg":"Implicit TCP FastOpen unavailable. If TCP FastOpen is required, set tcpFastOpenServer, tcpFastOpenClient, and tcpFastOpenQueueSize."}
{"t":{"$date":"2021-01-01T00:15:57.186+00:00"},"s":"I", "c":"STORAGE", "id":4615611, "ctx":"initandlisten","msg":"MongoDB starting","attr":{"pid":1,"port":27017,"dbPath":"/data/db","architecture":"64-bit","host":"973e354c35f7"}}
{"t":{"$date":"2021-01-01T00:15:57.186+00:00"},"s":"I", "c":"CONTROL", "id":23403, "ctx":"initandlisten","msg":"Build Info","attr":{"buildInfo":{"version":"4.4.2","gitVersion":"15e73dc5738d2278b688f8929aee605fe4279b0e","openSSLVersion":"OpenSSL 1.1.1 11 Sep 2018","modules":[],"allocator":"tcmalloc","environment":{"distmod":"ubuntu1804","distarch":"x86_64","target_arch":"x86_64"}}}}
{"t":{"$date":"2021-01-01T00:15:57.186+00:00"},"s":"I", "c":"CONTROL", "id":51765, "ctx":"initandlisten","msg":"Operating System","attr":{"os":{"name":"Ubuntu","version":"18.04"}}}
{"t":{"$date":"2021-01-01T00:15:57.186+00:00"},"s":"I", "c":"CONTROL", "id":21951, "ctx":"initandlisten","msg":"Options set by command line","attr":{"options":{"net":{"bindIp":"*"},"security":{"authorization":"enabled"}}}}
{"t":{"$date":"2021-01-01T00:15:57.189+00:00"},"s":"I", "c":"STORAGE", "id":22270, "ctx":"initandlisten","msg":"Storage engine to use detected by data files","attr":{"dbpath":"/data/db","storageEngine":"wiredTiger"}}
{"t":{"$date":"2021-01-01T00:15:57.189+00:00"},"s":"I", "c":"STORAGE", "id":22297, "ctx":"initandlisten","msg":"Using the XFS filesystem is strongly recommended with the WiredTiger storage engine. See http://dochub.mongodb.org/core/prodnotes-filesystem","tags":["startupWarnings"]}
{"t":{"$date":"2021-01-01T00:15:57.189+00:00"},"s":"I", "c":"STORAGE", "id":22315, "ctx":"initandlisten","msg":"Opening WiredTiger","attr":{"config":"create,cache_size=6656M,session_max=33000,eviction=(threads_min=4,threads_max=4),config_base=false,statistics=(fast),log=(enabled=true,archive=true,path=journal,compressor=snappy),file_manager=(close_idle_time=100000,close_scan_interval=10,close_handle_minimum=250),statistics_log=(wait=0),verbose=[recovery_progress,checkpoint_progress,compact_progress],"}}
{"t":{"$date":"2021-01-01T00:15:57.861+00:00"},"s":"I", "c":"STORAGE", "id":22430, "ctx":"initandlisten","msg":"WiredTiger message","attr":{"message":"[1609460157:861019][1:0x7f184105dac0], txn-recover: [WT_VERB_RECOVERY_PROGRESS] Recovering log 1 through 2"}}
{"t":{"$date":"2021-01-01T00:15:57.969+00:00"},"s":"I", "c":"STORAGE", "id":22430, "ctx":"initandlisten","msg":"WiredTiger message","attr":{"message":"[1609460157:969647][1:0x7f184105dac0], txn-recover: [WT_VERB_RECOVERY_PROGRESS] Recovering log 2 through 2"}}
{"t":{"$date":"2021-01-01T00:15:58.064+00:00"},"s":"I", "c":"STORAGE", "id":22430, "ctx":"initandlisten","msg":"WiredTiger message","attr":{"message":"[1609460158:64534][1:0x7f184105dac0], txn-recover: [WT_VERB_RECOVERY | WT_VERB_RECOVERY_PROGRESS] Main recovery loop: starting at 1/29568 to 2/256"}}
{"t":{"$date":"2021-01-01T00:15:58.175+00:00"},"s":"I", "c":"STORAGE", "id":22430, "ctx":"initandlisten","msg":"WiredTiger message","attr":{"message":"[1609460158:175353][1:0x7f184105dac0], txn-recover: [WT_VERB_RECOVERY_PROGRESS] Recovering log 1 through 2"}}
{"t":{"$date":"2021-01-01T00:15:58.249+00:00"},"s":"I", "c":"STORAGE", "id":22430, "ctx":"initandlisten","msg":"WiredTiger message","attr":{"message":"[1609460158:249021][1:0x7f184105dac0], txn-recover: [WT_VERB_RECOVERY_PROGRESS] Recovering log 2 through 2"}}
{"t":{"$date":"2021-01-01T00:15:58.301+00:00"},"s":"I", "c":"STORAGE", "id":22430, "ctx":"initandlisten","msg":"WiredTiger message","attr":{"message":"[1609460158:301066][1:0x7f184105dac0], txn-recover: [WT_VERB_RECOVERY | WT_VERB_RECOVERY_PROGRESS] Set global recovery timestamp: (0, 0)"}}
{"t":{"$date":"2021-01-01T00:15:58.301+00:00"},"s":"I", "c":"STORAGE", "id":22430, "ctx":"initandlisten","msg":"WiredTiger message","attr":{"message":"[1609460158:301126][1:0x7f184105dac0], txn-recover: [WT_VERB_RECOVERY | WT_VERB_RECOVERY_PROGRESS] Set global oldest timestamp: (0, 0)"}}
{"t":{"$date":"2021-01-01T00:15:58.317+00:00"},"s":"I", "c":"STORAGE", "id":4795906, "ctx":"initandlisten","msg":"WiredTiger opened","attr":{"durationMillis":1128}}
{"t":{"$date":"2021-01-01T00:15:58.317+00:00"},"s":"I", "c":"RECOVERY", "id":23987, "ctx":"initandlisten","msg":"WiredTiger recoveryTimestamp","attr":{"recoveryTimestamp":{"$timestamp":{"t":0,"i":0}}}}
{"t":{"$date":"2021-01-01T00:15:58.318+00:00"},"s":"I", "c":"STORAGE", "id":4366408, "ctx":"initandlisten","msg":"No table logging settings modifications are required for existing WiredTiger tables","attr":{"loggingEnabled":true}}
{"t":{"$date":"2021-01-01T00:15:58.320+00:00"},"s":"I", "c":"STORAGE", "id":22262, "ctx":"initandlisten","msg":"Timestamp monitor starting"}
{"t":{"$date":"2021-01-01T00:15:58.321+00:00"},"s":"I", "c":"CONTROL", "id":22161, "ctx":"initandlisten","msg":"You are running in OpenVZ which can cause issues on versions of RHEL older than RHEL6","tags":["startupWarnings"]}
{"t":{"$date":"2021-01-01T00:15:58.324+00:00"},"s":"I", "c":"STORAGE", "id":20536, "ctx":"initandlisten","msg":"Flow Control is enabled on this deployment"}
{"t":{"$date":"2021-01-01T00:15:58.325+00:00"},"s":"I", "c":"FTDC", "id":20625, "ctx":"initandlisten","msg":"Initializing full-time diagnostic data capture","attr":{"dataDirectory":"/data/db/diagnostic.data"}}
{"t":{"$date":"2021-01-01T00:15:58.334+00:00"},"s":"I", "c":"NETWORK", "id":23015, "ctx":"listener","msg":"Listening on","attr":{"address":"/tmp/mongodb-27017.sock"}}
{"t":{"$date":"2021-01-01T00:15:58.334+00:00"},"s":"I", "c":"NETWORK", "id":23015, "ctx":"listener","msg":"Listening on","attr":{"address":"0.0.0.0"}}
{"t":{"$date":"2021-01-01T00:15:58.334+00:00"},"s":"I", "c":"NETWORK", "id":23016, "ctx":"listener","msg":"Waiting for connections","attr":{"port":27017,"ssl":"off"}}
Log from the mongo-express service:
Mongo Express server listening at http://0.0.0.0:8081
Server is open to allow connections from anyone (0.0.0.0)
basicAuth credentials are "admin:pass", it is recommended you change this in your config.js!
/node_modules/mongodb/lib/server.js:265
process.nextTick(function() { throw err; })
^
Error [MongoError]: failed to connect to server [mongo:27017] on first connect
at Pool.<anonymous> (/node_modules/mongodb-core/lib/topologies/server.js:326:35)
at Pool.emit (events.js:314:20)
at Connection.<anonymous> (/node_modules/mongodb-core/lib/connection/pool.js:270:12)
at Object.onceWrapper (events.js:421:26)
at Connection.emit (events.js:314:20)
at Socket.<anonymous> (/node_modules/mongodb-core/lib/connection/connection.js:175:49)
at Object.onceWrapper (events.js:421:26)
at Socket.emit (events.js:314:20)
at emitErrorNT (internal/streams/destroy.js:92:8)
at emitErrorAndCloseNT (internal/streams/destroy.js:60:3)
Waiting for mongo:27017...
/docker-entrypoint.sh: line 14: mongo: Try again
/docker-entrypoint.sh: line 14: /dev/tcp/mongo/27017: Invalid argument
Fri Jan 1 00:17:21 UTC 2021 retrying to connect to mongo:27017 (2/5)
/docker-entrypoint.sh: line 14: mongo: Try again
/docker-entrypoint.sh: line 14: /dev/tcp/mongo/27017: Invalid argument
Fri Jan 1 00:17:27 UTC 2021 retrying to connect to mongo:27017 (3/5)
/docker-entrypoint.sh: line 14: mongo: Try again
/docker-entrypoint.sh: line 14: /dev/tcp/mongo/27017: Invalid argument
Fri Jan 1 00:17:33 UTC 2021 retrying to connect to mongo:27017 (4/5)
/docker-entrypoint.sh: line 14: mongo: Try again
/docker-entrypoint.sh: line 14: /dev/tcp/mongo/27017: Invalid argument
Fri Jan 1 00:17:39 UTC 2021 retrying to connect to mongo:27017 (5/5)
/docker-entrypoint.sh: line 14: mongo: Try again
/docker-entrypoint.sh: line 14: /dev/tcp/mongo/27017: Invalid argument
By default mongo-express retries the connection 5 times; maybe the database takes more time to become connectable in a Docker-on-CentOS environment than on Windows/macOS. You can check whether the database is working by running docker exec -it nameofmongocontainer mongo -uroot -pexample and then something like show dbs;
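A quick sketch of that check (the container name is just a placeholder - use whatever docker ps shows for your mongo container):
docker exec -it nameofmongocontainer mongo -uroot -pexample
# then, at the mongo shell prompt:
show dbs;
exit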
You mentioned a DNS issue; that happened to me on my Docker-on-Windows environment three times before mongo-express managed to establish a connection with the database, so it's not exclusive to the CentOS Docker environment.
Potential fix: you can mount a modified entrypoint script into the mongo-express container, via the docker-compose file, that increases the number of retries to 20. Here is an example (save the docker-entrypoint.sh script in the same location as your docker-compose file):
#!/bin/bash
set -eo pipefail

# This is a very small modification of the original docker script taken from
# https://github.com/mongo-express/mongo-express-docker/blob/master/docker-entrypoint.sh
# with the only change being the modified value of max_tries=20 instead of the original 5,
# to give the main mongodb container enough time to start and become connectable.
# Also, see https://docs.docker.com/compose/startup-order/

# if the command does not start with mongo-express, run the command instead of the entrypoint
if [ "${1}" != "mongo-express" ]; then
    exec "$@"
fi

function wait_tcp_port {
    local host="$1" port="$2"
    local max_tries=20 tries=1

    # see http://tldp.org/LDP/abs/html/devref1.html for a description of this syntax.
    while ! exec 6<>/dev/tcp/$host/$port && [[ $tries -lt $max_tries ]]; do
        sleep 10s
        tries=$(( tries + 1 ))
        echo "$(date) retrying to connect to $host:$port ($tries/$max_tries)"
    done
    exec 6>&-
}

# if ME_CONFIG_MONGODB_SERVER has a comma in it, we're pointing to a replica set (https://github.com/mongo-express/mongo-express-docker/issues/21)
if [[ "$ME_CONFIG_MONGODB_SERVER" != *,* ]]; then
    # wait for the mongo server to be available
    echo "Waiting for ${ME_CONFIG_MONGODB_SERVER}:${ME_CONFIG_MONGODB_PORT:-27017}..."
    wait_tcp_port "${ME_CONFIG_MONGODB_SERVER}" "${ME_CONFIG_MONGODB_PORT:-27017}"
fi

# run mongo-express
exec node app
The docker-compose file will then be:
version: '3.1'
services:
  mongo:
    image: mongo
    restart: always
    environment:
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: example
  mongo-express:
    image: mongo-express
    restart: always
    ports:
      - 8081:8081
    environment:
      ME_CONFIG_MONGODB_ADMINUSERNAME: root
      ME_CONFIG_MONGODB_ADMINPASSWORD: example
    volumes:
      - ./docker-entrypoint.sh:/docker-entrypoint.sh
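Note that the script has to be executable on the host, since the bind mount shadows the image's own /docker-entrypoint.sh; something like this before bringing the stack up:
chmod +x docker-entrypoint.sh
docker-compose up -d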
Maybe an issue with CentOS?
What you can do is run these steps separately:
docker network create mongo-test
docker run -d --name mongo -e MONGO_INITDB_ROOT_USERNAME=root -e MONGO_INITDB_ROOT_PASSWORD=example --net mongo-test mongo
docker run -d --name mongo-express -e ME_CONFIG_MONGODB_ADMINUSERNAME=root -e ME_CONFIG_MONGODB_ADMINPASSWORD=example --net mongo-test -p 8081:8081 mongo-express
A potential issue is that CentOS is not allowing containers to publish ports with "-p". To test, try running docker run --rm --name test --net mongo-test -p 8081:80 httpd and then check localhost:8081.
Try this:
# Use root/example as user/password credentials
version: '3.1'
services:
  mongo:
    image: mongo
    container_name: mongo
    restart: always
    environment:
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: example
    ports:
      - 27017:27017
  mongo-express:
    image: mongo-express
    restart: always
    ports:
      - 8081:8081
    environment:
      ME_CONFIG_MONGODB_ADMINUSERNAME: root
      ME_CONFIG_MONGODB_ADMINPASSWORD: example
    links:
      - "mongo"
    depends_on:
      - "mongo"
If you want, please try this (working example):
version: '3'
services:
  mongo:
    image: mongo
    restart: always
    container_name: my_mongo
    ports:
      - "27017:27017"
    environment:
      MONGO_INITDB_ROOT_USERNAME: root
      MONGO_INITDB_ROOT_PASSWORD: example
    volumes:
      - ./data/db:/data/db
      - ./data/configdb:/data/configdb
  mongo-express:
    image: mongo-express
    restart: always
    container_name: my_mongo_express
    ports:
      - "8081:8081"
    environment:
      ME_CONFIG_MONGODB_ADMINUSERNAME: root
      ME_CONFIG_MONGODB_ADMINPASSWORD: example
      ME_CONFIG_BASICAUTH_USERNAME: user
      ME_CONFIG_BASICAUTH_PASSWORD: example
    depends_on:
      - mongo
volumes:
  node_modules:
You can also try to expose the Mongo service with a different name or on different ports, if you think there is a problem there. In that case, you also have to change the respective variables in the mongo-express service accordingly (ME_CONFIG_MONGODB_SERVER and ME_CONFIG_MONGODB_PORT) - details on the possible configurations here.
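A minimal sketch of what that would look like, assuming (purely as an illustration) the database service is renamed to db and mongod listens on 27018:
mongo-express:
  image: mongo-express
  environment:
    ME_CONFIG_MONGODB_SERVER: db
    ME_CONFIG_MONGODB_PORT: 27018
    ME_CONFIG_MONGODB_ADMINUSERNAME: root
    ME_CONFIG_MONGODB_ADMINPASSWORD: example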
Either way, is the network involving the two services correctly created?
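You can check that, for instance, with docker network ls and docker network inspect (the default network name is derived from your project directory, so the name below is just a placeholder):
docker network ls
docker network inspect myproject_default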