I hope someone can help me here.
I am creating a Linux setup for my server. I need vhosts and only want to use docker-compose so I can better track security and setup, and easily restart and add new containers.
For vhosting and ssl i use nginx-proxy (https://github.com/nginx-proxy/nginx-proxy). It works like a charm out of the box. Current docker-compose.yml:
services:
  proxy:
    image: nginxproxy/nginx-proxy:alpine
    # Host ports quoted so YAML keeps them as strings.
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - certs:/etc/nginx/certs
      - vhost:/etc/nginx/vhost.d
      - html:/usr/share/nginx/html
      # Read-only Docker socket so the proxy can watch container events.
      - /var/run/docker.sock:/tmp/docker.sock:ro
    container_name: proxy
    restart: always

  ssl:
    image: nginxproxy/acme-companion
    environment:
      - "DEFAULT_EMAIL=<MYEMAIL>"
    # Share the proxy's certs/vhost/html volumes so issued certificates land
    # where nginx-proxy reads them.
    volumes_from:
      - proxy:rw
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock:ro
      - acme:/etc/acme.sh
    container_name: ssl-management
    restart: always

networks:
  # Give the project's default network a fixed name so other stacks can join it.
  default:
    name: proxy-net

volumes:
  certs:
    external: false
  vhost:
    external: false
  html:
    external: false
  acme:
    external: false
I got all services running.
Currently im trying to start a mail server on the same server and ip. As mentioned i want to start (if possible) everything with docker so i got in touch with mailu (https://github.com/Mailu/Mailu) and as an alternative with mailcow (https://mailcow.email/)
I used the default setup for mailu from https://setup.mailu.io/1_7/ and configured everything as mentioned.
docker-compose.yml for mailu
version: '2.2'

services:
  # External dependencies
  redis:
    image: redis:alpine
    restart: always
    volumes:
      - "/home/vhost/mailserver/mailu/redis:/data"

  # Core services
  front:
    image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}nginx:${MAILU_VERSION:-1.7}
    restart: always
    env_file: mailu.env
    logging:
      driver: json-file
    # Container-only ports (published on random host ports); quoted so YAML
    # keeps them as strings.
    ports:
      - "25"
      - "465"
      - "587"
      - "110"
      - "995"
      - "143"
      - "993"
    volumes:
      - "/home/vhost/mailserver/mailu/overrides/nginx:/overrides"
    expose:
      - "80"
      - "443"
    environment:
      - VIRTUAL_HOST=mail.<MYDOMAIN>.com
      - LETSENCRYPT_HOST=mail.<MYDOMAIN>.com
    # Listing only proxy-net would detach front from the default (mail-net)
    # network that imap/smtp/antispam use — attach it to both.
    networks:
      - default
      - proxy-net

  admin:
    image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}admin:${MAILU_VERSION:-1.7}
    restart: always
    env_file: mailu.env
    volumes:
      - "/home/vhost/mailserver/mailu/data:/data"
      - "/home/vhost/mailserver/mailu/dkim:/dkim"
    depends_on:
      - redis

  imap:
    image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}dovecot:${MAILU_VERSION:-1.7}
    restart: always
    env_file: mailu.env
    volumes:
      - "/home/vhost/mailserver/mailu/mail:/mail"
      - "/home/vhost/mailserver/mailu/overrides:/overrides"
    depends_on:
      - front

  smtp:
    image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}postfix:${MAILU_VERSION:-1.7}
    restart: always
    env_file: mailu.env
    volumes:
      - "/home/vhost/mailserver/mailu/overrides:/overrides"
    depends_on:
      - front

  antispam:
    image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}rspamd:${MAILU_VERSION:-1.7}
    restart: always
    env_file: mailu.env
    volumes:
      - "/home/vhost/mailserver/mailu/filter:/var/lib/rspamd"
      - "/home/vhost/mailserver/mailu/dkim:/dkim"
      - "/home/vhost/mailserver/mailu/overrides/rspamd:/etc/rspamd/override.d"
    depends_on:
      - front

  # Optional services
  # Webmail
  webmail:
    image: ${DOCKER_ORG:-mailu}/${DOCKER_PREFIX:-}roundcube:${MAILU_VERSION:-1.7}
    restart: always
    env_file: mailu.env
    volumes:
      - "/home/vhost/mailserver/mailu/webmail:/data"
    depends_on:
      - imap

networks:
  default:
    driver: bridge
    name: mail-net
  # Join the existing network created by the nginx-proxy stack; without
  # "external: true" compose creates a new project-scoped "mailu_proxy-net"
  # that the proxy container is not attached to.
  proxy-net:
    external: true
    name: proxy-net
Now the Problem.
If I start nginx-proxy first and mailu second (or mailcow — same result, with or without the nginx-proxy VIRTUAL_HOST settings and port changes), I get this error:
Creating mailu_redis_1 ... done
Creating mailu_front_1 ... done
Creating mailu_admin_1 ...
Creating mailu_smtp_1 ... error
Creating mailu_antispam_1 ...
Creating mailu_imap_1 ...
Creating mailu_imap_1 ... error
ERROR: for mailu_imap_1 Cannot start service imap: runtime: failed to create new OS thread (have 6 already; errno=11)
runtime: may need to increase max user processes (ulimit -u)
fatal error: newosproc
goroutine 20 [running]:
runtime.throw(0x98a876, 0x9)
/usr/local/go/src/runtime/panic.go:774 +0x72 fp=0xc00003d5f0 sp=0xc00003d5c0 pc=0x42dd12
runtime.newosproc(0xc00008d880)
/usr/local/go/src/runtime/os_linux.go:153 +0x1ba fp=0xc00003d650 sp=0xc00003d5f0 pc=0x42b5da
runtime.newm1(0xc00008d880)
/usr/local/go/src/runtime/proc.go:1853 +0xdc fp=0xc00003d690 sp=0xc00003d650 pc=0x4335dc
runtime.newm(0x9a7f78, 0x0)
/usr/local/go/src/runtime/proc.go:1832 +0x8f fp=0xc00003d6c0 sp=0xc00003d690 pc=0x43344f
runtime.startTemplateThread()
/usr/local/go/src/runtime/proc.go:1873 +0xa9 fp=0xc00003d6e8 sp=0xc00003d6c0 pc=0x4336c9
runtime.LockOSThread()
/usr/local/go/src/runtime/proc.go:3543 +0x6b fp=0xc00003d708 sp=0xc00003d6e8 pc=0x437f2b
runtime.ensureSigM.func1()
/usr/local/go/src/runtime/signal_unix.go:535 +0x34 fp=0xc00003d7e0 sp=0xc00003d708 pc=0x4584a4
runtime.goexit()
/usr/local/go/src/runtime/asm_amd64.s:1357 +0x1 fp=0xc00003d7e8 sp=0xc00003d7e0 pc=0x45ae51
created by runtime.ensureSigM
/usr/local/go/src/runtime/signal_unix.go:532 +0xd5
goroutine 1 [chan send]:
os/signal.signal_enable(0x8b7cf98f0000000f)
/usr/local/go/src/runtime/sigqueue.go:219 +0x6c
os/signal.enableSignal(...)
/usr/local/go/src/os/signal/signal_unix.go:50
os/signal.Notify.func1(0xf)
/usr/local/go/src/os/signal/signal.go:135 +0x8e
os/signal.Notify(0xc0000a05a0, 0xc0000a0600, 0x4, 0x6)
/usr/local/go/src/os/signal/signal.go:147 +0x182
github.com/containerd/containerd/runtime/v2/shim.setupSignals(0x0, 0xc000000180, 0x9a7c18, 0xc0000dda10)
/go/src/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go:44 +0xc3
github.com/containerd/containerd/runtime/v2/shim.run(0x9903a5, 0x15, 0x9a6840, 0x0, 0x0, 0x0)
/go/src/github.com/containerd/containerd/runtime/v2/shim/shim.go:172 +0x37b
github.com/containerd/containerd/runtime/v2/shim.Run(0x9903a5, 0x15, 0x9a6840, 0x0, 0x0, 0x0)
/go/src/github.com/containerd/containerd/runtime/v2/shim/shim.go:153 +0xc4
main.main()
/go/src/github.com/containerd/containerd/cmd/containerd-shim-runc-v2/main.go:27 +0x53
goroutine 18 [runnable]:
os/signal.loop()
/usr/local/go/src/os/signal/signal_unix.go:21
created by os/signal.init.0
/usr/local/go/src/os/signal/signal_unix.go:29 +0x41
goroutine 19 [runnable]:
github.com/containerd/containerd/runtime/v2/shim.setRuntime.func1()
Creating mailu_admin_1 ... done
created by github.com/containerd/containerd/runtime/v2/shim.setRuntime
Creating mailu_antispam_1 ... done
: exit status 2: unknown
ERROR: for smtp Cannot start service smtp: ttrpc: closed: unknown
ERROR: for imap Cannot start service imap: runtime: failed to create new OS thread (have 6 already; errno=11)
runtime: may need to increase max user processes (ulimit -u)
fatal error: newosproc
goroutine 20 [running]:
runtime.throw(0x98a876, 0x9)
/usr/local/go/src/runtime/panic.go:774 +0x72 fp=0xc00003d5f0 sp=0xc00003d5c0 pc=0x42dd12
runtime.newosproc(0xc00008d880)
/usr/local/go/src/runtime/os_linux.go:153 +0x1ba fp=0xc00003d650 sp=0xc00003d5f0 pc=0x42b5da
runtime.newm1(0xc00008d880)
/usr/local/go/src/runtime/proc.go:1853 +0xdc fp=0xc00003d690 sp=0xc00003d650 pc=0x4335dc
runtime.newm(0x9a7f78, 0x0)
/usr/local/go/src/runtime/proc.go:1832 +0x8f fp=0xc00003d6c0 sp=0xc00003d690 pc=0x43344f
runtime.startTemplateThread()
/usr/local/go/src/runtime/proc.go:1873 +0xa9 fp=0xc00003d6e8 sp=0xc00003d6c0 pc=0x4336c9
runtime.LockOSThread()
/usr/local/go/src/runtime/proc.go:3543 +0x6b fp=0xc00003d708 sp=0xc00003d6e8 pc=0x437f2b
runtime.ensureSigM.func1()
/usr/local/go/src/runtime/signal_unix.go:535 +0x34 fp=0xc00003d7e0 sp=0xc00003d708 pc=0x4584a4
runtime.goexit()
/usr/local/go/src/runtime/asm_amd64.s:1357 +0x1 fp=0xc00003d7e8 sp=0xc00003d7e0 pc=0x45ae51
created by runtime.ensureSigM
/usr/local/go/src/runtime/signal_unix.go:532 +0xd5
goroutine 1 [chan send]:
os/signal.signal_enable(0x8b7cf98f0000000f)
/usr/local/go/src/runtime/sigqueue.go:219 +0x6c
os/signal.enableSignal(...)
/usr/local/go/src/os/signal/signal_unix.go:50
os/signal.Notify.func1(0xf)
/usr/local/go/src/os/signal/signal.go:135 +0x8e
os/signal.Notify(0xc0000a05a0, 0xc0000a0600, 0x4, 0x6)
/usr/local/go/src/os/signal/signal.go:147 +0x182
github.com/containerd/containerd/runtime/v2/shim.setupSignals(0x0, 0xc000000180, 0x9a7c18, 0xc0000dda10)
/go/src/github.com/containerd/containerd/runtime/v2/shim/shim_unix.go:44 +0xc3
github.com/containerd/containerd/runtime/v2/shim.run(0x9903a5, 0x15, 0x9a6840, 0x0, 0x0, 0x0)
/go/src/github.com/containerd/containerd/runtime/v2/shim/shim.go:172 +0x37b
github.com/containerd/containerd/runtime/v2/shim.Run(0x9903a5, 0x15, 0x9a6840, 0x0, 0x0, 0x0)
/go/src/github.com/containerd/containerd/runtime/v2/shim/shim.go:153 +0xc4
main.main()
/go/src/github.com/containerd/containerd/cmd/containerd-shim-runc-v2/main.go:27 +0x53
goroutine 18 [runnable]:
os/signal.loop()
/usr/local/go/src/os/signal/signal_unix.go:21
created by os/signal.init.0
/usr/local/go/src/os/signal/signal_unix.go:29 +0x41
goroutine 19 [runnable]:
github.com/containerd/containerd/runtime/v2/shim.setRuntime.func1()
/go/src/github.com/containerd/containerd/runtime/v2/shim/shim.go:119
created by github.com/containerd/containerd/runtime/v2/shim.setRuntime
/go/src/github.com/containerd/containerd/runtime/v2/shim/shim.go:119 +0x41
: exit status 2: unknown
ERROR: Encountered errors while bringing up the project.
If I run mailu without nginx-proxy running, there is no error and it works perfectly.
I am out of ideas as to what the problem could be. No port is exposed twice, and I can't see any shared dependencies that could cause a problem. The mentioned ulimit is set to 10000. There is nothing else running on this server, so I don't think that should be a problem either.
I hope you have more experience with this kind of setup and can help me.
Greetings
Related
I created a GrayLog 4 with docker compose, it successfully deployed, I can get to it through the browser but the page is blank identifies that it is the GrayLog Web Interface but the authentication screen does not appear, does anyone know how to help me what it could be.
version: '3'
services:
  # MongoDB document store used by Graylog for configuration data.
  mongo:
    image: mongo:4.2

  # Elasticsearch: https://www.elastic.co/guide/en/elasticsearch/reference/7.10/docker.html
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch-oss:7.10.2
    environment:
      - http.host=0.0.0.0
      - transport.host=localhost
      - network.host=0.0.0.0
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    deploy:
      resources:
        limits:
          memory: 1g

  # Graylog: https://hub.docker.com/r/graylog/graylog/
  graylog:
    image: graylog/graylog:4.0
    environment:
      # CHANGE ME (must be at least 16 characters)!
      - GRAYLOG_PASSWORD_SECRET=somepasswordpepper
      # Password: admin
      - GRAYLOG_ROOT_PASSWORD_SHA2=8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918
      - GRAYLOG_HTTP_EXTERNAL_URI=http://127.0.0.1:9000/
    # Wait for Elasticsearch before starting Graylog.
    entrypoint: /usr/bin/tini -- wait-for-it elasticsearch:9200 -- /docker-entrypoint.sh
    restart: always
    depends_on:
      - mongo
      - elasticsearch
    # Port mappings quoted to avoid YAML number parsing.
    ports:
      # Graylog web interface and REST API
      - "9000:9000"
      # Syslog TCP
      - "1514:1514"
      # Syslog UDP
      - "1514:1514/udp"
      # GELF TCP
      - "12201:12201"
      # GELF UDP
      - "12201:12201/udp"
(screenshot omitted)
In your screenshot the IP address ends in a 6, but Graylog is bound to 127.0.0.1. Set http_bind to 127.0.0.1 and http_publish or http_external to the interface IP that ends in 6.
ref: Graylog docs
I'm kinda new to docker so maybe my question is stupid, however, I've been unable to find a solution for it for a while now and it's starting to bother me so I'm asking here:
I have a default bridge network inside which there are few containers, one of them is running gluetun which is a vpn client and the rest is what's known as apache guacamole which is used as a remote desktop gateway.
It looks something like this:
networks:
  guacnetwork_compose:
    driver: bridge

services:
  # gluetun — VPN client whose network namespace guacd joins
  gluetun:
    image: qmcgaw/gluetun
    # NET_ADMIN is required, otherwise OpenVPN cannot configure the tunnel.
    cap_add:
      - NET_ADMIN
    ports:
      - "8888:8888/tcp" # HTTP proxy
      - "8388:8388/tcp" # Shadowsocks
      - "8388:8388/udp" # Shadowsocks
      # - "4823:4822"   # would publish guacd through the VPN container
    # networks:
    #   enable_ipv6=false
    # (empty "volumes:" key removed — a null value here is invalid compose)
    environment:
      - VPNSP=custom
      - VPN_TYPE=openvpn
      # OpenVPN:
      - OPENVPN_USER=
      - OPENVPN_PASSWORD=
      - OPENVPN_CUSTOM_CONFIG=
      # Timezone for accurate log times
      # - TZ=

  # guacd
  guacd:
    container_name: guacd_compose
    image: guacamole/guacd
    # Shares gluetun's network namespace: this container has no address of
    # its own, so other containers must reach it via the gluetun service.
    network_mode: "service:gluetun"
    # networks:
    #   guacnetwork_compose:
    restart: always
    volumes:
      - ./drive:/drive:rw
      - ./record:/record:rw

  guacd-no-vpn:
    container_name: guacd_compose_no_vpn
    image: guacamole/guacd
    networks:
      - guacnetwork_compose
    restart: always
    volumes:
      - ./drive:/drive:rw
      - ./record:/record:rw

  # guacamole
  guacamole:
    container_name: guacamole_compose
    depends_on:
      - guacd
      - postgres
    environment:
      # NOTE(review): guacd runs inside gluetun's namespace, so the name
      # "guacd" will not resolve to it; this likely needs to be "gluetun"
      # (with gluetun attached to guacnetwork_compose) — confirm.
      GUACD_HOSTNAME: guacd
      POSTGRES_DATABASE:
      POSTGRES_HOSTNAME:
      POSTGRES_PASSWORD:
      POSTGRES_USER:
    image: guacamole/guacamole
    links:
      - gluetun
    networks:
      - guacnetwork_compose
    ports:
      ## if not nginx
      ## - 8080:8080/tcp # Guacamole is on :8080/guacamole, not /.
      - "8080/tcp"
    restart: always
Basically what I want to happen is for the guacd container to use the network of the VPN container and then communicate with the GUI which is the guacamole container. Currently, the guacd is using the gluetun network, however, I can not get it to communicate with the guacamole container despite my efforts. Could somebody tell me what am I doing wrong?
I have tried many configurations and scenarios based around this which is mostly a tutorial that stops at one ghost instance. I am trying to scale it to 2 with docker-deploy up -d --scale ghost=2. When I hit the individual IP;s of the ghost containers , they work but port 80 is 503.
version: "3.1"

volumes:
  mysql-volume:
  ghost-volume:

networks:
  ghost-network:

services:
  mysql:
    image: mysql:5.7
    container_name: mysql
    volumes:
      - mysql-volume:/var/lib/mysql
    networks:
      - ghost-network
    environment:
      MYSQL_ROOT_PASSWORD: root
      MYSQL_DATABASE: db
      MYSQL_USER: blog-user
      MYSQL_PASSWORD: supersecret

  ghost:
    build: ./ghost
    image: laminar/ghost:3.0
    # No container_name: the service is scaled (--scale ghost=2), so names
    # must be generated.
    volumes:
      - ghost-volume:/var/lib/ghost/content
    networks:
      - ghost-network
    restart: always
    # Container port only — each replica gets a random host port, so scaling
    # does not collide on a fixed host port.
    ports:
      - "2368"
    environment:
      database__client: mysql
      database__connection__host: mysql
      database__connection__user: blog-user
      database__connection__password: supersecret
      database__connection__database: db
    depends_on:
      - mysql
    entrypoint: ["wait-for-it.sh", "mysql", "--", "docker-entrypoint.sh"]
    command: ["node", "current/index.js"]

  haproxy:
    image: eeacms/haproxy
    depends_on:
      - ghost
    # haproxy must join ghost-network; without this it sits on the project's
    # default network and cannot resolve the "ghost" backend (503 on :80).
    networks:
      - ghost-network
    ports:
      - "80:5000"
      - "1936:1936"
    environment:
      BACKENDS: "ghost"
      DNS_ENABLED: "true"
      LOG_LEVEL: "info"
What I get on localhost:80 is a 503 error. The particular eeacms/haproxy image is supposed to be self-configuring. Any help appreciated.
I needed to add a backend URL to the environment and also tell ghost it was installed in an alternate location by adding URL: localhost:5050
I got postgresql and pgadmin4 with docker swarm on my ubuntu 18.04 server, but pgadmin gives me errors and after a while the application crashes and I can't enter postgres: this is the error
PermissionError: [Errno 1] Operation not permitted: '/var/lib/pgadmin/sessions'
WARNING: Failed to set ACL on the directory containing the configuration database:
[Errno 1] Operation not permitted: '/var/lib/pgadmin'
HINT : You may need to manually set the permissions on
/var/lib/pgadmin to allow pgadmin to write to it.
/usr/local/lib/python3.8/os.py:1023: RuntimeWarning: line buffering (buffering=1) isn't supported in binary mode, the default buffer size will be used
return io.open(fd, *args, **kwargs)
[2020-09-04 21:12:24 +0000] [1] [INFO] Shutting down: Master
[2020-09-04 21:12:24 +0000] [1] [INFO] Reason: Worker failed to boot.
WARNING: Failed to set ACL on the directory containing the configuration database:
[Errno 1] Operation not permitted: '/var/lib/pgadmin'
HINT : You may need to manually set the permissions on
/var/lib/pgadmin to allow pgadmin to write to it.
NOTE: Configuring authentication for SERVER mode.
sudo: setrlimit(RLIMIT_CORE): Operation not permitted
[2020-09-04 21:14:26 +0000] [1] [INFO] Starting gunicorn 19.9.0
[2020-09-04 21:14:26 +0000] [1] [INFO] Listening at: http://[::]:80 (1)
[2020-09-04 21:14:26 +0000] [1] [INFO] Using worker: threads
/usr/local/lib/python3.8/os.py:1023: RuntimeWarning: line buffering (buffering=1) isn't supported in binary mode, the default buffer size will be used
return io.open(fd, *args, **kwargs)
[2020-09-04 21:14:26 +0000] [89] [INFO] Booting worker with pid: 89
in error I see that it tells me NOTE: Configuring authentication for SERVER mode.
but I don't know how to configure what it indicates; could someone help me solve my problem?
Thank you
Edit:
docker-compose.yml
version: '3'
services:
  # TLS-terminating reverse proxy in front of the application.
  ssl:
    image: danieldent/nginx-ssl-proxy
    restart: always
    environment:
      UPSTREAM: myApp:8086
      SERVERNAME: dominio.com
    ports:
      - "80:80/tcp"
      - "443:443/tcp"
    depends_on:
      - myApp
    volumes:
      - ./nginxAPP:/etc/letsencrypt
      - ./nginxAPP:/etc/nginx/user.conf.d:ro

  bdd:
    restart: always
    image: postgres:12
    ports:
      - "5432:5432/tcp"
    environment:
      POSTGRES_USER: user
      # Quoted so the number-looking password stays a string.
      POSTGRES_PASSWORD: "12345"
      POSTGRES_DB: miBDD
    volumes:
      - ./pgdata:/var/lib/postgresql/data

  pgadmin:
    image: dpage/pgadmin4
    ports:
      - "9095:80/tcp"
    environment:
      PGADMIN_DEFAULT_EMAIL: user
      PGADMIN_DEFAULT_PASSWORD: "12345"
      PROXY_X_FOR_COUNT: 3
      PROXY_X_PROTO_COUNT: 3
      PROXY_X_HOST_COUNT: 3
      PROXY_X_PORT_COUNT: 3
    volumes:
      - ./pgadminAplicattion:/var/lib/pgadmin

  myApp:
    restart: always
    image: appImage
    ports:
      - "8086:8086"
    depends_on:
      - bdd
    working_dir: /usr/myApp
    environment:
      CONFIG_PATH: ../configuation
    command: "node server.js"
It's generally a bad idea to use bind mounts in a non-development environments and doubly so when it comes to Docker Swarm (as opposed to regular Docker). This goes doubly when it comes to images like postgres or dpage/pgadmin4, which require those mounted directories to have specific ownership and/or read/write priviledges.
In your case, you need to run:
sudo chown 999:999 pgdata
sudo chown 5050:5050 pgadminAplicattion
to give those directories correct ownership.
That being said, it's a much better idea to avoid bind mounts entirely and use named volumes instead (irrelevant parts of Compose file skipped):
version: "3"
services:
  bdd:
    restart: always
    image: postgres:12
    ports:
      - "5432:5432/tcp"
    environment:
      POSTGRES_USER: user
      # Quoted so the number-looking password stays a string.
      POSTGRES_PASSWORD: "12345"
      POSTGRES_DB: miBDD
    volumes:
      # Named volume — Docker manages ownership, avoiding the bind-mount
      # permission errors seen with ./pgdata.
      - pgdata:/var/lib/postgresql/data

  pgadmin:
    restart: always
    image: dpage/pgadmin4
    ports:
      - "9095:80/tcp"
    environment:
      PGADMIN_DEFAULT_EMAIL: user
      PGADMIN_DEFAULT_PASSWORD: "12345"
      PROXY_X_FOR_COUNT: 3
      PROXY_X_PROTO_COUNT: 3
      PROXY_X_HOST_COUNT: 3
      PROXY_X_PORT_COUNT: 3
    volumes:
      - pgadmin:/var/lib/pgadmin

volumes:
  pgdata:
  pgadmin:
In your case you need to use following command: Try this
sudo chown -R 5050:5050 /var/lib/pgadmin
I have docker-compose that looks like this
version: '3.7'

networks:
  # Pre-existing network shared with the IAM stack ("external: name:" map
  # form is deprecated in 3.5+; use external + name instead).
  iam_network:
    external: true
    name: foundation_iam
  rdc_network:
    name: rdcstu3_net

services:
  rdcdeploy:
    restart: "no"
    container_name: rdcdeploy
    build:
      context: ./rdcdeploy
      args:
        - build_version
        - build_type
    image: rdcdeploy:$build_version
    volumes:
      - ./cfg:/cfg
    networks:
      - rdc_network

  rdcrabbitmq:
    restart: "no"
    container_name: rdcrabbitmq
    build:
      context: ./rabbitmq
      args:
        - build_version
        - build_type
    image: rdcrabbitmq:$build_version
    ports:
      - "5772:5672"
      - "15772:15672"
    depends_on:
      - rdcdeploy
    volumes:
      - ./cfg:/cfg
    networks:
      - rdc_network

  rdcdb:
    restart: "no"
    container_name: rdcdb
    build:
      context: ./postgres
      args:
        - build_version
        - build_type
    image: rdcpostgres:$build_version
    ports:
      - "5532:5432"
    depends_on:
      - rdcdeploy
    volumes:
      - ./cfg:/cfg
    networks:
      - rdc_network

  rdcdbdeploy:
    restart: "no"
    container_name: rdcdbdeploy
    build:
      context: ./rdcdbdeploy
      args:
        - build_version
        - build_type
    image: rdcdbdeploy:$build_version
    depends_on:
      - rdcdb
    volumes:
      - ./cfg:/cfg
    networks:
      - rdc_network

  rihapp:
    restart: "no"
    container_name: rihapp
    build:
      context: ./rihserver
      args:
        - build_version
        - build_type
    image: rihapp:$build_version
    ports:
      - "9090:8080"
    depends_on:
      - rdcrabbitmq
      - rdcdb
    volumes:
      - ./cfg:/cfg
    networks:
      - iam_network
      - rdc_network

  subscription_scheduler:
    restart: "no"
    container_name: subscription_scheduler
    build:
      context: ./subscription
      args:
        - build_version
        - build_type
    image: subscription_scheduler:$build_version
    depends_on:
      - rdcrabbitmq
      - rdcdb
      - rihapp
    volumes:
      - ./cfg:/cfg
    networks:
      - iam_network
      - rdc_network
    environment:
      - rdc.subscription.instanceNumber=0

  subscription_processor:
    restart: "no"
    container_name: subscription_processor
    build:
      context: ./subscription
      args:
        - build_version
        - build_type
    image: subscription_processor:$build_version
    depends_on:
      - rdcrabbitmq
      - rdcdb
      - rihapp
    volumes:
      - ./cfg:/cfg
    networks:
      - iam_network
      - rdc_network
    environment:
      - rdc.subscription.instanceNumber=1

  rdcsmoketest:
    restart: "no"
    container_name: rdcsmoketests
    build:
      context: ./rdcdeploy
    image: rdcdeploy:$build_version
    volumes:
      - ./cfg:/cfg
    depends_on:
      - rihapp
    networks:
      - iam_network
      - rdc_network
    entrypoint:
      - wait-for-rihapp.sh
      - rdcdeploy
    command: ["-x", "-z", "/cfg", "-c", "/cfg/config.yml", "docker"]
I start it using docker-compose up and it shows that the containers are started.
eedaa5e11a0e rdicdeploy:3.3.0.1 "wait-for-rihapp.sh…" 2 minutes ago Up 38 seconds rdicsmoketests
9178355cbca7 subscription_scheduler:3.3.0.1 "./wait-for-env.sh /…" 2 minutes ago Up 38 seconds subscription_scheduler
ae24a4b76f3e subscription_processor:3.3.0.1 "./wait-for-env.sh /…" 2 minutes ago Up 38 seconds subscription_processor
5f789ae74ef2 rihapp:3.3.0.1 "./wait_for_rdic_db.s…" 2 minutes ago Up 39 seconds 0.0.0.0:9090->8080/tcp rihapp
698b26d0ca37 rdicdbdeploy:3.3.0.1 "wait-for-env-db.sh …" 2 minutes ago Up 39 seconds rdicdbdeploy
592cb850f5b9 rdicrabbitmq:3.3.0.1 "wait-for-env.sh /cf…" 2 minutes ago Up 39 seconds 4369/tcp, 5671/tcp, 15671/tcp, 25672/tcp, 0.0.0.0:5772->5672/tcp, 0.0.0.0:15772->15672/tcp rdicrabbitmq
505a0f36528f rdicpostgres:3.3.0.1 "wait-for-env.sh /cf…" 2 minutes ago Up 39 seconds 0.0.0.0:5532->5432/tcp
But for some reason no container are able to connect either rabbitmq or postgres.
Logs for rabbitmq shows that they have started
2020-07-24 10:32:13.226 [info] <0.370.0> Running boot step direct_client defined by app rabbit
2020-07-24 10:32:13.226 [info] <0.370.0> Running boot step os_signal_handler defined by app rabbit
2020-07-24 10:32:13.226 [info] <0.489.0> Swapping OS signal event handler (erl_signal_server) for our own
2020-07-24 10:32:13.262 [info] <0.539.0> Management plugin: HTTP (non-TLS) listener started on port 15672
2020-07-24 10:32:13.262 [info] <0.645.0> Statistics database started.
2020-07-24 10:32:13.262 [info] <0.644.0> Starting worker pool 'management_worker_pool' with 3 processes in it
2020-07-24 10:32:13.480 [info] <0.8.0> Server startup complete; 3 plugins started.
* rabbitmq_management
* rabbitmq_web_dispatch
* rabbitmq_management_agent
completed with 3 plugins.
For postgres too
server started
CREATE DATABASE
CREATE ROLE
/usr/local/bin/docker-entrypoint.sh: ignoring /docker-entrypoint-initdb.d/*
waiting for server to shut down...LOG: received fast shutdown request
.LOG: aborting any active transactions
LOG: autovacuum launcher shutting down
LOG: shutting down
LOG: database system is shut down
done
server stopped
PostgreSQL init process complete; ready for start up.
LOG: database system was shut down at 2020-07-24 10:30:59 UTC
LOG: MultiXact member wraparound protections are now enabled
LOG: database system is ready to accept connections
LOG: autovacuum launcher started
Environment Available - proceeding with startup docker-entrypoint.sh postgres
LOG: database system was interrupted; last known up at 2020-07-24 10:31:00 UTC
LOG: database system was not properly shut down; automatic recovery in progress
LOG: invalid record length at 0/14EEEA0: wanted 24, got 0
LOG: redo is not required
LOG: MultiXact member wraparound protections are now enabled
LOG: database system is ready to accept connections
LOG: autovacuum launcher started
But the applications are trying to connect 5772 but the connection refused for rabbitmq and for postgres it also tells that
psql: error: could not connect to server: could not connect to server: Connection refused
rihapp | Is the server running on host "localhost" (127.0.0.1) and accepting
rihapp | TCP/IP connections on port 5532?
It also generates .env files that contains environment variables for APPS like
DATABASE_URL=postgres://rdc:rdc#localhost:5532/pg_db
spring.datasource.url=jdbc:postgresql://localhost:5532/pg_db
spring.rabbitmq.host=localhost
spring.rabbitmq.port=5772
What might be a problem? It feels like some kind of network problem.
It seems that you've configured the clients to contact the servers on localhost:X, am I getting this right?
In that case you need to be aware that containers in docker-compose have separate network namespaces and reach each other through a bridge interface. This means that inside a container you should use rdcrabbitmq:5672 instead of localhost:5772.