AWS ECS "invalid reference format" on multi-container app deploy - docker-compose

I can't run docker compose up for my multi-container app in the ECS context. In the default context it runs fine. What am I doing wrong?
$ docker compose up
mysql:8.0.23 resolved to docker.io/library/mysql:8.0.23@sha256:...
mongo:4.4.3-bionic resolved to docker.io/library/mongo:4.4.3-bionic@sha256:...
invalid reference format
My docker-compose file:
version: '3.8'
services:
  app:
    container_name: tsb-app
    build:
      context: ..
      dockerfile: .docker/Dockerfile
    depends_on:
      - mongo
      - mysql
    volumes:
      - tsb_modules:/node_modules
    expose:
      - "80"
  mongo:
    container_name: tsb-mongo
    image: mongo:4.4.3-bionic
    environment:
      MONGO_INITDB_ROOT_USERNAME: asdasdasd
      MONGO_INITDB_ROOT_PASSWORD: asdasdasd
    volumes:
      - tsb_data:/data/db
  mysql:
    container_name: tsb-mysql
    image: mysql:8.0.23
    environment:
      MYSQL_ROOT_PASSWORD: asdasdasd
      MYSQL_USER: asdasdasd
      MYSQL_PASSWORD: asdasdasd
    volumes:
      - tsb_logs:/var/lib/
      # Create the required schemas and tables on first start
      - ./setup.sql:/docker-entrypoint-initdb.d/setup.sql
    ports:
      - 3300:3306
volumes:
  tsb_data:
  tsb_logs:
  tsb_modules:
My .docker/Dockerfile:
FROM node:15.8.0-alpine3.10
WORKDIR /app
COPY package.json .yarnrc yarn.lock prod.env ./.docker/setup.sql ./
RUN yarn install --production --frozen-lockfile
COPY build/ ./build
CMD node build/index.js

Related

Why docker-compose doesn't start all containers described in the file?

I have the following docker-compose.yml
version: '2'
services:
  db:
    image: postgres
    container_name: postgres
    restart: unless-stopped
    environment:
      POSTGRES_PASSWORD: mypassword
      POSTGRES_USER: postgres
      POSTGRES_DB: postgres
    ports:
      - 5432:5432
    volumes:
      - ./data/db:/var/lib/postgresql/data
  adminer:
    image: adminer
    restart: unless-stopped
    ports:
      - 8080:8080
  nginx-reverse-proxy:
    image: nginx:1.19.8
    container_name: reverse-proxy-container
    volumes:
      - ./proxy/config:/etc/nginx
    restart: unless-stopped
    ports:
      - 8000:8000
  sheet-service:
    image: sheetservice:latest
    build:
      context: ./microservices/sheet-service
      dockerfile: Dockerfile
    container_name: sheet-service
    restart: unless-stopped
    ports:
      - 3002:3002
    depends_on:
      - "db"
      - "nginx-reverse-proxy"
Only the sheet-service container starts when I execute docker-compose up --build. When I check the running processes with docker ps -a, the output shows this. Strangely, the container's name is not sheet-service as specified in docker-compose.yml.
The Dockerfile of sheet-service:
FROM node:14.15.0
# Create app directory
WORKDIR /usr/src/app
# Install app dependencies
# A wildcard is used to ensure both package.json AND package-lock.json are copied
# where available (npm@5+)
COPY package*.json ./
RUN npm ci --only=production
COPY . .
RUN npm run start
The script's starting logs:
> docker-compose up --build
Building sheet-service
Sending build context to Docker daemon 73.29MB
Step 1/6 : FROM node:14.15.0
---> b90fa0d7cbd1
Step 2/6 : WORKDIR /usr/src/app
---> Using cache
---> 6ce9a5a77956
Step 3/6 : COPY package*.json ./
---> Using cache
---> d9c528ab2b74
Step 4/6 : RUN npm ci --only=production
---> Using cache
---> 221d8f411055
Step 5/6 : COPY . .
---> Using cache
---> 025dba9ebb44
Step 6/6 : RUN npm run start
---> Running in 84c8c1b8a9d8
> sheet-service@1.0.0 start /usr/src/app
> node src/index.js
Server running on port 3002!
If I comment out the lines where sheet-service is defined, like this:
version: '2'
services:
  db:
    image: postgres
    container_name: postgres
    restart: unless-stopped
    environment:
      POSTGRES_PASSWORD: mypassword
      POSTGRES_USER: postgres
      POSTGRES_DB: postgres
    ports:
      - 5432:5432
    volumes:
      - ./data/db:/var/lib/postgresql/data
  adminer:
    image: adminer
    restart: unless-stopped
    ports:
      - 8080:8080
  nginx-reverse-proxy:
    image: nginx:1.19.8
    container_name: reverse-proxy-container
    volumes:
      - ./proxy/config:/etc/nginx
    restart: unless-stopped
    ports:
      - 8000:8000
  # sheet-service:
  #   image: sheetservice:latest
  #   build:
  #     context: ./microservices/sheet-service
  #     dockerfile: Dockerfile
  #   container_name: sheet-service
  #   restart: unless-stopped
  #   ports:
  #     - 3002:3002
  #   depends_on:
  #     - "db"
  #     - "nginx-reverse-proxy"
all other containers start successfully. Why does sheet-service stop the other containers from running, and why does it show up with a different name than the one specified?
If I run the containers one by one with docker-compose run container_name, they all start successfully without errors or warnings.
docker-compose config:
services:
  adminer:
    image: adminer
    ports:
    - published: 8080
      target: 8080
    restart: unless-stopped
  db:
    container_name: postgres
    environment:
      POSTGRES_DB: postgres
      POSTGRES_PASSWORD: mypassword
      POSTGRES_USER: postgres
    image: postgres
    ports:
    - published: 5432
      target: 5432
    restart: unless-stopped
    volumes:
    - /home/myuser/Coding/myproject/data/db:/var/lib/postgresql/data:rw
  nginx-reverse-proxy:
    container_name: reverse-proxy-container
    image: nginx:1.19.8
    ports:
    - published: 8000
      target: 8000
    restart: unless-stopped
    volumes:
    - /home/myuser/Coding/myproject/proxy/config:/etc/nginx:rw
  sheet-service:
    build:
      context: /home/myuser/Coding/myproject/microservices/sheet-service
      dockerfile: Dockerfile
    container_name: sheet-service
    depends_on:
      db:
        condition: service_started
      nginx-reverse-proxy:
        condition: service_started
    image: sheetservice:latest
    ports:
    - published: 3002
      target: 3002
    restart: unless-stopped
version: '2'
Docker version 20.10.7, build f0df350
docker-compose version 1.29.1, build c34c88b2
Ubuntu 16.04.7 LTS
Your Dockerfile ...
FROM node:14.15.0
# Create app directory
WORKDIR /usr/src/app
# Install app dependencies
# A wildcard is used to ensure both package.json AND package-lock.json are copied
# where available (npm@5+)
COPY package*.json ./
RUN npm ci --only=production
COPY . .
RUN npm run start
... ends with RUN npm run start. That RUN instruction is executed as part of the Docker image build and starts the Node server, so the build 'hangs' while the server keeps running, which prevents the subsequent steps of the docker-compose startup from ever starting (never mind completing).
I presume you mean to start the Node process when the container itself starts, so change the RUN npm run start line to ENTRYPOINT npm run start; that way the Node process is executed at container startup rather than at build time.
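As a minimal sketch, only the last line of the Dockerfile above would change (CMD would work equally well here):
FROM node:14.15.0
WORKDIR /usr/src/app
COPY package*.json ./
RUN npm ci --only=production
COPY . .
# Runs when the container starts, not during the image build
ENTRYPOINT ["npm", "run", "start"]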

Docker pgadmin 4 - error: "does not appear to be a valid email address. Please reset the PGADMIN_DEFAULT_EMAIL environment variable"

Please bear with me, I'm rather new to Docker.
I've got the following docker-compose.yaml file from my colleague, who runs it on Windows - apparently without problems:
version: "3.3"
services:
mysql-server:
image: mysql:8.0.19
restart: always
environment:
MYSQL_ROOT_PASSWORD: secret
volumes:
- mysql-data:/var/lib/mysql
ports:
- "33061:33061"
phpmyadmin:
image: phpmyadmin/phpmyadmin:5.1.1
restart: always
environment:
PMA_HOST: mysql-server
PMA_USER: ${PMA_USER}
PMA_PASSWORD: ${PMA_PASSWORD}
UPLOAD_LIMIT: 256M
MAX_EXECUTION_TIME: 0
ports:
- "8080:80"
volumes:
- ./database/config.user.inc.php:/etc/phpmyadmin/config.user.inc.php
postgresdb:
container_name: pg_container
image: postgres:latest
restart: always
ports:
- "54321:54321"
environment:
- POSTGRES_USER=${POSTGRES_USER}
- POSTGRES_PASSWORD=${POSTGRES_PASSWORD}
- POSTGRES_DB=${POSTGRES_DB}
volumes:
- postgres:/var/lib/postgresql/data
pgadmin:
container_name: pgadmin_container
depends_on:
- postgresdb
image: dpage/pgadmin4:5
restart: always
ports:
- "5556:80"
environment:
- PGADMIN_DEFAULT_EMAIL=${PGADMIN_DEFAULT_EMAIL}
- PGADMIN_DEFAULT_PASSWORD=${PGADMIN_DEFAULT_PASSWORD}
volumes:
- pgadmin:/var/lib/pgadmin
web:
build:
context: .
dockerfile: dockerfile-python
command: python3 manage.py runserver 0.0.0.0:8000
container_name: python_myApp
volumes:
- .:/theApp
ports:
- "8000:8000"
depends_on:
- postgresdb
volumes:
mysql-data:
postgres:
pgadmin:
I run it on Linux; my version is Docker version 20.10.9, build c2ea9bc.
The problem is that the pgadmin container won't start up - it gives me the following error:
'"server@myapp.de"' does not appear to be a valid email address. Please reset the PGADMIN_DEFAULT_EMAIL environment variable and try again.
The .env file looks like this:
PMA_USER="root"
PMA_PASSWORD="XXXX"
POSTGRES_DB='postgres'
POSTGRES_USER='admin'
POSTGRES_PASSWORD='XXXX'
PGADMIN_DEFAULT_EMAIL="server#myapp.de"
PGADMIN_DEFAULT_PASSWORD="XXXX"
I tried to reset everything by running
docker system prune
docker volume prune
but the error persists. What's going wrong here?
Thanks!
You don't need any quotes in .env files - just remove them:
PMA_USER=root
PMA_PASSWORD=XXXX
POSTGRES_DB=postgres
POSTGRES_USER=admin
POSTGRES_PASSWORD=XXXX
PGADMIN_DEFAULT_EMAIL=server@myapp.de
PGADMIN_DEFAULT_PASSWORD=XXXX
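To double-check that the quotes really disappear from the interpolated value, you can inspect the resolved configuration; a quick sketch, run from the directory containing the compose file:
docker-compose config | grep PGADMIN_DEFAULT_EMAIL
# the value should now appear without literal double quotes around the address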

docker compose phpmyadmin php_network_getaddresses: getaddrinfo failed: Temporary failure in name resolution

I am trying to set up a Docker stack with Laravel, MariaDB, nginx, Redis and phpMyAdmin. The Laravel site works fine, but if I switch to port 10081, as configured in the docker-compose.yml, I am not able to log in with the root account.
It says "mysqli::real_connect(): php_network_getaddresses: getaddrinfo failed: Temporary failure in name resolution".
I already tried to configure a "my-network" that links all of the containers, but if I understand Docker correctly there is already a "default" network that does this. It didn't change the error message anyway.
Here is my full docker-compose file:
version: "3.8"
services:
redis:
image: redis:6.0-alpine
expose:
- "6380"
db:
image: mariadb:10.4
ports:
- "3307:3306"
environment:
MYSQL_USERNAME: root
MYSQL_ROOT_PASSWORD: secret
MYSQL_DATABASE: laravel
volumes:
- db-data:/var/lib/mysql
nginx:
image: nginx:1.19-alpine
build:
context: .
dockerfile: ./docker/nginx.Dockerfile
restart: always
depends_on:
- php
ports:
- "10080:80"
networks:
- default
environment:
VIRTUAL_HOST: cockpit.example.de
volumes:
- ./docker/nginx.conf:/etc/nginx/nginx.conf:ro
- ./public:/app/public:ro
php:
build:
target: dev
context: .
dockerfile: ./docker/php.Dockerfile
working_dir: /app
env_file: .env
restart: always
expose:
- "9000"
depends_on:
- composer
- redis
- db
volumes:
- ./:/app
- ./docker/www.conf:/usr/local/etc/php-fpm.d/www.conf:ro
links:
- db:mysql
phpmyadmin:
image: phpmyadmin/phpmyadmin:latest
ports:
- 10081:80
restart: always
environment:
PMA_HOST : db
MYSQL_USERNAME: root
MYSQL_ROOT_PASSWORD: secret
depends_on:
- db
#user: "109:115"
links:
- db:mysql
node:
image: node:12-alpine
working_dir: /app
volumes:
- ./:/app
command: sh -c "npm install && npm run watch"
composer:
image: composer:1.10
working_dir: /app
#environment:
#SSH_AUTH_SOCK: /ssh-auth.sock
volumes:
- ./:/app
#- "$SSH_AUTH_SOCK:/ssh-auth.sock"
- /etc/passwd:/etc/passwd:ro
- /etc/group:/etc/group:ro
command: composer install --ignore-platform-reqs --no-scripts
volumes:
db-data:
Make sure you have defined all attributes correctly for the phpmyadmin container; in this case the networks definition was missing:
phpmyadmin:
  image: phpmyadmin/phpmyadmin:latest
  container_name: phpmyadmin
  restart: always
  ports:
    # 8080 is the host port and 80 is the container port
    - 8080:80
  environment:
    - PMA_ARBITRARY=1
    - PMA_HOST=mysql
    - MYSQL_USERNAME=root
    - MYSQL_ROOT_PASSWORD=secret
  depends_on:
    - mysql
  networks:
    # define your network where all containers are connected to each other
    - laravel
  volumes:
    # define the directory where you store phpMyAdmin's persistent data and
    # config files
    - ./docker/phpmyadmin
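For the networks: entry above to resolve, the network also has to be declared at the top level of the compose file; a minimal sketch (the name laravel is just the example used above, and the other services would join it the same way):
networks:
  laravel:
    driver: bridge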
Maybe your container cannot start because its volume contains incompatible data. That can happen if you downgrade the version of the mysql or mariadb image.
You can resolve the problem by removing the volume and importing the database again. You may have to create a backup first.
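A rough sketch of that clean-up, assuming the stack and the db-data volume from the compose file above (back up anything you still need before removing the volume):
docker-compose down                  # stop the stack
docker volume ls                     # find the volume, named <project>_db-data
docker volume rm <project>_db-data   # <project> is your compose project name
docker-compose up -d db              # recreate the volume, then re-import your dump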

Docker file wait for postgres container

Hello, I have the following error in my Node project:
(node:51) UnhandledPromiseRejectionWarning: Error: getaddrinfo ENOTFOUND ${DB_HOST}
I think the problem is that my Postgres container has not started yet when my project starts, and I can't come up with a way to start my container only after Postgres is ready. I read something about dockerize, but I can't picture how to apply it.
My Dockerfile:
FROM node:lts-alpine
RUN mkdir -p /home/node/api/node_modules && chown -R node:node /home/node/api
WORKDIR /home/node/api
COPY ormconfig.json .env package.json yarn.* ./
USER node
RUN yarn
COPY --chown=node:node . .
EXPOSE 4000
CMD ["yarn", "dev"]
My docker-compose.yml:
version: '3.7'
services:
  ci-api:
    build: .
    container_name: ci-api
    volumes:
      - .:/home/node/api
      - /home/node/api/node_modules
    ports:
      - '${SERVER_PORT}:${SERVER_PORT}'
    depends_on:
      - ci-postgres
    networks:
      - ci-network
  ci-postgres:
    image: postgres:12
    container_name: ci-postgres
    ports:
      - '${DB_PORT}:5432'
    environment:
      - ALLOW_EMPTY_PASSWORD=no
      - POSTGRES_USER=${DB_USER}
      - POSTGRES_PASSWORD=${DB_PASS}
      - POSTGRES_DB=${DB_NAME}
    volumes:
      - ci-postgres-data:/data
    networks:
      - ci-network
volumes:
  ci-postgres-data:
networks:
  ci-network:
    driver: bridge
And this is my .env:
SERVER_PORT=4000
DB_HOST=ci-postgres
DB_PORT=5432
DB_USER=spirit
DB_PASS=api
DB_NAME=emasa_ci
You can use the docker-compose.yml below as a reference; it adds depends_on, healthcheck and links so that the webapp service waits for the postgres service to be healthy.
Reference:
Postgresql Container is not running in docker-compose file - Why is this?
version: "3"
services:
webapp:
build: .
container_name: webapp
ports:
- "5000:5000"
links:
- postgres
depends_on:
postgres:
condition: service_healthy
postgres:
image: postgres:11-alpine
container_name: postgres
ports:
- "5432:5432"
environment:
- POSTGRES_DB=tmp
- POSTGRES_USER=tmp
- POSTGRES_PASSWORD=tmp_password
volumes: # Persist the db data
- database-data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 10s
timeout: 5s
retries: 5
volumes:
database-data:
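Applied to your own compose file, a sketch of the services: section could look like the fragment below. It assumes your ci-postgres service and the DB_* variables from your .env, and note that the condition: form of depends_on is not honoured by every Compose version:
services:
  ci-api:
    # ... existing settings ...
    depends_on:
      ci-postgres:
        condition: service_healthy
  ci-postgres:
    # ... existing settings ...
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U ${DB_USER} -d ${DB_NAME}"]
      interval: 10s
      timeout: 5s
      retries: 5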

Save Postgres Data to Directory in Docker Named Volume

Problem
I have an application with Postgres. I want to be able to back up the initial database data so that I don't have to re-enter it on each deployment. However, despite having a named volume set up in my compose file, nothing is written to the directory I expect.
What I'm not sure of is how to have Postgres save its data into the directory associated with the volume. I'm also not sure exactly how to associate a directory with the named volume. What I want is for the Docker host to be able to see the Postgres data in the directory associated with the named volume.
Could someone please provide an explanation and some examples of how to handle this? Right now, even though the volume is attached to the db service in the compose file, no data is written to the database_volume/ directory. This is what I would like to address.
Code
Here's my Dockerfile:
FROM python:3.6
ARG requirements=requirements/production.txt
ENV DJANGO_SETTINGS_MODULE=sasite.settings.production_test
WORKDIR /app
COPY manage.py /app/
COPY requirements/ /app/requirements/
RUN pip install -r $requirements
COPY config config
COPY sasite sasite
COPY templates templates
COPY logs logs
ADD /scripts/docker-entrypoint.sh /docker-entrypoint.sh
RUN chmod a+x /docker-entrypoint.sh
EXPOSE 8001
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["/usr/local/bin/gunicorn", "--config", "config/gunicorn.conf", "--log-config", "config/logging.conf", "-e", "DJANGO_SETTINGS_MODULE=sasite.settings.production_test", "-w", "4", "-b", "0.0.0.0:8001", "sasite.wsgi:application"]
And my docker-compose.yml:
version: "3.2"
services:
app:
restart: always
build:
context: .
dockerfile: Dockerfile.prodtest
args:
requirements: requirements/production.txt
container_name: dj01
environment:
- DJANGO_SETTINGS_MODULE=sasite.settings.production_test
- PYTHONDONTWRITEBYTECODE=1
volumes:
- ./:/app
- /static:/static
- /media:/media
networks:
- main
depends_on:
- db
db:
restart: always
image: postgres:10.1-alpine
container_name: ps01
environment:
POSTGRES_DB: sasite_db
POSTGRES_USER: pguser
POSTGRES_PASSWORD: pguser123
ports:
- "5432:5432"
volumes:
- database_volume:/var/lib/postgresql/data
networks:
- main
nginx:
restart: always
image: nginx
container_name: ng01
volumes:
- ./config/nginx-prodtest.conf:/etc/nginx/conf.d/default.conf:ro
- ./static:/usr/share/nginx/sasite/static
- ./media:/usr/share/nginx/sasite/media
ports:
- "80:80"
- "443:443"
networks:
- main
depends_on:
- app
networks:
main:
volumes:
database_volume:
driver_opts:
type: none
device: ./database_volume
o: bind
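If the intent is to bind the named volume to a host directory, note that with driver_opts the local driver generally expects device to be an absolute path to a directory that already exists on the host; a sketch under that assumption (the /home/user/project path is only a placeholder):
volumes:
  database_volume:
    driver: local
    driver_opts:
      type: none
      o: bind
      # this directory must exist on the host before docker-compose up
      device: /home/user/project/database_volume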