I want to pass runtime arguments (as in docker run ... --arg=x) inside docker-compose.yml:
version: "2.8"
services:
atlantis:
build:
context: ./
dockerfile: Dockerfile
environment:
gitlab-hostname: "X"
gitlab-webhook-secret: "X"
gitlab-user: "X"
gitlab-token: "X"
repo-allowlist: "X"
ports:
- "4141:8081"
image: "runatlantis/atlantis"
It seems that it doesn't work:
atlantis_1 | Error: --gh-user/--gh-token or --gh-app-id/--gh-app-key-file or --gitlab-user/--gitlab-token or --bitbucket-user/--bitbucket-token or --azuredevops-user/--azuredevops-token must be set
Any ideas?
Use command instead of environment:
version: "3"
services:
atlantis:
build:
context: ./
dockerfile: Dockerfile
ports:
- "4141:8081"
image: "runatlantis/atlantis-custom"
command:
- server
- --gitlab-hostname="X"
- --gitlab-webhook-secret="X"
- --gitlab-user="X"
- --gitlab-token="X"
- --repo-allowlist="X"
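As an aside: if you prefer to keep environment:, Atlantis documents that every server flag can also be supplied as an environment variable, uppercased, with dashes replaced by underscores, and prefixed with ATLANTIS_. A sketch of that variant (variable names derived from that convention, not copied from a working file):

environment:
  # hypothetical names following the documented flag-to-env-var convention
  ATLANTIS_GITLAB_HOSTNAME: "X"
  ATLANTIS_GITLAB_WEBHOOK_SECRET: "X"
  ATLANTIS_GITLAB_USER: "X"
  ATLANTIS_GITLAB_TOKEN: "X"
  ATLANTIS_REPO_ALLOWLIST: "X"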
I have an ASP.NET Core application with a docker-compose.yml configuration and a Docker section in launchSettings.json.
The connection string is configured in docker-compose.yml as shown below:
PersistenceAccess__ConnectionString= Server=db;Port=5432;Database=DemoDatabase;User Id=postgres;Password=postgres;
version: '3.4'
services:
  Demo.api:
    image: ${DOCKER_REGISTRY-}demoapi
    build:
      context: .
      dockerfile: Sources/Code/Demo.Api/Dockerfile
    environment:
      - ASPNETCORE_ENVIRONMENT=Development
      - PersistenceAccess__ConnectionString= Server=db;Port=5432;Database=DemoDatabase;User Id=postgres;Password=postgres;
    ports:
      - '8081:80'
    depends_on:
      - db
  db:
    image: postgres
    restart: always
    environment:
      - POSTGRES_USER=postgres
      - POSTGRES_PASSWORD=postgres
    logging:
      options:
        max-size: 10m
        max-file: "3"
    ports:
      - '5438:5432'
    volumes:
      - ./postgres-data:/var/lib/postgresql/data
      # copy the sql script to create tables
      - ./sql/create_tables.sql:/docker-entrypoint-initdb.d/create_tables.sql
      # copy the sql script to fill tables
      - ./sql/fill_tables.sql:/docker-entrypoint-initdb.d/fill_tables.sql
The same connection string is configured in launchSettings.json as shown below:
"PersistenceAccess__ConnectionString": "Server=host.docker.internal;Port=5432;Database=DemoDatabaseNew;User Id=postgres;Password=postgres;"
{
  "iisSettings": {
    ..
  },
  "profiles": {
    "IIS Express": {
      ...
    },
    "Docker": {
      "commandName": "Docker",
      "launchBrowser": true,
      "launchUrl": "{Scheme}://{ServiceHost}:{ServicePort}",
      "environmentVariables": {
        "PersistenceAccess__ConnectionString": "Server=host.docker.internal;Port=5432;Database=DemoDatabaseNew;User Id=postgres;Password=postgres;"
      },
      "DockerfileRunArguments": "--add-host host.docker.internal:host-gateway",
      "publishAllPorts": true,
      "useSSL": false
    }
  }
}
Which configuration will be used when I run the application using
docker-compose -f docker-compose.yml up
Does the above command create a database? If so, when will it create the database, and what will its name be? Also, when will it create the tables and seed the data?
Please suggest.
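For what it's worth: docker-compose -f docker-compose.yml up does not read launchSettings.json at all (only Visual Studio's Docker tooling does), so the compose values apply. As for the database, the official postgres image creates the database named by POSTGRES_DB (defaulting to POSTGRES_USER, i.e. postgres here) and runs the scripts in /docker-entrypoint-initdb.d only on the first start against an empty data directory. A sketch of the db service that would create DemoDatabase to match the connection string above, assuming that is the intent:

db:
  image: postgres
  restart: always
  environment:
    - POSTGRES_USER=postgres
    - POSTGRES_PASSWORD=postgres
    # assumed addition: without this, the initial database is named
    # "postgres" (after POSTGRES_USER), not "DemoDatabase"
    - POSTGRES_DB=DemoDatabase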
I'm running Airflow inside Docker containers, pulling the image (puckel/docker-airflow:latest) from Docker Hub. I can access the Airflow UI at localhost:8080, but the DAG doesn't execute, and I get the error mentioned in the subject above. I'm even adding pip commands to install apache-airflow in my Dockerfile. Here is how my Dockerfile, docker-compose.yml, and dag.py look:
Dockerfile:
FROM puckel/docker-airflow:latest
RUN pip install requests
RUN pip install pandas
RUN pip install 'apache-airflow'
docker-compose.yml:
version: '3.7'
services:
  redis:
    image: redis:5.0.5
    environment:
      REDIS_HOST: redis
      REDIS_PORT: 6379
    ports:
      - 6379:6379
  postgres:
    image: postgres:9.6
    environment:
      - POSTGRES_USER=airflow
      - POSTGRES_PASSWORD=airflow
      - POSTGRES_DB=airflow
      - PGDATA=/var/lib/postgresql/data/pgdata
    volumes:
      - ./pgdata:/var/lib/postgresql/data/pgdata
    logging:
      options:
        max-size: 10m
        max-file: "3"
  webserver:
    build: ./dockerfiles
    restart: always
    depends_on:
      - postgres
      - redis
    environment:
      - LOAD_EX=n
      - FERNET_KEY=46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=
      - EXECUTOR=Celery
    logging:
      options:
        max-size: 10m
        max-file: "3"
    volumes:
      - ./dags:/usr/local/airflow/dags
      - ./config/airflow.cfg:/usr/local/airflow/airflow.cfg
    ports:
      - "8080:8080"
    command: webserver
    healthcheck:
      test: ["CMD-SHELL", "[ -f /usr/local/airflow/airflow-webserver.pid ]"]
      interval: 30s
      timeout: 30s
      retries: 3
  flower:
    build: ./dockerfiles
    restart: always
    depends_on:
      - redis
    environment:
      - EXECUTOR=Celery
    ports:
      - "5555:5555"
    command: flower
  scheduler:
    build: ./dockerfiles
    restart: always
    depends_on:
      - webserver
    volumes:
      - ./dags:/usr/local/airflow/dags
    environment:
      - LOAD_EX=n
      - FERNET_KEY=46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=
      - EXECUTOR=Celery
    command: scheduler
  worker:
    build: ./dockerfiles
    restart: always
    depends_on:
      - scheduler
    volumes:
      - ./dags:/usr/local/airflow/dags
    environment:
      - FERNET_KEY=46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=
      - EXECUTOR=Celery
    command: worker
dag.py:
from airflow import DAG
from airflow.operators.subdag import SubDagOperator
from airflow.operators.python import PythonOperator, BranchPythonOperator
from airflow.operators.bash import BashOperator
from datetime import datetime
from random import randint


def _choosing_best_model(ti):
    accuracies = ti.xcom_pull(task_ids=[
        'training_model_A',
        'training_model_B',
        'training_model_C'
    ])
    if max(accuracies) > 8:
        return 'accurate'
    return 'inaccurate'


def _training_model(model):
    return randint(1, 10)


with DAG("test",
         start_date=datetime(2021, 1, 1),
         schedule_interval='@daily',
         catchup=False) as dag:

    training_model_tasks = [
        PythonOperator(
            task_id=f"training_model_{model_id}",
            python_callable=_training_model,
            op_kwargs={
                "model": model_id
            }
        ) for model_id in ['A', 'B', 'C']
    ]

    choosing_best_model = BranchPythonOperator(
        task_id="choosing_best_model",
        python_callable=_choosing_best_model
    )

    accurate = BashOperator(
        task_id="accurate",
        bash_command="echo 'accurate'"
    )

    inaccurate = BashOperator(
        task_id="inaccurate",
        bash_command="echo 'inaccurate'"
    )

    training_model_tasks >> choosing_best_model >> [accurate, inaccurate]
Am I missing anything here? Please let me know if you can. Thanks :)
I am receiving the following error when trying to call a service using the Dapr SDK:
System.Net.Http.HttpRequestException: Connection refused (127.0.0.1:3500)
---> System.Net.Sockets.SocketException (111): Connection refused
Here are my docker-compose settings for the service I am trying to call:
quest-service:
  image: ${DOCKER_REGISTRY-gamification}/quest-service:${TAG:-latest}
  environment:
    - ASPNETCORE_ENVIRONMENT=Development
    - ASPNETCORE_URLS=http://0.0.0.0:80
    - SeqServerUrl=http://seq
  build:
    context: .
    dockerfile: Services/LW.Gamification.QuestService/Dockerfile
  ports:
    - "5110:80"
    - "50010:50001"

quest-service-dapr:
  image: "daprio/daprd:latest"
  command: ["./daprd",
    "-app-id", "Quest-Service",
    "-app-port", "80",
    "-components-path", "/Components",
    "-config", "/Configuration/config.yaml"
  ]
  volumes:
    - "./Dapr/Components/:/Components"
    - "./Dapr/Configuration/:/Configuration"
  depends_on:
    - quest-service
  network_mode: "service:quest-service"
And the settings for the caller:
player-service:
  image: ${DOCKER_REGISTRY-gamification}/player-service:${TAG:-latest}
  environment:
    - ASPNETCORE_ENVIRONMENT=Development
    - ASPNETCORE_URLS=http://0.0.0.0:80
    - SeqServerUrl=http://seq
  build:
    context: .
    dockerfile: Services/LW.Gamificaiton.PlayerService/Dockerfile
  ports:
    - "5109:80"
    - "50009:50001"

player-service-dapr:
  image: "daprio/daprd:latest"
  command: ["./daprd",
    "-app-id", "Player-Service",
    "-app-port", "80",
    "-components-path", "/Components",
    "-config", "/Configuration/config.yaml"
  ]
  volumes:
    - "./Dapr/Components/:/Components"
    - "./Dapr/Configuration/:/Configuration"
  depends_on:
    - player-service
  network_mode: "service:player-service"
And here is the code that is failing:
// demo service to service call
var httpClient = DaprClient.CreateInvokeHttpClient("Quest-Service");
var requestUri = $"api/v1/Quest";
var result = await httpClient.GetFromJsonAsync<IEnumerable<string>>(requestUri);
Note: Messaging is working fine. :-)
I am new to Dapr, so I must be doing something silly, maybe something to do with ports. I just don't know!
Following this question: Dapr Client Docker Compose Issue, I managed to get this partly working using the following docker-compose config:
services:
  placement:
    image: "daprio/dapr"
    command: ["./placement", "-port", "50000", "-log-level", "debug"]
    ports:
      - "50000:50000"
  quest-service:
    image: ${DOCKER_REGISTRY-gamification}/quest-service:${TAG:-latest}
    environment:
      - ASPNETCORE_ENVIRONMENT=Development
      - ASPNETCORE_URLS=http://0.0.0.0:80
      - SeqServerUrl=http://seq
      - DAPR_GRPC_PORT=50010
    build:
      context: .
      dockerfile: Services/LW.Gamification.QuestService/Dockerfile
    ports:
      - "5110:80"
      - "50010:50010"
    depends_on:
      - placement
      - rabbitmq
      - redis
      - seq
      - zipkin
  quest-service-dapr:
    image: "daprio/daprd:latest"
    command: ["./daprd",
      "-app-id", "Quest-Service",
      "-app-port", "80",
      "-placement-host-address", "placement:50000",
      "-dapr-grpc-port", "50010",
      "-components-path", "/Components",
      "-config", "/Configuration/config.yaml"
    ]
    volumes:
      - "./Dapr/Components/:/Components"
      - "./Dapr/Configuration/:/Configuration"
    depends_on:
      - quest-service
    network_mode: "service:quest-service"
  generatetraffic:
    image: ${DOCKER_REGISTRY-gamification}/generatetraffic:${TAG:-latest}
    environment:
      - ASPNETCORE_ENVIRONMENT=Development
      - ASPNETCORE_URLS=http://0.0.0.0:80
      - SeqServerUrl=http://seq
      - DAPR_GRPC_PORT=50017
    build:
      context: .
      dockerfile: Services/LW.Gamification.GenerateTraffic/Dockerfile
    ports:
      - "5117:80"
      - "50017:50017"
    depends_on:
      - placement
      - rabbitmq
      - redis
      - seq
      - zipkin
  generatetraffic-dapr:
    image: "daprio/daprd:latest"
    command: ["./daprd",
      "-app-id", "Generate-Traffic",
      "-app-port", "80",
      "-placement-host-address", "placement:50000",
      "-dapr-grpc-port", "50017",
      "-components-path", "/Components",
      "-config", "/Configuration/config.yaml"
    ]
    volumes:
      - "./Dapr/Components/:/Components"
      - "./Dapr/Configuration/:/Configuration"
    depends_on:
      - generatetraffic
    network_mode: "service:generatetraffic"
However, I still have issues with some of the documented APIs not working.
var httpClient = DaprClient.CreateInvokeHttpClient("Quest-Service");
var requestUri = $"api/v1/Quest";
var result = await httpClient.GetAsync(requestUri);
Still fails?
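Comparing this against the first set of snippets: only the quest-service and generatetraffic sidecars got the -placement-host-address and -dapr-grpc-port changes here. If player-service is still the caller, its pair presumably needs the same treatment. A sketch, reusing the port numbers from the question (50009 is an assumption carried over from its original mapping):

player-service:
  image: ${DOCKER_REGISTRY-gamification}/player-service:${TAG:-latest}
  environment:
    - ASPNETCORE_ENVIRONMENT=Development
    - ASPNETCORE_URLS=http://0.0.0.0:80
    - SeqServerUrl=http://seq
    # assumed: tells the Dapr SDK which port its sidecar listens on
    - DAPR_GRPC_PORT=50009
  build:
    context: .
    dockerfile: Services/LW.Gamificaiton.PlayerService/Dockerfile
  ports:
    - "5109:80"
    - "50009:50009"
  depends_on:
    - placement

player-service-dapr:
  image: "daprio/daprd:latest"
  command: ["./daprd",
    "-app-id", "Player-Service",
    "-app-port", "80",
    "-placement-host-address", "placement:50000",
    "-dapr-grpc-port", "50009",
    "-components-path", "/Components",
    "-config", "/Configuration/config.yaml"
  ]
  volumes:
    - "./Dapr/Components/:/Components"
    - "./Dapr/Configuration/:/Configuration"
  depends_on:
    - player-service
  network_mode: "service:player-service"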
I'm getting the following error when dockerizing a Node/Postgres app that uses Sequelize as the ORM backend:

Unhandled rejection SequelizeConnectionRefusedError: connect ECONNREFUSED 127.0.0.1:5432
app_1 | at connection.connect.err (/home/app/node_modules/sequelize/lib/dialects/postgres/connection-manager.js:170:24)

These lines of code seem to be the culprit; Docker should not be using these credentials, as they are for my local setup:
if (process.env.NODE_ENV === "production") {
  var sequelize = new Sequelize(process.env.DATABASE_URL);
} else {
  // docker is looking at these credentials..... when it should not
  var sequelize = new Sequelize("elifullstack", "eli", "", {
    host: "127.0.0.1",
    dialect: "postgres",
    pool: {
      max: 100,
      min: 0,
      idle: 200000,
      // #note https://github.com/sequelize/sequelize/issues/8133#issuecomment-359993057
      acquire: 1000000,
    },
  });
}
docker-compose.yml
# docker-compose.yml
version: "3"
services:
  app:
    build: ./server
    depends_on:
      - database
    ports:
      - 5000:5000
    environment:
      # database refers to the database server at the bottom called "database"
      - PSQL_HOST=database
      - PSQL_USER=postgres
      - PORT=5000
      - PSQL_NAME=elitypescript
    command: npm run server
  client:
    build: ./client
    image: react_client
    links:
      - app
    working_dir: /home/node/app/client
    volumes:
      - ./:/home/node/app
    ports:
      - 3001:3001
    command: npm run start
    env_file:
      - ./client/.env
  database:
    image: postgres:9.6.8-alpine
    volumes:
      - database:/var/lib/postgresql/data
    ports:
      - 3030:5432
volumes:
  database:
./server/Dockerfile
FROM node:10.6.0
COPY . /home/app
WORKDIR /home/app
COPY package.json ./
RUN npm install
EXPOSE 5000
I looked at other similar questions like the following, but they ultimately did not help solve the issue:
Docker - SequelizeConnectionRefusedError: connect ECONNREFUSED 127.0.0.1:3306
SequelizeConnectionRefusedError: connect ECONNREFUSED 127.0.0.1:3306
I solved it...
What I did was change this
host: "127.0.0.1",
to this
let sequelize;

if (process.env.NODE_ENV === "production") {
  sequelize = new Sequelize(process.env.DATABASE_URL);
} else {
  sequelize = new Sequelize(
    process.env.POSTGRES_DB || "elitypescript",
    process.env.POSTGRES_USER || "eli",
    "",
    {
      host: process.env.PSQL_HOST || "localhost",
      dialect: "postgres",
      pool: {
        max: 100,
        min: 0,
        idle: 200000,
        // #note https://github.com/sequelize/sequelize/issues/8133#issuecomment-359993057
        acquire: 1000000,
      },
    }
  );
}
That way, the host is set from the Docker environment variable:
PSQL_HOST: database
which connects to the database service:
database:
  image: postgres:9.6.8-alpine
  volumes:
    - database:/var/lib/postgresql/data
  ports:
    - 3030:5432
Edit
# docker-compose.yml
version: "3"
services:
  app:
    build: ./server
    depends_on:
      - database
    ports:
      - 5000:5000
    environment:
      PSQL_HOST: database
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-password}
      POSTGRES_USER: ${POSTGRES_USER:-postgres}
      POSTGRES_DB: ${POSTGRES_DB:-elitypescript}
    command: npm run server
  client:
    build: ./client
    image: react_client
    links:
      - app
    working_dir: /home/node/app/client
    volumes:
      - ./:/home/node/app
    ports:
      - 3001:3001
    command: npm run start
    env_file:
      - ./client/.env
  database:
    image: postgres:9.6.8-alpine
    volumes:
      - database:/var/lib/postgresql/data
    ports:
      - 3030:5432
volumes:
  database:
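One caveat worth flagging with this edit: the POSTGRES_* variables above are passed only to the app service; the postgres container itself never sees them, so it still initializes with its stock defaults (user postgres, database postgres). A sketch of the database service carrying the same variables so both sides line up (an assumption on my part, not something from the original setup):

database:
  image: postgres:9.6.8-alpine
  environment:
    # same defaults as the app service, so the credentials match
    POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-password}
    POSTGRES_USER: ${POSTGRES_USER:-postgres}
    POSTGRES_DB: ${POSTGRES_DB:-elitypescript}
  volumes:
    - database:/var/lib/postgresql/data
  ports:
    - 3030:5432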
I'm starting a Spring Boot app and DynamoDB Local in Docker containers via docker-compose.
Both containers come up successfully.
When I use the container name for the AMAZON_AWS_DYNAMODB_ENDPOINT value, I get the following error:
[https-jsse-nio-8443-exec-6] [2019-04-15 08:03:42,239] INFO com.amazonaws.protocol.json.JsonContent [] - Unable to parse HTTP response content
com.fasterxml.jackson.core.JsonParseException: Unexpected character ('<' (code 60)): expected a valid value (number, String, array, object, 'true', 'false' or 'null')
at [Source: (byte[])"<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML 2.0//EN">
<html><head>
<title>301 Moved Permanently</title>
</head><body>
<h1>Moved Permanently</h1>
<p>The document has moved here.</p>
</body></html>
Further down I'm getting the following error:
com.amazonaws.services.dynamodbv2.model.AmazonDynamoDBException: null (Service: AmazonDynamoDBv2; Status Code: 301; Error Code: null; Request ID: null)
If I replace the AMAZON_AWS_DYNAMODB_ENDPOINT value with the IP address of my Windows computer (which runs the containers), it works.
Any suggestions on how to get the container name working?
Here's my docker-compose:
version: '3'
services:
  dynamodb:
    image: amazon/dynamodb-local
    ports:
      - "8000:8000"
    volumes:
      - dynamodata:/data
    command: "-jar DynamoDBLocal.jar -sharedDb -dbPath ."
  app:
    build: .
    ports:
      - "8443:8443"
    environment:
      - SERVER_PORT=8443
      - SERVER_SSL_KEY_STORE=/etc/ssl/key
      - SERVER_SSL_KEY_STORE_TYPE=PKCS12
      - SERVER_SSL_KEY_ALIAS=tomcat
      - SERVER_SSL_KEY_STORE_PASSWORD=xxxxxx
      - SPRING_PROFILES_ACTIVE=aws,local
      - DATAPOWER_ENABLED=true
      # - AMAZON_AWS_DYNAMODB_ENDPOINT=${DYNAMODB_ENDPOINT:-http://dynamodb:8000} <--- does not work
      # - AMAZON_AWS_DYNAMODB_ENDPOINT=${DYNAMODB_ENDPOINT:-http://xx.xxx.xxx.xxx:8000} <--- works
      - AMAZON_AWS_DYNAMODB_REGION=${DYNAMODB_REGION:-us-east-1}
      - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:-local}
      - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:-xxxxxxxxxx}
      - ENV=dev
      - AWS_REGION=us-east-1
volumes:
  dynamodata:
Thanks
Try adding networks, something like this:
version: '3'
services:
  dynamodb:
    image: amazon/dynamodb-local
    ports:
      - "8000:8000"
    volumes:
      - dynamodata:/data
    command: "-jar DynamoDBLocal.jar -sharedDb -dbPath ."
    networks:
      - my-network
  app:
    build: .
    ports:
      - "8443:8443"
    environment:
      - SERVER_PORT=8443
      - SERVER_SSL_KEY_STORE=/etc/ssl/key
      - SERVER_SSL_KEY_STORE_TYPE=PKCS12
      - SERVER_SSL_KEY_ALIAS=tomcat
      - SERVER_SSL_KEY_STORE_PASSWORD=xxxxxx
      - SPRING_PROFILES_ACTIVE=aws,local
      - DATAPOWER_ENABLED=true
      # - AMAZON_AWS_DYNAMODB_ENDPOINT=${DYNAMODB_ENDPOINT:-http://dynamodb:8000} <--- does not work
      # - AMAZON_AWS_DYNAMODB_ENDPOINT=${DYNAMODB_ENDPOINT:-http://xx.xxx.xxx.xxx:8000} <--- works
      - AMAZON_AWS_DYNAMODB_REGION=${DYNAMODB_REGION:-us-east-1}
      - AWS_ACCESS_KEY_ID=${AWS_ACCESS_KEY_ID:-local}
      - AWS_SECRET_ACCESS_KEY=${AWS_SECRET_ACCESS_KEY:-xxxxxxxxxx}
      - ENV=dev
      - AWS_REGION=us-east-1
    networks:
      - my-network
volumes:
  dynamodata:
networks:
  my-network:
    driver: bridge
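As a follow-up, once both services share my-network, the dynamodb service name becomes resolvable from the app container, so the endpoint line that was commented out in the question should presumably work when re-enabled:

# inside the app service's environment list
- AMAZON_AWS_DYNAMODB_ENDPOINT=${DYNAMODB_ENDPOINT:-http://dynamodb:8000}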