Run tasks of the same DAG on different servers - docker-compose

I need to run the following DAG:
from airflow import DAG
from airflow.operators.bash_operator import BashOperator

dag = DAG('dummy_for_testing',
          default_args=default_args,
          schedule_interval=None)

t1 = BashOperator(
    task_id='print_date',
    bash_command='date',
    dag=dag)

t2 = BashOperator(
    task_id='print_host',
    bash_command='hostname',
    queue='druid_queue',
    dag=dag)

t3 = BashOperator(
    task_id='print_directory',
    bash_command='pwd',
    dag=dag)

t3.set_upstream(t2)
t2.set_upstream(t1)
where t1 and t3 run on server A and t2 runs on server B (queue=druid_queue).
I am currently using puckel/docker-airflow to set up Airflow. The yml files for the servers look like this:
Server1
version: '2.1'
services:
  redis:
    image: 'redis:3.2.7'
    ports:
      - "10.0.11.4:6999:6379"
    command: redis-server

  postgres:
    image: postgres:9.6
    container_name: postgres-airflow
    ports:
      - "10.0.11.4:5434:5432"
    environment:
      - POSTGRES_USER=airflow
      - POSTGRES_PASSWORD=airflow
      - POSTGRES_DB=airflow

  webserver:
    image: puckel/docker-airflow:1.10.2
    container_name: airflow
    restart: always
    depends_on:
      - postgres
      - redis
    environment:
      - LOAD_EX=n
      - FERNET_KEY=<>
      - EXECUTOR=Celery
      - user_logs_config_loc=dags/user_logs/configurations/
      - POSTGRES_USER=airflow
      - POSTGRES_PASSWORD=airflow
      - POSTGRES_DB=airflow
    volumes:
      - /data/druid-data/airflow/dags:/usr/local/airflow/dags
      - /var/run/docker.sock:/var/run/docker.sock
    ports:
      - "10.0.11.4:8085:8080"
    command: webserver
    healthcheck:
      test: ["CMD-SHELL", "[ -f /usr/local/airflow/airflow-webserver.pid ]"]
      interval: 30s

  flower:
    image: puckel/docker-airflow:1.10.2
    restart: always
    depends_on:
      - redis
    environment:
      - EXECUTOR=Celery
    ports:
      - "5555:5555"
    command: flower

  scheduler:
    image: puckel/docker-airflow:1.10.2
    restart: always
    depends_on:
      - webserver
    volumes:
      - /data/druid-data/airflow/dags:/usr/local/airflow/dags
    environment:
      - LOAD_EX=n
      - FERNET_KEY=<>
      - EXECUTOR=Celery
      - POSTGRES_USER=airflow
      - POSTGRES_PASSWORD=airflow
      - POSTGRES_DB=airflow
      - user_logs_config_loc=dags/user_logs/configurations/
    command: scheduler

  worker:
    image: puckel/docker-airflow:1.10.2
    restart: always
    depends_on:
      - scheduler
    volumes:
      - /data/druid-data/airflow/dags:/usr/local/airflow/dags
    environment:
      - FERNET_KEY=<>
      - EXECUTOR=Celery
      - POSTGRES_USER=airflow
      - POSTGRES_PASSWORD=airflow
      - POSTGRES_DB=airflow
      - user_logs_config_loc=dags/user_logs/configurations/
    command: worker
Server2
version: '2.1'
services:
  redis:
    image: 'redis:3.2.7'
    ports:
      - "10.0.11.5:6999:6379"
    command: redis-server

  postgres:
    image: postgres:9.6
    container_name: postgres-airflow
    ports:
      - "10.0.11.5:5434:5432"
    environment:
      - POSTGRES_USER=airflow
      - POSTGRES_PASSWORD=airflow
      - POSTGRES_DB=airflow

  webserver:
    image: puckel/docker-airflow:latest
    container_name: airflow
    restart: always
    depends_on:
      - postgres
      - redis
    environment:
      - LOAD_EX=n
      - FERNET_KEY=<>
      - EXECUTOR=Celery
      - user_logs_config_loc=dags/user_logs/configurations/
      - POSTGRES_USER=airflow
      - POSTGRES_PASSWORD=airflow
      - POSTGRES_DB=airflow
    volumes:
      - /data/qa/druid-data/airflow/dags:/usr/local/airflow/dags
      - /var/run/docker.sock:/var/run/docker.sock
    ports:
      - "10.0.11.5:8085:8080"
    command: webserver
    healthcheck:
      test: ["CMD-SHELL", "[ -f /usr/local/airflow/airflow-webserver.pid ]"]
      interval: 30s

  flower:
    image: puckel/docker-airflow:1.10.2
    restart: always
    depends_on:
      - redis
    environment:
      - EXECUTOR=Celery
    ports:
      - "5555:5555"
    command: flower

  scheduler:
    image: puckel/docker-airflow:1.10.2
    restart: always
    depends_on:
      - webserver
    volumes:
      - ./dags:/usr/local/airflow/dags
      - /data/qa/druid-data/airflow/dags:/usr/local/airflow/dags
    environment:
      - LOAD_EX=n
      - FERNET_KEY=<>
      - EXECUTOR=Celery
      - POSTGRES_USER=airflow
      - POSTGRES_PASSWORD=airflow
      - POSTGRES_DB=airflow
    command: scheduler

  worker:
    image: puckel/docker-airflow:1.10.2
    restart: always
    depends_on:
      - scheduler
    volumes:
      - ./dags:/usr/local/airflow/dags
      - /data/qa/druid-data/airflow/dags:/usr/local/airflow/dags
    environment:
      - FERNET_KEY=<>
      - EXECUTOR=Celery
      - POSTGRES_USER=airflow
      - POSTGRES_PASSWORD=airflow
      - POSTGRES_DB=airflow
    command: worker -q druid_queue
The variables in server 1 look like:
broker_url=redis://redis:6379/1
result_backend=db+postgresql://airflow:airflow@postgres:5432/airflow
The variables in server 2 look like:
broker_url=redis://10.0.11.4:6999/1
result_backend=db+postgresql://airflow:airflow@10.0.11.4:5434/airflow
Is there something wrong with my configuration?
When I trigger the DAG from the webserver of server A, the DAG gets stuck.
The logs captured in the scheduler container of server A:
[2019-04-12 14:42:35,184] {{jobs.py:1215}} INFO - Setting the follow tasks to queued state:
<TaskInstance: dummy_for_testing.print_date 2019-04-12 14:42:33.552786+00:00 [scheduled]>
[2019-04-12 14:42:35,194] {{jobs.py:1299}} INFO - Setting the following 1 tasks to queued state:
<TaskInstance: dummy_for_testing.print_date 2019-04-12 14:42:33.552786+00:00 [queued]>
[2019-04-12 14:42:35,194] {{jobs.py:1341}} INFO - Sending ('dummy_for_testing', 'print_date', datetime.datetime(2019, 4, 12, 14, 42, 33, 552786, tzinfo=<TimezoneInfo [UTC, GMT, +00:00:00, STD]>), 1) to executor with priority 3 and queue default
[2019-04-12 14:42:35,194] {{base_executor.py:56}} INFO - Adding to queue: airflow run dummy_for_testing print_date 2019-04-12T14:42:33.552786+00:00 --local -sd /usr/local/airflow/dags/dag_test.py
[2019-04-12 14:42:35,199] {{celery_executor.py:83}} INFO - [celery] queuing ('dummy_for_testing', 'print_date', datetime.datetime(2019, 4, 12, 14, 42, 33, 552786, tzinfo=<TimezoneInfo [UTC, GMT, +00:00:00, STD]>), 1) through celery, queue=default
[2019-04-12 14:42:37,152] {{jobs.py:1559}} INFO - Harvesting DAG parsing results
[2019-04-12 14:42:39,154] {{jobs.py:1559}} INFO - Harvesting DAG parsing results
[2019-04-12 14:42:40,610] {{sqlalchemy.py:79}} WARNING - DB connection invalidated. Reconnecting...
[2019-04-12 14:42:41,156] {{jobs.py:1559}} INFO - Harvesting DAG parsing results
[2019-04-12 14:42:41,179] {{jobs.py:1106}} INFO - 1 tasks up for execution:
<TaskInstance: dummy_for_testing.print_host 2019-04-12 14:42:33.552786+00:00 [scheduled]>
[2019-04-12 14:42:41,182] {{jobs.py:1141}} INFO - Figuring out tasks to run in Pool(name=None) with 128 open slots and 1 task instances in queue
[2019-04-12 14:42:41,184] {{jobs.py:1177}} INFO - DAG dummy_for_testing has 12/16 running and queued tasks
[2019-04-12 14:42:41,184] {{jobs.py:1215}} INFO - Setting the follow tasks to queued state:
<TaskInstance: dummy_for_testing.print_host 2019-04-12 14:42:33.552786+00:00 [scheduled]>
[2019-04-12 14:42:41,193] {{jobs.py:1299}} INFO - Setting the following 1 tasks to queued state:
<TaskInstance: dummy_for_testing.print_host 2019-04-12 14:42:33.552786+00:00 [queued]>
[2019-04-12 14:42:41,193] {{jobs.py:1341}} INFO - Sending ('dummy_for_testing', 'print_host', datetime.datetime(2019, 4, 12, 14, 42, 33, 552786, tzinfo=<TimezoneInfo [UTC, GMT, +00:00:00, STD]>), 1) to executor with priority 2 and queue druid_queue
[2019-04-12 14:42:41,194] {{base_executor.py:56}} INFO - Adding to queue: airflow run dummy_for_testing print_host 2019-04-12T14:42:33.552786+00:00 --local -sd /usr/local/airflow/dags/dag_test.py
[2019-04-12 14:42:41,198] {{celery_executor.py:83}} INFO - [celery] queuing ('dummy_for_testing', 'print_host', datetime.datetime(2019, 4, 12, 14, 42, 33, 552786, tzinfo=<TimezoneInfo [UTC, GMT, +00:00:00, STD]>), 1) through celery, queue=druid_queue
Server A config:
Server B config:
Server A Celery broker:

It appears that you are running the same docker-compose stack on two different servers, except that Server B starts its worker with the command worker -q druid_queue. Typically you want to run Airflow with only one scheduler, one database / result backend, and one message broker (Redis) shared across all servers, instead of running every service on every server.
Your compose file on the first server exposes redis at 10.0.1.4:6999, and below you noted that the broker_url on the second server is redis://10.0.11.4:6999/1. If the networking is set up properly, then it might be as simple as updating the broker_url to redis://10.0.1.4:6999/1 (note: 11 -> 1).
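To illustrate the single-scheduler layout, here is a minimal sketch of what Server B's compose file could shrink to: just a worker that subscribes to druid_queue and points back at Server A's broker and result backend. This assumes the puckel/docker-airflow entrypoint's REDIS_HOST/REDIS_PORT and POSTGRES_HOST/POSTGRES_PORT variables, and that FERNET_KEY is the same value as on Server A:

version: '2.1'
services:
  worker:
    image: puckel/docker-airflow:1.10.2
    restart: always
    volumes:
      - /data/qa/druid-data/airflow/dags:/usr/local/airflow/dags
    environment:
      - EXECUTOR=Celery
      - FERNET_KEY=<>            # must be the same key as on Server A
      - REDIS_HOST=10.0.11.4     # broker running on Server A
      - REDIS_PORT=6999
      - POSTGRES_HOST=10.0.11.4  # result backend running on Server A
      - POSTGRES_PORT=5434
      - POSTGRES_USER=airflow
      - POSTGRES_PASSWORD=airflow
      - POSTGRES_DB=airflow
    command: worker -q druid_queue

Both workers must also see the same DAG files, so the dags folder has to be synced (or shared) between the servers.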

Related

volume mounting clickhouse docker image to override config.xml to connect with tabix

clickhouse:
  build: ./db/clickhouse
  restart: unless-stopped
  volumes:
    # Store data to HDD
    - ./clickhouse-data:/var/lib/clickhouse/
    # Base Clickhouse cfg
    - ./clickhouse/config.xml:/etc/clickhouse-server/config.xml
    - ./clickhouse/users.xml:/etc/clickhouse-server/users.xml
  ports:
    - "8123:8123" # for http clients
    - "9000:9000" # for console client
  environment:
    - CLICKHOUSE_USER=oussema
    - CLICKHOUSE_PASSWORD=root
    - CLICKHOUSE_DB=DWH
    - CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1
  ulimits:
    nofile:
      soft: 262144
      hard: 262144

tabix:
  image: spoonest/clickhouse-tabix-web-client
  ports:
    - "8080:80"
  depends_on:
    - clickhouse
  restart: unless-stopped
  environment:
    - CH_NAME=clickhouse
    - CH_HOST=https://127.0.0.1:8123
    - CH_LOGIN=oussema
    - CH_PASSWORD=root
Here is a working example for testing purposes:
docker-compose.yml
version: "3.0"
services:
clickhouse:
image: yandex/clickhouse-server
ports:
- "8123:8123"
healthcheck:
test: wget --no-verbose --tries=1 --spider localhost:8123/ping || exit 1
interval: 2s
timeout: 2s
retries: 16
environment:
- CLICKHOUSE_USER=default
- CLICKHOUSE_PASSWORD=12345
tabix:
image: spoonest/clickhouse-tabix-web-client
ports:
- "8080:80"
depends_on:
- clickhouse
restart: unless-stopped
environment:
- CH_NAME=clickhouse
- CH_HOST=http://localhost:8123
- CH_LOGIN=default
- CH_PASSWORD=12345
To run it:
# run container
docker compose up
# browse the tabix endpoint
http://localhost:8080/

Clickhouse Client - Code: 62. DB::Exception: Empty query

I'm trying to run clickhouse-server and clickhouse-client services using Docker and Docker Compose. Based on the ClickHouse docker-compose file and another compose sample, I created the services in my docker-compose.yml file as you can see below:
docker-compose.yml:
ch_server:
  container_name: myapp_ch_server
  image: yandex/clickhouse-server
  ports:
    - "8181:8123"
    - "9000:9000"
    - "9009:9009"
  ulimits:
    nproc: 65535
    nofile:
      soft: 262144
      hard: 262144
  volumes:
    - ./ch_db_data:/var/lib/clickhouse/
    - ./ch_db_logs:/var/log/clickhouse-server/
  networks:
    - myapp-network

ch_client:
  container_name: myapp_ch_client
  image: yandex/clickhouse-client
  command: ['--host', 'ch_server']
  networks:
    - myapp-network
When I run the docker-compose up command, the following exception occurs in the clickhouse-client service:
myapp_ch_client | Code: 62. DB::Exception: Empty query
myapp_ch_client exited with code 62
Do you have any idea how to fix this error?
You just need to pass the SQL query in the command params:
version: "2.4"
services:
ch_server:
container_name: myapp_ch_server
image: yandex/clickhouse-server
ports:
- "8123:8123"
- "9000:9000"
- "9009:9009"
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
volumes:
- ./ch_db_data:/var/lib/clickhouse/
- ./ch_db_logs:/var/log/clickhouse-server/
networks:
- myapp-network
healthcheck:
test: wget --no-verbose --tries=1 --spider localhost:8123/ping || exit 1
interval: 2s
timeout: 2s
retries: 16
ch_client:
container_name: myapp_ch_client
image: yandex/clickhouse-client
command: ['--host', 'ch_server', '--query', 'select * from system.functions order by name limit 4']
networks:
- myapp-network
depends_on:
ch_server:
condition: service_healthy
networks:
myapp-network:
It doesn't make sense to define clickhouse-client in docker-compose; clickhouse-client is usually run outside of the docker-compose file:
Define a docker-compose file with the servers (such as ClickHouse (nodes of a cluster), Zookeeper, Apache Kafka, etc.). For example, consider a config with one ClickHouse node:
version: "2.4"
services:
ch_server:
container_name: myapp_ch_server
image: yandex/clickhouse-server
ports:
- "8123:8123"
- "9000:9000"
- "9009:9009"
ulimits:
nproc: 65535
nofile:
soft: 262144
hard: 262144
volumes:
- ./ch_db_data:/var/lib/clickhouse/
- ./ch_db_logs:/var/log/clickhouse-server/
networks:
- myapp-network
healthcheck:
test: wget --no-verbose --tries=1 --spider localhost:8123/ping || exit 1
interval: 2s
timeout: 2s
retries: 16
networks:
myapp-network:
In a separate terminal, run clickhouse-client:
cd _folder_where_docker-compose_located
docker-compose exec ch_server clickhouse-client
A 2021 version, following this tutorial: https://dev.to/titronium/clickhouse-server-in-1-minute-with-docker-4gf2
clickhouse-client:
  image: yandex/clickhouse-client:latest
  depends_on:
    - clickhouse-server
  links:
    - clickhouse-server
  entrypoint:
    - /bin/sleep
  command:
    - infinity
The last line, command: - infinity, means the container will wait forever for you to connect.
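With the client container idling like that, you can attach to it from another terminal and run queries, for example (service names taken from the snippet above):
docker-compose exec clickhouse-client clickhouse-client --host clickhouse-server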
Actually, you have access to a ClickHouse client on the command line of the ClickHouse server.
You can easily connect to your server container and call
clickhouse-client

container started from docker compose is unable to connect to another container

I have a docker-compose file that looks like this:
version: '3.7'

networks:
  iam_network:
    external:
      name: foundation_iam
  rdc_network:
    name: rdcstu3_net

services:
  rdcdeploy:
    restart: "no"
    container_name: rdcdeploy
    build:
      context: ./rdcdeploy
      args:
        - build_version
        - build_type
    image: rdcdeploy:$build_version
    volumes:
      - ./cfg:/cfg
    networks:
      - rdc_network

  rdcrabbitmq:
    restart: "no"
    container_name: rdcrabbitmq
    build:
      context: ./rabbitmq
      args:
        - build_version
        - build_type
    image: rdcrabbitmq:$build_version
    ports:
      - "5772:5672"
      - "15772:15672"
    depends_on:
      - rdcdeploy
    volumes:
      - ./cfg:/cfg
    networks:
      - rdc_network

  rdcdb:
    restart: "no"
    container_name: rdcdb
    build:
      context: ./postgres
      args:
        - build_version
        - build_type
    image: rdcpostgres:$build_version
    ports:
      - "5532:5432"
    depends_on:
      - rdcdeploy
    volumes:
      - ./cfg:/cfg
    networks:
      - rdc_network

  rdcdbdeploy:
    restart: "no"
    container_name: rdcdbdeploy
    build:
      context: ./rdcdbdeploy
      args:
        - build_version
        - build_type
    image: rdcdbdeploy:$build_version
    depends_on:
      - rdcdb
    volumes:
      - ./cfg:/cfg
    networks:
      - rdc_network

  rihapp:
    restart: "no"
    container_name: rihapp
    build:
      context: ./rihserver
      args:
        - build_version
        - build_type
    image: rihapp:$build_version
    ports:
      - "9090:8080"
    depends_on:
      - rdcrabbitmq
      - rdcdb
    volumes:
      - ./cfg:/cfg
    networks:
      - iam_network
      - rdc_network

  subscription_scheduler:
    restart: "no"
    container_name: subscription_scheduler
    build:
      context: ./subscription
      args:
        - build_version
        - build_type
    image: subscription_scheduler:$build_version
    depends_on:
      - rdcrabbitmq
      - rdcdb
      - rihapp
    volumes:
      - ./cfg:/cfg
    networks:
      - iam_network
      - rdc_network
    environment:
      - rdc.subscription.instanceNumber=0

  subscription_processor:
    restart: "no"
    container_name: subscription_processor
    build:
      context: ./subscription
      args:
        - build_version
        - build_type
    image: subscription_processor:$build_version
    depends_on:
      - rdcrabbitmq
      - rdcdb
      - rihapp
    volumes:
      - ./cfg:/cfg
    networks:
      - iam_network
      - rdc_network
    environment:
      - rdc.subscription.instanceNumber=1

  rdcsmoketest:
    restart: "no"
    container_name: rdcsmoketests
    build:
      context: ./rdcdeploy
    image: rdcdeploy:$build_version
    volumes:
      - ./cfg:/cfg
    depends_on:
      - rihapp
    networks:
      - iam_network
      - rdc_network
    entrypoint:
      - wait-for-rihapp.sh
      - rdcdeploy
    command: ["-x", "-z", "/cfg", "-c", "/cfg/config.yml", "docker"]
I start it using docker-compose up and it shows that the containers are started.
eedaa5e11a0e rdcdeploy:3.3.0.1 "wait-for-rihapp.sh…" 2 minutes ago Up 38 seconds rdcsmoketests
9178355cbca7 subscription_scheduler:3.3.0.1 "./wait-for-env.sh /…" 2 minutes ago Up 38 seconds subscription_scheduler
ae24a4b76f3e subscription_processor:3.3.0.1 "./wait-for-env.sh /…" 2 minutes ago Up 38 seconds subscription_processor
5f789ae74ef2 rihapp:3.3.0.1 "./wait_for_rdc_db.s…" 2 minutes ago Up 39 seconds 0.0.0.0:9090->8080/tcp rihapp
698b26d0ca37 rdcdbdeploy:3.3.0.1 "wait-for-env-db.sh …" 2 minutes ago Up 39 seconds rdcdbdeploy
592cb850f5b9 rdcrabbitmq:3.3.0.1 "wait-for-env.sh /cf…" 2 minutes ago Up 39 seconds 4369/tcp, 5671/tcp, 15671/tcp, 25672/tcp, 0.0.0.0:5772->5672/tcp, 0.0.0.0:15772->15672/tcp rdcrabbitmq
505a0f36528f rdcpostgres:3.3.0.1 "wait-for-env.sh /cf…" 2 minutes ago Up 39 seconds 0.0.0.0:5532->5432/tcp
But for some reason no container is able to connect to either rabbitmq or postgres.
The logs for rabbitmq show that it has started:
2020-07-24 10:32:13.226 [info] <0.370.0> Running boot step direct_client defined by app rabbit
2020-07-24 10:32:13.226 [info] <0.370.0> Running boot step os_signal_handler defined by app rabbit
2020-07-24 10:32:13.226 [info] <0.489.0> Swapping OS signal event handler (erl_signal_server) for our own
2020-07-24 10:32:13.262 [info] <0.539.0> Management plugin: HTTP (non-TLS) listener started on port 15672
2020-07-24 10:32:13.262 [info] <0.645.0> Statistics database started.
2020-07-24 10:32:13.262 [info] <0.644.0> Starting worker pool 'management_worker_pool' with 3 processes in it
2020-07-24 10:32:13.480 [info] <0.8.0> Server startup complete; 3 plugins started.
* rabbitmq_management
* rabbitmq_web_dispatch
* rabbitmq_management_agent
completed with 3 plugins.
The same goes for postgres:
server started
CREATE DATABASE
CREATE ROLE
/usr/local/bin/docker-entrypoint.sh: ignoring /docker-entrypoint-initdb.d/*
waiting for server to shut down...LOG: received fast shutdown request
.LOG: aborting any active transactions
LOG: autovacuum launcher shutting down
LOG: shutting down
LOG: database system is shut down
done
server stopped
PostgreSQL init process complete; ready for start up.
LOG: database system was shut down at 2020-07-24 10:30:59 UTC
LOG: MultiXact member wraparound protections are now enabled
LOG: database system is ready to accept connections
LOG: autovacuum launcher started
Environment Available - proceeding with startup docker-entrypoint.sh postgres
LOG: database system was interrupted; last known up at 2020-07-24 10:31:00 UTC
LOG: database system was not properly shut down; automatic recovery in progress
LOG: invalid record length at 0/14EEEA0: wanted 24, got 0
LOG: redo is not required
LOG: MultiXact member wraparound protections are now enabled
LOG: database system is ready to accept connections
LOG: autovacuum launcher started
But the applications trying to connect to port 5772 get connection refused for rabbitmq, and for postgres it also says:
psql: error: could not connect to server: could not connect to server: Connection refused
rihapp | Is the server running on host "localhost" (127.0.0.1) and accepting
rihapp | TCP/IP connections on port 5532?
It also generates .env files that contain environment variables for the apps, like:
DATABASE_URL=postgres://rdc:rdc@localhost:5532/pg_db
spring.datasource.url=jdbc:postgresql://localhost:5532/pg_db
spring.rabbitmq.host=localhost
spring.rabbitmq.port=5772
What might be the problem? It feels like some kind of network issue.
It seems that you've configured the clients to contact the servers on localhost:X, am I getting this right?
In that case you need to be aware that containers in docker-compose run in separate network namespaces and reach each other through a bridge network, addressed by service name. This means that inside a container you should use rdcrabbitmq:5672 instead of localhost:5772.
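For example, the generated .env values would need to use the compose service names and the container-side ports (not the host-mapped ones), something like:
DATABASE_URL=postgres://rdc:rdc@rdcdb:5432/pg_db
spring.datasource.url=jdbc:postgresql://rdcdb:5432/pg_db
spring.rabbitmq.host=rdcrabbitmq
spring.rabbitmq.port=5672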

Celery task hangs when .apply_async() or .delay() is used within request processed by Django, but works fine when invoked in shell

Invocation of the following task:
task__determine_order_details_processing_or_created_status.apply_async(
    args=[order_record.Order_ID],
    eta=datetime.now(GMT_timezone) + timedelta(minutes=1)
)
Ends up in a worker timeout. It looks like the call never releases the worker to continue its job:
web_1 | [2019-11-21 05:43:43 +0000] [1] [CRITICAL] WORKER TIMEOUT (pid:1559)
web_1 | [2019-11-21 05:43:43 +0000] [1559] [INFO] Worker exiting (pid: 1559)
web_1 | [2019-11-21 05:43:43 +0000] [1636] [INFO] Booting worker with pid: 1636
Whereas the same command invoked from the Django shell creates a perfectly working Celery task:
celery_1 | [2019-11-21 05:47:06,500: INFO/MainProcess] Received task: task__determine_order_details_processing_or_created_status[f94708be-a0ab-4853-8785-a11c8c7ca9f1] ETA:[2019-11-21 05:48:06.304924+00:00]
docker-compose.yml:
web:
  build: ./server
  command: gunicorn server.wsgi:application --reload --limit-request-line 16376 --bind 0.0.0.0:8001
  volumes:
    - ./server:/usr/src
  expose:
    - 8001
  env_file: .env.dev
  links:
    - memcached
  depends_on:
    - db_development_2
    - redis

db_development_2:
  restart: always
  image: postgres:latest
  volumes:
    - postgres_development3:/var/lib/postgresql/volume/
  env_file: .env.dev
  logging:
    driver: none

redis:
  image: "redis:alpine"
  restart: always
  logging:
    driver: none

celery:
  build: ./server
  command: celery -A server.celery worker -l info
  env_file: .env.dev
  volumes:
    - ./server:/usr/src
  depends_on:
    - db_development_2
    - redis
  restart: always

celery-beat:
  build: ./server
  command: celery -A server.celery beat -l info
  env_file: .env.dev
  volumes:
    - ./server:/usr/src
  depends_on:
    - db_development_2
    - redis
  restart: always
  logging:
    driver: none
Can you please share more details?
The error is from gunicorn, right?
Are you running this in a Docker environment, with Celery in a different container?
What does your WSGI <-> app command look like?
For example:
gunicorn app.wsgi:tour_application -w 6 -b :8000 --timeout 120
Can you try with a longer timeout, like 120 in the example above?
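If the gunicorn worker timeout turns out to be the culprit, a sketch of the change in the web service of your docker-compose.yml (only --timeout 120 is added to your original command):

web:
  build: ./server
  command: gunicorn server.wsgi:application --reload --limit-request-line 16376 --timeout 120 --bind 0.0.0.0:8001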

Docker-compose: bind Celery to Postgres database

My Docker app runs with Flask as the backend and Celery as an asynchronous task manager. Task results are then dumped in a SQLAlchemy database owned by Postgres.
However, I am not able to make Celery interact with Postgres.
Configuration:
docker-compose-dev.yml
web:
  build:
    context: ./services/web
    dockerfile: Dockerfile-dev
  volumes:
    - './services/web:/usr/src/app'
  ports:
    - 5001:5000
  environment:
    - FLASK_ENV=development
    - APP_SETTINGS=project.config.DevelopmentConfig
    - DATABASE_URL=postgres://postgres:postgres@web-db:5432/web_dev
    - DATABASE_TEST_URL=postgres://postgres:postgres@web-db:5432/web_test
    - SECRET_KEY=my_precious
  depends_on:
    - web-db
    - redis

web-db:
  build:
    context: ./services/web/project/db
    dockerfile: Dockerfile
  ports:
    - 5435:5432
  environment:
    - POSTGRES_USER=postgres
    - POSTGRES_PASSWORD=postgres

celery:
  image: dev3_web
  restart: always
  volumes:
    - ./services/web:/usr/src/app
    - ./services/web/celery_logs:/usr/src/app/celery_logs
  command: celery worker -A celery_worker.celery --loglevel=DEBUG --logfile=celery_logs/celery.log -Q cache
  environment:
    - CELERY_BROKER=redis://redis:6379/0
    - CELERY_RESULT_BACKEND=redis://redis:6379/0
  depends_on:
    - web
    - redis
  links:
    - redis:redis
    - web-db

redis:
  image: redis:5.0.3-alpine
  restart: always
  expose:
    - '6379'
  ports:
    - '6379:6379'

monitor:
  image: dev3_web
  ports:
    - 5555:5555
  command: flower -A celery_worker.celery --port=5555 --broker=redis://redis:6379/0
  depends_on:
    - web
    - redis
Log:
celery_1| Neither SQLALCHEMY_DATABASE_URI nor SQLALCHEMY_BINDS is set. Defaulting SQLALCHEMY_DATABASE_URI to "sqlite:///:memory:".
How do I bind Celery tasks to my Postgres database?
celery_1 | Neither SQLALCHEMY_DATABASE_URI nor SQLALCHEMY_BINDS is set. Defaulting SQLALCHEMY_DATABASE_URI to "sqlite:///:memory:".
suggests that the error is from your celery container, which is missing the environment variable setup; you need DATABASE_URL set for it, since the container is created from the dev3_web image.
The changes to apply to your docker-compose-dev.yml:
celery:
  image: dev3_web
  restart: always
  volumes:
    - ./services/web:/usr/src/app
    - ./services/web/celery_logs:/usr/src/app/celery_logs
  command: celery worker -A celery_worker.celery --loglevel=DEBUG --logfile=celery_logs/celery.log -Q cache
  environment:
    - CELERY_BROKER=redis://redis:6379/0
    - CELERY_RESULT_BACKEND=redis://redis:6379/0
    - APP_SETTINGS=project.config.DevelopmentConfig
    - DATABASE_URL=postgres://postgres:postgres@web-db:5432/web_dev
  depends_on:
    - web
    - redis
  links:
    - redis:redis
    - web-db