Meaning of syntax {1...5} in docker-compose.yml file

x-minio-common: &minio-common
  image: quay.io/minio/minio:RELEASE.2022-08-13T21-54-44Z
  command: server --console-address ":9001" http://minio{1...4}/data{1...2}
  expose:
    - "9000"
    - "9001"
  # environment:
  #   MINIO_ROOT_USER: minioadmin
  #   MINIO_ROOT_PASSWORD: minioadmin
  healthcheck:
    test: ["CMD", "curl", "-f", "http://localhost:9000/minio/health/live"]
    interval: 30s
    timeout: 20s
    retries: 3
What does http://minio{1...4}/data{1...2} mean here? I guess it means this:
command: server --console-address ":9001" http://minio1/data1 http://minio1/data2 http://minio2/data1 http://minio2/data2 http://minio3/data1 http://minio3/data2 http://minio4/data1 http://minio4/data2
Is my guess right?
Source: https://raw.githubusercontent.com/minio/minio/master/docs/orchestration/docker-compose/docker-compose.yaml
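MinIO's ellipsis syntax does expand that way: {1...4} denotes the inclusive range 1 through 4, so the single command line stands for all eight endpoint URLs listed above. As a quick sanity check, shell brace expansion (which uses two dots where MinIO uses three) produces the same list in the same order:

echo http://minio{1..4}/data{1..2}
# http://minio1/data1 http://minio1/data2 http://minio2/data1 ... http://minio4/data2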

Related

Broken DAG: [/usr/local/airflow/dags/my_dag.py] No module named 'airflow.operators.subdag'

I'm running Airflow inside a Docker container, using the puckel/docker-airflow:latest image from Docker Hub. I can access the Airflow UI through localhost:8080, but the DAG does not execute and fails with the error mentioned in the subject above. I'm even running pip commands to install apache-airflow in my Dockerfile. Here is what my Dockerfile, docker-compose.yml and dag.py look like:
Dockerfile:
FROM puckel/docker-airflow:latest
RUN pip install requests
RUN pip install pandas
RUN pip install 'apache-airflow'
docker-compose.yml:
version: '3.7'
services:
  redis:
    image: redis:5.0.5
    environment:
      REDIS_HOST: redis
      REDIS_PORT: 6379
    ports:
      - 6379:6379
  postgres:
    image: postgres:9.6
    environment:
      - POSTGRES_USER=airflow
      - POSTGRES_PASSWORD=airflow
      - POSTGRES_DB=airflow
      - PGDATA=/var/lib/postgresql/data/pgdata
    volumes:
      - ./pgdata:/var/lib/postgresql/data/pgdata
    logging:
      options:
        max-size: 10m
        max-file: "3"
  webserver:
    build: ./dockerfiles
    restart: always
    depends_on:
      - postgres
      - redis
    environment:
      - LOAD_EX=n
      - FERNET_KEY=46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=
      - EXECUTOR=Celery
    logging:
      options:
        max-size: 10m
        max-file: "3"
    volumes:
      - ./dags:/usr/local/airflow/dags
      - ./config/airflow.cfg:/usr/local/airflow/airflow.cfg
    ports:
      - "8080:8080"
    command: webserver
    healthcheck:
      test: ["CMD-SHELL", "[ -f /usr/local/airflow/airflow-webserver.pid ]"]
      interval: 30s
      timeout: 30s
      retries: 3
  flower:
    build: ./dockerfiles
    restart: always
    depends_on:
      - redis
    environment:
      - EXECUTOR=Celery
    ports:
      - "5555:5555"
    command: flower
  scheduler:
    build: ./dockerfiles
    restart: always
    depends_on:
      - webserver
    volumes:
      - ./dags:/usr/local/airflow/dags
    environment:
      - LOAD_EX=n
      - FERNET_KEY=46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=
      - EXECUTOR=Celery
    command: scheduler
  worker:
    build: ./dockerfiles
    restart: always
    depends_on:
      - scheduler
    volumes:
      - ./dags:/usr/local/airflow/dags
    environment:
      - FERNET_KEY=46BKJoQYlPPOexq0OhDZnIlNepKFf87WFwLbfzqDDho=
      - EXECUTOR=Celery
    command: worker
dag.py:
from airflow import DAG
from airflow.operators.subdag import SubDagOperator
from airflow.operators.python import PythonOperator, BranchPythonOperator
from airflow.operators.bash import BashOperator
from datetime import datetime
from random import randint

def _choosing_best_model(ti):
    accuracies = ti.xcom_pull(task_ids=[
        'training_model_A',
        'training_model_B',
        'training_model_C'
    ])
    if max(accuracies) > 8:
        return 'accurate'
    return 'inaccurate'

def _training_model(model):
    return randint(1, 10)

with DAG("test",
         start_date=datetime(2021, 1, 1),
         schedule_interval='@daily',
         catchup=False) as dag:
    training_model_tasks = [
        PythonOperator(
            task_id=f"training_model_{model_id}",
            python_callable=_training_model,
            op_kwargs={
                "model": model_id
            }
        ) for model_id in ['A', 'B', 'C']
    ]
    choosing_best_model = BranchPythonOperator(
        task_id="choosing_best_model",
        python_callable=_choosing_best_model
    )
    accurate = BashOperator(
        task_id="accurate",
        bash_command="echo 'accurate'"
    )
    inaccurate = BashOperator(
        task_id="inaccurate",
        bash_command="echo 'inaccurate'"
    )
    training_model_tasks >> choosing_best_model >> [accurate, inaccurate]
Am I missing anything here? Please let me know if you can. Thanks :)
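One hedged observation, not verified against the image: puckel/docker-airflow is built on Airflow 1.10.x, while airflow.operators.subdag, airflow.operators.python and airflow.operators.bash are the module paths introduced with Airflow 2.x. If that is the case here, the 1.10-style imports would look roughly like this:

# assumption: the puckel image runs Airflow 1.10.x, which uses these module paths
from airflow.operators.subdag_operator import SubDagOperator
from airflow.operators.python_operator import PythonOperator, BranchPythonOperator
from airflow.operators.bash_operator import BashOperator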

SequelizeConnectionRefusedError: connect ECONNREFUSED 127.0.0.1:5432 when using docker to use sequelize

I'm getting the following error when dockerizing a node postgres database using sequelize as an orm backend
Unhandled rejection SequelizeConnectionRefusedError: connect ECONNREFUSED 127.0.0.1:5432
app_1 | at connection.connect.err (/home/app/node_modules/sequelize/lib/dialects/postgres/connection-manager.js:170:24)
These lines of code seem to be the culprit; Docker should not be connecting with these credentials, as they are for my local setup.
if (process.env.NODE_ENV === "production") {
  var sequelize = new Sequelize(process.env.DATABASE_URL);
} else {
  // docker is looking at these credentials..... when it should not
  var sequelize = new Sequelize("elifullstack", "eli", "", {
    host: "127.0.0.1",
    dialect: "postgres",
    pool: {
      max: 100,
      min: 0,
      idle: 200000,
      // #note https://github.com/sequelize/sequelize/issues/8133#issuecomment-359993057
      acquire: 1000000,
    },
  });
}
docker-compose.yml
# docker-compose.yml
version: "3"
services:
  app:
    build: ./server
    depends_on:
      - database
    ports:
      - 5000:5000
    environment:
      # database refers to the database server at the bottom called "database"
      - PSQL_HOST=database
      - PSQL_USER=postgres
      - PORT=5000
      - PSQL_NAME=elitypescript
    command: npm run server
  client:
    build: ./client
    image: react_client
    links:
      - app
    working_dir: /home/node/app/client
    volumes:
      - ./:/home/node/app
    ports:
      - 3001:3001
    command: npm run start
    env_file:
      - ./client/.env
  database:
    image: postgres:9.6.8-alpine
    volumes:
      - database:/var/lib/postgresql/data
    ports:
      - 3030:5432
volumes:
  database:
./server/dockerFile
FROM node:10.6.0
COPY . /home/app
WORKDIR /home/app
COPY package.json ./
RUN npm install
EXPOSE 5000
I looked at other similar questions like the following, but it ultimately did not help solve the issue.
Docker - SequelizeConnectionRefusedError: connect ECONNREFUSED 127.0.0.1:3306
SequelizeConnectionRefusedError: connect ECONNREFUSED 127.0.0.1:3306
I solved it...
What I did was change this:
host: "127.0.0.1",
to this:
let sequelize;
if (process.env.NODE_ENV === "production") {
  sequelize = new Sequelize(process.env.DATABASE_URL);
} else {
  sequelize = new Sequelize(
    process.env.POSTGRES_DB || "elitypescript",
    process.env.POSTGRES_USER || "eli",
    "",
    {
      host: process.env.PSQL_HOST || "localhost",
      dialect: "postgres",
      pool: {
        max: 100,
        min: 0,
        idle: 200000,
        // #note https://github.com/sequelize/sequelize/issues/8133#issuecomment-359993057
        acquire: 1000000,
      },
    }
  );
}
That way the host is set from the Docker environment variable:
PSQL_HOST: database
which connects to:
database:
  image: postgres:9.6.8-alpine
  volumes:
    - database:/var/lib/postgresql/data
  ports:
    - 3030:5432
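Worth noting: inside the compose network the app reaches postgres through the service name database on the container port 5432; the host-mapped port 3030 only matters for connections from the host machine. Under that assumption, a hypothetical connection URL from inside the app container would be:

postgres://postgres:password@database:5432/elitypescript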
Edit
# docker-compose.yml
version: "3"
services:
  app:
    build: ./server
    depends_on:
      - database
    ports:
      - 5000:5000
    environment:
      PSQL_HOST: database
      POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-password}
      POSTGRES_USER: ${POSTGRES_USER:-postgres}
      POSTGRES_DB: ${POSTGRES_DB:-elitypescript}
    command: npm run server
  client:
    build: ./client
    image: react_client
    links:
      - app
    working_dir: /home/node/app/client
    volumes:
      - ./:/home/node/app
    ports:
      - 3001:3001
    command: npm run start
    env_file:
      - ./client/.env
  database:
    image: postgres:9.6.8-alpine
    volumes:
      - database:/var/lib/postgresql/data
    ports:
      - 3030:5432
volumes:
  database:
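A related pitfall, offered as a hedged suggestion rather than part of the original answer: depends_on only waits for the database container to be created, not for postgres to accept connections, so a cold start can still produce ECONNREFUSED. A minimal sketch of a readiness gate, assuming the Compose Spec or file format 2.1+ (classic version "3" dropped the condition form):

database:
  image: postgres:9.6.8-alpine
  healthcheck:
    # pg_isready ships with the postgres image and exits 0 once the server accepts connections
    test: ["CMD-SHELL", "pg_isready -U postgres"]
    interval: 5s
    timeout: 5s
    retries: 5
app:
  build: ./server
  depends_on:
    database:
      condition: service_healthy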

mailcow + jwilder reverse proxy

I'm trying to set up my own mail server; Mailcow was recommended.
DNS provider:
Cloudflare with
CNAME mail.example.com => example.com, proxied
Because it is proxied, I cannot use the normal ports mentioned in the docs, so I have to set up some forwarding...
Router:
Fritzbox with port forwarding
2052 => 25
2053 => 465
8080 => 587
2082 => 143
2083 => 993
2086 => 110
2087 => 995
8880 => 4190
Docker:
I use jwilder's reverse proxy and its LE companion, which work well with everything else I have hosted so far.
${DOCKERDIR}/docker-compose-js.yml
version: '3'
services:
  proxy:
    build: ./reverse_proxy
    container_name: proxy
    restart: always
    ports:
      - 80:80
      - 443:443
    volumes:
      - ${DOCKERDIR}/reverse_proxy/certs:/etc/nginx/certs:ro
      - ${DOCKERDIR}/reverse_proxy/vhost.d:/etc/nginx/vhost.d
      - ${DOCKERDIR}/reverse_proxy/html:/usr/share/nginx/html
      - /var/run/docker.sock:/tmp/docker.sock:ro
    environment:
      - PUID=33
      - PGID=33
    labels:
      com.github.jrcs.letsencrypt_nginx_proxy_companion.nginx_proxy: ""
    networks:
      - proxy-tier
    depends_on:
      - le
  le:
    image: jrcs/letsencrypt-nginx-proxy-companion
    container_name: le
    volumes:
      - ${DOCKERDIR}/reverse_proxy/certs:/etc/nginx/certs:rw
      - ${DOCKERDIR}/reverse_proxy/vhost.d:/etc/nginx/vhost.d
      - ${DOCKERDIR}/reverse_proxy/html:/usr/share/nginx/html
      - /var/run/docker.sock:/var/run/docker.sock:ro
    environment:
      - PUID=33
      - PGID=33
      - DEFAULT_EMAIL=*****
      - NGINX_PROXY_CONTAINER=proxy
    networks:
      - proxy-tier
networks:
  proxy-tier:
Then there is a (slightly) modified file for mailcow; I'm just mentioning the changes:
${DOCKERDIR}/mailcow/docker-compose.yml
nginx-mailcow:
  ...
  # ports:
  #   - "${HTTPS_BIND:-0.0.0.0}:${HTTPS_PORT:-443}:${HTTPS_PORT:-443}"
  #   - "${HTTP_BIND:-0.0.0.0}:${HTTP_PORT:-80}:${HTTP_PORT:-80}"
  ...
There seems to be no way to remove those ports from the original docker-compose.yml other than commenting them out, even though editing that file is not recommended.
All other changes went into
${DOCKERDIR}/mailcow/docker-compose-override.yml
version: '2.1'
services:
  nginx-mailcow:
    networks:
      proxy-tier:
    environment:
      - VIRTUAL_HOST=${MAILCOW_HOSTNAME},${ADDITIONAL_SAN}
      - VIRTUAL_PORT=8080
      - VIRTUAL_PROTO=http
      - LETSENCRYPT_HOST=${MAILCOW_HOSTNAME},${ADDITIONAL_SAN}
    volumes:
      - ${DOCKERDIR}/reverse_proxy/certs/${MAILCOW_HOSTNAME}:/etc/ssl/mail/
      - ${DOCKERDIR}/reverse_proxy/certs/dhparam.pem:/etc/ssl/mail/dhparams.pem:ro
    ports:
  dovecot-mailcow:
    volumes:
      - ${DOCKERDIR}/reverse_proxy/certs/${MAILCOW_HOSTNAME}:/etc/ssl/mail/
      - ${DOCKERDIR}/reverse_proxy/certs/dhparam.pem:/etc/ssl/mail/dhparams.pem:ro
  postfix-mailcow:
    volumes:
      - ${DOCKERDIR}/reverse_proxy/certs/${MAILCOW_HOSTNAME}:/etc/ssl/mail/
      - ${DOCKERDIR}/reverse_proxy/certs/dhparam.pem:/etc/ssl/mail/dhparams.pem:ro
networks:
  proxy-tier:
And finally the mailcow.conf (changes only)
${DOCKERDIR}/mailcow/mailcow.conf
MAILCOW_HOSTNAME=mail.example.com
HTTP_PORT=8080
#HTTP_BIND=0.0.0.0
HTTP_BIND=proxy
HTTPS_PORT=8443
#HTTPS_BIND=0.0.0.0
HTTPS_BIND=proxy
SKIP_LETS_ENCRYPT=y
When I try to connect to mail.example.com I get Error 526, Invalid SSL certificate.
Could someone please show me where my config is wrong and how to change it so I get mailcow working?
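Error 526 is Cloudflare reporting that it could not validate the certificate presented by the origin, which fits SKIP_LETS_ENCRYPT=y if the proxy is not serving a valid certificate for mail.example.com. As a first debugging step, you can inspect what the origin actually serves (the address here is a placeholder for your WAN IP):

openssl s_client -connect YOUR.WAN.IP:443 -servername mail.example.com </dev/null 2>/dev/null | openssl x509 -noout -subject -issuer -dates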

Multiple Orderer redundancy in Kafka based network

We're stuck configuring a fabric network based on 3 orgs with 1 peer each and 2 kafka-based orderers. For kafka ordering we use 4 kafka nodes with 3 zookeepers. It's deployed on some AWS ec2 instances as follows:
1: Org1
2: Org2
3: Org3
4: orderer0, orderer1, kafka0, kafka1, kafka2, kafka3, zookeeper0, zookeeper1, zookeeper2
All of the ordering nodes plus the Kafka cluster are located on the same machine for connectivity reasons (I read somewhere they must be on the same machine to avoid these problems).
During our tests, we tried taking down the first orderer (orderer0) with docker stop for redundancy testing. We expected the network to continue working through orderer1, but instead it dies and stops working.
Looking at the peer's console, I can see some errors.
Could not connect to any of the endpoints: [orderer0.example.com:7050, orderer1.example.com:8050]
Find attached the content of the files related to the configuration of the system.
Orderer + kafka + zk
#
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
version: '2'
services:
  zookeeper0.example.com:
    container_name: zookeeper0.example.com
    extends:
      file: docker-compose-base.yaml
      service: zookeeper0.example.com
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "3"
  zookeeper1.example.com:
    container_name: zookeeper1.example.com
    extends:
      file: docker-compose-base.yaml
      service: zookeeper1.example.com
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "3"
  zookeeper2.example.com:
    container_name: zookeeper2.example.com
    extends:
      file: docker-compose-base.yaml
      service: zookeeper2.example.com
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "3"
  kafka0.example.com:
    container_name: kafka0.example.com
    extends:
      file: docker-compose-base.yaml
      service: kafka0.example.com
    depends_on:
      - zookeeper0.example.com
      - zookeeper1.example.com
      - zookeeper2.example.com
      - orderer0.example.com
      - orderer1.example.com
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "3"
  kafka1.example.com:
    container_name: kafka1.example.com
    extends:
      file: docker-compose-base.yaml
      service: kafka1.example.com
    depends_on:
      - zookeeper0.example.com
      - zookeeper1.example.com
      - zookeeper2.example.com
      - orderer0.example.com
      - orderer1.example.com
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "3"
  kafka2.example.com:
    container_name: kafka2.example.com
    extends:
      file: docker-compose-base.yaml
      service: kafka2.example.com
    depends_on:
      - zookeeper0.example.com
      - zookeeper1.example.com
      - zookeeper2.example.com
      - orderer0.example.com
      - orderer1.example.com
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "3"
  kafka3.example.com:
    container_name: kafka3.example.com
    extends:
      file: docker-compose-base.yaml
      service: kafka3.example.com
    depends_on:
      - zookeeper0.example.com
      - zookeeper1.example.com
      - zookeeper2.example.com
      - orderer0.example.com
      - orderer1.example.com
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "3"
  orderer0.example.com:
    container_name: orderer0.example.com
    image: hyperledger/fabric-orderer:x86_64-1.1.0
    environment:
      - ORDERER_GENERAL_LOGLEVEL=debug
      - ORDERER_GENERAL_LISTENADDRESS=0.0.0.0
      - ORDERER_LISTEN_PORT=7050
      - ORDERER_GENERAL_GENESISMETHOD=file
      - ORDERER_GENERAL_GENESISFILE=/etc/hyperledger/configtx/genesis.block
      - ORDERER_GENERAL_LOCALMSPID=OrdererMSP
      - ORDERER_GENERAL_LOCALMSPDIR=/etc/hyperledger/crypto/orderer/msp
      - ORDERER_GENERAL_TLS_ENABLED=true
      - ORDERER_GENERAL_TLS_PRIVATEKEY=/etc/hyperledger/crypto/orderer/tls/server.key
      - ORDERER_GENERAL_TLS_CERTIFICATE=/etc/hyperledger/crypto/orderer/tls/server.crt
      - ORDERER_GENERAL_TLS_ROOTCAS=[/etc/hyperledger/crypto/orderer/tls/ca.crt, /etc/hyperledger/crypto/peerOrg1/tls/ca.crt, /etc/hyperledger/crypto/peerOrg2/tls/ca.crt, /etc/hyperledger/crypto/peerOrg3/tls/ca.crt]
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "3"
    working_dir: /opt/gopath/src/github.com/hyperledger/fabric/orderers
    command: orderer
    ports:
      - 7050:7050
    volumes:
      - ./channel:/etc/hyperledger/configtx
      - ./channel/crypto-config/ordererOrganizations/example.com/orderers/orderer0.example.com/:/etc/hyperledger/crypto/orderer
      - ./channel/crypto-config/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/:/etc/hyperledger/crypto/peerOrg1
      - ./channel/crypto-config/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/:/etc/hyperledger/crypto/peerOrg2
      - ./channel/crypto-config/peerOrganizations/org3.example.com/peers/peer0.org3.example.com/:/etc/hyperledger/crypto/peerOrg3
    depends_on:
      - kafka0.example.com
      - kafka1.example.com
      - kafka2.example.com
      - kafka3.example.com
  orderer1.example.com:
    container_name: orderer1.example.com
    image: hyperledger/fabric-orderer:x86_64-1.1.0
    environment:
      - ORDERER_GENERAL_LOGLEVEL=debug
      - ORDERER_GENERAL_LISTENADDRESS=0.0.0.0
      - ORDERER_LISTEN_PORT=8050
      - ORDERER_GENERAL_GENESISMETHOD=file
      - ORDERER_GENERAL_GENESISFILE=/etc/hyperledger/configtx/genesis.block
      - ORDERER_GENERAL_LOCALMSPID=OrdererMSP
      - ORDERER_GENERAL_LOCALMSPDIR=/etc/hyperledger/crypto/orderer/msp
      - ORDERER_GENERAL_TLS_ENABLED=true
      - ORDERER_GENERAL_TLS_PRIVATEKEY=/etc/hyperledger/crypto/orderer/tls/server.key
      - ORDERER_GENERAL_TLS_CERTIFICATE=/etc/hyperledger/crypto/orderer/tls/server.crt
      - ORDERER_GENERAL_TLS_ROOTCAS=[/etc/hyperledger/crypto/orderer/tls/ca.crt, /etc/hyperledger/crypto/peerOrg1/tls/ca.crt, /etc/hyperledger/crypto/peerOrg2/tls/ca.crt, /etc/hyperledger/crypto/peerOrg3/tls/ca.crt]
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "3"
    working_dir: /opt/gopath/src/github.com/hyperledger/fabric/orderers
    command: orderer
    ports:
      - 8050:7050
    volumes:
      - ./channel:/etc/hyperledger/configtx
      - ./channel/crypto-config/ordererOrganizations/example.com/orderers/orderer1.example.com/:/etc/hyperledger/crypto/orderer
      - ./channel/crypto-config/peerOrganizations/org1.example.com/peers/peer0.org1.example.com/:/etc/hyperledger/crypto/peerOrg1
      - ./channel/crypto-config/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/:/etc/hyperledger/crypto/peerOrg2
      - ./channel/crypto-config/peerOrganizations/org3.example.com/peers/peer0.org3.example.com/:/etc/hyperledger/crypto/peerOrg3
    depends_on:
      - kafka0.example.com
      - kafka1.example.com
      - kafka2.example.com
      - kafka3.example.com
Peer and Ca from Org2
#
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
version: '2'
services:
  ca.org2.example.com:
    image: hyperledger/fabric-ca:x86_64-1.1.0
    environment:
      - FABRIC_CA_HOME=/etc/hyperledger/fabric-ca-server
      - FABRIC_CA_SERVER_CA_CERTFILE=/etc/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem
      - FABRIC_CA_SERVER_CA_KEYFILE=/etc/hyperledger/fabric-ca-server-config/efa7d0819b7083f6c06eb34da414acbcde79f607b9ce26fb04dee60cf79a389a_sk
      - FABRIC_CA_SERVER_TLS_ENABLED=true
      - FABRIC_CA_SERVER_TLS_CERTFILE=/etc/hyperledger/fabric-ca-server-config/ca.org2.example.com-cert.pem
      - FABRIC_CA_SERVER_TLS_KEYFILE=/etc/hyperledger/fabric-ca-server-config/efa7d0819b7083f6c06eb34da414acbcde79f607b9ce26fb04dee60cf79a389a_sk
    ports:
      - "8054:7054"
    command: sh -c 'fabric-ca-server start -b admin:adminpw -d'
    volumes:
      - ./channel/crypto-config/peerOrganizations/org2.example.com/ca/:/etc/hyperledger/fabric-ca-server-config
    container_name: ca_peerOrg2
  peer0.org2.example.com:
    container_name: peer0.org2.example.com
    extends:
      file: base.yaml
      service: peer-base
    environment:
      - CORE_PEER_ID=peer0.org2.example.com
      - CORE_PEER_LOCALMSPID=Org2MSP
      - CORE_PEER_ADDRESS=peer0.org2.example.com:7051
    ports:
      - 8051:7051
      - 8053:7053
    volumes:
      - ./channel/crypto-config/peerOrganizations/org2.example.com/peers/peer0.org2.example.com/:/etc/hyperledger/crypto/peer
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "3"
    extra_hosts:
      - "orderer0.example.com:xxx.xxx.xxx.xxx"
      - "orderer1.example.com:xxx.xxx.xxx.xxx"
      - "kafka0.example.com:xxx.xxx.xxx.xxx"
      - "kafka1.example.com:xxx.xxx.xxx.xxx"
      - "kafka2.example.com:xxx.xxx.xxx.xxx"
      - "kafka3.example.com:xxx.xxx.xxx.xxx"
Orderer base
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
version: '2'
services:
  orderer-base:
    image: hyperledger/fabric-orderer:$IMAGE_TAG
    environment:
      - ORDERER_GENERAL_LOGLEVEL=error
      - ORDERER_GENERAL_LISTENADDRESS=0.0.0.0
      - ORDERER_GENERAL_GENESISMETHOD=file
      - ORDERER_GENERAL_GENESISFILE=/var/hyperledger/orderer/orderer.genesis.block
      - ORDERER_GENERAL_LOCALMSPID=OrdererMSP
      - ORDERER_GENERAL_LOCALMSPDIR=/var/hyperledger/orderer/msp
      # enabled TLS
      - ORDERER_GENERAL_TLS_ENABLED=true
      - ORDERER_GENERAL_TLS_PRIVATEKEY=/var/hyperledger/orderer/tls/server.key
      - ORDERER_GENERAL_TLS_CERTIFICATE=/var/hyperledger/orderer/tls/server.crt
      - ORDERER_GENERAL_TLS_ROOTCAS=[/var/hyperledger/orderer/tls/ca.crt]
      # kafka
      - CONFIGTX_ORDERER_ORDERERTYPE=kafka
      - CONFIGTX_ORDERER_KAFKA_BROKERS=[kafka0.example.com,kafka1.example.com,kafka2.example.com,kafka3.example.com]
      - ORDERER_KAFKA_RETRY_SHORTINTERVAL=1s
      - ORDERER_KAFKA_RETRY_SHORTTOTAL=30s
      - ORDERER_KAFKA_VERBOSE=true
    working_dir: /opt/gopath/src/github.com/hyperledger/fabric
    command: orderer
Kafka base
# Copyright IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
version: '2'
services:
  zookeeper:
    image: hyperledger/fabric-zookeeper
    environment:
      - ZOO_SERVERS=server.1=zookeeper0.example.com:2888:3888 server.2=zookeeper1.example.com:2888:3888 server.3=zookeeper2.example.com:2888:3888
    restart: always
  kafka:
    image: hyperledger/fabric-kafka
    restart: always
    environment:
      - KAFKA_MESSAGE_MAX_BYTES=103809024 # 99 * 1024 * 1024 B
      - KAFKA_REPLICA_FETCH_MAX_BYTES=103809024 # 99 * 1024 * 1024 B
      - KAFKA_UNCLEAN_LEADER_ELECTION_ENABLE=false
      - KAFKA_MIN_INSYNC_REPLICAS=2
      - KAFKA_DEFAULT_REPLICATION_FACTOR=3
      - KAFKA_ZOOKEEPER_CONNECT=zookeeper0.example.com:2181,zookeeper1.example.com:2181,zookeeper2.example.com:2181
    logging:
      driver: "json-file"
      options:
        max-size: "1m"
        max-file: "3"
configtx.yaml
Organizations:
  - &OrdererOrg
    Name: OrdererMSP
    ID: OrdererMSP
    MSPDir: crypto-config/ordererOrganizations/example.com/msp
  - &Org1
    Name: Org1MSP
    ID: Org1MSP
    MSPDir: crypto-config/peerOrganizations/org1.example.com/msp
    AnchorPeers:
      - Host: peer0.org1.example.com
        Port: 7051
  - &Org2
    Name: Org2MSP
    ID: Org2MSP
    MSPDir: crypto-config/peerOrganizations/org2.example.com/msp
    AnchorPeers:
      - Host: peer0.org2.example.com
        Port: 7051
  - &Org3
    Name: Org3MSP
    ID: Org3MSP
    MSPDir: crypto-config/peerOrganizations/org3.example.com/msp
    AnchorPeers:
      - Host: peer0.org3.example.com
        Port: 7051
################################################################################
Orderer: &OrdererDefaults
  OrdererType: kafka
  Addresses:
    - orderer0.example.com:7050
    - orderer1.example.com:7050
  BatchTimeout: 2s
  BatchSize:
    MaxMessageCount: 10
    AbsoluteMaxBytes: 98 MB
    PreferredMaxBytes: 512 KB
  Kafka:
    Brokers:
      - kafka0.example.com:9092
      - kafka1.example.com:9092
      - kafka2.example.com:9092
      - kafka3.example.com:9092
  Organizations:
################################################################################
Application: &ApplicationDefaults
  Organizations:
################################################################################
Profiles:
  ThreeOrgsOrdererGenesis:
    Orderer:
      <<: *OrdererDefaults
      Organizations:
        - *OrdererOrg
    Consortiums:
      SampleConsortium:
        Organizations:
          - *Org1
          - *Org2
          - *Org3
  ThreeOrgsChannel:
    Consortium: SampleConsortium
    Application:
      <<: *ApplicationDefaults
      Organizations:
        - *Org1
        - *Org2
        - *Org3
Could it be a configuration error? Connection problems are all but ruled out, because running the same network on a local machine gives the same result.
Thanks in advance.
Finally got it running smoothly. It turns out the problem wasn't in the docker-compose files but in the version of the Fabric SDK used by the web service. I was using fabric-client and fabric-ca-client, both on version 1.1, and this capability was missing until 1.2. (More info: https://jira.hyperledger.org/browse/FABN-90)
Just to clarify: I was able to see transactions happening on both orderers because of the interconnection between them, but I was only targeting the first one. When that orderer went down, the network would go dark.
I now understand how Fabric deals with orderers: it points to the first orderer in the list; if that one is down or unreachable, it moves it to the bottom of the list and targets the next one. This is what happens since 1.2; in older versions you have to code your own error controller so that it switches to the next orderer, roughly as sketched below.
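A minimal sketch of such an error controller, as illustrative pseudocode rather than the exact fabric-client API (sendBroadcast does exist on fabric-client Orderer objects, but error shapes vary by SDK version):

// rotate failed orderers to the bottom of the list and retry with the next one
async function broadcastWithFailover(orderers, envelope) {
  for (let attempt = 0; attempt < orderers.length; attempt++) {
    try {
      return await orderers[0].sendBroadcast(envelope);
    } catch (err) {
      orderers.push(orderers.shift()); // demote the unreachable orderer
    }
  }
  throw new Error('no orderer reachable');
}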
I'm not sure, but it could be because of the network layer. Since each is a separate compose file, Docker creates a different network for each compose project.
Also, I don't see any networks mentioned in the YAML files.
Please check the list of networks using "docker network list".
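Building on that: when peers and orderers come from separate compose files, one hedged way to put them on a shared network is to pre-create it and declare it external in each file (the network name here is an assumption):

# run once on the host: docker network create fabric_net
version: '2'
services:
  peer0.org2.example.com:
    networks:
      - fabric_net
networks:
  fabric_net:
    external: true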

replica Set mongo docker-compose

I'm trying to configure a mongodb replica set using docker-compose, but when I stop the primary container, the primary role doesn't seem to pass to a secondary.
redis:
  image: redis
  ports:
    - "6379:6379"
mongo3:
  hostname: mongo3
  image: mongo
  entrypoint: [ "/usr/bin/mongod", "--replSet", "rs", "--journal", "--dbpath", "/data/db", "--smallfiles", "--rest" ]
  volumes:
    - ./data/mongo3:/data/db
  ports:
    - "27018:27017"
    - "28018:28017"
  restart: always
mongo2:
  hostname: mongo2
  image: mongo
  entrypoint: [ "/usr/bin/mongod", "--replSet", "rs", "--journal", "--dbpath", "/data/db", "--smallfiles", "--rest" ]
  volumes:
    - ./data/mongo2:/data/db
  ports:
    - "27019:27017"
    - "28019:28017"
  restart: always
mongo1:
  hostname: mongo1
  image: mongo
  entrypoint: [ "/usr/bin/mongod", "--replSet", "rs", "--journal", "--dbpath", "/data/db", "--smallfiles", "--rest" ]
  volumes:
    - ./data/mongo1:/data/db
  ports:
    - "27017:27017"
    - "28017:28017"
  links:
    - mongo2:mongo2
    - mongo3:mongo3
  restart: always
web:
  build: .
  ports:
    - "2000:2000"
  volumes:
    - .:/vip
  links:
    - redis
    - mongo1
    - mongo2
    - mongo3
nginx:
  restart: always
  build: ./nginx/
  ports:
    - "80:80"
  links:
    - web:web
mongosetup:
  image: mongo
  links:
    - mongo1:mongo1
    - mongo2:mongo2
    - mongo3:mongo3
  volumes:
    - ./scripts:/scripts
  entrypoint: [ "/scripts/setup.sh" ]
setup.sh:
#!/bin/bash
MONGODB1=`ping -c 1 mongo1 | head -1 | cut -d "(" -f 2 | cut -d ")" -f 1`
MONGODB2=`ping -c 1 mongo2 | head -1 | cut -d "(" -f 2 | cut -d ")" -f 1`
MONGODB3=`ping -c 1 mongo3 | head -1 | cut -d "(" -f 2 | cut -d ")" -f 1`
echo "**********************************************" ${MONGODB1}
echo "Waiting for startup.."
until curl http://${MONGODB1}:28017/serverStatus\?text\=1 2>&1 | grep uptime | head -1; do
  printf '.'
  sleep 1
done
echo curl http://${MONGODB1}:28017/serverStatus\?text\=1 2>&1 | grep uptime | head -1
echo "Started.."
echo SETUP.sh time now: `date +"%T" `
mongo --host ${MONGODB1}:27017 <<EOF
var cfg = {
  "_id": "rs",
  "version": 1,
  "members": [
    {
      "_id": 0,
      "host": "${MONGODB1}:27017",
      "priority": 2
    },
    {
      "_id": 1,
      "host": "${MONGODB2}:27017",
      "priority": 0
    },
    {
      "_id": 2,
      "host": "${MONGODB3}:27017",
      "priority": 0
    }
  ],
  settings: {chainingAllowed: true}
};
rs.initiate(cfg, { force: true });
rs.reconfig(cfg, { force: true });
rs.slaveOk();
db.getMongo().setReadPref('nearest');
db.getMongo().setSlaveOk();
EOF
I had a similar issue and resolved it with the following compose file:
version: "3.8"
services:
mongo1:
image: mongo:4.2
container_name: mongo1
command: ["--replSet", "my-replica-set", "--bind_ip_all", "--port", "30001"]
volumes:
- ./data/mongo-1:/data/db
ports:
- 30001:30001
healthcheck:
test: test $$(echo "rs.initiate({_id:'my-replica-set',members:[{_id:0,host:\"mongo1:30001\"},{_id:1,host:\"mongo2:30002\"},{_id:2,host:\"mongo3:30003\"}]}).ok || rs.status().ok" | mongo --port 30001 --quiet) -eq 1
interval: 10s
start_period: 30s
mongo2:
image: mongo:4.2
container_name: mongo2
command: ["--replSet", "my-replica-set", "--bind_ip_all", "--port", "30002"]
volumes:
- ./data/mongo-2:/data/db
ports:
- 30002:30002
mongo3:
image: mongo:4.2
container_name: mongo3
command: ["--replSet", "my-replica-set", "--bind_ip_all", "--port", "30003"]
volumes:
- ./data/mongo-3:/data/db
ports:
- 30003:30003
with the following in my /etc/hosts file:
127.0.0.1 mongo1
127.0.0.1 mongo2
127.0.0.1 mongo3
I documented it in a GitHub repo and with a little blog post here:
https://github.com/UpSync-Dev/docker-compose-mongo-replica-set
https://www.upsync.dev/2021/02/02/run-mongo-replica-set.html
Update: This does not work! You do need to run rs.initiate()
With MongoDB 4.0, you don't need a 4th container to run a setup script. It is really simple to bring up a replicaSet of 3 containers:
version: "3"
services:
mongo1:
hostname: mongo1
container_name: localmongo1
image: mongo:4.0-xenial
expose:
- 27017
restart: always
entrypoint: [ "/usr/bin/mongod", "--bind_ip_all", "--replSet", "rs0" ]
mongo2:
hostname: mongo2
container_name: localmongo2
image: mongo:4.0-xenial
expose:
- 27017
restart: always
entrypoint: [ "/usr/bin/mongod", "--bind_ip_all", "--replSet", "rs0" ]
mongo3:
hostname: mongo3
container_name: localmongo3
image: mongo:4.0-xenial
expose:
- 27017
restart: always
entrypoint: [ "/usr/bin/mongod", "--bind_ip_all", "--replSet", "rs0" ]
More info here: https://github.com/msound/localmongo/tree/4.0
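Given the update above about rs.initiate(), a one-time manual initiation for this compose file could look like the following sketch, assuming the container names and the default port from that file:

docker exec localmongo1 mongo --eval '
  rs.initiate({
    _id: "rs0",
    members: [
      { _id: 0, host: "mongo1:27017" },
      { _id: 1, host: "mongo2:27017" },
      { _id: 2, host: "mongo3:27017" }
    ]
  })'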
I was looking for how to start MongoDB Replica Set with one DB instance for local development and ended up here. I found the answers here too complicated, so I came up with the following solution:
docker-compose.yml
version: "3"
services:
mongo:
hostname: mongodb
container_name: mongodb
image: mongo:latest
restart: always
ports:
- "27017:27017"
volumes:
- ./scripts:/docker-entrypoint-initdb.d/
command: ["--replSet", "rs0", "--bind_ip_all"]
And there is a folder called 'scripts' in the current directory with a single file in it called 'init.js' (the name is not important). This folder is mounted as a volume at '/docker-entrypoint-initdb.d/', which is a special directory: all files in it are executed when MongoDB starts for the first time with an empty data directory. The content of the file is:
init.js
rs.initiate();
I would advise you to have a look at khezen/mongo.
You can deploy a mongo replica set across a 3-node docker swarm with the following:
version: '3'
services:
  replica1:
    image: khezen/mongo:slim
    deploy:
      mode: replicated
      replicas: 1
      update_config:
        parallelism: 1
        delay: 10s
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == node-1
    environment:
      RS_NAME: shard1
      SHARD_SVR: 'y'
      AUTH: 'y'
    volumes:
      - /data/mongo/replica1:/data/db
    networks:
      - mongo_cluster
  replica2:
    image: khezen/mongo:slim
    deploy:
      mode: replicated
      replicas: 1
      update_config:
        parallelism: 1
        delay: 10s
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == node-2
    environment:
      RS_NAME: shard1
      SHARD_SVR: 'y'
      AUTH: 'y'
    volumes:
      - /data/mongo/replica2:/data/db
    networks:
      - mongo_cluster
  replica3:
    image: khezen/mongo:slim
    deploy:
      mode: replicated
      replicas: 1
      update_config:
        parallelism: 1
        delay: 10s
      restart_policy:
        condition: on-failure
      placement:
        constraints:
          - node.hostname == node-3
    environment:
      RS_NAME: shard1
      SHARD_SVR: 'y'
      MASTER: replica3
      SLAVES: replica1 replica2
      AUTH: 'y'
    volumes:
      - /data/mongo/replica3:/data/db
    networks:
      - mongo_cluster
networks:
  mongo_cluster:
    driver: overlay
disclaimer: I am the maintainer of this image.
I set up a gist with a guide on how to set it up using a docker-compose file and mongoose. https://gist.github.com/harveyconnor/518e088bad23a273cae6ba7fc4643549
I had a similar problem setting up a replica set in a standalone mongodb service with authentication, and here is what I ended up with.
docker-compose.yml:
version: '3.7'
services:
  ...
  db:
    image: mongo
    restart: always
    expose:
      - 27017
    environment:
      MONGO_INITDB_DATABASE: ${DATABASE_NAME}
      MONGO_INITDB_ROOT_USERNAME: ${DATABASE_USER}
      MONGO_INITDB_ROOT_PASSWORD: ${DATABASE_PASSWORD}
      MONGO_REPLICA_SET_NAME: ${MONGO_REPLICA_SET_NAME}
    command: ["--replSet", "${MONGO_REPLICA_SET_NAME}", "--bind_ip_all"]
    healthcheck:
      test: test $$(echo "rs.status().ok" | mongo -u $${MONGO_INITDB_ROOT_USERNAME} -p $${MONGO_INITDB_ROOT_PASSWORD} --quiet) -eq 1
      interval: 10s
      start_period: 30s
    volumes:
      - ./db:/data/db
      - ./scripts/set-credentials.sh:/docker-entrypoint-initdb.d/set-credentials.sh
  replica-setup:
    image: mongo
    restart: on-failure
    networks:
      default:
    volumes:
      - ./scripts/setup-replica.sh:/scripts/setup-replica.sh
    entrypoint: [ "bash", "/scripts/setup-replica.sh" ]
    depends_on:
      - db
    environment:
      MONGO_INITDB_ROOT_USERNAME: ${DATABASE_USER}
      MONGO_INITDB_ROOT_PASSWORD: ${DATABASE_PASSWORD}
./scripts/setup-replica.sh:
#!/bin/bash
MONGODB1=db
echo "Waiting for MongoDB startup..."
until curl http://${MONGODB1}:27017/serverStatus\?text\=1 2>&1 | grep uptime | head -1; do
  printf '.'
  sleep 1
done
# check if replica set is already initiated
RS_STATUS=$( mongo --quiet --host ${MONGODB1}:27017 -u $MONGO_INITDB_ROOT_USERNAME -p $MONGO_INITDB_ROOT_PASSWORD --eval "rs.status().ok" )
if [[ $RS_STATUS != 1 ]]
then
  echo "[INFO] Replication set config invalid. Reconfiguring now."
  RS_CONFIG_STATUS=$( mongo --quiet --host ${MONGODB1}:27017 -u $MONGO_INITDB_ROOT_USERNAME -p $MONGO_INITDB_ROOT_PASSWORD --eval "rs.status().codeName" )
  if [[ $RS_CONFIG_STATUS == 'InvalidReplicaSetConfig' ]]
  then
    mongo --quiet --host ${MONGODB1}:27017 -u $MONGO_INITDB_ROOT_USERNAME -p $MONGO_INITDB_ROOT_PASSWORD <<EOF
config = rs.config()
config.members[0].host = "${MONGODB1}:27017" // important: set the host to the name of the db instance
rs.reconfig(config, {force: true})
EOF
  else
    echo "[INFO] MongoDB setup finished. Initiating replica set."
    mongo --quiet --host ${MONGODB1}:27017 -u $MONGO_INITDB_ROOT_USERNAME -p $MONGO_INITDB_ROOT_PASSWORD --eval "rs.initiate()" > /dev/null
  fi
else
  echo "[INFO] Replication set already initiated."
fi
./scripts/set-credentials.sh:
#!/bin/bash
set -e
mongo -- "$MONGO_INITDB_DATABASE" <<EOF
var rootUser = '$MONGO_INITDB_ROOT_USERNAME';
var rootPassword = '$MONGO_INITDB_ROOT_PASSWORD';
var admin = db.getSiblingDB('admin');
admin.auth(rootUser, rootPassword);
var user = '$MONGO_INITDB_ROOT_USERNAME';
var password = '$MONGO_INITDB_ROOT_PASSWORD';
db.createUser({user: user, pwd: password, roles: ["readWrite"]});
EOF
What I achieved with this is:
Set up a mongodb service with username/password authentication and a default database
Initialize the replica set the first time the services run
Reconfigure the replica set member when there is previous db data
Health-check the mongodb service by checking the replica set status
If you just need a single-node replica set of MongoDB via docker-compose.yml, you can simply use this:
mongodb:
  image: mongo:5
  restart: always
  command: ["--replSet", "rs0", "--bind_ip_all"]
  ports:
    - 27018:27017
  healthcheck:
    test: mongo --eval "rs.initiate()"
    start_period: 5s
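To check that the healthcheck actually initiated the set, one option (assuming the service name above and the legacy mongo shell that the mongo:5 image still ships) is:

docker-compose exec mongodb mongo --quiet --eval 'rs.status().members[0].stateStr'
# prints PRIMARY once the single-node set is up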
This setup works for me. I have also set everything up in https://github.com/nguyenduyhust/docker-mongodb-replica-set.
Great if that's what you're looking for.
Dockerfile
FROM mongo
RUN mkdir /config
WORKDIR /config
COPY wait-for-it.sh .
COPY mongo-setup.js .
COPY mongo-setup.sh .
RUN chmod +x /config/wait-for-it.sh
RUN chmod +x /config/mongo-setup.sh
CMD [ "bash", "-c", "/config/wait-for-it.sh mongodb1:27011 -- /config/mongo-setup.sh"]
docker-compose.yml
version: "3"
services:
mongodb1:
container_name: mongo1
image: mongo
restart: always
volumes:
- ./volumes/mongodb1:/data/db
ports:
- "27011:27011"
expose:
- "27011"
entrypoint:
[
"/usr/bin/mongod",
"--port", "27011",
"--replSet", "rs0",
"--bind_ip_all",
]
mongodb2:
container_name: mongo2
image: mongo
restart: always
volumes:
- ./volumes/mongodb2:/data/db
ports:
- "27012:27012"
expose:
- "27012"
entrypoint:
[
"/usr/bin/mongod",
"--port", "27012",
"--replSet", "rs0",
"--bind_ip_all",
]
mongodb3:
container_name: mongo3
image: mongo
restart: always
volumes:
- ./volumes/mongodb3:/data/db
ports:
- "27013:27013"
expose:
- "27013"
entrypoint:
[
"/usr/bin/mongod",
"--port", "27013",
"--replSet", "rs0",
"--bind_ip_all",
]
mongosetup:
container_name: mongosetup
image: "mongo-setup"
build: "./mongo-setup"
depends_on:
- mongodb1
mongo-express:
container_name: mongo-express
image: mongo-express
environment:
ME_CONFIG_MONGODB_URL: mongodb://mongodb1:27011,mongodb2:27012,mongodb3:27013/?replicaSet=rs0
ports:
- 8081:8081
restart: always
depends_on:
- mongodb1
- mongosetup
mongo-setup.js
rsconf = {
  _id: "rs0",
  members: [
    {
      "_id": 0,
      "host": "mongodb1:27011",
      "priority": 3
    },
    {
      "_id": 1,
      "host": "mongodb2:27012",
      "priority": 2
    },
    {
      "_id": 2,
      "host": "mongodb3:27013",
      "priority": 1
    }
  ]
}
rs.initiate(rsconf);
mongo-setup.sh
#!/usr/bin/env bash
if [ ! -f /data/mongo-init.flag ]; then
  echo "Init replicaset"
  mongo mongodb://mongodb1:27011 mongo-setup.js
  touch /data/mongo-init.flag
else
  echo "Replicaset already initialized"
fi
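One caveat with this flag file, as an observation rather than part of the original answer: /data in the mongosetup container is not backed by a volume, so the flag vanishes whenever the container is recreated and the script runs again (mostly harmless, since re-running rs.initiate on an already-initiated set just returns an error). A hedged fix is to persist it, e.g.:

mongosetup:
  container_name: mongosetup
  image: "mongo-setup"
  build: "./mongo-setup"
  volumes:
    - ./volumes/mongosetup:/data   # keeps mongo-init.flag across container recreations
  depends_on:
    - mongodb1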