I have a docker-compose file that starts Kong and Konga services. Kong starts correctly, but Konga fails with the error below:
debug: Preparing database...
Using postgres DB Adapter.
Failed to connect to DB { error: password authentication failed for user "kong"
at Connection.parseE (/app/node_modules/sails-postgresql/node_modules/pg/lib/connection.js:539:11)
at Connection.parseMessage (/app/node_modules/sails-postgresql/node_modules/pg/lib/connection.js:366:17)
at Socket.<anonymous> (/app/node_modules/sails-postgresql/node_modules/pg/lib/connection.js:105:22)
at Socket.emit (events.js:198:13)
at addChunk (_stream_readable.js:288:12)
at readableAddChunk (_stream_readable.js:269:11)
at Socket.Readable.push (_stream_readable.js:224:10)
at TCP.onStreamRead [as onread] (internal/stream_base_commons.js:94:17)
name: 'error',
length: 100,
severity: 'FATAL',
code: '28P01',
detail: undefined,
hint: undefined,
position: undefined,
internalPosition: undefined,
internalQuery: undefined,
where: undefined,
schema: undefined,
table: undefined,
column: undefined,
dataType: undefined,
constraint: undefined,
file: 'auth.c',
line: '333',
routine: 'auth_failed' }
In my docker-compose file, the Kong service is taken from their GitHub page and runs fine. I believe the problem is with Konga. The Konga service is below:
version: '2.1'
volumes:
kong_data: {}
services:
db:
image: postgres:latest
environment:
POSTGRES_DB: ${KONG_PG_DATABASE:-kong}
POSTGRES_PASSWORD: ${KONG_PG_PASSWORD:-kong}
POSTGRES_USER: ${KONG_PG_USER:-kong}
healthcheck:
test: ["CMD", "pg_isready", "-U", "${KONG_PG_USER:-kong}"]
interval: 30s
timeout: 30s
retries: 3
restart: on-failure
stdin_open: true
tty: true
networks:
- kong_net
volumes:
- kong_data:/var/lib/postgresql/data
konga-prepare:
image: pantsel/konga:latest
command: "-c prepare -a postgres -u postgresql://kong@db:5432/konga_db"
networks:
- kong_net
restart: on-failure
links:
- db
depends_on:
- db
konga:
image: pantsel/konga:latest
restart: always
networks:
- kong_net
environment:
DB_ADAPTER: postgres
DB_HOST: db
DB_PORT: 5432
DB_USER: konga
DB_PASSWORD:
TOKEN_SECRET: km1GUr4RkcQD7DewhJPNXrCuZwcKmqjb
DB_DATABASE: konga_db
NODE_ENV: production
depends_on:
- db
ports:
- "1337:1337"
How do I solve the authentication error?
I had the same issue. I solved it by adding the password to the konga-prepare command:
docker run --rm --network=kong-net pantsel/konga:latest -c prepare -a postgres -u postgresql://kong:kong@kong-database:5432/konga_database
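In compose form the same fix is simply including the password in the connection URI of the konga-prepare service. A minimal sketch, assuming the default kong/kong credentials of the db service above and the konga_db database name from the question:

konga-prepare:
  image: pantsel/konga:latest
  command: "-c prepare -a postgres -u postgresql://kong:kong@db:5432/konga_db"
  networks:
    - kong_net
  restart: on-failure
  depends_on:
    - db

The konga service's DB_USER / DB_PASSWORD must likewise point at an existing Postgres role, since those are the credentials the dashboard uses at runtime.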
Related
I wrote the following docker-compose file:
version: '3.7'
services:
mongo-db-showcase-db:
image: mongo:latest
restart: always
environment:
MONGO_INITDB_ROOT_USERNAME: '${DB_USER}'
MONGO_INITDB_ROOT_PASSWORD: '${DB_PASSWORD}'
ports:
- '27018:27017'
frontend-showcase:
build:
context: showcase
dockerfile: Dockerfile-frontend-showcase
image: showcase-frontend-showcase-image:latest
ports:
- '80:80'
backend-showcase:
build:
context: showcase
dockerfile: Dockerfile-backend-showcase
image: showcase-backend-showcase-image:latest
environment:
DATABASE_URL: 'mongodb://${DB_USER}:${DB_PASSWORD}@mongo-db-showcase-db:27018/'
ports:
- '3000:3000'
links:
- mongo-db-showcase-db
mongo-db-admin-manager-db:
image: mongo:latest
restart: always
environment:
MONGO_INITDB_ROOT_USERNAME: '${DB_USER}'
MONGO_INITDB_ROOT_PASSWORD: '${DB_PASSWORD}'
ports:
- '27017:27017'
frontend-manager:
build:
context: manager-app
dockerfile: Dockerfile-frontend-manager
image: manager-frontend-manager-image:latest
ports:
- '8080:80'
backend-manager:
build:
context: manager-app
dockerfile: Dockerfile-backend-manager
image: manager-backend-manager-image:latest
environment:
DATABASE_URL: 'mongodb://${DB_USER}:${DB_PASSWORD}@mongo-db-admin-manager-db:27017/'
ports:
- '8000:8000'
links:
- mongo-db-admin-manager-db
Here I have two MongoDB containers: one reachable on port 27017, the other supposed to be reachable on port 27018. The service connecting to the db on port 27017 connects fine; the other doesn't. The error I get is:
MongooseServerSelectionError: connect ECONNREFUSED 172.20.0.6:27018
at NativeConnection.Connection.openUri (/app/node_modules/mongoose/lib/connection.js:807:32)
at /app/node_modules/mongoose/lib/index.js:342:10
at /app/node_modules/mongoose/lib/helpers/promiseOrCallback.js:32:5
at new Promise (<anonymous>)
at promiseOrCallback (/app/node_modules/mongoose/lib/helpers/promiseOrCallback.js:31:10)
at Mongoose._promiseOrCallback (/app/node_modules/mongoose/lib/index.js:1176:10)
at Mongoose.connect (/app/node_modules/mongoose/lib/index.js:341:20)
at Object.<anonymous> (/app/backend/app.js:26:10)
at Module._compile (internal/modules/cjs/loader.js:999:30)
at Object.Module._extensions..js (internal/modules/cjs/loader.js:1027:10) {
reason: TopologyDescription {
type: 'Unknown',
servers: Map { 'mongo-db-showcase-db:27018' => [ServerDescription] },
stale: false,
compatible: true,
heartbeatFrequencyMS: 10000,
localThresholdMS: 15,
logicalSessionTimeoutMinutes: undefined
}
}
172.20.0.6 is the address of the container.
What could be the problem? Any suggestion is really appreciated.
Thanks in advance to anyone willing to answer.
The port in your connection string should be 27017, because all your containers run inside an internal network; 27018 is the port you forwarded, and it can only be used to connect from outside.
DATABASE_URL: 'mongodb://${DB_USER}:${DB_PASSWORD}@mongo-db-showcase-db:27017/'
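To spell out the assumption behind this: the left-hand side of a ports mapping is the host port, the right-hand side is the container port, and containers on the same compose network always talk to each other on the container port. Roughly:

mongo-db-showcase-db:
  image: mongo:latest
  ports:
    - '27018:27017'          # <host port>:<container port>
# from the host machine:     mongodb://localhost:27018/
# from another container:    mongodb://mongo-db-showcase-db:27017/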
I'm running a Postgres DB with pgAdmin and Go via docker-compose.
Problem: I can connect from pgAdmin to Postgres, but I cannot establish a connection from Go.
I tried different combinations of the connection string, but it does not work. The string format is the same as here https://github.com/karlkeefer/pngr - but with a different container name - database
(ERROR) Connection URL:
backend_1 | 2021/08/08 14:24:40 DB connection: database://main:fugZwypczB94m0LP7CcH@postgres:5432/temp_db?sslmode=disable
backend_1 | 2021/08/08 14:24:40 Unalble to open DB connection: dial tcp 127.0.0.1:5432: connect: connection refused
(URI generation same as here https://github.com/karlkeefer/pngr)
Docker:
version: '3.8'
services:
backend:
restart: always
build:
context: backend
target: dev
volumes:
- ./backend:/root
ports:
- "5000:5000"
env_file: .env
depends_on:
- database
database:
build: database
restart: always
environment:
POSTGRES_DB: ${POSTGRES_DB}
POSTGRES_USER: ${POSTGRES_USER}
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
PGDATA: /var/lib/postgresql/data
volumes:
- ./database/data:/var/lib/postgresql/data
- ./logs/databse:/var/log/postgresql
- ./database/migrations:/docker-entrypoint-initdb.d/migrations
ports:
- "5432:5432"
database-admin:
image: dpage/pgadmin4:5.5
restart: always
environment:
PGADMIN_DEFAULT_EMAIL: ${PG_ADMIN_EMAIL}
PGADMIN_DEFAULT_PASSWORD: ${PG_ADMIN_PASSWORD}
PGADMIN_LISTEN_PORT: 80
ports:
- "8080:80"
volumes:
- ./database/admin:/var/lib/pgadmin
links:
- "database:pgsql-server"
depends_on:
- database
volumes:
database:
database-admin:
Environment:
POSTGRES_HOST=postgres
POSTGRES_PORT=5432
POSTGRES_DB=temp_db
POSTGRES_USER=main
POSTGRES_PASSWORD=fugZwypczB94m0LP7CcH
PG_ADMIN_EMAIL=admin@temp.com
PG_ADMIN_PASSWORD=ayzi2ta8f1TnX3vKQSN1
PG_ADMIN_PORT=80
Go code:
db, err = sqlx.Open("postgres", str)
where str is built by:
func buildConnectionString() string {
user := os.Getenv("POSTGRES_USER")
pass := os.Getenv("POSTGRES_PASSWORD")
if user == "" || pass == "" {
log.Fatalln("You must include POSTGRES_USER and POSTGRES_PASSWORD environment variables")
}
host := os.Getenv("POSTGRES_HOST")
port := os.Getenv("POSTGRES_PORT")
dbname := os.Getenv("POSTGRES_DB")
if host == "" || port == "" || dbname == "" {
log.Fatalln("You must include POSTGRES_HOST, POSTGRES_PORT, and POSTGRES_DB environment variables")
}
str := fmt.Sprintf("database://%s:%s@%s:%s/%s?sslmode=disable", user, pass, host, port, dbname)
log.Println("DB connection: " + str)
return str
}
Thanks in advance!
You reference the database host as postgres (POSTGRES_HOST=postgres), which is fine, but the container/service name is database.
Either change the name in your compose.yaml from database to postgres or add an explicit hostname field:
database:
build: database
restart: always
hostname: postgres # <- add this
You may also want to add a dedicated network so that selected container services can talk to one another (and others cannot). To do this, add the following to each service you want on that network, e.g.
database:
# ...
networks:
- mynet
backend:
# ...
networks:
- mynet
and define the network at the end of your compose.yaml:
networks:
mynet:
name: my-shared-db-network
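Putting both suggestions together, the relevant parts of the compose file might look like the sketch below (service names are taken from the question; mynet and my-shared-db-network are just placeholder names):

services:
  backend:
    # ... existing settings ...
    networks:
      - mynet
  database:
    build: database
    restart: always
    hostname: postgres     # matches POSTGRES_HOST=postgres
    networks:
      - mynet
networks:
  mynet:
    name: my-shared-db-network

Keep in mind that as soon as a service declares an explicit networks: section it is detached from the compose default network, so any other service that needs to reach the database by name (database-admin, for instance) has to join mynet as well.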
I am trying to set up a replica set, but I'm having a problem with the initialisation.
The FIRST time I run it I get:
db_1 | uncaught exception: Error: couldn't add user: not master
And each time after that:
db_1 | {"t":{"$date":"2020-09-16T16:06:05.341+00:00"},"s":"I", "c":"ACCESS", "id":20249, "ctx":"conn1","msg":"Authentication failed","attr":{"mechanism":"SCRAM-SHA-256","principalName":"user","authenticationDatabase":"admin","client":"172.18.0.5:37916","result":"UserNotFound: Could not find user \"user\" for db \"admin\""}}
db_1 | {"t":{"$date":"2020-09-16T16:06:05.342+00:00"},"s":"I", "c":"ACCESS", "id":20249, "ctx":"conn1","msg":"Authentication failed","attr":{"mechanism":"SCRAM-SHA-1","principalName":"user","authenticationDatabase":"admin","client":"172.18.0.5:37916","result":"UserNotFound: Could not find user \"user\" for db \"admin\""}}
db_1 | {"t":{"$date":"2020-09-16T16:06:05.349+00:00"},"s":"I", "c":"NETWORK", "id":22944, "ctx":"conn1","msg":"connection ended","attr":{"remote":"172.18.0.5:37916","connectionCount":0}}
setup_1 | Error: Authentication failed. :
setup_1 | connect#src/mongo/shell/mongo.js:362:17
setup_1 | #(connect):2:6
setup_1 | exception: connect failed
setup_1 | exiting with code 1
docker_setup_1 exited with code 1
My setup is:
/ docker-compose.yml
version: "3"
services:
db:
image: db
build:
context: .
dockerfile: DockerfileDb
environment:
MONGO_INITDB_ROOT_USERNAME: user
MONGO_INITDB_ROOT_PASSWORD: xxxx
ports:
- "34000:34000"
volumes:
- mongodata:/data/db
- ./mongologs:/data/logs
db2:
image: db
ports:
- "34001:34000"
volumes:
- mongodata2:/data/db
db3:
image: db
ports:
- "34002:34000"
volumes:
- mongodata3:/data/db
setup:
image: setup
build: ./replicaSetup
depends_on:
- db
- db2
- db3
links:
- "db:database"
- "db2:database2"
- "db3:database3"
volumes:
mongodata:
mongodata2:
mongodata3:
/ DockerfileDb
FROM mongo
WORKDIR /usr/src/config
COPY replicaSetup/mongod.conf .
COPY replicaSetup/shared.key .
EXPOSE 34000
RUN chmod 700 shared.key
RUN chown 999:999 shared.key
CMD ["--config", "./mongod.conf"]
/ replicaSetup / mongod.conf
net:
port: 34000
bindIpAll : true
security:
authorization: enabled
keyFile: ./shared.key
replication:
oplogSizeMB: 1024
replSetName: amsdb
/ replicaSetup / Dockerfile
FROM mongo
# Create app directory
WORKDIR /usr/src/configs
# Install app dependencies
COPY replicaSet.js .
COPY setup.sh .
CMD ["./setup.sh"]
/ replicaSetup / setup.sh
sleep 10 | echo Sleeping
mongo mongodb://database:34000 -u "user" -p "xxxx" replicaSet.js
/ replicaSetup / replicaSet.js
rsconf = {
_id : "amsdb",
members: [
{ _id : 0, host : "database:34000"},
{ _id : 1, host : "database2:34001" },
{ _id : 2, host : "database3:34002" }
]
}
rs.initiate(rsconf);
rs.conf();
Thanks for any help!
You can do this by just using the base mongo image in docker-compose.
Your setup should look like:
/ docker-compose.yml
version: "3.0"
services:
# Worker 1
mongo1:
image: mongo:latest
volumes:
- ./replicaSetup:/opt/keyfile
- mongodata:/data/db
environment:
MONGO_INITDB_ROOT_USERNAME: user
MONGO_INITDB_ROOT_PASSWORD: xxxx
ports:
- 27017:27017
command: 'mongod --auth --keyFile /opt/keyfile/shared.key --replSet amsdb'
# Worker 2
mongo2:
image: mongo:latest
volumes:
- ./replicaSetup:/opt/keyfile
- mongodata2:/data/db
environment:
MONGO_INITDB_ROOT_USERNAME: user
MONGO_INITDB_ROOT_PASSWORD: xxxx
ports:
- 27018:27017
command: 'mongod --auth --keyFile /opt/keyfile/shared.key --replSet amsdb'
# Worker 3
mongo3:
image: mongo:latest
volumes:
- ./replicaSetup:/opt/keyfile
- mongodata3:/data/db
environment:
MONGO_INITDB_ROOT_USERNAME: user
MONGO_INITDB_ROOT_PASSWORD: xxxx
ports:
- 27019:27017
command: 'mongod --auth --keyFile /opt/keyfile/shared.key --replSet amsdb'
volumes:
mongodata:
mongodata2:
mongodata3:
/ replicaSetup / Dockerfile - stays the same
/ replicaSetup / setup.sh - stays the same
/ replicaSetup / replicaSet.js
rsconf = {
_id : "amsdb",
members: [
{ _id : 0, host : "172.17.0.1:27017", priority:1 },
{ _id : 1, host : "172.17.0.1:27018", priority:1 },
{ _id : 2, host : "172.17.0.1:27019", priority:1 }
]
}
rs.initiate(rsconf);
rs.conf();
At the time of writing, "mongo:latest" resolves to v4.4.1. This answer applies to that version of the entrypoint script: https://github.com/docker-library/mongo/blob/master/4.4/docker-entrypoint.sh
In order to process the MONGO_INITDB_ROOT_* environment variables and add the user to the database, the database has to be started in standalone mode. It appears that the current implementation does not pick up the replica set configuration from the .conf file, only from command-line arguments.
Either pass the arguments via the command ("--bind_ip_all --replSet amsdb --port 34000 ... etc"), as the compose file above does, or create a PR for docker-entrypoint.sh to add support for the .conf file.
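For the first option, a rough sketch of what this could look like for the original setup, translating the options from mongod.conf into mongod flags (paths and names come from the question; treat it as untested):

db:
  image: db
  build:
    context: .
    dockerfile: DockerfileDb
  environment:
    MONGO_INITDB_ROOT_USERNAME: user
    MONGO_INITDB_ROOT_PASSWORD: xxxx
  command: "--auth --bind_ip_all --port 34000 --replSet amsdb --keyFile /usr/src/config/shared.key --oplogSize 1024"
  ports:
    - "34000:34000"
  volumes:
    - mongodata:/data/db
    - ./mongologs:/data/logs

db2 and db3 reuse the same image, so they would need the same command: override (or the Dockerfile CMD changed accordingly); this mirrors what the compose file in this answer does by putting --replSet and --keyFile on the command line.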
I am trying to connect to a Postgres DB image using Knex, but I am getting the error:
error: role "admin" does not exist
My knexfile is as below:
module.exports = {
development: {
debug: true,
client: 'pg',
connection: {
database: process.env.POSTGRES_DB,
user: process.env.POSTGRES_USER,
password: process.env.POSTGRES_PASSWORD
},
migrations: {
directory: __dirname + '/src/migrations'
},
seeds: {
directory: __dirname + '/src/seeds'
}
}
};
And my docker-compose file is:
version: '3.1'
services:
db:
image: postgres
restart: always
volumes:
- ./docker-data/db-data:/var/lib/postgresql/data
environment:
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD}
POSTGRESS_USER: ${POSTGRES_USER}
POSTGRESS_DB: ${POSTGRES_DB}
ports:
- 5432:5432
pgadmin:
depends_on:
- db
image: dpage/pgadmin4
restart: always
environment:
PGADMIN_DEFAULT_EMAIL: ${PGADMIN_DEFAULT_EMAIL}
PGADMIN_DEFAULT_PASSWORD: ${PGADMIN_DEFAULT_PASSWORD}
volumes:
- ./docker-data/pgadmin-data:/root/.pgadmin
ports:
- 8080:80
The environment variables are straightforward:
POSTGRES_USER=admin
POSTGRES_PASSWORD=admin
POSTGRES_DB=configs
PGADMIN_DEFAULT_EMAIL=admin@admin.com
PGADMIN_DEFAULT_PASSWORD=admin
Packages details:
"knex": "^0.21.1",
"pg": "^8.3.0"
I ran docker-compose up and, once it completed, I created migrations and then ran npx knex migrate:latest. This results in the error:
error: role "admin" does not exist
at Parser.parseErrorMessage (/Users/test/packages/config-server/node_modules/pg-protocol/dist/parser.js:278:15)
I am not able to debug and fix it. Please help.
I'm getting the following error when dockerizing a Node/Postgres app that uses Sequelize as the ORM backend:
Unhandled rejection SequelizeConnectionRefusedError: connect
ECONNREFUSED 127.0.0.1:5432 app_1 | at
connection.connect.err
(/home/app/node_modules/sequelize/lib/dialects/postgres/connection-manager.js:170:24)
These lines of code seem to be the culprit; Docker should not be using these credentials, as they are meant for my local setup.
if (process.env.NODE_ENV === "production") {
var sequelize = new Sequelize(process.env.DATABASE_URL);
} else {
// docker is looking at these credentials..... when it should not
var sequelize = new Sequelize("elifullstack", "eli", "", {
host: "127.0.0.1",
dialect: "postgres",
pool: {
max: 100,
min: 0,
idle: 200000,
// #note https://github.com/sequelize/sequelize/issues/8133#issuecomment-359993057
acquire: 1000000,
},
});
}
docker-compose.yml
# docker-compose.yml
version: "3"
services:
app:
build: ./server
depends_on:
- database
ports:
- 5000:5000
environment:
# database refers to the database server at the bottom called "database"
- PSQL_HOST=database
- PSQL_USER=postgres
- PORT=5000
- PSQL_NAME=elitypescript
command: npm run server
client:
build: ./client
image: react_client
links:
- app
working_dir: /home/node/app/client
volumes:
- ./:/home/node/app
ports:
- 3001:3001
command: npm run start
env_file:
- ./client/.env
database:
image: postgres:9.6.8-alpine
volumes:
- database:/var/lib/postgresql/data
ports:
- 3030:5432
volumes:
database:
./server/dockerFile
FROM node:10.6.0
COPY . /home/app
WORKDIR /home/app
COPY package.json ./
RUN npm install
EXPOSE 5000
I looked at other similar questions like the following, but they ultimately did not help solve the issue:
Docker - SequelizeConnectionRefusedError: connect ECONNREFUSED 127.0.0.1:3306
SequelizeConnectionRefusedError: connect ECONNREFUSED 127.0.0.1:3306
I solved it...
What I did was change this
host: "127.0.0.1",
to this
let sequelize;
if (process.env.NODE_ENV === "production") {
sequelize = new Sequelize(process.env.DATABASE_URL);
} else {
sequelize = new Sequelize(
process.env.POSTGRES_DB || "elitypescript",
process.env.POSTGRES_USER || "eli",
"",
{
host: process.env.PSQL_HOST || "localhost",
dialect: "postgres",
pool: {
max: 100,
min: 0,
idle: 200000,
// #note https://github.com/sequelize/sequelize/issues/8133#issuecomment-359993057
acquire: 1000000,
},
}
);
}
That way the host is set from the Docker environment variable, like this:
PSQL_HOST: database
and that connects to:
database:
image: postgres:9.6.8-alpine
volumes:
- database:/var/lib/postgresql/data
ports:
- 3030:5432
Edit
# docker-compose.yml
version: "3"
services:
app:
build: ./server
depends_on:
- database
ports:
- 5000:5000
environment:
PSQL_HOST: database
POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-password}
POSTGRES_USER: ${POSTGRES_USER:-postgres}
POSTGRES_DB: ${POSTGRES_DB:-elitypescript}
command: npm run server
client:
build: ./client
image: react_client
links:
- app
working_dir: /home/node/app/client
volumes:
- ./:/home/node/app
ports:
- 3001:3001
command: npm run start
env_file:
- ./client/.env
database:
image: postgres:9.6.8-alpine
volumes:
- database:/var/lib/postgresql/data
ports:
- 3030:5432
volumes:
database:
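One more detail worth noting: inside the app container, 127.0.0.1 refers to the app container itself, so Postgres has to be reached through its service name on the container-side port; the 3030:5432 mapping only matters for clients on the host. Roughly:

database:
  image: postgres:9.6.8-alpine
  ports:
    - 3030:5432            # <host port>:<container port>
# from the host machine:   localhost:3030
# from the app container:  database:5432 (which is why 127.0.0.1:5432 was refused)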