I deployed my application on Kubernetes but have been getting this error:
**MountVolume.SetUp failed for volume "airflow-volume" : mount failed: mount failed: exit status 32 Mounting command: systemd-run Mounting arguments: --description=Kubernetes transient mount for /var/lib/kubelet/pods/4a3c3d0b-b7e8-49bc-8a78-5a8bdc932eca/volumes/kubernetes.io~glusterfs/airflow-volume --scope -- mount -t glusterfs -o auto_unmount,backup-volfile-servers=10.0.2.107:10.0.2.24,log-file=/var/lib/kubelet/plugins/kubernetes.io/glusterfs/airflow-volume/worker-844c9db787-vprt8-glusterfs.log,log-level=ERROR 10.0.2.107:/airflow /var/lib/kubelet/pods/4a3c3d0b-b7e8-49bc-8a78-5a8bdc932eca/volumes/kubernetes.io~glusterfs/airflow-volume Output: Running scope as unit run-22059.scope. mount: /var/lib/kubelet/pods/4a3c3d0b-b7e8-49bc-8a78-5a8bdc932eca/volumes/kubernetes.io~glusterfs/airflow-volume: unknown filesystem type 'glusterfs'. , the following error information was pulled from the glusterfs log to help diagnose this issue: could not open log file for pod worker-844c9db787-vprt8**
AND
**Unable to attach or mount volumes: unmounted volumes=[airflow-volume], unattached volumes=[airflow-volume default-token-s6pvd]: timed out waiting for the condition**
Any suggestions?
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: web
namespace: airflow
spec:
replicas: 1
selector:
matchLabels:
tier: web
template:
metadata:
labels:
app: airflow
tier: web
spec:
imagePullSecrets:
- name: peeriqregistrykey
restartPolicy: Always
containers:
# Airflow Webserver Container
- name: web
image: peeriq/data_availability_service:airflow-metadata-cutover
volumeMounts:
- mountPath: /usr/local/airflow
name: airflow-volume
envFrom:
- configMapRef:
name: airflow-config
env:
- name: VAULT_ADDR
valueFrom:
secretKeyRef:
name: vault-credentials
key: VAULT_ADDR
- name: VAULT_TOKEN
valueFrom:
secretKeyRef:
name: vault-credentials
key: VAULT_TOKEN
- name: DJANGO_AUTH_USER
valueFrom:
secretKeyRef:
name: django-auth
key: DJANGO_AUTH_USER
- name: DJANGO_AUTH_PASS
valueFrom:
secretKeyRef:
name: django-auth
key: DJANGO_AUTH_PASS
- name: FERNET_KEY
valueFrom:
secretKeyRef:
name: airflow-secrets
key: FERNET_KEY
- name: POSTGRES_SERVICE_HOST
valueFrom:
secretKeyRef:
name: rds-postgres
key: POSTGRES_SERVICE_HOST
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: rds-postgres
key: POSTGRES_PASSWORD
ports:
- name: web
containerPort: 8080
args: ["webserver"]
# Airflow Scheduler Container
- name: scheduler
image: peeriq/data_availability_service:airflow-metadata-cutover
volumeMounts:
- mountPath: /usr/local/airflow
name: airflow-volume
envFrom:
- configMapRef:
name: airflow-config
env:
- name: AWS_DEFAULT_REGION
value: us-east-1
- name: ETL_AWS_ACCOUNT_NUMBER
valueFrom:
secretKeyRef:
name: aws-creds
key: ETL_AWS_ACCOUNT_NUMBER
- name: VAULT_ADDR
valueFrom:
secretKeyRef:
name: vault-credentials
key: VAULT_ADDR
- name: VAULT_TOKEN
valueFrom:
secretKeyRef:
name: vault-credentials
key: VAULT_TOKEN
- name: DJANGO_AUTH_USER
valueFrom:
secretKeyRef:
name: django-auth
key: DJANGO_AUTH_USER
- name: DJANGO_AUTH_PASS
valueFrom:
secretKeyRef:
name: django-auth
key: DJANGO_AUTH_PASS
- name: FERNET_KEY
valueFrom:
secretKeyRef:
name: airflow-secrets
key: FERNET_KEY
- name: POSTGRES_SERVICE_HOST
valueFrom:
secretKeyRef:
name: rds-postgres
key: POSTGRES_SERVICE_HOST
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: rds-postgres
key: POSTGRES_PASSWORD
args: ["scheduler"]
volumes:
- name: airflow-volume
# This GlusterFS volume must already exist.
glusterfs:
endpoints: glusterfs-cluster
path: /airflow
readOnly: false
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: flower
namespace: airflow
spec:
replicas: 1
selector:
matchLabels:
tier: flower
template:
metadata:
labels:
app: airflow
tier: flower
spec:
imagePullSecrets:
- name: peeriqregistrykey
restartPolicy: Always
containers:
- name: flower
image: peeriq/data_availability_service:airflow-metadata-cutover
volumeMounts:
- mountPath: /usr/local/airflow
name: airflow-volume
envFrom:
- configMapRef:
name: airflow-config
env:
# To prevent the error: ValueError: invalid literal for int() with base 10: 'tcp://10.0.0.83:5555'
- name: FLOWER_PORT
value: "5555"
- name: DJANGO_AUTH_USER
valueFrom:
secretKeyRef:
name: django-auth
key: DJANGO_AUTH_USER
- name: DJANGO_AUTH_PASS
valueFrom:
secretKeyRef:
name: django-auth
key: DJANGO_AUTH_PASS
- name: POSTGRES_SERVICE_HOST
valueFrom:
secretKeyRef:
name: rds-postgres
key: POSTGRES_SERVICE_HOST
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: rds-postgres
key: POSTGRES_PASSWORD
ports:
- name: flower
containerPort: 5555
args: ["flower"]
volumes:
- name: airflow-volume
# This GlusterFS volume must already exist.
glusterfs:
endpoints: glusterfs-cluster
path: /airflow
readOnly: false
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: worker
namespace: airflow
spec:
replicas: 1
selector:
matchLabels:
tier: worker
template:
metadata:
labels:
app: airflow
tier: worker
spec:
imagePullSecrets:
- name: peeriqregistrykey
restartPolicy: Always
containers:
- name: worker
image: peeriq/data_availability_service:airflow-metadata-cutover
volumeMounts:
- mountPath: /usr/local/airflow
name: airflow-volume
envFrom:
- configMapRef:
name: airflow-config
env:
- name: AWS_DEFAULT_REGION
value: us-east-1
- name: ETL_AWS_ACCOUNT_NUMBER
valueFrom:
secretKeyRef:
name: aws-creds
key: ETL_AWS_ACCOUNT_NUMBER
- name: VAULT_ADDR
valueFrom:
secretKeyRef:
name: vault-credentials
key: VAULT_ADDR
- name: VAULT_TOKEN
valueFrom:
secretKeyRef:
name: vault-credentials
key: VAULT_TOKEN
- name: DJANGO_AUTH_USER
valueFrom:
secretKeyRef:
name: django-auth
key: DJANGO_AUTH_USER
- name: DJANGO_AUTH_PASS
valueFrom:
secretKeyRef:
name: django-auth
key: DJANGO_AUTH_PASS
- name: FERNET_KEY
valueFrom:
secretKeyRef:
name: airflow-secrets
key: FERNET_KEY
- name: POSTGRES_SERVICE_HOST
valueFrom:
secretKeyRef:
name: rds-postgres
key: POSTGRES_SERVICE_HOST
- name: POSTGRES_PASSWORD
valueFrom:
secretKeyRef:
name: rds-postgres
key: POSTGRES_PASSWORD
args: ["worker"]
volumes:
- name: airflow-volume
# This GlusterFS volume must already exist.
glusterfs:
endpoints: glusterfs-cluster
path: /airflow
readOnly: false
You must install the glusterfs-fuse package on your Kubernetes nodes; otherwise the kubelet cannot mount GlusterFS volumes.
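For example, on each node (exact package names vary by distribution; these are the common ones, so verify against your OS):
# RHEL/CentOS nodes
sudo yum install -y glusterfs-fuse
# Debian/Ubuntu nodes (the FUSE client is packaged as glusterfs-client)
sudo apt-get install -y glusterfs-client
# the mount helper this provides is what makes "-t glusterfs" work;
# its absence causes "unknown filesystem type 'glusterfs'"
ls -l /sbin/mount.glusterfs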
The unknown filesystem type 'glusterfs' part of the message can also mean that something is wrong with your volume definition, or with a storage class if you use one. But this is a guess.
I had the same error, and in my cluster the reason was that the NFS server was unavailable. After starting the NFS server, the problem was solved.
I have a project where we consume data from Kafka and publish to Mongo. The code base performs only one task at a time: it may be a Mongo-to-Kafka migration, a Kafka-to-Mongo migration, or something else.
We have to consume from different Kafka topics and publish to different Mongo collections. These are parallel streams of work.
The current design is one codebase that can consume from any topic and publish to any Mongo collection, configurable via environment variables. So we created one Kubernetes Pod with multiple containers inside it; each container has different environment variables.
My questions:
Is it wise to use multiple containers in one pod? It is easy to distinguish them, but since they are tightly coupled, I am guessing there is a high chance of failure, and it is not really proper microservice design.
Should I create a separate deployment for each of these pipelines? That would be very difficult to maintain, as each will have a different deployment config.
Is there any better way to address this ?
Sample of step 1:
apiVersion: apps/v1
kind: Deployment
metadata:
annotations: {}
name: test-raw-mongodb-sink-apps
namespace: test-apps
spec:
selector:
matchLabels:
app: test-raw-mongodb-sink-apps
template:
metadata:
labels:
app: test-raw-mongodb-sink-apps
spec:
containers:
- env:
- name: EVENTS_TOPIC
value: test.ops.proc-events
- name: GROUP_ID
value: test-mongodb-sink-alchemy
- name: INPUT_TOPIC
value: test.raw.ptv.alchemy
- name: MONGODB_AUTH_DB
value: admin
- name: MONGODB_HOST0
value: test-mongodb-0.test-mongodb-headless.test-infra
- name: MONGODB_HOST1
value: test-mongodb-1.test-mongodb-headless.test-infra
- name: MONGODB_PORT
value: "27017"
- name: MONGODB_PASSWORD
value: test123
- name: MONGODB_USERNAME
value: root
- name: SERVER_PORT
value: "8081"
- name: KAFKA_BROKERS
value: kafka-cluster-kafka-bootstrap.kafka:9093
- name: TRUSTSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: ca.password
name: kafka-ca-cert
- name: KEYSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: user.password
name: kafka
image: tools.testCompany.co.za:8093/dpl/tt--mongodb-map:0.0.7.0-SNAPSHOT
name: test-mongodb-sink-alchemy
securityContext:
allowPrivilegeEscalation: true
privileged: true
volumeMounts:
- mountPath: /app/resources
name: properties
- mountPath: /stores
name: stores
readOnly: true
- env:
- name: EVENTS_TOPIC
value: test.ops.proc-events
- name: GROUP_ID
value: test-mongodb-sink-bloomberg
- name: INPUT_TOPIC
value: test.raw.pretrade.bloomberg
- name: MONGODB_AUTH_DB
value: admin
- name: MONGODB_HOST0
value: test-mongodb-0.test-mongodb-headless.test-infra
- name: MONGODB_HOST1
value: test-mongodb-1.test-mongodb-headless.test-infra
- name: MONGODB_PASSWORD
value: test123
- name: MONGODB_PORT
value: "27017"
- name: MONGODB_USERNAME
value: root
- name: SERVER_PORT
value: "8082"
- name: KAFKA_BROKERS
value: kafka-cluster-kafka-bootstrap.kafka:9093
- name: TRUSTSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: ca.password
name: kafka-ca-cert
- name: KEYSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: user.password
name: kafka
image: tools.testCompany.co.za:8093/local/tt--mongodb-map:0.0.7.0-SNAPSHOT
name: test-mongodb-sink-bloomberg
securityContext:
allowPrivilegeEscalation: true
privileged: true
volumeMounts:
- mountPath: /app/resources
name: properties
- mountPath: /stores
name: stores
readOnly: true
- env:
- name: EVENTS_TOPIC
value: test.ops.proc-events
- name: GROUP_ID
value: test-mongodb-sink-calypso
- name: INPUT_TOPIC
value: test.raw.ptv.calypso
- name: MONGODB_AUTH_DB
value: admin
- name: MONGODB_HOST0
value: test-mongodb-0.test-mongodb-headless.test-infra
- name: MONGODB_HOST1
value: test-mongodb-1.test-mongodb-headless.test-infra
- name: MONGODB_PASSWORD
value: test123
- name: MONGODB_PORT
value: "27017"
- name: MONGODB_USERNAME
value: root
- name: SERVER_PORT
value: "8083"
- name: KAFKA_BROKERS
value: kafka-cluster-kafka-bootstrap.kafka:9093
- name: TRUSTSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: ca.password
name: kafka-ca-cert
- name: KEYSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: user.password
name: kafka
image: tools.testCompany.co.za:8093/local/tt--mongodb-map:0.0.7.0-SNAPSHOT
name: test-mongodb-sink-calypso
securityContext:
allowPrivilegeEscalation: true
privileged: true
volumeMounts:
- mountPath: /app/resources
name: properties
- mountPath: /stores
name: stores
readOnly: true
- env:
- name: EVENTS_TOPIC
value: test.ops.proc-events
- name: GROUP_ID
value: test-mongodb-sink-dtres
- name: INPUT_TOPIC
value: test.raw.ptv.dtres
- name: MONGODB_AUTH_DB
value: admin
- name: MONGODB_HOST0
value: test-mongodb-0.test-mongodb-headless.test-infra
- name: MONGODB_HOST1
value: test-mongodb-1.test-mongodb-headless.test-infra
- name: MONGODB_PASSWORD
value: test123
- name: MONGODB_PORT
value: "27017"
- name: MONGODB_USERNAME
value: root
- name: SERVER_PORT
value: "8084"
- name: KAFKA_BROKERS
value: kafka-cluster-kafka-bootstrap.kafka:9093
- name: TRUSTSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: ca.password
name: kafka-ca-cert
- name: KEYSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: user.password
name: kafka
image: tools.testCompany.co.za:8093/local/tt--mongodb-map:0.0.7.0-SNAPSHOT
name: test-mongodb-sink-dtres
securityContext:
allowPrivilegeEscalation: true
privileged: true
volumeMounts:
- mountPath: /app/resources
name: properties
- mountPath: /stores
name: stores
readOnly: true
- env:
- name: EVENTS_TOPIC
value: test.ops.proc-events
- name: GROUP_ID
value: test-mongodb-sink-feds
- name: INPUT_TOPIC
value: test.raw.ptv.feds
- name: MONGODB_AUTH_DB
value: admin
- name: MONGODB_HOST0
value: test-mongodb-0.test-mongodb-headless.test-infra
- name: MONGODB_HOST1
value: test-mongodb-1.test-mongodb-headless.test-infra
- name: MONGODB_PASSWORD
value: test123
- name: MONGODB_PORT
value: "27017"
- name: MONGODB_USERNAME
value: root
- name: SERVER_PORT
value: "8085"
- name: KAFKA_BROKERS
value: kafka-cluster-kafka-bootstrap.kafka:9093
- name: TRUSTSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: ca.password
name: kafka-ca-cert
- name: KEYSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: user.password
name: kafka
image: tools.testCompany.co.za:8093/local/tt--mongodb-map:0.0.7.0-SNAPSHOT
name: test-mongodb-sink-feds
securityContext:
allowPrivilegeEscalation: true
privileged: true
volumeMounts:
- mountPath: /app/resources
name: properties
- mountPath: /stores
name: stores
readOnly: true
- env:
- name: EVENTS_TOPIC
value: test.ops.proc-events
- name: GROUP_ID
value: test-mongodb-sink-hoops
- name: INPUT_TOPIC
value: test.raw.ptv.hoops
- name: MONGODB_AUTH_DB
value: admin
- name: MONGODB_HOST0
value: test-mongodb-0.test-mongodb-headless.test-infra
- name: MONGODB_HOST1
value: test-mongodb-1.test-mongodb-headless.test-infra
- name: MONGODB_PASSWORD
value: test123
- name: MONGODB_PORT
value: "27017"
- name: MONGODB_USERNAME
value: root
- name: SERVER_PORT
value: "8086"
- name: KAFKA_BROKERS
value: kafka-cluster-kafka-bootstrap.kafka:9093
- name: TRUSTSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: ca.password
name: kafka-ca-cert
- name: KEYSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: user.password
name: kafka
image: tools.testCompany.co.za:8093/local/tt--mongodb-map:0.0.7.0-SNAPSHOT
name: test-mongodb-sink-hoops
securityContext:
allowPrivilegeEscalation: true
privileged: true
volumeMounts:
- mountPath: /app/resources
name: properties
- mountPath: /stores
name: stores
readOnly: true
- env:
- name: EVENTS_TOPIC
value: test.ops.proc-events
- name: GROUP_ID
value: test-mongodb-sink-mxcore
- name: INPUT_TOPIC
value: test.raw.ptv.murex_core
- name: MONGODB_AUTH_DB
value: admin
- name: MONGODB_HOST0
value: test-mongodb-0.test-mongodb-headless.test-infra
- name: MONGODB_HOST1
value: test-mongodb-1.test-mongodb-headless.test-infra
- name: MONGODB_PASSWORD
value: test123
- name: MONGODB_PORT
value: "27017"
- name: MONGODB_USERNAME
value: root
- name: SERVER_PORT
value: "8087"
- name: KAFKA_BROKERS
value: kafka-cluster-kafka-bootstrap.kafka:9093
- name: TRUSTSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: ca.password
name: kafka-ca-cert
- name: KEYSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: user.password
name: kafka
image: tools.testCompany.co.za:8093/local/tt--mongodb-map:0.0.7.0-SNAPSHOT
name: test-mongodb-sink-mxcore
securityContext:
allowPrivilegeEscalation: true
privileged: true
volumeMounts:
- mountPath: /app/resources
name: properties
- mountPath: /stores
name: stores
readOnly: true
- env:
- name: EVENTS_TOPIC
value: test.ops.proc-events
- name: GROUP_ID
value: test-mongodb-sink-mxeqd
- name: INPUT_TOPIC
value: test.raw.ptv.murex_eqd_sa
- name: MONGODB_AUTH_DB
value: admin
- name: MONGODB_HOST0
value: test-mongodb-0.test-mongodb-headless.test-infra
- name: MONGODB_HOST1
value: test-mongodb-1.test-mongodb-headless.test-infra
- name: MONGODB_PASSWORD
value: test123
- name: MONGODB_PORT
value: "27017"
- name: MONGODB_USERNAME
value: root
- name: SERVER_PORT
value: "8088"
- name: KAFKA_BROKERS
value: kafka-cluster-kafka-bootstrap.kafka:9093
- name: TRUSTSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: ca.password
name: kafka-ca-cert
- name: KEYSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: user.password
name: kafka
image: tools.testCompany.co.za:8093/local/tt--mongodb-map:0.0.7.0-SNAPSHOT
name: test-mongodb-sink-mxeqd
securityContext:
allowPrivilegeEscalation: true
privileged: true
volumeMounts:
- mountPath: /app/resources
name: properties
- mountPath: /stores
name: stores
readOnly: true
- env:
- name: EVENTS_TOPIC
value: test.ops.proc-events
- name: GROUP_ID
value: test-mongodb-sink-mxgts
- name: INPUT_TOPIC
value: test.raw.ptv.murex_gts_sa
- name: MONGODB_AUTH_DB
value: admin
- name: MONGODB_HOST0
value: test-mongodb-0.test-mongodb-headless.test-infra
- name: MONGODB_HOST1
value: test-mongodb-1.test-mongodb-headless.test-infra
- name: MONGODB_PASSWORD
value: test123
- name: MONGODB_PORT
value: "27017"
- name: MONGODB_USERNAME
value: root
- name: SERVER_PORT
value: "8089"
- name: KAFKA_BROKERS
value: kafka-cluster-kafka-bootstrap.kafka:9093
- name: TRUSTSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: ca.password
name: kafka-ca-cert
- name: KEYSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: user.password
name: kafka
image: tools.testCompany.co.za:8093/local/tt--mongodb-map:0.0.7.0-SNAPSHOT
name: test-mongodb-sink-mxgts
securityContext:
allowPrivilegeEscalation: true
privileged: true
volumeMounts:
- mountPath: /app/resources
name: properties
- mountPath: /stores
name: stores
readOnly: true
- env:
- name: EVENTS_TOPIC
value: test.ops.proc-events
- name: GROUP_ID
value: test-mongodb-sink-mxmr
- name: INPUT_TOPIC
value: test.raw.ptv.murex_mr
- name: MONGODB_AUTH_DB
value: admin
- name: MONGODB_HOST0
value: test-mongodb-0.test-mongodb-headless.test-infra
- name: MONGODB_HOST1
value: test-mongodb-1.test-mongodb-headless.test-infra
- name: MONGODB_PASSWORD
value: test123
- name: MONGODB_PORT
value: "27017"
- name: MONGODB_USERNAME
value: root
- name: SERVER_PORT
value: "8090"
- name: KAFKA_BROKERS
value: kafka-cluster-kafka-bootstrap.kafka:9093
- name: TRUSTSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: ca.password
name: kafka-ca-cert
- name: KEYSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: user.password
name: kafka
image: tools.testCompany.co.za:8093/local/tt--mongodb-map:0.0.7.0-SNAPSHOT
name: test-mongodb-sink-mxmr
securityContext:
allowPrivilegeEscalation: true
privileged: true
volumeMounts:
- mountPath: /app/resources
name: properties
- mountPath: /stores
name: stores
readOnly: true
- env:
- name: EVENTS_TOPIC
value: test.ops.proc-events
- name: GROUP_ID
value: test-mongodb-sink-mxgtscf
- name: INPUT_TOPIC
value: test.raw.cashflow.murex_gts_sa
- name: MONGODB_AUTH_DB
value: admin
- name: MONGODB_HOST0
value: test-mongodb-0.test-mongodb-headless.test-infra
- name: MONGODB_HOST1
value: test-mongodb-1.test-mongodb-headless.test-infra
- name: MONGODB_PASSWORD
value: test123
- name: MONGODB_PORT
value: "27017"
- name: MONGODB_USERNAME
value: root
- name: SERVER_PORT
value: "8091"
- name: KAFKA_BROKERS
value: kafka-cluster-kafka-bootstrap.kafka:9093
- name: TRUSTSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: ca.password
name: kafka-ca-cert
- name: KEYSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: user.password
name: kafka
image: tools.testCompany.co.za:8093/local/tt--mongodb-map:0.0.7.0-SNAPSHOT
name: test-mongodb-sink-mxgtscf
securityContext:
allowPrivilegeEscalation: true
privileged: true
volumeMounts:
- mountPath: /app/resources
name: properties
- mountPath: /stores
name: stores
readOnly: true
- env:
- name: EVENTS_TOPIC
value: test.ops.proc-events
- name: GROUP_ID
value: test-mongodb-sink-mxcoll
- name: INPUT_TOPIC
value: test.raw.collateral.mxcoll
- name: MONGODB_AUTH_DB
value: admin
- name: MONGODB_HOST0
value: test-mongodb-0.test-mongodb-headless.test-infra
- name: MONGODB_HOST1
value: test-mongodb-1.test-mongodb-headless.test-infra
- name: MONGODB_PASSWORD
value: test123
- name: MONGODB_PORT
value: "27017"
- name: MONGODB_USERNAME
value: root
- name: SERVER_PORT
value: "8092"
- name: KAFKA_BROKERS
value: kafka-cluster-kafka-bootstrap.kafka:9093
- name: TRUSTSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: ca.password
name: kafka-ca-cert
- name: KEYSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: user.password
name: kafka
image: tools.testCompany.co.za:8093/local/tt--mongodb-map:0.0.7.0-SNAPSHOT
name: test-mongodb-sink-mxcoll
securityContext:
allowPrivilegeEscalation: true
privileged: true
volumeMounts:
- mountPath: /app/resources
name: properties
- mountPath: /stores
name: stores
readOnly: true
- env:
- name: EVENTS_TOPIC
value: test.ops.proc-events
- name: GROUP_ID
value: test-mongodb-sink-mxcoll-link
- name: INPUT_TOPIC
value: test.raw.collateral.mxcoll_link
- name: MONGODB_AUTH_DB
value: admin
- name: MONGODB_HOST0
value: test-mongodb-0.test-mongodb-headless.test-infra
- name: MONGODB_HOST1
value: test-mongodb-1.test-mongodb-headless.test-infra
- name: MONGODB_PASSWORD
value: test123
- name: MONGODB_PORT
value: "27017"
- name: MONGODB_USERNAME
value: root
- name: SERVER_PORT
value: "8093"
- name: KAFKA_BROKERS
value: kafka-cluster-kafka-bootstrap.kafka:9093
- name: TRUSTSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: ca.password
name: kafka-ca-cert
- name: KEYSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: user.password
name: kafka
image: tools.testCompany.co.za:8093/local/tt--mongodb-map:0.0.7.0-SNAPSHOT
name: test-mongodb-sink-mxcoll-link
securityContext:
allowPrivilegeEscalation: true
privileged: true
volumeMounts:
- mountPath: /app/resources
name: properties
- mountPath: /stores
name: stores
readOnly: true
- env:
- name: EVENTS_TOPIC
value: test.ops.proc-events
- name: GROUP_ID
value: test-mongodb-sink-ost
- name: INPUT_TOPIC
value: test.raw.ptv.ost
- name: MONGODB_AUTH_DB
value: admin
- name: MONGODB_HOST0
value: test-mongodb-0.test-mongodb-headless.test-infra
- name: MONGODB_HOST1
value: test-mongodb-1.test-mongodb-headless.test-infra
- name: MONGODB_PASSWORD
value: test123
- name: MONGODB_PORT
value: "27017"
- name: MONGODB_USERNAME
value: root
- name: SERVER_PORT
value: "8094"
- name: KAFKA_BROKERS
value: kafka-cluster-kafka-bootstrap.kafka:9093
- name: TRUSTSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: ca.password
name: kafka-ca-cert
- name: KEYSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: user.password
name: kafka
image: tools.testCompany.co.za:8093/local/tt--mongodb-map:0.0.7.0-SNAPSHOT
name: test-mongodb-sink-ost
securityContext:
allowPrivilegeEscalation: true
privileged: true
volumeMounts:
- mountPath: /app/resources
name: properties
- mountPath: /stores
name: stores
readOnly: true
- env:
- name: EVENTS_TOPIC
value: test.ops.proc-events
- name: GROUP_ID
value: test-mongodb-sink-posmon
- name: INPUT_TOPIC
value: test.raw.ptp.posmon
- name: MONGODB_AUTH_DB
value: admin
- name: MONGODB_HOST0
value: test-mongodb-0.test-mongodb-headless.test-infra
- name: MONGODB_HOST1
value: test-mongodb-1.test-mongodb-headless.test-infra
- name: MONGODB_PASSWORD
value: test123
- name: MONGODB_PORT
value: "27017"
- name: MONGODB_USERNAME
value: root
- name: SERVER_PORT
value: "8095"
- name: KAFKA_BROKERS
value: kafka-cluster-kafka-bootstrap.kafka:9093
- name: TRUSTSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: ca.password
name: kafka-ca-cert
- name: KEYSTORE_PASSWORD
valueFrom:
secretKeyRef:
key: user.password
name: kafka
image: tools.testCompany.co.za:8093/local/tt--mongodb-map:0.0.7.0-SNAPSHOT
name: test-mongodb-sink-posmon
securityContext:
allowPrivilegeEscalation: true
privileged: true
volumeMounts:
- mountPath: /app/resources
name: properties
- mountPath: /stores
name: stores
readOnly: true
Thanks
A templating tool like Helm will let you fill in the environment-variable values from deploy-time settings. In Helm this would look like:
env:
- name: EVENTS_TOPIC
value: {{ .Values.eventsTopic }}
- name: GROUP_ID
value: {{ .Values.groupId }}
- name: INPUT_TOPIC
value: {{ .Values.inputTopic }}
You could then deploy this multiple times with different sets of topics:
helm install alchemy . \
--set eventsTopic=test.ops.proc-events \
--set groupId=test-mongodb-sink-alchemy \
--set inputTopic=test.raw.ptv.alchemy
helm install bloomberg . \
--set eventsTopic=test.ops.proc-events \
--set groupId=test-mongodb-sink-bloomberg \
--set inputTopic=test.raw.pretrade.bloomberg
You could also write the Helm chart to take a list of topic sets as configuration and deploy the whole set at once:
{{- $top := . -}}{{- /* because "range" overwrites "." */ -}}
{{- range $topic := .Values.topics }}
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ $topic.name }}
spec:
...
env:
- name: EVENT_TOPIC
value: {{ $top.Values.eventTopic }}{{/* common to all deployments */}}
- name: GROUP_ID
value: test-mongodb-sink-{{ $topic.name }}
- name: INPUT_TOPIC
value: {{ $topic.inputTopic }}
{{- end }}
Write configuration like:
eventTopic: test.ops.proc-events
topics:
- name: alchemy
inputTopic: test.raw.ptv.alchemy
- name: bloomberg
inputTopic: test.raw.pretrade.bloomberg
And deploy like:
helm install connector . -f topic-listing.yaml
In any case, you will want only one container per pod, for a couple of reasons. If the list of topics ever changes, separate deployments let you create or delete pipelines without interfering with the other topics; if everything is in a single pod, you have to stop and restart everything together, and it can take Kafka a minute or two to rebalance afterwards. In a Kafka context, you can also usefully run only as many consumers as a topic has partitions, and not really more; if you have a very busy topic, you can simply raise that deployment's replicas: to run one consumer per partition, but if everything is in one pod, your only choice is to scale everything together.
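As a sketch of that last point, the per-topic loop above could also expose a replica count; the replicas field on each topic entry is an assumption here, not part of the original chart:
spec:
  replicas: {{ $topic.replicas | default 1 }}
with a topic listing like:
topics:
  - name: bloomberg
    inputTopic: test.raw.pretrade.bloomberg
    replicas: 3   # e.g. one consumer per partition on a busy topic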
Is it wise to use multiple containers in one pod? It is easy to distinguish them, but since they are tightly coupled, I am guessing there is a high chance of failure, and it is not really proper microservice design.
You most likely want to deploy them as separate Deployments, so that you can update or reconfigure them independently of each other.
Should I create a separate deployment for each of these pipelines? That would be very difficult to maintain, as each will have a different deployment config.
Kustomize is a tool built into kubectl and is a good choice when you want to deploy the same manifest to multiple environments with different configurations. This solution requires no additional tooling beyond kubectl.
Deploying to multiple environments with Kustomize
Directory structure:
base/
- deployment.yaml # fully deployable manifest - no templating
- kustomization.yaml # default values e.g. for dev environment
app1/
- kustomization.yaml # specific values for app1
app2/
- kustomization.yaml # specific values for app2
Example Deployment manifest with Kustomization
Here, the environment variables are loaded from a ConfigMap so that we can use configMapGenerator. This file is base/deployment.yaml:
apiVersion: apps/v1
kind: Deployment
metadata:
name: mongodb-sink
namespace: test-apps
spec:
template: # some fields, e.g. labels, are omitted in this example
spec:
containers:
- name: mongodb-sink
image: mongodb-map:0.0.7.0-SNAPSHOT
env:
- name: MONGODB_HOST0
value: test-mongodb-0.test-mongodb-headless.test-infra
- name: MONGODB_HOST1
value: test-mongodb-1.test-mongodb-headless.test-infra
- name: GROUP_ID
valueFrom:
configMapKeyRef:
name: my-values
key: GROUP_ID
- name: INPUT_TOPIC
valueFrom:
configMapKeyRef:
name: my-values
key: INPUT_TOPIC
...
Also add a base/kustomization.yaml file describing the configMapGenerator and the related files:
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
configMapGenerator:
- name: my-values
# no behavior field in the base: the generator creates the ConfigMap here, and overlays replace it
literals:
- GROUP_ID=test-mongodb-sink-calypso
- INPUT_TOPIC=test.raw.ptv.calypso
... # also add your other values
Preview Manifests
kubectl kustomize base/
Apply Manifests
kubectl apply -k base/
Add config for app1 and app2
With app1 we now want to use the manifest we have in base/ and overlay only what differs for app1. This file is app1/kustomization.yaml, and app2/kustomization.yaml is similar.
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
bases:
- ../base
namePrefix: bloomberg-sink- # this gives your Deployment a prefixed name
configMapGenerator:
- name: my-values
behavior: replace
literals:
- GROUP_ID=test-mongodb-sink-bloomberg
- INPUT_TOPIC=test.raw.pretrade.bloomberg
... # also add your other values
Preview Manifests
kubectl kustomize app1/
Apply Manifests
kubectl apply -k app1/
Documentation
Kubernetes: Declarative Management of Kubernetes Objects Using Kustomize
SIG CLI: Kustomization file
I am trying to run a cron job in Kubernetes that needs to access a database. This is the database YAML:
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
component: db
name: db
spec:
selector:
matchLabels:
component: db
replicas: 1
strategy:
type: Recreate
template:
metadata:
labels:
component: db
spec:
containers:
- name: db
image: mysql:5.7
ports:
- containerPort: 3306
args:
- --transaction-isolation=READ-COMMITTED
- --binlog-format=ROW
- --max-connections=1000
- --bind-address=0.0.0.0
env:
- name: MYSQL_DATABASE
valueFrom:
secretKeyRef:
key: MYSQL_DATABASE
name: db-secrets
- name: MYSQL_PASSWORD
valueFrom:
secretKeyRef:
key: MYSQL_PASSWORD
name: db-secrets
- name: MYSQL_ROOT_PASSWORD
valueFrom:
secretKeyRef:
key: MYSQL_ROOT_PASSWORD
name: db-secrets
- name: MYSQL_USER
valueFrom:
secretKeyRef:
key: MYSQL_USER
name: db-secrets
volumeMounts:
- mountPath: /var/lib/mysql
name: db-persistent-storage
restartPolicy: Always
volumes:
- name: db-persistent-storage
persistentVolumeClaim:
claimName: db-pvc
And this is the YAML for the CronJob:
apiVersion: batch/v1beta1
kind: CronJob
metadata:
name: cron
spec:
schedule: "0 0 * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: cron
image: iulbricht/shopware-status-tool:1.0.0
env:
- name: USERNAME
valueFrom:
secretKeyRef:
key: USERNAME
name: cron-secrets
- name: PASSWORD
valueFrom:
secretKeyRef:
key: PASSWORD
name: cron-secrets
- name: DATABASE_DSN
valueFrom:
secretKeyRef:
key: DATABASE_DSN
name: cron-secrets
- name: DHL_API_KEY
valueFrom:
secretKeyRef:
key: DHL_API_KEY
name: cron-secrets
- name: SHOP_API
valueFrom:
secretKeyRef:
key: SHOP_API
name: cron-secrets
restartPolicy: OnFailure
When the CronJob runs I always get the following message: default addr for network 'db:3306' unknown. The MySQL connection string is as follows: mysql://username:password#db:3306/shopware
I am using Kustomize, and the db and cron are in the same namespace.
Can anyone help me find a way to solve this?
Can you please try this connection string:
username:password#tcp(db:3306)/shopware
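For context: this error message comes from the Go MySQL driver (go-sql-driver/mysql), whose DSN grammar is roughly
[username[:password]@][protocol[(address)]]/dbname
When the protocol and parentheses are omitted, the driver parses db:3306 as a network name and fails with exactly default addr for network 'db:3306' unknown, which is why wrapping the address in tcp(...) fixes it. (The # in the strings above is assumed to stand in for @.)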
I am trying to apply the deployment below, but I get the following error:
The Deployment "example" is invalid: spec.template.spec.containers[0].env[0].valueFrom.fieldRef.fieldPath: Invalid value: "spec.template.metadata.annotations.configHash": error converting fieldPath: field label not supported: spec.template.metadata.annotations.configHash
I have tried different ways of writing the fieldPath, like:
spec.template.metadata.annotations['configHash']
spec.template.metadata.['annotations'].['configHash']
spec.template.metadata.['annotations'['configHash']]
Nothing seems to work. Any help will be appreciated.
Kubernetes - 1.16.8-gke.15
apiVersion: apps/v1
kind: Deployment
metadata:
name: ecc-web
labels:
app: ecc
spec:
replicas: 1
selector:
matchLabels:
app: ecc
template:
metadata:
labels:
app: ecc
annotations:
configHash: b6651e50d35182bd8fc2f75a5af4aca79387079860fb953896399a1ad16e317d
spec:
volumes:
- name: opt-ecc-logs
emptyDir: {}
securityContext:
fsGroup: 1000
containers:
- name: ecc-web
image: gcr.io/gke-nonprod/ecc:release-11
envFrom:
- configMapRef:
name: env-config
env:
- name: CONFIG_HASH
valueFrom:
fieldRef:
fieldPath: spec.template.metadata.annotations.configHash
- name: DB_PASSWORD
valueFrom:
secretKeyRef:
name: ecc-secret
key: mysql_svcacct_ecc_dev_password
ports:
- containerPort: 80
imagePullPolicy: Always
securityContext:
privileged: true
volumeMounts:
- name: opt-ecc-logs
mountPath: /opt/ecc/logs
- name: application-log
image: busybox
command: ["/bin/sh","-c"]
args: ["touch /opt/ecc/logs/application.log;chown -R wsapp:wsapp /opt/ecc/logs/;tail -n+1 -f /opt/ecc/logs/application.log"]
securityContext:
runAsUser: 1000
runAsGroup: 1000
volumeMounts:
- name: opt-ecc-logs
mountPath: /opt/ecc/logs
Just use:
env:
- name: CONFIG_HASH
valueFrom:
fieldRef:
fieldPath: metadata.annotations['configHash']
instead of spec.template.metadata.annotations.configHash. The Downward API resolves fieldPath against the Pod that gets created, not against the Deployment, so only Pod-level paths such as metadata.annotations['configHash'] are supported.
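For reference, a few other Pod-level fields the Downward API accepts in fieldRef (this snippet is illustrative and not from the original manifest):
env:
  - name: POD_NAME
    valueFrom:
      fieldRef:
        fieldPath: metadata.name
  - name: POD_NAMESPACE
    valueFrom:
      fieldRef:
        fieldPath: metadata.namespace
  - name: POD_IP
    valueFrom:
      fieldRef:
        fieldPath: status.podIP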
We have a pod which runs a Java-based HTTP server. HTTP POST requests are sent to the server from JMeter. We are currently measuring the scale-out percentage for this HTTP server by increasing the number of replicas. Attached below is the deployment YAML, which exposes the service through both a nodePort and a hostPort.
Our tests show a scale-out percentage of 57% with nodePort, versus ~95% with hostPort.
We would like to know why the behavior differs and how to tune nodePort to achieve the same scale-out percentage as hostPort.
Given below are test details:
K8S version: v1.9.6
System details: 1 master, 3 workers, 1 NFS server
OS: CentOS Linux release 7.3
System resources: 16 GiB RAM (per K8S node), 12 CPUs (per K8S node), 1 TB storage.
Deployment YAML file:
apiVersion: v1
kind: Service
metadata:
name: itom-di-receiver-svc
namespace: opsbridge1
spec:
type: NodePort
ports:
- name: receiver-port
nodePort: 30001
port: 8443
protocol: TCP
targetPort: 5050
selector:
app: itom-di-receiver
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: itom-di-receiver-dpl
namespace: opsbridge1
annotations:
deployment.microfocus.com/default-replica-count: "1"
deployment.microfocus.com/runlevel: UP
spec:
replicas: 1
template:
metadata:
labels:
app: itom-di-receiver
annotations:
pod.boostport.com/vault-approle: opsbridge1-di
pod.boostport.com/vault-init-container: install
spec:
containers:
- name: itom-di-receiver-cnt
image: localhost:5000/hpeswitomsandbox/itom-data-ingestion-receiver:1.3.0-029
livenessProbe:
exec:
command:
- cat
- /receiver/receiver-status.txt
initialDelaySeconds: 180
periodSeconds: 20
readinessProbe:
exec:
command:
- "/receiver/bin/readiness.sh"
initialDelaySeconds: 30
periodSeconds: 20
failureThreshold: 18
securityContext:
capabilities:
drop:
- all
add:
- CHOWN
- SETGID
- SETUID
imagePullPolicy: IfNotPresent
env:
- name: gid
value: "1999"
- name: uid
value: "1999"
- name: KAFKA_SECURITY_PROTOCOL
value: "SSL"
- name: KAFKA_HOSTNAME
valueFrom:
configMapKeyRef:
name: itom-di-kafka-cm
key: kafka.advertised.host.name
- name: KAFKA_PORT
valueFrom:
configMapKeyRef:
name: itom-di-kafka-cm
key: kafka.advertised.port
- name: KAFKA_DEFAULT_TOPIC
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: kafka.default.topic
- name: KAFKA_MAP_CONTEXT_TO_TOPIC
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: kafka.map.context.to.topic
- name: RECEIVER_KAFKA_TLS_ENABLE
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.kafka.tls.enable
- name: RECEIVER_JVM_ARGS
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.jvm.args
### Receiver Performance Tuning Environment Variables ###
- name: RECEIVER_COMPUTE_THREADS
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.compute.threads
- name: KAFKA_PRODUCER_BATCH_SIZE
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.kafka.producer.batch.size.kb
- name: RECEIVER_REQUEST_BODY_MAXSIZE_MB
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.request.body.maxsize.mb
- name: KAFKA_PRODUCER_LINGER_MS
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.kafka.producer.linger.ms
- name: KAFKA_PRODUCER_ACKS
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.kafka.producer.acks
- name: KAFKA_PRODUCER_COMPRESSION_TYPE
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.kafka.producer.compression.type
- name: KAFKA_PRODUCER_BUFFER_MEMORY
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.kafka.producer.buffer.memory.mb
- name: KAFKA_PRODUCER_MAX_BLOCK_MS
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.kafka.producer.max.block.ms
- name: RECEIVER_HEADER_FIELDNAME_FOR_TOPIC
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.header.fieldname.for.topic
- name: RECEIVER_HEADER_FIELDNAME_FOR_TOPIC_KEY
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.header.fieldname.for.topic.key
- name: RECEIVER_TOPIC_FROM_HEADER
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.topic.from.header
- name: KAFKA_PRODUCER_RETRIES
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.kafka.producer.retries
- name: KAFKA_PRODUCER_MAX_IN_FLIGHT_REQUEST_PER_CONNECTION
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.kafka.producer.max.in.flight.requests.per.connection
### Security Environment Variables ###
- name: RECEIVER_PROTOCOL
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.protocol
- name: RECEIVER_AUTH_METHOD
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.auth.method
- name: RECEIVER_KEYSTORE_TYPE
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.keystore.type
- name: RECEIVER_TRUSTSTORE_TYPE
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.truststore.type
- name: RECEIVER_EXTERNAL_JAR_ENABLE
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.external.jar.enable
- name: RECEIVER_JAR_VALIDATE
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.jar.validate
- name: CERTIFICATE_REVOCATION
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.cert.revocation.enable
- name: CRL_FILE
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.cert.revocation.crlfile.enable
- name: PREFER_OCSP
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.cert.revocation.ocsp.enable
- name: ENABLE_SOFT_FAIL
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.cert.revocation.softfail.enable
- name: PREFER_CRL
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.cert.revocation.preferCRL.enable
- name: RESPONDER_URL
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: ocsp.responderURL
- name: CERT_SUBJECT_NAME
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: ocsp.responderCertSubjectName
- name: CERT_ISSUER_NAME
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: ocsp.responderCertIssuerName
- name: CERT_SERIAL_NUMBER
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: ocsp.responderCertSerialNumber
- name: RECEIVER_FIPS_ENABLE
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.fips.enable
- name: RECEIVER_APIKEY_REFRESH_INTERVAL_MINS
valueFrom:
configMapKeyRef:
name: itom-di-receiver-cm
key: receiver.apikey.refresh.interval.mins
ports:
- containerPort: 5050
hostPort: 5051
resources:
limits:
cpu: "4"
memory: "2048Mi"
requests:
cpu: "1"
memory: "1024Mi"
volumeMounts:
- name: di-receiver-volume
mountPath: /receiver/conf
subPath: di/receiver/conf
- name: di-receiver-volume
mountPath: /receiver/conf/endpoint
subPath: di/receiver/conf/endpoint
- name: di-receiver-volume
mountPath: /receiver/conf/schema
subPath: di/receiver/conf/schema
- name: di-receiver-volume
mountPath: /receiver/conf/crl
subPath: di/receiver/conf/crl
- name: di-receiver-log-volume
mountPath: /receiver/log
subPath: di/receiver/log
- name: di-receiver-volume
mountPath: /receiver/ext
subPath: di/receiver/ext
- name: di-receiver-volume
mountPath: /receiver/data
subPath: di/receiver/data
- name: di-receiver-volume
mountPath: /receiver/samples
subPath: di/receiver/samples
- name: vault-token
mountPath: /var/run/secrets/boostport.com
- name: secret-volume
mountPath: /receiver/ssl/ca
- name: secret-volume-kafka
mountPath: /receiver/ssl/store/receiver-kafka
- name: kubernetes-vault-renew
image: localhost:5000/kubernetes-vault-renew:0.5.0
imagePullPolicy: IfNotPresent
volumeMounts:
- name: vault-token
mountPath: /var/run/secrets/boostport.com
initContainers:
- env:
- name: VAULT_ROLE_ID
value: "66d8c1aa-6079-a65f-38c3-89bd7a6fdd2c"
- name: CERT_COMMON_NAME
value: "smperfqa31.hpeswlab.net"
image: localhost:5000/kubernetes-vault-init:0.5.0
imagePullPolicy: IfNotPresent
name: install
resources: {}
securityContext:
runAsUser: 1999
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /var/run/secrets/boostport.com
name: vault-token
- name: itom-di-init-receiver-cnt
image: localhost:5000/hpeswitomsandbox/itom-data-ingestion-receiver:1.3.0-029
command: ["/receiver/bin/run.sh","dependency"]
env:
- name: KAFKA_HOSTNAME
valueFrom:
configMapKeyRef:
name: itom-di-kafka-cm
key: kafka.advertised.host.name
volumes:
- name: di-receiver-volume
persistentVolumeClaim:
claimName: conf-volume
- name: di-receiver-log-volume
persistentVolumeClaim:
claimName: log-volume
- name: vault-token
emptyDir: {}
- name: secret-volume
secret:
secretName: receiver-secret
- name: secret-volume-kafka
secret:
secretName: receiver-kafka-secret
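One difference worth checking (a hypothesis, not something measured here): NodePort traffic passes through kube-proxy, which may forward a connection to a pod on a different node and SNATs it along the way, while hostPort delivers traffic straight to the pod on the receiving node. Setting externalTrafficPolicy: Local on the Service keeps NodePort traffic on the node it arrived at, removing that extra hop:
apiVersion: v1
kind: Service
metadata:
  name: itom-di-receiver-svc
  namespace: opsbridge1
spec:
  type: NodePort
  externalTrafficPolicy: Local   # only route to pods on the receiving node
  ports:
  - name: receiver-port
    nodePort: 30001
    port: 8443
    protocol: TCP
    targetPort: 5050
  selector:
    app: itom-di-receiver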
helm install --name testapp ./testapp
Error: release testapp failed: ReplicationController "registry-creds-via-helm" is invalid: spec.template.spec.containers[0].env[0].valueFrom.secretKeyRef.name: Invalid value: "": a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')
Can anyone point out the problem with my YAML below?
# cat testapp/templates/replicationController-ngn.yaml
apiVersion: v1
kind: ReplicationController
metadata:
name: registry-creds-via-helm
namespace: kube-system
labels:
version: v1.6
spec:
replicas: 1
selector:
name: registry-creds-via-helm
version: v1.9
template:
metadata:
labels:
name: registry-creds-via-helm
version: v1.9
spec:
containers:
- image: upmcenterprises/registry-creds:1.9
name: registry-creds
imagePullPolicy: Always
env:
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
name: registry-creds
key: AWS_SECRET_ACCESS_KEY
- name: awsaccount
valueFrom:
secretKeyRef:
name: registry-creds
key: aws-account
- name: awsregion
valueFrom:
secretKeyRef:
name: registry-creds
key: aws-region
- name: aws-assume-role
valueFrom:
secretKeyRef:
name: registry-creds
key: aws_assume_role
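The error points at env[0].valueFrom.secretKeyRef.name: the first entry, AWS_ACCESS_KEY_ID, is missing the name: of the secret, so it defaults to an empty string, which fails DNS-1123 validation. A fixed first entry, assuming the secret is called registry-creds like the other entries:
env:
- name: AWS_ACCESS_KEY_ID
  valueFrom:
    secretKeyRef:
      name: registry-creds
      key: AWS_ACCESS_KEY_ID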