Local Airflow Instance with minikube - kubernetes

I'm trying to run a local Airflow instance on my laptop using minikube and a deployment.yml file, applied with the following command: kubectl apply -f ./deployment.yml.
After slightly tweaking this file I ended up with all three pods (postgres, webserver, scheduler) running fine, as the kubectl get pods output showed (screenshot omitted).
The content of the file:
---
# Source: airflow/templates/rbac/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: release-name-airflow
  namespace: default
  labels:
    app.kubernetes.io/name: airflow
    helm.sh/chart: airflow-12.0.5
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
automountServiceAccountToken: true
---
# Source: airflow/charts/postgresql/templates/secrets.yaml
apiVersion: v1
kind: Secret
metadata:
  name: release-name-postgresql
  namespace: default
  labels:
    app.kubernetes.io/name: postgresql
    helm.sh/chart: postgresql-11.0.6
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
type: Opaque
data:
  password: "**************"
  # We don't auto-generate LDAP password when it's not provided as we do for other passwords
---
# Source: airflow/templates/config/secret.yaml
apiVersion: v1
kind: Secret
metadata:
  name: release-name-airflow
  namespace: default
  labels:
    app.kubernetes.io/name: airflow
    helm.sh/chart: airflow-12.0.5
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
type: Opaque
data:
  airflow-password: "*************"
  # Airflow keys must be base64-encoded, hence we need to pipe to 'b64enc' twice
  # The auto-generation mechanism available at "common.secrets.passwords.manage" isn't compatible with encoding twice
  # Therefore, we can only use this function if the secret already exists
  airflow-fernet-key: "TldwdU0zRklTREZ0VDFkamVWUjFaMlozWTFKdWNFNUxTRXRxVm5Oa1p6az0="
  airflow-secret-key: "VldWaWQySkhSVUZQZDNWQlltbG1UVzUzVkdwWmVVTkxPR1ZCZWpoQ05tUT0="
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: airflow-dependencies
  namespace: "default"
data:
  requirements.txt: |-
    apache-airflow==2.2.3
    pytest==6.2.4
    python-slugify<5.0
    funcy==1.16
    apache-airflow-providers-mongo
    apache-airflow-providers-postgres
    apache-airflow-providers-slack
    apache-airflow-providers-amazon
    airflow_clickhouse_plugin
    apache-airflow-providers-sftp
    surveymonkey-python
---
# Source: airflow/templates/rbac/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: release-name-airflow
  namespace: default
  labels:
    app.kubernetes.io/name: airflow
    helm.sh/chart: airflow-12.0.5
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
rules:
  - apiGroups:
      - ""
    resources:
      - "pods"
    verbs:
      - "create"
      - "list"
      - "get"
      - "watch"
      - "delete"
      - "patch"
  - apiGroups:
      - ""
    resources:
      - "pods/log"
    verbs:
      - "get"
  - apiGroups:
      - ""
    resources:
      - "pods/exec"
    verbs:
      - "create"
      - "get"
---
# Source: airflow/templates/rbac/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: release-name-airflow
  namespace: default
  labels:
    app.kubernetes.io/name: airflow
    helm.sh/chart: airflow-12.0.5
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: release-name-airflow
subjects:
  - kind: ServiceAccount
    name: release-name-airflow
    namespace: default
---
# Source: airflow/charts/postgresql/templates/primary/svc-headless.yaml
apiVersion: v1
kind: Service
metadata:
  name: release-name-postgresql-hl
  namespace: default
  labels:
    app.kubernetes.io/name: postgresql
    helm.sh/chart: postgresql-11.0.6
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: primary
  annotations:
    # Use this annotation in addition to the actual publishNotReadyAddresses
    # field below because the annotation will stop being respected soon but the
    # field is broken in some versions of Kubernetes:
    # https://github.com/kubernetes/kubernetes/issues/58662
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  type: ClusterIP
  clusterIP: None
  # We want all pods in the StatefulSet to have their addresses published for
  # the sake of the other Postgresql pods even before they're ready, since they
  # have to be able to talk to each other in order to become ready.
  publishNotReadyAddresses: true
  ports:
    - name: tcp-postgresql
      port: 5432
      targetPort: tcp-postgresql
  selector:
    app.kubernetes.io/name: postgresql
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/component: primary
---
# Source: airflow/charts/postgresql/templates/primary/svc.yaml
apiVersion: v1
kind: Service
metadata:
  name: release-name-postgresql
  namespace: default
  labels:
    app.kubernetes.io/name: postgresql
    helm.sh/chart: postgresql-11.0.6
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: primary
  annotations:
spec:
  type: ClusterIP
  ports:
    - name: tcp-postgresql
      port: 5432
      targetPort: tcp-postgresql
      nodePort: null
  selector:
    app.kubernetes.io/name: postgresql
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/component: primary
---
# Source: airflow/templates/web/service.yaml
apiVersion: v1
kind: Service
metadata:
  name: release-name-airflow
  namespace: default
  labels:
    app.kubernetes.io/name: airflow
    helm.sh/chart: airflow-12.0.5
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
spec:
  type: NodePort
  ports:
    - name: http
      port: 8080
      nodePort: 30303
  selector:
    app.kubernetes.io/name: airflow
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/component: web
---
# Source: airflow/templates/scheduler/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: release-name-airflow-scheduler
  namespace: default
  labels:
    app.kubernetes.io/name: airflow
    helm.sh/chart: airflow-12.0.5
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: scheduler
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: airflow
      app.kubernetes.io/instance: release-name
      app.kubernetes.io/component: scheduler
  replicas: 1
  strategy:
    rollingUpdate: {}
    type: RollingUpdate
  template:
    metadata:
      labels:
        app.kubernetes.io/name: airflow
        helm.sh/chart: airflow-12.0.5
        app.kubernetes.io/instance: release-name
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: scheduler
      annotations:
        checksum/configmap: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
    spec:
      affinity:
        podAffinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/name: airflow
                    app.kubernetes.io/instance: release-name
                    app.kubernetes.io/component: scheduler
                namespaces:
                  - "default"
                topologyKey: kubernetes.io/hostname
              weight: 1
        nodeAffinity:
      serviceAccountName: release-name-airflow
      securityContext:
        fsGroup: 1001
      initContainers:
      containers:
        - name: airflow-scheduler
          image: "docker.io/bitnami/airflow-scheduler:2.2.3-debian-10-r57"
          imagePullPolicy: "IfNotPresent"
          securityContext:
            runAsNonRoot: true
            runAsUser: 1001
          env:
            - name: AIRFLOW_FERNET_KEY
              valueFrom:
                secretKeyRef:
                  name: release-name-airflow
                  key: airflow-fernet-key
            - name: AIRFLOW_SECRET_KEY
              valueFrom:
                secretKeyRef:
                  name: release-name-airflow
                  key: airflow-secret-key
            - name: AIRFLOW_LOAD_EXAMPLES
              value: "no"
            - name: AIRFLOW_DATABASE_NAME
              value: "bitnami_airflow"
            - name: AIRFLOW_DATABASE_USERNAME
              value: "bn_airflow"
            - name: AIRFLOW_DATABASE_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: release-name-postgresql
                  key: password
            - name: AIRFLOW_DATABASE_HOST
              value: "release-name-postgresql"
            - name: AIRFLOW_DATABASE_PORT_NUMBER
              value: "5432"
            - name: AIRFLOW_EXECUTOR
              value: LocalExecutor
            - name: AIRFLOW_WEBSERVER_HOST
              value: release-name-airflow
            - name: AIRFLOW_WEBSERVER_PORT_NUMBER
              value: "8080"
            - name: AIRFLOW__CORE__DAGS_FOLDER
              value: /opt/bitnami/airflow/dags
            - name: AIRFLOW__CORE__ENABLE_XCOM_PICKLING
              value: "True"
            - name: AIRFLOW__CORE__DONOT_PICKLE
              value: "False"
          resources:
            limits: {}
            requests: {}
          volumeMounts:
            - mountPath: /bitnami/python/requirements.txt
              name: requirements
              subPath: requirements.txt
            - mountPath: /opt/bitnami/airflow/dags/src
              name: airflow-dags
      volumes:
        - name: requirements
          configMap:
            name: airflow-dependencies
        - name: airflow-dags
          hostPath:
            # directory location on host
            path: /Users/admin/Desktop/FXC_Airflow/dags/src
            # this field is optional
            type: Directory
---
# Source: airflow/templates/web/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: release-name-airflow-web
  namespace: default
  labels:
    app.kubernetes.io/name: airflow
    helm.sh/chart: airflow-12.0.5
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: web
spec:
  selector:
    matchLabels:
      app.kubernetes.io/name: airflow
      app.kubernetes.io/instance: release-name
      app.kubernetes.io/component: web
  replicas: 1
  strategy:
    rollingUpdate: {}
    type: RollingUpdate
  template:
    metadata:
      labels:
        app.kubernetes.io/name: airflow
        helm.sh/chart: airflow-12.0.5
        app.kubernetes.io/instance: release-name
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: web
      annotations:
        checksum/configmap: 01ba4719c80b6fe911b091a7c05124b64eeece964e09c058ef8f9805daca546b
    spec:
      affinity:
        podAffinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/name: airflow
                    app.kubernetes.io/instance: release-name
                    app.kubernetes.io/component: web
                namespaces:
                  - "default"
                topologyKey: kubernetes.io/hostname
              weight: 1
        nodeAffinity:
      serviceAccountName: release-name-airflow
      securityContext:
        fsGroup: 1001
      initContainers:
      containers:
        - name: airflow-web
          image: docker.io/bitnami/airflow:2.2.3-debian-10-r62
          imagePullPolicy: "IfNotPresent"
          securityContext:
            runAsNonRoot: true
            runAsUser: 1001
          env:
            - name: AIRFLOW_FERNET_KEY
              valueFrom:
                secretKeyRef:
                  name: release-name-airflow
                  key: airflow-fernet-key
            - name: AIRFLOW_SECRET_KEY
              valueFrom:
                secretKeyRef:
                  name: release-name-airflow
                  key: airflow-secret-key
            - name: AIRFLOW_LOAD_EXAMPLES
              value: "no"
            - name: AIRFLOW_DATABASE_NAME
              value: "bitnami_airflow"
            - name: AIRFLOW_DATABASE_USERNAME
              value: "bn_airflow"
            - name: AIRFLOW_DATABASE_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: release-name-postgresql
                  key: password
            - name: AIRFLOW_DATABASE_HOST
              value: "release-name-postgresql"
            - name: AIRFLOW_DATABASE_PORT_NUMBER
              value: "5432"
            - name: AIRFLOW_EXECUTOR
              value: LocalExecutor
            - name: AIRFLOW_WEBSERVER_HOST
              value: "0.0.0.0"
            - name: AIRFLOW_WEBSERVER_PORT_NUMBER
              value: "8080"
            - name: AIRFLOW_USERNAME
              value: airflow
            - name: AIRFLOW_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: release-name-airflow
                  key: airflow-password
            - name: AIRFLOW_BASE_URL
              value: "http://127.0.0.1:8080"
            - name: AIRFLOW_LDAP_ENABLE
              value: "no"
            - name: AIRFLOW__CORE__DAGS_FOLDER
              value: /opt/bitnami/airflow/dags
            - name: AIRFLOW__CORE__ENABLE_XCOM_PICKLING
              value: "True"
            - name: AIRFLOW__CORE__DONOT_PICKLE
              value: "False"
          ports:
            - name: http
              containerPort: 8080
          livenessProbe:
            failureThreshold: 6
            initialDelaySeconds: 180
            periodSeconds: 20
            successThreshold: 1
            timeoutSeconds: 5
            tcpSocket:
              port: http
          readinessProbe:
            failureThreshold: 6
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
            tcpSocket:
              port: http
          resources:
            limits:
              cpu: "2"
              memory: 4Gi
            requests: {}
          volumeMounts:
            - mountPath: /bitnami/python/requirements.txt
              name: requirements
              subPath: requirements.txt
            - mountPath: /opt/bitnami/airflow/dags/src
              name: airflow-dags
      volumes:
        - name: requirements
          configMap:
            name: airflow-dependencies
        - name: airflow-dags
          hostPath:
            # directory location on host
            path: /Users/admin/Desktop/FXC_Airflow/dags/src
            # this field is optional
            type: Directory
---
# Source: airflow/charts/postgresql/templates/primary/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: release-name-postgresql
  namespace: default
  labels:
    app.kubernetes.io/name: postgresql
    helm.sh/chart: postgresql-11.0.6
    app.kubernetes.io/instance: release-name
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/component: primary
  annotations:
spec:
  replicas: 1
  serviceName: release-name-postgresql-hl
  updateStrategy:
    rollingUpdate: {}
    type: RollingUpdate
  selector:
    matchLabels:
      app.kubernetes.io/name: postgresql
      app.kubernetes.io/instance: release-name
      app.kubernetes.io/component: primary
  template:
    metadata:
      name: release-name-postgresql
      labels:
        app.kubernetes.io/name: postgresql
        helm.sh/chart: postgresql-11.0.6
        app.kubernetes.io/instance: release-name
        app.kubernetes.io/managed-by: Helm
        app.kubernetes.io/component: primary
      annotations:
    spec:
      serviceAccountName: default
      affinity:
        podAffinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
            - podAffinityTerm:
                labelSelector:
                  matchLabels:
                    app.kubernetes.io/name: postgresql
                    app.kubernetes.io/instance: release-name
                    app.kubernetes.io/component: primary
                namespaces:
                  - "default"
                topologyKey: kubernetes.io/hostname
              weight: 1
        nodeAffinity:
      securityContext:
        fsGroup: 1001
      initContainers:
      containers:
        - name: postgresql
          image: docker.io/bitnami/postgresql:14.1.0-debian-10-r80
          imagePullPolicy: "IfNotPresent"
          securityContext:
            runAsUser: 1001
          env:
            - name: BITNAMI_DEBUG
              value: "false"
            - name: POSTGRESQL_PORT_NUMBER
              value: "5432"
            - name: POSTGRESQL_VOLUME_DIR
              value: "/bitnami/postgresql"
            - name: PGDATA
              value: "/bitnami/postgresql/data"
            # Authentication
            - name: POSTGRES_USER
              value: "bn_airflow"
            - name: POSTGRES_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: release-name-postgresql
                  key: password
            - name: POSTGRES_DB
              value: "bitnami_airflow"
            # Replication
            # Initdb
            # Standby
            # LDAP
            - name: POSTGRESQL_ENABLE_LDAP
              value: "no"
            # TLS
            - name: POSTGRESQL_ENABLE_TLS
              value: "no"
            # Audit
            - name: POSTGRESQL_LOG_HOSTNAME
              value: "false"
            - name: POSTGRESQL_LOG_CONNECTIONS
              value: "false"
            - name: POSTGRESQL_LOG_DISCONNECTIONS
              value: "false"
            - name: POSTGRESQL_PGAUDIT_LOG_CATALOG
              value: "off"
            # Others
            - name: POSTGRESQL_CLIENT_MIN_MESSAGES
              value: "error"
            - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES
              value: "pgaudit"
          ports:
            - name: tcp-postgresql
              containerPort: 5432
          livenessProbe:
            failureThreshold: 6
            initialDelaySeconds: 30
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
            exec:
              command:
                - /bin/sh
                - -c
                - exec pg_isready -U "bn_airflow" -d "dbname=bitnami_airflow" -h 127.0.0.1 -p 5432
          readinessProbe:
            failureThreshold: 6
            initialDelaySeconds: 5
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
            exec:
              command:
                - /bin/sh
                - -c
                - -e
                - |
                  exec pg_isready -U "bn_airflow" -d "dbname=bitnami_airflow" -h 127.0.0.1 -p 5432
                  [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ]
          resources:
            limits: {}
            requests:
              cpu: 250m
              memory: 256Mi
          volumeMounts:
            - name: dshm
              mountPath: /dev/shm
            - name: data
              mountPath: /bitnami/postgresql
      volumes:
        - name: dshm
          emptyDir:
            medium: Memory
  volumeClaimTemplates:
    - metadata:
        name: data
      spec:
        accessModes:
          - "ReadWriteOnce"
        resources:
          requests:
            storage: "8Gi"
The idea is that after a successful deployment I would be able to access the webserver UI through localhost:30303, but for some reason I can't. It feels like there should be a minor change to fix it...
For now, what I've tried is connecting to the webserver pod with kubectl exec -it <webserver pod name> -- /bin/bash and running the two commands airflow db init and airflow webserver -p 8080.
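If minikube is running with the docker driver (an assumption here; the same applies to the VM drivers on macOS), NodePort services are exposed on the minikube node's IP rather than on the laptop's localhost, so localhost:30303 never reaches the Service. Asking minikube for the URL, using the Service name from the manifest above, should give a working address:

minikube service release-name-airflow --url

Alternatively, port-forwarding skips the NodePort entirely:

kubectl port-forward svc/release-name-airflow 8080:8080
# then open http://localhost:8080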

Related

Zombie kubernetes pod keeps being restarted by zombie Replica Set

I'm managing a Kubernetes cluster and there is a duplicate pod that keeps coming back, but the duplicate ReplicaSet controlling it also keeps coming back after deletion. It's very strange. I also can't set the ReplicaSet to desire 0 pods, but that might be by design.
I can't really think of more information to share.
Does anyone recognise this issue and know how to fix it?
Here's the ReplicaSet
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: cash-idemix-ca-6bc646cbcc
  namespace: boc-cash-portal
  uid: babc0236-2053-4088-b8e8-b8ae2ed9303c
  resourceVersion: '25466073'
  generation: 1
  creationTimestamp: '2022-11-28T13:18:42Z'
  labels:
    app.kubernetes.io/instance: cash-idemix-ca
    app.kubernetes.io/name: cash-idemix-ca
    pod-template-hash: 6bc646cbcc
  annotations:
    deployment.kubernetes.io/desired-replicas: '1'
    deployment.kubernetes.io/max-replicas: '2'
    deployment.kubernetes.io/revision: '7'
  ownerReferences:
    - apiVersion: apps/v1
      kind: Deployment
      name: cash-idemix-ca
      uid: 2a3300ed-f666-4a30-98b7-7ab2ebcb2a0d
      controller: true
      blockOwnerDeletion: true
  managedFields:
    - manager: kube-controller-manager
      operation: Update
      apiVersion: apps/v1
      time: '2022-11-28T13:18:42Z'
      fieldsType: FieldsV1
    - manager: kube-controller-manager
      operation: Update
      apiVersion: apps/v1
      time: '2022-11-29T13:27:37Z'
      fieldsType: FieldsV1
      subresource: status
  selfLink: >-
    /apis/apps/v1/namespaces/boc-cash-portal/replicasets/cash-idemix-ca-6bc646cbcc
status:
  replicas: 1
  fullyLabeledReplicas: 1
  observedGeneration: 1
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: cash-idemix-ca
      app.kubernetes.io/name: cash-idemix-ca
      pod-template-hash: 6bc646cbcc
  template:
    metadata:
      creationTimestamp: null
      labels:
        app.kubernetes.io/instance: cash-idemix-ca
        app.kubernetes.io/name: cash-idemix-ca
        pod-template-hash: 6bc646cbcc
      annotations:
        kubectl.kubernetes.io/restartedAt: '2022-07-19T11:45:27Z'
    spec:
      volumes:
        - name: fabric-ca-server-home
          persistentVolumeClaim:
            claimName: cash-idemix-ca-fabric-ca-server-home
      containers:
        - name: cash-idemix-ca
          image: ca.icr.io/samara-dev-container-images/cash-idemix-ca:0.3.0
          command:
            - sh
          args:
            - '-c'
            - >-
              sleep 1 && fabric-ca-server start -b
              $(IDEMIX_ADMIN_USERNAME):$(IDEMIX_ADMIN_PASSWORD) --port 7054
              --idemix.curve gurvy.Bn254 --loglevel debug
          ports:
            - name: api
              containerPort: 7054
              protocol: TCP
          env:
            - name: FABRIC_CA_SERVER_HOME
              value: /idemix-config/fabric-ca-gurvy
            - name: IDEMIX_ADMIN_USERNAME
              valueFrom:
                secretKeyRef:
                  name: cash-idemix-ca-admin-credentials
                  key: IDEMIX_ADMIN_USERNAME
            - name: IDEMIX_ADMIN_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: cash-idemix-ca-admin-credentials
                  key: IDEMIX_ADMIN_PASSWORD
          resources: {}
          volumeMounts:
            - name: fabric-ca-server-home
              mountPath: /idemix-config/fabric-ca-gurvy
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      serviceAccountName: cash-idemix-ca
      serviceAccount: cash-idemix-ca
      securityContext: {}
      imagePullSecrets:
        - name: all-icr-io
      schedulerName: default-scheduler
Edit: The Fool was correct; there is a Deployment that recreates the ReplicaSet. Though in its settings it seems to say that it only wants to create 1 replica, so I still don't see why it ends up with two of them.
I'm using Lens to manage my cluster, and it shows that the desired number of replicas is indeed 2. I can set it to 1, but the change won't persist. Anywhere else I could look?
cash-idemix-ca Deployment:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: cash-idemix-ca
  namespace: boc-cash-portal
  uid: 2a3300ed-f666-4a30-98b7-7ab2ebcb2a0d
  resourceVersion: '25467341'
  generation: 10
  creationTimestamp: '2022-07-18T14:13:57Z'
  labels:
    app.kubernetes.io/instance: cash-idemix-ca
    app.kubernetes.io/managed-by: Helm
    app.kubernetes.io/name: cash-idemix-ca
    app.kubernetes.io/version: manual-0.2.1
    helm.sh/chart: cash-idemix-ca-0.12.0
  annotations:
    deployment.kubernetes.io/revision: '7'
    kubectl.kubernetes.io/last-applied-configuration: >
      {"apiVersion":"apps/v1","kind":"Deployment","metadata":{"annotations":{},"labels":{"app.kubernetes.io/instance":"cash-idemix-ca","app.kubernetes.io/managed-by":"Helm","app.kubernetes.io/name":"cash-idemix-ca","app.kubernetes.io/version":"manual-0.2.1","helm.sh/chart":"cash-idemix-ca-0.12.0"},"name":"cash-idemix-ca","namespace":"boc-cash-portal"},"spec":{"replicas":1,"revisionHistoryLimit":0,"selector":{"matchLabels":{"app.kubernetes.io/instance":"cash-idemix-ca","app.kubernetes.io/name":"cash-idemix-ca"}},"template":{"metadata":{"labels":{"app.kubernetes.io/instance":"cash-idemix-ca","app.kubernetes.io/name":"cash-idemix-ca"}},"spec":{"containers":[{"args":["-c","sleep
      1 \u0026\u0026 fabric-ca-server start -b
      $(IDEMIX_ADMIN_USERNAME):$(IDEMIX_ADMIN_PASSWORD) --port 7054
      --idemix.curve gurvy.Bn254 --loglevel
      debug"],"command":["sh"],"env":[{"name":"FABRIC_CA_SERVER_HOME","value":"/idemix-config/fabric-ca-gurvy"},{"name":"IDEMIX_ADMIN_USERNAME","valueFrom":{"secretKeyRef":{"key":"IDEMIX_ADMIN_USERNAME","name":"cash-idemix-ca-admin-credentials"}}},{"name":"IDEMIX_ADMIN_PASSWORD","valueFrom":{"secretKeyRef":{"key":"IDEMIX_ADMIN_PASSWORD","name":"cash-idemix-ca-admin-credentials"}}}],"image":"ca.icr.io/samara-dev-container-images/cash-idemix-ca:0.3.0","imagePullPolicy":"IfNotPresent","name":"cash-idemix-ca","ports":[{"containerPort":7054,"name":"api","protocol":"TCP"}],"volumeMounts":[{"mountPath":"/idemix-config/fabric-ca-gurvy","name":"fabric-ca-server-home","readOnly":false}]}],"imagePullSecrets":[{"name":"all-icr-io"}],"serviceAccountName":"cash-idemix-ca","volumes":[{"name":"fabric-ca-server-home","persistentVolumeClaim":{"claimName":"cash-idemix-ca-fabric-ca-server-home"}}]}}}}
status:
  observedGeneration: 10
  replicas: 2
  updatedReplicas: 1
  readyReplicas: 1
  availableReplicas: 1
  unavailableReplicas: 1
  conditions:
    - type: Available
      status: 'True'
      lastUpdateTime: '2022-11-28T13:53:56Z'
      lastTransitionTime: '2022-11-28T13:53:56Z'
      reason: MinimumReplicasAvailable
      message: Deployment has minimum availability.
    - type: Progressing
      status: 'False'
      lastUpdateTime: '2022-11-29T13:37:38Z'
      lastTransitionTime: '2022-11-29T13:37:38Z'
      reason: ProgressDeadlineExceeded
      message: ReplicaSet "cash-idemix-ca-6bc646cbcc" has timed out progressing.
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/instance: cash-idemix-ca
      app.kubernetes.io/name: cash-idemix-ca
  template:
    metadata:
      creationTimestamp: null
      labels:
        app.kubernetes.io/instance: cash-idemix-ca
        app.kubernetes.io/name: cash-idemix-ca
      annotations:
        kubectl.kubernetes.io/restartedAt: '2022-07-19T11:45:27Z'
    spec:
      volumes:
        - name: fabric-ca-server-home
          persistentVolumeClaim:
            claimName: cash-idemix-ca-fabric-ca-server-home
      containers:
        - name: cash-idemix-ca
          image: ca.icr.io/samara-dev-container-images/cash-idemix-ca:0.3.0
          command:
            - sh
          args:
            - '-c'
            - >-
              sleep 1 && fabric-ca-server start -b
              $(IDEMIX_ADMIN_USERNAME):$(IDEMIX_ADMIN_PASSWORD) --port 7054
              --idemix.curve gurvy.Bn254 --loglevel debug
          ports:
            - name: api
              containerPort: 7054
              protocol: TCP
          env:
            - name: FABRIC_CA_SERVER_HOME
              value: /idemix-config/fabric-ca-gurvy
            - name: IDEMIX_ADMIN_USERNAME
              valueFrom:
                secretKeyRef:
                  name: cash-idemix-ca-admin-credentials
                  key: IDEMIX_ADMIN_USERNAME
            - name: IDEMIX_ADMIN_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: cash-idemix-ca-admin-credentials
                  key: IDEMIX_ADMIN_PASSWORD
          resources: {}
          volumeMounts:
            - name: fabric-ca-server-home
              mountPath: /idemix-config/fabric-ca-gurvy
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
          imagePullPolicy: IfNotPresent
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      serviceAccountName: cash-idemix-ca
      serviceAccount: cash-idemix-ca
      securityContext: {}
      imagePullSecrets:
        - name: all-icr-io
      schedulerName: default-scheduler
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxUnavailable: 25%
      maxSurge: 25%
  revisionHistoryLimit: 0
  progressDeadlineSeconds: 600
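A plausible reading of the status block above (hedged, since only the manifests are shown): spec.replicas is 1, but the rollout of revision 7 is stuck (Progressing is False with ProgressDeadlineExceeded), so the Deployment keeps one old, ready pod plus one surge pod from the new ReplicaSet (maxSurge: 25% rounds up to 1 extra), which is why Lens reports 2 as the live count. Deleting the new ReplicaSet just makes the Deployment recreate it. Commands along these lines should show which ReplicaSet is stuck and why its pod never becomes ready:

kubectl -n boc-cash-portal rollout status deployment/cash-idemix-ca
kubectl -n boc-cash-portal get rs -l app.kubernetes.io/name=cash-idemix-ca
kubectl -n boc-cash-portal describe pod -l pod-template-hash=6bc646cbcc
# if the new pod is crash-looping, its previous logs usually explain the timeout
kubectl -n boc-cash-portal logs -l pod-template-hash=6bc646cbcc --previous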

logstash with loki, grafana not picking all the kubernetes pod logs

I have set up a log generator with Loki and Logstash. Grafana is able to identify the data source and the labels are being picked up, but the log generator's logs are coming in under the Grafana labels. What am I doing wrong here?
---
# Source: logstash/templates/poddisruptionbudget.yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: "logstash-logstash-pdb"
  labels:
    app: "logstash-logstash"
    chart: "logstash"
    heritage: "Helm"
    release: "logstash"
spec:
  maxUnavailable: 1
  selector:
    matchLabels:
      app: "logstash-logstash"
---
# Source: logstash/templates/configmap-pipeline.yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: logstash-logstash-pipeline
  labels:
    app: "logstash-logstash"
    chart: "logstash"
    heritage: "Helm"
    release: "logstash"
data:
  logstash.yml: |
    http.host: "0.0.0.0"
    path.config: /usr/share/logstash/pipeline
  logstash.conf: |
    input {
      file {
        path => ["/var/log/*.log"]
        start_position => "beginning"
        ignore_older => 0
        sincedb_path => "/dev/null"
      }
    }
    filter {
      if [kubernetes] {
        mutate {
          add_field => {
            "container_name" => "%{[kubernetes][container][name]}"
            "namespace" => "%{[kubernetes][namespace]}"
            "pod" => "%{[kubernetes][pod][name]}"
          }
          replace => { "host" => "%{[kubernetes][node][name]}"}
        }
      }
      mutate {
        remove_field => ["tags"]
      }
    }
    output {
      stdout { codec => rubydebug}
      loki {
        url => "http://loki-loki-distributed-distributor.loki-benchmark.svc.cluster.local:3100/loki/api/v1/push"
      }
    }
---
# Source: logstash/templates/service-headless.yaml
kind: Service
apiVersion: v1
metadata:
  name: "logstash-logstash-headless"
  labels:
    app: "logstash-logstash"
    chart: "logstash"
    heritage: "Helm"
    release: "logstash"
spec:
  clusterIP: None
  selector:
    app: "logstash-logstash"
  ports:
    - name: http
      port: 9600
---
# Source: logstash/templates/statefulset.yaml
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: logstash-logstash
  labels:
    app: "logstash-logstash"
    chart: "logstash"
    heritage: "Helm"
    release: "logstash"
spec:
  serviceName: logstash-logstash-headless
  selector:
    matchLabels:
      app: "logstash-logstash"
      release: "logstash"
  replicas: 1
  podManagementPolicy: Parallel
  updateStrategy:
    type: RollingUpdate
  template:
    metadata:
      name: "logstash-logstash"
      labels:
        app: "logstash-logstash"
        chart: "logstash"
        heritage: "Helm"
        release: "logstash"
      annotations:
        pipelinechecksum: e5576a55d691ae22c1da1204f1e548e8aa936dc6415af52eb65699f5a155bb8
    spec:
      securityContext:
        fsGroup: 1000
        runAsUser: 1000
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - "logstash-logstash"
              topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 120
      volumes:
        - name: logstashpipeline
          configMap:
            name: logstash-logstash-pipeline
      containers:
        - name: "logstash"
          securityContext:
            capabilities:
              drop:
                - ALL
            runAsNonRoot: true
            runAsUser: 1000
          image: "grafana/logstash-output-loki:1.0.1"
          imagePullPolicy: "IfNotPresent"
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /
              port: http
            initialDelaySeconds: 300
            periodSeconds: 10
            successThreshold: 1
            timeoutSeconds: 5
          readinessProbe:
            failureThreshold: 3
            httpGet:
              path: /
              port: http
            initialDelaySeconds: 60
            periodSeconds: 10
            successThreshold: 3
            timeoutSeconds: 5
          ports:
            - name: http
              containerPort: 9600
          resources:
            limits:
              cpu: 1000m
              memory: 1536Mi
            requests:
              cpu: 100m
              memory: 1536Mi
          env:
            - name: LS_JAVA_OPTS
              value: "-Xmx1g -Xms1g"
            - name: XPACK_MONITORING_ENABLED
              value: "false"
          volumeMounts:
            - name: logstashpipeline
              mountPath: /usr/share/logstash/pipeline/logstash.conf
              subPath: logstash.conf
You can try adding these include_fields entries in the Logstash Loki output configuration, which should help you resolve the issue:
output {
  stdout { codec => rubydebug }
  loki {
    url => "http://loki-loki-distributed-distributor.loki-benchmark.svc.cluster.local:3100/loki/api/v1/push"
    include_fields => ["container_name", "namespace", "pod", "host"]
  }
}
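Once those fields are forwarded, a quick sanity check is a LogQL query in Grafana's Explore view against one of the new labels, for example (the label value here is illustrative; use whatever your pipeline actually emits):

{namespace="loki-benchmark"}

If the generator's lines show up under that selector instead of only under Grafana's own labels, the relabeling is working.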

Failed to connect mongo-express to mongoDb in k8s

I configured MongoDB with a username and password, and deployed MongoDB and mongo-express.
The problem is that I'm getting the following error in the mongo-express logs:
Could not connect to database using connectionString: mongodb://username:password@mongodb://lc-mongodb-service:27017:27017/"
I can see that the connection string contains the 27017 port twice, and also "mongodb://" in the middle, which should not be there.
This is my mongo-express deployment:
apiVersion: apps/v1
kind: Deployment
metadata:
  name: lc-mongo-express
  labels:
    app: lc-mongo-express
spec:
  replicas: 1
  selector:
    matchLabels:
      app: lc-mongo-express
  template:
    metadata:
      labels:
        app: lc-mongo-express
    spec:
      containers:
        - name: lc-mongo-express
          image: mongo-express
          ports:
            - containerPort: 8081
          env:
            - name: ME_CONFIG_MONGODB_SERVER
              valueFrom:
                configMapKeyRef:
                  name: lc-configmap
                  key: DATABASE_URL
            - name: ME_CONFIG_MONGODB_ADMINUSERNAME
              valueFrom:
                secretKeyRef:
                  name: lc-secret
                  key: MONGO_ROOT_USERNAME
            - name: ME_CONFIG_MONGODB_ADMINPASSWORD
              valueFrom:
                secretKeyRef:
                  name: lc-secret
                  key: MONGO_ROOT_PASSWORD
---
apiVersion: v1
kind: Service
metadata:
  name: lc-mongo-express-service
spec:
  selector:
    app: lc-mongo-express
  type: LoadBalancer
  ports:
    - protocol: TCP
      port: 8081
      targetPort: 8081
And my mongoDb deployment:
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: lc-mongodb-pvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  volumeMode: Filesystem
  storageClassName: gp2
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: lc-mongodb
  labels:
    app: lc-mongodb
spec:
  replicas: 1
  serviceName: lc-mongodb-service
  selector:
    matchLabels:
      app: lc-mongodb
  template:
    metadata:
      labels:
        app: lc-mongodb
    spec:
      volumes:
        - name: lc-mongodb-storage
          persistentVolumeClaim:
            claimName: lc-mongodb-pvc
      containers:
        - name: lc-mongodb
          image: "mongo"
          ports:
            - containerPort: 27017
          env:
            - name: MONGO_INITDB_ROOT_USERNAME
              valueFrom:
                secretKeyRef:
                  name: lc-secret
                  key: MONGO_ROOT_USERNAME
            - name: MONGO_INITDB_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: lc-secret
                  key: MONGO_ROOT_PASSWORD
          command:
            - mongod
            - --auth
          volumeMounts:
            - mountPath: '/data/db'
              name: lc-mongodb-storage
---
apiVersion: v1
kind: Service
metadata:
  name: lc-mongodb-service
  labels:
    name: lc-mongodb
spec:
  selector:
    app: lc-mongodb
  ports:
    - protocol: TCP
      port: 27017
      targetPort: 27017
What am I doing wrong?
Your connection string format is wrong.
You should be trying something like:
mongodb://[username:password@]host1[:port1][,...hostN[:portN]][/[defaultauthdb][?options]]
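Given the doubled port in the error, it also looks as if (an assumption, since lc-configmap isn't shown) the DATABASE_URL value already contains a full mongodb://lc-mongodb-service:27017 URI, while ME_CONFIG_MONGODB_SERVER expects a bare hostname that mongo-express turns into a URI itself. A minimal sketch of that env entry with a plain hostname:

- name: ME_CONFIG_MONGODB_SERVER
  value: lc-mongodb-service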
Now suppose you are using Node.js:
const MongoClient = require('mongodb').MongoClient;
const uri = "mongodb+srv://<username>:<password>@<Mongo service Name>/<Database name>?retryWrites=true&w=majority";
const client = new MongoClient(uri, { useNewUrlParser: true });
client.connect(err => {
  // creating collection
  const collection = client.db("test").collection("devices");
  // perform actions on the collection object
  client.close();
});
You are also missing the DB path args: ["--dbpath","/data/db"] in the command when using the PVC and configuring the path:
apiVersion: apps/v1
kind: Deployment
metadata:
  creationTimestamp: null
  labels:
    app: mongo
  name: mongo
spec:
  replicas: 1
  selector:
    matchLabels:
      app: mongo
  strategy: {}
  template:
    metadata:
      creationTimestamp: null
      labels:
        app: mongo
    spec:
      containers:
        - image: mongo
          name: mongo
          args: ["--dbpath", "/data/db"]
          livenessProbe:
            exec:
              command:
                - mongo
                - --disableImplicitSessions
                - --eval
                - "db.adminCommand('ping')"
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 6
          readinessProbe:
            exec:
              command:
                - mongo
                - --disableImplicitSessions
                - --eval
                - "db.adminCommand('ping')"
            initialDelaySeconds: 30
            periodSeconds: 10
            timeoutSeconds: 5
            successThreshold: 1
            failureThreshold: 6
          env:
            - name: MONGO_INITDB_ROOT_USERNAME
              valueFrom:
                secretKeyRef:
                  name: mongo-creds
                  key: username
            - name: MONGO_INITDB_ROOT_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: mongo-creds
                  key: password
          volumeMounts:
            - name: "mongo-data-dir"
              mountPath: "/data/db"
      volumes:
        - name: "mongo-data-dir"
          persistentVolumeClaim:
            claimName: "pvc"

consul StatefulSet failing

I am trying to deploy Consul using a Kubernetes StatefulSet with the following manifest:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: consul
  labels:
    app: consul
rules:
  - apiGroups: [""]
    resources:
      - pods
    verbs:
      - get
      - list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: consul
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: consul
subjects:
  - kind: ServiceAccount
    name: consul
    namespace: dev-ethernet
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: consul
  namespace: dev-ethernet
  labels:
    app: consul
---
apiVersion: v1
kind: Secret
metadata:
  name: consul-secret
  namespace: dev-ethernet
data:
  consul-gossip-encryption-key: "aIRpNkHT/8Tkvf757sj2m5AcRlorWNgzcLI4yLEMx7M="
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: consul-config
  namespace: dev-ethernet
data:
  server.json: |
    {
      "bind_addr": "0.0.0.0",
      "client_addr": "0.0.0.0",
      "disable_host_node_id": true,
      "data_dir": "/consul/data",
      "log_level": "INFO",
      "datacenter": "us-west-2",
      "domain": "cluster.local",
      "ports": {
        "http": 8500
      },
      "retry_join": [
        "provider=k8s label_selector=\"app=consul,component=server\""
      ],
      "server": true,
      "telemetry": {
        "prometheus_retention_time": "5m"
      },
      "ui": true
    }
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: consul
  namespace: dev-ethernet
spec:
  selector:
    matchLabels:
      app: consul
      component: server
  serviceName: consul
  podManagementPolicy: Parallel
  replicas: 3
  updateStrategy:
    rollingUpdate:
      partition: 0
    type: RollingUpdate
  template:
    metadata:
      labels:
        app: consul
        component: server
      annotations:
        consul.hashicorp.com/connect-inject: "false"
    spec:
      serviceAccountName: consul
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - consul
              topologyKey: kubernetes.io/hostname
      terminationGracePeriodSeconds: 10
      securityContext:
        fsGroup: 1000
      containers:
        - name: consul
          image: "consul:1.8"
          args:
            - "agent"
            - "-advertise=$(POD_IP)"
            - "-bootstrap-expect=3"
            - "-config-file=/etc/consul/config/server.json"
            - "-encrypt=$(GOSSIP_ENCRYPTION_KEY)"
          env:
            - name: POD_IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
            - name: GOSSIP_ENCRYPTION_KEY
              valueFrom:
                secretKeyRef:
                  name: consul-secret
                  key: consul-gossip-encryption-key
          volumeMounts:
            - name: data
              mountPath: /consul/data
            - name: config
              mountPath: /etc/consul/config
          lifecycle:
            preStop:
              exec:
                command:
                  - /bin/sh
                  - -c
                  - consul leave
          ports:
            - containerPort: 8500
              name: ui-port
            - containerPort: 8400
              name: alt-port
            - containerPort: 53
              name: udp-port
            - containerPort: 8080
              name: http-port
            - containerPort: 8301
              name: serflan
            - containerPort: 8302
              name: serfwan
            - containerPort: 8600
              name: consuldns
            - containerPort: 8300
              name: server
      volumes:
        - name: config
          configMap:
            name: consul-config
  volumeClaimTemplates:
    - metadata:
        name: data
        labels:
          app: consul
      spec:
        accessModes: [ "ReadWriteOnce" ]
        storageClassName: aws-gp2
        resources:
          requests:
            storage: 3Gi
But I get ==> encrypt has invalid key: illegal base64 data at input byte 1 when the container starts.
I generated consul-gossip-encryption-key locally using docker run -i -t consul keygen.
Does anyone know what's wrong here?
secret.data values must be base64-encoded, so the output of consul keygen (which is itself base64) has to be encoded once more before it goes into data; otherwise Kubernetes decodes it to raw bytes and consul's -encrypt flag rejects them.
Try
kubectl create secret generic consul-gossip-encryption-key --from-literal=key="$(docker run -i -t consul keygen)" --dry-run=client -o=yaml
and replace
apiVersion: v1
kind: Secret
metadata:
  name: consul-secret
  namespace: dev-ethernet
data:
  consul-gossip-encryption-key: "aIRpNkHT/8Tkvf757sj2m5AcRlorWNgzcLI4yLEMx7M="
ref: https://www.consul.io/docs/k8s/helm#v-global-gossipencryption
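Equivalently, if you would rather keep the key in the manifest, a Secret's stringData field takes the raw value and Kubernetes base64-encodes it on write, so the consul keygen output can be pasted in directly. A sketch reusing the names from the question:

apiVersion: v1
kind: Secret
metadata:
  name: consul-secret
  namespace: dev-ethernet
stringData:
  # raw keygen output; the API server encodes it into .data for you
  consul-gossip-encryption-key: "aIRpNkHT/8Tkvf757sj2m5AcRlorWNgzcLI4yLEMx7M="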

Flowable not creating the directory in AKS volume for uploaded file

I created the YAML files (Deployment, PV, PVC, and Service) for Flowable to run on AKS. It's running and I can see the Flowable browser UI. The problem is that when I start a process that has a form to upload a file, I get the following error:
/data/uncategorized/a6506912-816c-11ea-8c98-e20c3b5b12e4 (No such file or directory)
Here are my YAML files
Deployment:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  annotations:
    deployment.kubernetes.io/revision: "1"
  creationTimestamp: null
  generation: 1
  labels:
    k8s-app: flowable-app
  name: flowable-app
  selfLink: /apis/extensions/v1beta1/namespaces/ingress-basic/deployments/flowable-app
spec:
  progressDeadlineSeconds: 600
  replicas: 1
  revisionHistoryLimit: 10
  selector:
    matchLabels:
      k8s-app: flowable-app
  strategy:
    rollingUpdate:
      maxSurge: 25%
      maxUnavailable: 25%
    type: RollingUpdate
  template:
    metadata:
      creationTimestamp: null
      labels:
        k8s-app: flowable-app
      name: flowable-app
    spec:
      containers:
        - env:
            - name: FLOWABLE_CONTENT_STORAGE_ROOT-FOLDER
              value: /data
            - name: SPRING_DATASOURCE_DRIVER-CLASS-NAME
              value: org.postgresql.Driver
            - name: SPRING_DATASOURCE_URL
              value: jdbc:postgresql://0.0.0.0:5432/flowable
            - name: SPRING_DATASOURCE_USERNAME
              value: xxxxx
            - name: SPRING_DATASOURCE_PASSWORD
              value: xxxxx
            - name: FLOWABLE_COMMON_APP_IDM-ADMIN_USER
              value: admin
            - name: FLOWABLE_COMMON_APP_IDM-ADMIN_PASSWORD
              value: test
            - name: FLOWABLE_COMMON_APP_IDM-REDIRECT-URL
              value: http://1.1.1.1:8080/flowable-idm
            - name: FLOWABLE_COMMON_APP_REDIRECT_ON_AUTH_SUCCESS
              value: http://1.1.1.1:8080/flowable-task/
          volumeMounts:
            - mountPath: /data
              name: flowable-data
          image: xxxxx
          imagePullPolicy: Always
          name: flowable-app
          resources: {}
          securityContext:
            privileged: false
          terminationMessagePath: /dev/termination-log
          terminationMessagePolicy: File
      volumes:
        - name: flowable-data
          persistentVolumeClaim:
            claimName: flowable-volume-claim
      dnsPolicy: ClusterFirst
      imagePullSecrets:
        - name: regcred
      restartPolicy: Always
      schedulerName: default-scheduler
      securityContext: {}
      terminationGracePeriodSeconds: 30
status: {}
PersistentVolume and PersistentVolumeClaim:
kind: PersistentVolume
apiVersion: v1
metadata:
  name: flowable-volume
  labels:
    type: local
    app: flowable-app
spec:
  storageClassName: managed-premium
  capacity:
    storage: 5Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/mnt/data/flowable/data"
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: flowable-volume-claim
spec:
  accessModes:
    - ReadWriteOnce
  storageClassName: managed-premium
  resources:
    requests:
      storage: 5Gi
Service:
apiVersion: v1
kind: Service
metadata:
  creationTimestamp: null
  labels:
    k8s-app: flowable-app
  name: flowable-app
  selfLink: /api/v1/namespaces/ingress-basic/services/flowable-app
spec:
  externalTrafficPolicy: Cluster
  ports:
    - name: tcp-4000-4000-bj5xg
      nodePort: 31789
      port: 8080
      protocol: TCP
      targetPort: 8080
  selector:
    k8s-app: flowable-app
  sessionAffinity: None
  type: LoadBalancer
status:
  loadBalancer: {}
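No answer is recorded for this one, but one avenue worth checking (an assumption, not a confirmed fix): the error suggests Flowable cannot create the uncategorized folder under the mounted /data volume, which is commonly a missing-directory or permissions problem on a freshly provisioned disk. A hypothetical initContainer, added to the Deployment's pod spec above, that pre-creates the folder and relaxes permissions before the app starts:

initContainers:
  - name: init-content-store
    image: busybox:1.36
    # pre-create Flowable's content store folder on the mounted volume
    command: ["sh", "-c", "mkdir -p /data/uncategorized && chmod -R 0777 /data"]
    volumeMounts:
      - mountPath: /data
        name: flowable-data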