error_class=Fluent::ConfigError error="Invalid Kubernetes API v1 endpoint https://10.96.0.1:443/api: Timed out connecting to server" - kubernetes-helm

A few of our GPU servers are not able to send logs to Splunk. I am getting the error below. Can someone please help me here?
config error file="/fluentd/etc/fluent.conf" error_class=Fluent::ConfigError error="Invalid Kubernetes API v1 endpoint https://10.96.0.1:443/api: Timed out connecting to server"
Kubernetes version (use kubectl version): 1.17
Ruby version (use ruby --version):
OS (e.g: cat /etc/os-release):
Splunk version:
Splunk Connect for Kubernetes helm chart version: 1.4.10
Here is the values.yaml file
COMPUTED VALUES:
USER-SUPPLIED VALUES: null
global:
kubernetes:
clusterName: cluster_name
logLevel: info
metrics:
service:
enabled: true
headless: true
monitoring_agent_enabled: null
monitoring_agent_index_name: null
prometheus_enabled: null
serviceMonitor:
additionalLabels: {}
enabled: false
interval: ""
metricsPort: 24231
scrapeTimeout: 10s
splunk:
hec:
host: splunk-hec.oi.tivo.com
indexRouting: null
insecureSSL: true
port: 8088
protocol: https
token: 779EE032-1473-40F8-AA19-********
splunk-kubernetes-logging:
affinity: {}
buffer:
'#type': memory
chunk_limit_records: 100000
chunk_limit_size: 20m
flush_interval: 5s
flush_thread_count: 1
overflow_action: block
retry_max_times: 5
retry_type: periodic
total_limit_size: 600m
charEncodingUtf8: false
containers:
logFormatType: json
path: /var/log
pathDest: /var/lib/docker/containers
removeBlankEvents: true
customFilters: {}
enabled: true
extraVolumeMounts: []
extraVolumes: []
fluentd:
path: /var/log/containers/*.log
global:
kubernetes:
clusterName: cluster_name
logLevel: info
metrics:
service:
enabled: true
headless: true
serviceMonitor:
additionalLabels: {}
enabled: false
interval: ""
metricsPort: 24231
scrapeTimeout: 10s
splunk:
hec:
caFile: null
clientCert: null
clientKey: null
host: splunk-hec.oi.tivo.com
indexName: null
insecureSSL: true
port: 8088
protocol: https
token: 779EE032-1473-40F8-AA19-*******
image:
name: splunk/fluentd-hec
pullPolicy: IfNotPresent
registry: docker.io
tag: 1.2.8
usePullSecret: false
indexFields: []
journalLogPath: /var/log/journal
k8sMetadata:
cache_ttl: 3600
podLabels:
- app
- k8s-app
- release
watch: true
kubernetes:
clusterName: ***-m**-lv
securityContext: false
logs:
dns-controller:
from:
pod: dns-controller
multiline:
firstline: /^\w[0-1]\d[0-3]\d/
sourcetype: kube:dns-controller
timestampExtraction:
format: '%m%d %H:%M:%S.%N'
regexp: \w(?<time>[0-1]\d[0-3]\d [^\s]*)
dns-sidecar:
from:
container: sidecar
pod: kube-dns
multiline:
firstline: /^\w[0-1]\d[0-3]\d/
sourcetype: kube:kubedns-sidecar
timestampExtraction:
format: '%m%d %H:%M:%S.%N'
regexp: \w(?<time>[0-1]\d[0-3]\d [^\s]*)
dnsmasq:
from:
pod: kube-dns
multiline:
firstline: /^\w[0-1]\d[0-3]\d/
sourcetype: kube:dnsmasq
timestampExtraction:
format: '%m%d %H:%M:%S.%N'
regexp: \w(?<time>[0-1]\d[0-3]\d [^\s]*)
docker:
from:
journald:
unit: docker.service
sourcetype: kube:docker
timestampExtraction:
format: '%Y-%m-%dT%H:%M:%S.%NZ'
regexp: time="(?<time>\d{4}-\d{2}-\d{2}T[0-2]\d:[0-5]\d:[0-5]\d.\d{9}Z)"
etcd:
from:
container: etcd-container
pod: etcd-server
timestampExtraction:
format: '%Y-%m-%d %H:%M:%S.%N'
regexp: (?<time>\d{4}-\d{2}-\d{2} [0-2]\d:[0-5]\d:[0-5]\d\.\d{6})
etcd-events:
from:
container: etcd-container
pod: etcd-server-events
timestampExtraction:
format: '%Y-%m-%d %H:%M:%S.%N'
regexp: (?<time>\d{4}-[0-1]\d-[0-3]\d [0-2]\d:[0-5]\d:[0-5]\d\.\d{6})
etcd-minikube:
from:
container: etcd
pod: etcd-minikube
timestampExtraction:
format: '%Y-%m-%d %H:%M:%S.%N'
regexp: (?<time>\d{4}-\d{2}-\d{2} [0-2]\d:[0-5]\d:[0-5]\d\.\d{6})
kube-apiserver:
from:
pod: kube-apiserver
multiline:
firstline: /^\w[0-1]\d[0-3]\d/
sourcetype: kube:kube-apiserver
timestampExtraction:
format: '%m%d %H:%M:%S.%N'
regexp: \w(?<time>[0-1]\d[0-3]\d [^\s]*)
kube-audit:
from:
file:
path: /var/log/kube-apiserver-audit.log
sourcetype: kube:apiserver-audit
timestampExtraction:
format: '%Y-%m-%dT%H:%M:%SZ'
kube-controller-manager:
from:
pod: kube-controller-manager
multiline:
firstline: /^\w[0-1]\d[0-3]\d/
sourcetype: kube:kube-controller-manager
timestampExtraction:
format: '%m%d %H:%M:%S.%N'
regexp: \w(?<time>[0-1]\d[0-3]\d [^\s]*)
kube-dns-autoscaler:
from:
container: autoscaler
pod: kube-dns-autoscaler
multiline:
firstline: /^\w[0-1]\d[0-3]\d/
sourcetype: kube:kube-dns-autoscaler
timestampExtraction:
format: '%m%d %H:%M:%S.%N'
regexp: \w(?<time>[0-1]\d[0-3]\d [^\s]*)
kube-proxy:
from:
pod: kube-proxy
multiline:
firstline: /^\w[0-1]\d[0-3]\d/
sourcetype: kube:kube-proxy
timestampExtraction:
format: '%m%d %H:%M:%S.%N'
regexp: \w(?<time>[0-1]\d[0-3]\d [^\s]*)
kube-scheduler:
from:
pod: kube-scheduler
multiline:
firstline: /^\w[0-1]\d[0-3]\d/
sourcetype: kube:kube-scheduler
timestampExtraction:
format: '%m%d %H:%M:%S.%N'
regexp: \w(?<time>[0-1]\d[0-3]\d [^\s]*)
kubedns:
from:
pod: kube-dns
multiline:
firstline: /^\w[0-1]\d[0-3]\d/
sourcetype: kube:kubedns
timestampExtraction:
format: '%m%d %H:%M:%S.%N'
regexp: \w(?<time>[0-1]\d[0-3]\d [^\s]*)
kubelet:
from:
journald:
unit: kubelet.service
multiline:
firstline: /^\w[0-1]\d[0-3]\d/
sourcetype: kube:kubelet
timestampExtraction:
format: '%m%d %H:%M:%S.%N'
regexp: \w(?<time>[0-1]\d[0-3]\d [^\s]*)
nodeSelector:
beta.kubernetes.io/os: linux
podSecurityPolicy:
apiGroup: policy
apparmor_security: true
create: false
rbac:
create: true
openshiftPrivilegedSccBinding: false
resources:
requests:
cpu: 100m
memory: 200Mi
secret:
create: true
sendAllMetadata: false
serviceAccount:
create: true
sourcetypePrefix: kube
splunk:
hec:
indexName: ml_logs
indexRouting: false
indexRoutingDefaultIndex: default
ingest_api: {}
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
splunk-kubernetes-metrics:
affinity: {}
aggregatorBuffer:
'#type': memory
chunk_limit_records: 10000
chunk_limit_size: 10m
flush_interval: 5s
flush_thread_count: 1
overflow_action: block
retry_max_times: 5
retry_type: periodic
total_limit_size: 400m
aggregatorTolerations: {}
buffer:
'#type': memory
chunk_limit_records: 10000
chunk_limit_size: 10m
flush_interval: 5s
flush_thread_count: 1
overflow_action: block
retry_max_times: 5
retry_type: periodic
total_limit_size: 400m
customFilters: {}
enabled: true
global:
kubernetes:
clusterName: cluster_name
logLevel: info
metrics:
service:
enabled: true
headless: true
serviceMonitor:
additionalLabels: {}
enabled: false
interval: ""
metricsPort: 24231
scrapeTimeout: 10s
splunk:
hec:
caFile: null
clientCert: null
clientKey: null
host: splunk-hec.oi.tivo.com
indexName: null
insecureSSL: true
port: 8088
protocol: https
token: 779EE032-1473-40F8-AA19-*******
image:
name: splunk/k8s-metrics
pullPolicy: IfNotPresent
registry: docker.io
tag: 1.1.7
usePullSecret: false
imageAgg:
name: splunk/k8s-metrics-aggr
pullPolicy: IfNotPresent
registry: docker.io
tag: 1.1.7
usePullSecret: false
kubernetes:
clusterName: xperi-ml-lv
insecureSSL: true
kubeletAddress: '"#{ENV[''KUBERNETES_NODE_IP'']}"'
kubeletPort: 10250
useRestClientSSL: true
metricsInterval: 60s
nodeSelector:
beta.kubernetes.io/os: linux
podSecurityPolicy:
apiGroup: policy
apparmor_security: true
create: false
rbac:
create: true
resources:
fluent:
limits:
cpu: 200m
memory: 300Mi
requests:
cpu: 200m
memory: 300Mi
secret:
create: true
serviceAccount:
create: true
name: splunk-kubernetes-metrics
usePullSecrets: false
splunk:
hec:
indexName: em_metrics
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
splunk-kubernetes-objects:
affinity: {}
buffer:
'#type': memory
chunk_limit_records: 10000
chunk_limit_size: 20m
flush_interval: 5s
flush_thread_count: 1
overflow_action: block
retry_max_times: 5
retry_type: periodic
total_limit_size: 600m
checkpointFile:
name: kubernetes-objects.pos
customFilters: {}
enabled: true
global:
kubernetes:
clusterName: cluster_name
logLevel: info
metrics:
service:
enabled: true
headless: true
serviceMonitor:
additionalLabels: {}
enabled: false
interval: ""
metricsPort: 24231
scrapeTimeout: 10s
splunk:
hec:
caFile: null
clientCert: null
clientKey: null
host: splunk-hec.oi.tivo.com
indexName: null
insecureSSL: true
port: 8088
protocol: https
token: 779EE032-1473-40F8-AA19-****
image:
name: splunk/kube-objects
pullPolicy: IfNotPresent
registry: docker.io
tag: 1.1.8
usePullSecret: false
indexFields: []
kubernetes:
clusterName: xperi-ml
insecureSSL: true
nodeSelector:
beta.kubernetes.io/os: linux
objects:
apps:
v1:
- interval: 60s
name: daemon_sets
core:
v1:
- interval: 60s
name: pods
- interval: 60s
name: nodes
- interval: 60s
name: namespaces
- interval: 60s
name: persistent_volumes
- interval: 60s
name: persistent_volume_claims
- mode: watch
name: events
- interval: 60s
name: services
podSecurityPolicy:
apiGroup: policy
apparmor_security: true
create: false
rbac:
create: true
resources:
requests:
cpu: 100m
memory: 200Mi
secret:
create: true
serviceAccount:
create: true
name: splunk-kubernetes-objects
usePullSecrets: false
splunk:
hec:
indexName: em_meta
tolerations: []
DaemonSet YAML file
[kavsingh@sjc2-nixutil01 ~]$ kubectl get ds -n splunk-sck lv-splunk-connect-splunk-kubernetes-logging -o yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
annotations:
deprecated.daemonset.template.generation: "1"
meta.helm.sh/release-name: lv-splunk-connect
meta.helm.sh/release-namespace: splunk-sck
creationTimestamp: "2021-12-12T20:51:14Z"
generation: 1
labels:
app: splunk-kubernetes-logging
app.kubernetes.io/managed-by: Helm
chart: splunk-kubernetes-logging-1.4.10
engine: fluentd
heritage: Helm
release: lv-splunk-connect
name: lv-splunk-connect-splunk-kubernetes-logging
namespace: splunk-sck
resourceVersion: "361624389"
selfLink: /apis/apps/v1/namespaces/splunk-sck/daemonsets/lv-splunk-connect-splunk-kubernetes-logging
uid: b8ddba03-f084-4162-a380-32b2e112dcb1
spec:
revisionHistoryLimit: 10
selector:
matchLabels:
app: splunk-kubernetes-logging
release: lv-splunk-connect
template:
metadata:
annotations:
checksum/config: ea7843ca58d9389c5480c7d8c53c6669e8c93c96c792c5b62c7a51264b9ff6ea
creationTimestamp: null
labels:
app: splunk-kubernetes-logging
release: lv-splunk-connect
spec:
containers:
- env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: MY_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: MY_POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: SPLUNK_HEC_TOKEN
valueFrom:
secretKeyRef:
key: splunk_hec_token
name: splunk-kubernetes-logging
image: docker.io/splunk/fluentd-hec:1.2.8
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3
httpGet:
path: /api/plugins.json
port: 24220
scheme: HTTP
initialDelaySeconds: 60
periodSeconds: 60
successThreshold: 1
timeoutSeconds: 1
name: splunk-fluentd-k8s-logs
resources:
requests:
cpu: 100m
memory: 200Mi
securityContext:
privileged: false
runAsUser: 0
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /var/log
name: varlog
- mountPath: /var/lib/docker/containers
name: varlogdest
readOnly: true
- mountPath: /var/log/journal
name: journallogpath
readOnly: true
- mountPath: /fluentd/etc
name: conf-configmap
- mountPath: /fluentd/etc/splunk
name: secrets
readOnly: true
dnsPolicy: ClusterFirst
nodeSelector:
beta.kubernetes.io/os: linux
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: lv-splunk-connect-splunk-kubernetes-logging
serviceAccountName: lv-splunk-connect-splunk-kubernetes-logging
terminationGracePeriodSeconds: 30
tolerations:
- effect: NoSchedule
key: node-role.kubernetes.io/master
volumes:
- hostPath:
path: /var/log
type: ""
name: varlog
- hostPath:
path: /var/lib/docker/containers
type: ""
name: varlogdest
- hostPath:
path: /var/log/journal
type: ""
name: journallogpath
- configMap:
defaultMode: 420
name: lv-splunk-connect-splunk-kubernetes-logging
name: conf-configmap
- name: secrets
secret:
defaultMode: 420
secretName: splunk-kubernetes-logging
updateStrategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
status:
currentNumberScheduled: 62
desiredNumberScheduled: 62
numberAvailable: 57
numberMisscheduled: 2
numberReady: 57
numberUnavailable: 5
observedGeneration: 1
updatedNumberScheduled: 62
...
I want all the Splunk pods to be running.
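The DaemonSet status above shows 5 of 62 pods unavailable, and the config error says fluentd cannot reach the in-cluster API endpoint https://10.96.0.1:443/api from those nodes. A minimal way to check whether this is node-level connectivity rather than a chart problem (a sketch; the pod name and GPU node name are placeholders, and it assumes curl is available in the fluentd image, otherwise use the throwaway-pod variant):
kubectl get pods -n splunk-sck -l app=splunk-kubernetes-logging -o wide
# exec into one of the pods on an affected GPU node and hit the API endpoint directly
kubectl exec -n splunk-sck <failing-pod-name> -- curl -sk -m 10 https://10.96.0.1:443/api
# or, if curl is not present in the image, run a throwaway curl pod pinned to the same node
kubectl run api-check --rm -it --restart=Never --image=curlimages/curl \
  --overrides='{"apiVersion":"v1","spec":{"nodeName":"<affected-gpu-node>"}}' \
  -- -sk -m 10 https://10.96.0.1:443/api
If the call times out only on the GPU nodes, the problem is most likely kube-proxy/iptables, a firewall, or a network policy on those nodes rather than anything in the Splunk Connect values.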

Related

MongoDB Prometheus exporter not scraping all metrics

I have a Mongo deployment with a metrics exporter sidecar. Now, when I load the associated dashboard in Grafana, the metrics are not showing; it appears the exporter is not scraping all metrics:
What is working
Only the Mongo UP metric is working, i.e. mongodb_up{env=~""}, and part of the Server Metrics
What is not working
All of the following metrics in the dashboard are showing no data: opscounters, Replication Set Metrics, Cursor Metrics
My configuration:
Deployment.yaml (using Percona MongoDB Exporter)
apiVersion: apps/v1
kind: Deployment
metadata:
name: mongodb-prom
namespace: "labs"
labels:
app.kubernetes.io/name: mongodb
helm.sh/chart: mongodb-12.1.12
app.kubernetes.io/instance: mongodb-prom
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: mongodb
spec:
replicas: 1
strategy:
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: mongodb
app.kubernetes.io/instance: mongodb-prom
app.kubernetes.io/component: mongodb
template:
metadata:
labels:
app.kubernetes.io/name: mongodb
helm.sh/chart: mongodb-12.1.12
app.kubernetes.io/instance: mongodb-prom
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: mongodb
spec:
serviceAccountName: mongodb-prom
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/name: mongodb
app.kubernetes.io/instance: mongodb-prom
app.kubernetes.io/component: mongodb
namespaces:
- "labs"
topologyKey: kubernetes.io/hostname
weight: 100
securityContext:
fsGroup: 1001
sysctls: []
containers:
- name: mongodb
image: docker.io/bitnami/mongodb:5.0.9-debian-10-r15
imagePullPolicy: "IfNotPresent"
securityContext:
runAsNonRoot: true
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MONGODB_ROOT_USER
value: "root"
- name: MONGODB_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: mongodb-prom
key: mongodb-root-password
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: MONGODB_SYSTEM_LOG_VERBOSITY
value: "0"
- name: MONGODB_DISABLE_SYSTEM_LOG
value: "no"
- name: MONGODB_DISABLE_JAVASCRIPT
value: "no"
- name: MONGODB_ENABLE_JOURNAL
value: "yes"
- name: MONGODB_PORT_NUMBER
value: "27017"
- name: MONGODB_ENABLE_IPV6
value: "no"
- name: MONGODB_ENABLE_DIRECTORY_PER_DB
value: "no"
ports:
- name: mongodb
containerPort: 27017
volumeMounts:
- name: datadir
mountPath: /bitnami/mongodb
- name: datadir
mountPath: /tmp
- name: metrics
image: percona/mongodb_exporter:0.35
imagePullPolicy: "IfNotPresent"
args:
- "--mongodb.direct-connect=false"
- "--mongodb.uri=mongodb://username:password#mongodb-prom/admin"
ports:
- name: metrics
containerPort: 9216
resources:
requests:
memory: 128Mi
cpu: 250m
volumes:
- name: datadir
persistentVolumeClaim:
claimName: mongodb
metrics-svc.yaml
apiVersion: v1
kind: Service
metadata:
name: mongodb-metrics
namespace: "labs"
labels:
app.kubernetes.io/name: mongodb
helm.sh/chart: mongodb-12.1.12
app.kubernetes.io/instance: mongodb-prom
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: metrics
annotations:
prometheus.io/path: /metrics
prometheus.io/port: '9216'
prometheus.io/scrape: "true"
spec:
type: ClusterIP
ports:
- port: 9216
targetPort: metrics
protocol: TCP
name: http-metrics
selector:
app.kubernetes.io/name: mongodb
app.kubernetes.io/instance: mongodb-prom
app.kubernetes.io/component: mongodb
service.yaml
apiVersion: v1
kind: Service
metadata:
name: mongodb-prom
namespace: "labs"
labels:
app.kubernetes.io/name: mongodb
helm.sh/chart: mongodb-12.1.12
app.kubernetes.io/instance: mongodb-prom
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: mongodb
spec:
type: ClusterIP
sessionAffinity: None
ports:
- name: "mongodb"
port: 27017
targetPort: mongodb
selector:
app.kubernetes.io/name: mongodb
app.kubernetes.io/instance: mongodb-prom
app.kubernetes.io/component: mongodb
What I have tried
I have tried using the Bitnami MongoDB Exporter version for the sidecar. It yields the exact same results:
- name: metrics
image: docker.io/bitnami/mongodb-exporter:0.32.0-debian-11-r5
imagePullPolicy: "IfNotPresent"
securityContext:
runAsNonRoot: true
runAsUser: 1001
command:
- /bin/bash
- -ec
args:
- |
/bin/mongodb_exporter --web.listen-address ":9216" --mongodb.uri "mongodb://$MONGODB_ROOT_USER:$(echo $MONGODB_ROOT_PASSWORD | sed -r "s/@/%40/g;s/:/%3A/g")@localhost:27017/admin?"
env:
- name: MONGODB_ROOT_USER
value: "root"
- name: MONGODB_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: mongodb-prom
key: mongodb-root-password
ports:
- name: metrics
containerPort: 9216
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 15
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 5
httpGet:
path: /
port: metrics
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 1
httpGet:
path: /
port: metrics
resources:
limits: {}
requests: {}
I am following the reference implementation given here and I have added the clusterMonitor role to the db user as below:
db.grantRolesToUser("root",[{ role: "clusterMonitor", db: "admin" }, { role: "read", db: "local" }])
and when I execute db.getUsers(); I get
[
{
"_id" : "admin.root",
"userId" : UUID("d8e181fc-6429-447e-bbcb-cec252f0792f"),
"user" : "root",
"db" : "admin",
"roles" : [
{
"role" : "clusterMonitor",
"db" : "admin"
},
{
"role" : "root",
"db" : "admin"
},
{
"role" : "read",
"db" : "local"
}
],
"mechanisms" : [
"SCRAM-SHA-1",
"SCRAM-SHA-256"
]
}
]
Even after granting these roles the dashboard is still not loading the missing metrics.
I have updated the Prometheus and Grafana versions in the dashboard's JSON to match my installed versions (Could this affect anything?)
The default dashboard I am using is here.
What am I missing?
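A quick way to narrow down whether the gap is in the exporter itself or in Prometheus/Grafana is to read the exporter output directly (a sketch, reusing the Deployment and namespace names from the manifests above; port 9216 is the metrics containerPort):
# in one terminal: forward the metrics port of the exporter sidecar
kubectl -n labs port-forward deploy/mongodb-prom 9216:9216
# in another terminal: look for the opcounter/replica-set families the dashboard needs
curl -s http://localhost:9216/metrics | grep -iE 'opcounters|repl' | head -n 20
If those families are missing here, the exporter flags or permissions are the problem; if they are present, the issue is on the Prometheus scrape or dashboard side.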
For anyone struggling with this: I had to add the --collect-all and --compatible-mode flags for the container to pull the metrics, as in the configuration below:
apiVersion: apps/v1
kind: Deployment
metadata:
name: mongodb-prom
namespace: "labs"
labels:
app.kubernetes.io/name: mongodb
helm.sh/chart: mongodb-12.1.12
app.kubernetes.io/instance: mongodb-prom
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: mongodb
spec:
replicas: 1
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
selector:
matchLabels:
app.kubernetes.io/name: mongodb
app.kubernetes.io/instance: mongodb-prom
app.kubernetes.io/component: mongodb
template:
metadata:
labels:
app.kubernetes.io/name: mongodb
helm.sh/chart: mongodb-12.1.12
app.kubernetes.io/instance: mongodb-prom
app.kubernetes.io/managed-by: Helm
app.kubernetes.io/component: mongodb
spec:
serviceAccountName: mongodb-prom
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- podAffinityTerm:
labelSelector:
matchLabels:
app.kubernetes.io/name: mongodb
app.kubernetes.io/instance: mongodb-prom
app.kubernetes.io/component: mongodb
namespaces:
- "labs"
topologyKey: kubernetes.io/hostname
weight: 100
securityContext:
fsGroup: 1001
sysctls: []
containers:
- name: mongodb
image: docker.io/bitnami/mongodb:5.0.9-debian-10-r15
imagePullPolicy: "IfNotPresent"
securityContext:
runAsNonRoot: true
runAsUser: 1001
env:
- name: BITNAMI_DEBUG
value: "false"
- name: MONGODB_ROOT_USER
value: "root"
- name: MONGODB_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: mongodb-prom
key: mongodb-root-password
- name: ALLOW_EMPTY_PASSWORD
value: "no"
- name: MONGODB_SYSTEM_LOG_VERBOSITY
value: "0"
- name: MONGODB_DISABLE_SYSTEM_LOG
value: "no"
- name: MONGODB_DISABLE_JAVASCRIPT
value: "no"
- name: MONGODB_ENABLE_JOURNAL
value: "yes"
- name: MONGODB_PORT_NUMBER
value: "27017"
- name: MONGODB_ENABLE_IPV6
value: "no"
- name: MONGODB_ENABLE_DIRECTORY_PER_DB
value: "no"
ports:
- name: mongodb
containerPort: 27017
volumeMounts:
- name: datadir
mountPath: /bitnami/mongodb
- name: datadir
mountPath: /tmp
- name: metrics
image: docker.io/bitnami/mongodb-exporter:0.32.0-debian-11-r5
imagePullPolicy: "IfNotPresent"
securityContext:
runAsNonRoot: true
runAsUser: 1001
command:
- /bin/bash
- -ec
args:
- |
/bin/mongodb_exporter --web.listen-address ":9216" --mongodb.uri "mongodb://$MONGODB_ROOT_USER:$(echo $MONGODB_ROOT_PASSWORD | sed -r "s/@/%40/g;s/:/%3A/g")@localhost:27017/admin?" --collect-all --compatible-mode
env:
- name: MONGODB_ROOT_USER
value: "root"
- name: MONGODB_ROOT_PASSWORD
valueFrom:
secretKeyRef:
name: mongodb-prom
key: mongodb-root-password
ports:
- name: metrics
containerPort: 9216
livenessProbe:
failureThreshold: 3
initialDelaySeconds: 15
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 5
httpGet:
path: /
port: metrics
readinessProbe:
failureThreshold: 3
initialDelaySeconds: 5
periodSeconds: 5
successThreshold: 1
timeoutSeconds: 1
httpGet:
path: /
port: metrics
resources:
limits: {}
requests: {}
volumes:
- name: datadir
persistentVolumeClaim:
claimName: mongodb
The solution is from the following thread:
GithubIssue
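If it still does not work after redeploying, it may be worth confirming the new arguments actually reached the pod (a small check, reusing the names from the manifest above):
# confirm the metrics container is running with --collect-all --compatible-mode
kubectl -n labs get deploy mongodb-prom \
  -o jsonpath='{.spec.template.spec.containers[?(@.name=="metrics")].args}'
kubectl -n labs rollout status deploy/mongodb-prom
# then repeat the port-forward + curl check from earlier; the opcounters
# metric family should now show up in the /metrics output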

telepresence: error: workload "xxx-service.default" not found

I have this chart of a personal project deployed in minikube:
---
# Source: frontend/templates/service.yaml
kind: Service
apiVersion: v1
metadata:
name: xxx-app-service
spec:
selector:
app: xxx-app
ports:
- protocol: TCP
port: 3000
targetPort: 3000
---
# Source: frontend/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: '3'
creationTimestamp: '2022-06-19T21:57:01Z'
generation: 3
labels:
app: xxx-app
name: xxx-app
namespace: default
resourceVersion: '43299'
uid: 7c43767a-abbd-4806-a9d2-6712847a0aad
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: xxx-app
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: xxx-app
spec:
containers:
- image: "registry.gitlab.com/esavara/xxx/wm:staging"
name: frontend
imagePullPolicy: Always
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
ports:
- containerPort: 3000
livenessProbe:
httpGet:
path: /
port: 3000
initialDelaySeconds: 10
periodSeconds: 3
env:
- name: PORT
value: "3000"
resources: {}
dnsPolicy: ClusterFirst
imagePullSecrets:
- name: regcred
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
---
# Source: frontend/templates/ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
creationTimestamp: '2022-06-19T22:28:58Z'
generation: 1
name: xxx-app
namespace: default
resourceVersion: '44613'
uid: b58dcd17-ee1f-42e5-9dc7-d915a21f97b5
spec:
ingressClassName: nginx
rules:
- http:
paths:
- backend:
service:
name: "xxx-app-service"
port:
number: 3000
path: /
pathType: Prefix
status:
loadBalancer:
ingress:
- ip: 192.168.39.80
---
# Source: frontend/templates/gitlab-registry-sealed.json
{
"kind": "SealedSecret",
"apiVersion": "bitnami.com/v1alpha1",
"metadata": {
"name": "regcred",
"namespace": "default",
"creationTimestamp": null
},
"spec": {
"template": {
"metadata": {
"name": "regcred",
"namespace": "default",
"creationTimestamp": null
},
"type": "kubernetes.io/dockerconfigjson",
"data": null
},
"encryptedData": {
".dockerconfigjson": "AgBpHoQw1gBq0IFFYWnxlBLLYl1JC23TzbRWGgLryVzEDP8p+NAGjngLFZmtklmCEHLK63D9pp3zL7YQQYgYBZUjpEjj8YCTOhvnjQIg7g+5b/CPXWNoI5TuNexNJFKFv1mN5DzDk9np/E69ogRkDJUvUsbxvVXs6/TKGnRbnp2NuI7dTJ18QgGXdLXs7S416KE0Yid9lggw06JrDN/OSxaNyUlqFGcRJ6OfGDAHivZRV1Kw9uoX06go3o+AjVd6eKlDmmvaY/BOc52bfm7pY2ny1fsXGouQ7R7V1LK0LcsCsKdAkg6/2DU3v32mWZDKJgKkK5efhTQr1KGOBoLuuHKX6nF5oMA1e1Ii3wWe77lvWuvlpaNecCBTc7im+sGt0dyJb4aN5WMLoiPGplGqnuvNqEFa/nhkbwXm5Suke2FQGKyzDsMIBi9p8PZ7KkOJTR1s42/8QWVggTGH1wICT1RzcGzURbanc0F3huQ/2RcTmC4UvYyhCUqr3qKrbAIYTNBayfyhsBaN5wVRnV5LiPxjLbbOmObSr/8ileJgt1HMQC3V9pVLZobaJvlBjr/mmNlrF118azJOFY+a/bqzmtBWwlcvVuel/EaOb8uA8mgwfnbnShMinL1OWTHoj+D0ayFmUdsQgMYwRmStnC7x/6OXThmBgfyrLguzz4V2W8O8qbdDz+O5QoyboLUuR9WQb/ckpRio2qa5tidnKXzZzbWzFEevv9McxvCC1+ovvw4IullU8ra3FutnTentmPHKU2OPr1EhKgFKIX20u8xZaeIJYLCiZlVQohylpjkHnBZo1qw+y1CTiDwytunpmkoGGAXLx++YQSjEJEo889PmVVlSwZ8p/Rdusiz1WbgKqFt27yZaOfYzR2bT++HuB5x6zqfK6bbdV/UZndXs"
}
}
}
I'm trying to use Telepresence to redirect traffic from the deployed application to a Docker container which has my project mounted inside and has hot-reloading, so I can continue developing it inside Kubernetes. But running telepresence intercept xxx-app-service --port 3000:3000 --env-file /tmp/xxx-app-service.env fails with the following error:
telepresence: error: workload "xxx-app-service.default" not found
Why is this happening and how do I fix it?
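I cannot be certain without the Telepresence version, but in Telepresence 2 the argument to telepresence intercept is matched against a workload (Deployment/StatefulSet), not a Service, and in the manifests above the Deployment is named xxx-app while xxx-app-service is only the Service. A sketch of what usually works (flags assumed from Telepresence 2.x; adjust to your version):
# see which workloads Telepresence can intercept in the current namespace
telepresence list
# intercept the Deployment (xxx-app); --service picks the Service to route through
telepresence intercept xxx-app --service xxx-app-service --port 3000:3000 \
  --env-file /tmp/xxx-app-service.env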

spring cloud dataflow UI, which is configured with k8s, redirects to port 8000

We have Spring Cloud Data Flow services deployed in our EKS (Elastic Kubernetes Service in AWS).
The service was deployed with the Helm chart with more or less the default configuration; the only thing we really configured is the Spring Cloud Data Flow external database.
Now, a very strange thing is happening when trying to enter the UI:
enter https://<url of scdf>/dashboard
wait, maybe for more than a minute
the browser fails to load the UI and the URL changes to http://<url of scdf>:8000/dashboard/index.html
then, after removing the :8000 from the address bar and trying again, it works!
We have no clue where port 8000 is coming from.
Also, when you go to the root path https://<url of scdf> you see the different endpoints of the services, and they all show port 8000 - again, we haven't configured this port anywhere...
We have many other services exposed via k8s (registered as Ingress services) and they are all working as expected.
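A quick way to see where the :8000 gets injected is to look at the raw redirect the server sends back through the ingress (the host stays a placeholder, as in the question):
curl -skI https://<url of scdf>/dashboard
# the Location header of the redirect shows the scheme/host/port the SCDF server
# thinks it is running behind; if it already contains :8000 here, the redirect is
# being built from forwarded-port/proto headers set by whatever sits in front of it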
UPDATE ADDING HELM CHART
compname:
chartName: compname/spring-cloud-dataflow
chartPath: path
deploymentName: spring-cloud-dataflow
namespace: default
productLine: productline
# Original chart values: https://github.com/bitnami/charts/blob/master/bitnami/spring-cloud-dataflow/values.yaml
vault:
enabled: true
global:
imageRegistry: ""
imagePullSecrets: []
storageClass: ""
nameOverride: ""
fullnameOverride: ""
commonLabels: {}
kubeVersion: ""
clusterDomain: cluster.local
extraDeploy: []
server:
image:
registry: registery
repository: infra
tag: bitnami-docker-spring-cloud-dataflow-2.9.1-debian-10-r27
pullPolicy: IfNotPresent
pullSecrets: []
debug: false
hostAliases: []
composedTaskRunner:
image:
registry: registry
repository: infra
tag: bitnami-docker-spring-cloud-dataflow-composed-task-runner-2.9.1-debian-10.r27
configuration:
streamingEnabled: false
batchEnabled: true
accountName: default
trustK8sCerts: false
containerRegistries: {}
grafanaInfo: ""
metricsDashboard: ""
defaultSpringApplicationJSON: true
existingConfigmap: ""
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
replicaCount: 1
strategyType: RollingUpdate
podAffinityPreset: ""
podAntiAffinityPreset: soft
containerPort: 8080
nodeAffinityPreset:
type: ""
key: ""
values: []
affinity: {}
nodeSelector: {}
tolerations: []
podAnnotations: {}
priorityClassName: ""
podSecurityContext:
fsGroup: 1001
containerSecurityContext:
runAsUser: 1001
resources:
limits: {}
requests: {}
livenessProbe:
enabled: true
initialDelaySeconds: 120
timeoutSeconds: 1
periodSeconds: 20
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 120
timeoutSeconds: 1
periodSeconds: 20
failureThreshold: 6
successThreshold: 1
customLivenessProbe: {}
customReadinessProbe: {}
service:
type: ClusterIP
port: 8080
nodePort: ""
clusterIP: ""
externalTrafficPolicy: Cluster
loadBalancerIP: ""
loadBalancerSourceRanges: []
annotations: {}
ingress:
enabled: true
path: /
pathType: ImplementationSpecific
hostname: hostname
annotations: {}
tls: false
extraHosts: []
extraTls: []
secrets: []
initContainers: []
sidecars: []
pdb:
create: false
minAvailable: 1
maxUnavailable: ""
autoscaling:
enabled: false
minReplicas: ""
maxReplicas: ""
targetCPU: ""
targetMemory: ""
extraVolumes: []
extraVolumeMounts: []
jdwp:
enabled: false
port: 5005
proxy: {}
skipper:
enabled: false
hostAliases: []
image:
registry: registry
repository: infra
tag: bitnami-docker-spring-cloud-skipper-2.8.1-debian-10-r26
pullPolicy: IfNotPresent
pullSecrets: []
debug: false
configuration:
accountName: default
trustK8sCerts: false
existingConfigmap: ""
extraEnvVars: []
extraEnvVarsCM: ""
extraEnvVarsSecret: ""
replicaCount: 1
strategyType: RollingUpdate
podAffinityPreset: ""
podAntiAffinityPreset: soft
nodeAffinityPreset:
type: ""
key: ""
values: []
affinity: {}
nodeSelector: {}
tolerations: []
podAnnotations: {}
priorityClassName: ""
podSecurityContext:
fsGroup: 1001
containerSecurityContext:
runAsUser: 1001
resources:
limits: {}
requests: {}
livenessProbe:
enabled: true
initialDelaySeconds: 120
timeoutSeconds: 1
periodSeconds: 20
failureThreshold: 6
successThreshold: 1
readinessProbe:
enabled: true
initialDelaySeconds: 120
timeoutSeconds: 1
periodSeconds: 20
failureThreshold: 6
successThreshold: 1
customLivenessProbe: {}
customReadinessProbe: {}
service:
type: ClusterIP
port: 80
nodePort: ""
clusterIP: ""
externalTrafficPolicy: Cluster
loadBalancerIP: ""
loadBalancerSourceRanges: []
annotations: {}
initContainers: []
sidecars: []
pdb:
create: false
minAvailable: 1
maxUnavailable: ""
autoscaling:
enabled: false
minReplicas: ""
maxReplicas: ""
targetCPU: ""
targetMemory: ""
extraVolumes: []
extraVolumeMounts: []
jdwp:
enabled: false
port: 5005
externalSkipper:
host: localhost
port: 7577
deployer:
metadata:
annotations: {}
resources:
limits:
cpu: 500m
memory: 1024Mi
requests: {}
readinessProbe:
initialDelaySeconds: 120
livenessProbe:
initialDelaySeconds: 90
nodeSelector: ""
tolerations: {}
volumeMounts: {}
volumes: {}
environmentVariables: ""
podSecurityContext:
runAsUser: 1001
serviceAccount:
create: true
name: ""
rbac:
create: true
metrics:
enabled: true
image:
registry: registry
repository: infra
tag: bitnami-docker-prometheus-rsocket-proxy-1.3.0-debian-10-r334
pullPolicy: IfNotPresent
pullSecrets: []
resources:
limits: {}
requests: {}
replicaCount: 1
podAffinityPreset: ""
podAntiAffinityPreset: soft
nodeAffinityPreset:
type: ""
key: ""
values: []
affinity: {}
nodeSelector: {}
tolerations: []
podAnnotations: {}
priorityClassName: ""
service:
httpPort: 8080
rsocketPort: 7001
serviceMonitor:
enabled: true
extraLabels: {}
namespace: ""
interval: ""
scrapeTimeout: ""
pdb:
create: false
minAvailable: 1
maxUnavailable: ""
autoscaling:
enabled: false
minReplicas: ""
maxReplicas: ""
targetCPU: ""
targetMemory: ""
waitForBackends:
enabled: true
image:
registry: registry
repository: infra
tag: bitnami-docker-kubectl-1.19.16-debian-10-r27
pullPolicy: IfNotPresent
pullSecrets: []
resources:
limits: {}
requests: {}
mariadb:
enabled: false
architecture: standalone
auth:
rootPassword: ""
username: dataflow
password: change-me
database: dataflow
forcePassword: false
usePasswordFiles: false
initdbScripts:
create_databases.sql: |
CREATE OR REPLACE USER 'skipper'@'%' identified by 'change-me';
CREATE DATABASE IF NOT EXISTS `skipper`;
GRANT ALL ON skipper.* to 'skipper'@'%';
FLUSH PRIVILEGES;
flyway:
enabled: true
externalDatabase:
host: host
port: 3306
driver: ""
scheme: "mysql"
password: "vault:kubernetes/productline/data/scdf#SCDF_DB_PASSWORD"
existingPasswordSecret: "True"
existingPasswordKey: "password"
dataflow:
url: ""
database: dataflow
username: "vault:kubernetes/productline/data/scdf#SCDF_DB_USER"
skipper:
url: ""
database: skipper
username: skipper
hibernateDialect: ""
rabbitmq:
enabled: false
auth:
username: user
externalRabbitmq:
enabled: false
host: localhost
port: 5672
username: guest
password: guest
vhost: ""
existingPasswordSecret: ""
kafka:
enabled: false
replicaCount: 1
offsetsTopicReplicationFactor: 1
zookeeper:
replicaCount: 1
externalKafka:
enabled: false
brokers: localhost:9092
zkNodes: localhost:2181
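One thing that may be worth checking (an assumption on my part, not something the values above confirm): Spring Boot only honours X-Forwarded-* headers when a forward-headers strategy is enabled, so a server behind an ingress can build redirects with the wrong scheme or port. The chart exposes server.extraEnvVars, so a hedged sketch would be:
server:
  extraEnvVars:
    # maps to server.forward-headers-strategy via Spring Boot relaxed binding
    - name: SERVER_FORWARDHEADERSSTRATEGY
      value: "framework"
Nothing in the values above declares port 8000 (server and metrics listen on 8080, skipper on 80, rsocket on 7001), which also points at something in front of the server rather than the chart itself.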

Kubernetes NiFi Cluster setup in AKS

Thought I'd post this because it might help someone. I couldn't find a Kubernetes NiFi setup without a Helm package, so I have prepared the configuration YAML below for a Kubernetes NiFi cluster setup.
Here's the link for Zookeeper Cluster setup in AKS
Please comment if you see any issues anywhere in the configuration or if you have any suggestions. Increase the disk storage configuration according to your usage.
apiVersion: v1
kind: Service
metadata:
name: nifi-hs
labels:
app: nifi
spec:
ports:
- port: 1025
name: nodeport
- port: 8080
name: client
clusterIP: None
selector:
app: nifi
---
apiVersion: v1
kind: Service
metadata:
name: nifi-cs
labels:
app: nifi
annotations:
service.beta.kubernetes.io/azure-dns-label-name: nifi
spec:
ports:
- port: 80
targetPort: 8080
name: client
selector:
app: nifi
type: LoadBalancer
---
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: nifi-pdb
spec:
selector:
matchLabels:
app: nifi
maxUnavailable: 1
---
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: nifi-sc
selfLink: /apis/storage.k8s.io/v1/storageclasses/nifi-sc
labels:
addonmanager.kubernetes.io/mode: EnsureExists
kubernetes.io/cluster-service: 'true'
provisioner: kubernetes.io/azure-disk
parameters:
cachingmode: ReadOnly
kind: Managed
storageaccounttype: StandardSSD_LRS
reclaimPolicy: Delete
allowVolumeExpansion: true
volumeBindingMode: WaitForFirstConsumer
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: nifi
spec:
selector:
matchLabels:
app: nifi
serviceName: nifi-hs
replicas: 3
updateStrategy:
type: RollingUpdate
podManagementPolicy: OrderedReady
template:
metadata:
labels:
app: nifi
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- k: "app"
operator: In
values:
- nifi
topologyKey: "kubernetes.io/hostname"
containers:
- name: nifi
image: "apache/nifi:1.13.0"
env:
- name: NIFI_CLUSTER_IS_NODE
value: "true"
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NIFI_CLUSTER_ADDRESS
value: $(HOSTNAME).nifi-hs
- name: NIFI_CLUSTER_NODE_PROTOCOL_PORT
value: "1025"
- name: NIFI_WEB_HTTP_HOST
value: $(HOSTNAME).nifi-hs.ns1.svc.cluster.local
#- name: NIFI_WEB_HTTP_PORT
# value: "80"
- name: NIFI_CLUSTER_NODE_PROTOCOL_MAX_THREADS
value: "100"
- name: NIFI_ZK_CONNECT_STRING
value: "zk-cs:2181"
- name: NIFI_ELECTION_MAX_CANDIDATES
value: "3"
ports:
- containerPort: 8080
name: client
- containerPort: 1025
name: nodeport
volumeMounts:
- name: nifi-database
mountPath: "/opt/nifi/nifi-current/database_repository"
- name: nifi-flowfile
mountPath: "/opt/nifi/nifi-current/flowfile_repository"
- name: nifi-content
mountPath: "/opt/nifi/nifi-current/content_repository"
- name: nifi-provenance
mountPath: "/opt/nifi/nifi-current/provenance_repository"
- name: nifi-state
mountPath: "/opt/nifi/nifi-current/state"
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
volumeClaimTemplates:
- metadata:
name: nifi-database
spec:
storageClassName: "nifi-sc"
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 5Gi
- metadata:
name: nifi-flowfile
spec:
storageClassName: "nifi-sc"
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 5Gi
- metadata:
name: nifi-content
spec:
storageClassName: "nifi-sc"
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 5Gi
- metadata:
name: nifi-provenance
spec:
storageClassName: "nifi-sc"
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 5Gi
- metadata:
name: nifi-state
spec:
storageClassName: "nifi-sc"
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 5Gi
The labelSelector in the affinity block above is missing a few words (key: was truncated to k:). Below is the updated working YAML block for the StatefulSet.
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: nifi
spec:
selector:
matchLabels:
app: nifi
serviceName: nifi-hs
replicas: 3
updateStrategy:
type: RollingUpdate
podManagementPolicy: OrderedReady
template:
metadata:
labels:
app: nifi
spec:
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: "app"
operator: In
values:
- nifi
topologyKey: "kubernetes.io/hostname"
containers:
- name: nifi
image: "apache/nifi:1.13.0"
env:
- name: NIFI_CLUSTER_IS_NODE
value: "true"
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NIFI_CLUSTER_ADDRESS
value: $(HOSTNAME).nifi-hs
- name: NIFI_CLUSTER_NODE_PROTOCOL_PORT
value: "1025"
- name: NIFI_WEB_HTTP_HOST
value: $(HOSTNAME).nifi-hs.ns1.svc.cluster.local
#- name: NIFI_WEB_HTTP_PORT
# value: "80"
- name: NIFI_CLUSTER_NODE_PROTOCOL_MAX_THREADS
value: "100"
- name: NIFI_ZK_CONNECT_STRING
value: "zk-cs:2181"
- name: NIFI_ELECTION_MAX_CANDIDATES
value: "3"
ports:
- containerPort: 8080
name: client
- containerPort: 1025
name: nodeport
volumeMounts:
- name: nifi-database
mountPath: "/opt/nifi/nifi-current/database_repository"
- name: nifi-flowfile
mountPath: "/opt/nifi/nifi-current/flowfile_repository"
- name: nifi-content
mountPath: "/opt/nifi/nifi-current/content_repository"
- name: nifi-provenance
mountPath: "/opt/nifi/nifi-current/provenance_repository"
- name: nifi-state
mountPath: "/opt/nifi/nifi-current/state"
securityContext:
runAsUser: 1000
runAsGroup: 1000
fsGroup: 1000
volumeClaimTemplates:
- metadata:
name: nifi-database
spec:
storageClassName: "nifi-sc"
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 5Gi
- metadata:
name: nifi-flowfile
spec:
storageClassName: "nifi-sc"
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 5Gi
- metadata:
name: nifi-content
spec:
storageClassName: "nifi-sc"
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 5Gi
- metadata:
name: nifi-provenance
spec:
storageClassName: "nifi-sc"
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 5Gi
- metadata:
name: nifi-state
spec:
storageClassName: "nifi-sc"
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 5Gi
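For completeness, a short usage sketch for applying and verifying the cluster (assuming the manifests above are saved as nifi.yaml, that you deploy into the ns1 namespace referenced by NIFI_WEB_HTTP_HOST, and that the zk-cs ZooKeeper service from the linked post already exists there):
kubectl apply -f nifi.yaml -n ns1
# watch the three replicas come up one by one (OrderedReady pod management)
kubectl get pods -l app=nifi -n ns1 -w
# once they are Ready, the UI is exposed through the LoadBalancer service on port 80
kubectl get svc nifi-cs -n ns1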

How to Connect to the MongoDB Primary Node With a NodePort After the Primary Node Changes

I deployed a MongoDB replica set on Kubernetes and it works great!
I defined a NodePort service for my primary pod, whose name is always like <Statefulset_name>-0.
I connect to my primary node with this service. The problem is that when my primary node, i.e. <Statefulset_name>-0, gets terminated and MongoDB elects another primary, I can no longer reach the primary through that NodePort service.
How can I always connect to the primary node, even in this situation?
My manifests are below.
statefulset.yaml:
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: mongo
spec:
serviceName: "mongo"
replicas: 1
template:
metadata:
labels:
role: mongo
environment: test
spec:
terminationGracePeriodSeconds: 10
containers:
- name: mongo
image: mongo:3.4.9
command:
- /bin/sh
- -c
- >
if [ -f /data/db/admin-user.lock ]; then
mongod --replSet rs0 --clusterAuthMode keyFile --keyFile /etc/secrets-volume/mongodb-keyfile --setParameter authenticationMechanisms=SCRAM-SHA-1;
else
mongod --auth;
fi;
lifecycle:
postStart:
exec:
command:
- /bin/sh
- -c
- >
if [ ! -f /data/db/admin-user.lock ]; then
sleep 5;
touch /data/db/admin-user.lock
if [ "$HOSTNAME" = "mongo-0" ]; then
mongo --eval 'db = db.getSiblingDB("admin"); db.createUser({ user: "admin", pwd: "password", roles: [{ role: "root", db: "admin" }]});';
fi;
mongod --shutdown;
fi;
ports:
- containerPort: 27017
volumeMounts:
- name: mongo-key
mountPath: "/etc/secrets-volume"
readOnly: true
- name: mongo-persistent-storage
mountPath: /data/db
- name: mongo-sidecar
image: cvallance/mongo-k8s-sidecar
env:
- name: MONGO_SIDECAR_POD_LABELS
value: "role=mongo,environment=test"
- name: MONGODB_USERNAME
value: admin
- name: MONGODB_PASSWORD
value: password
- name: MONGODB_DATABASE
value: admin
volumes:
- name: mongo-key
secret:
defaultMode: 0400
secretName: mongo-key
volumeClaimTemplates:
- metadata:
name: mongo-persistent-storage
annotations:
volume.beta.kubernetes.io/storage-class: "fast-rbd"
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 100Gi
service.yaml:
apiVersion: v1
kind: Service
metadata:
name: mongo
labels:
name: mongo
spec:
ports:
- port: 27017
targetPort: 27017
clusterIP: None
selector:
role: mongo
---
apiVersion: v1
kind: Service
metadata:
name: mongo-sv
labels:
name: mongo
spec:
ports:
- port: 27017
targetPort: 27017
selector:
statefulset.kubernetes.io/pod-name: mongo-0
type: NodePort
What should I do to always connect to my primary node?
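A NodePort pinned to mongo-0 will always break on failover, because the primary can move to any member. The usual approach is to let the client driver discover the primary itself: list all replica-set members (reachable through the headless mongo service) and pass the replicaSet name, instead of targeting one pod. A rough sketch for in-cluster clients, assuming the rs0 replica set and admin credentials from the manifest above and a StatefulSet scaled to three members:
# the driver connects to any listed member, discovers the topology and always
# follows the current primary, so failover is handled automatically
mongo "mongodb://admin:password@mongo-0.mongo:27017,mongo-1.mongo:27017,mongo-2.mongo:27017/admin?replicaSet=rs0"
Clients outside the cluster still need addresses that resolve externally (one NodePort or LoadBalancer per pod), since the pod hostnames above only resolve inside the cluster.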