Unable to see logs on the Kibana dashboard - Kubernetes

I am using the ELK stack (elasticsearch, logstash, kibana) for log processing and analysis in a Kubernetes environment. To capture logs I am using filebeat.
The service account, cluster role, and cluster role binding for Elasticsearch are in the YAML below:
apiVersion: v1
kind: ServiceAccount
metadata:
name: elasticsearch
namespace: kube-system
labels:
k8s-app: elasticsearch
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: elasticsearch
labels:
k8s-app: elasticsearch
rules:
- apiGroups:
- ""
resources:
- "services"
- "namespaces"
- "endpoints"
verbs:
- "get"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: kube-system
name: elasticsearch
labels:
k8s-app: elasticsearch
subjects:
- kind: ServiceAccount
name: elasticsearch
namespace: kube-system
apiGroup: ""
roleRef:
kind: ClusterRole
name: elasticsearch
apiGroup: ""
elasticsearch service yaml
apiVersion: v1
kind: Service
metadata:
name: elasticsearch
namespace: kube-system
labels:
k8s-app: elasticsearch
spec:
ports:
- port: 9200
protocol: TCP
targetPort: db
selector:
k8s-app: elasticsearch
externalIPs:
- 10.10.0.82
Elasticsearch StatefulSet YAML below:
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: elasticsearch
namespace: kube-system
labels:
k8s-app: elasticsearch
spec:
serviceName: elasticsearch
replicas: 2
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
k8s-app: elasticsearch
template:
metadata:
labels:
k8s-app: elasticsearch
spec:
serviceAccountName: elasticsearch
containers:
- image: elasticsearch:6.8.4
name: elasticsearch
resources:
limits:
cpu: 1000m
memory: "2Gi"
requests:
cpu: 100m
memory: "1Gi"
ports:
- containerPort: 9200
name: db
protocol: TCP
- containerPort: 9300
name: transport
protocol: TCP
volumeMounts:
- name: data
mountPath: /data
env:
- name: "NAMESPACE"
valueFrom:
fieldRef:
fieldPath: metadata.namespace
initContainers:
- image: alpine:3.6
command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
name: elasticsearch-init
securityContext:
privileged: true
volumeClaimTemplates:
- metadata:
name: data
labels:
k8s-app: elasticsearch
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 10Gi
pv & pvc0 yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: elklogs-pv0
namespace: kube-system
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Recycle
nfs:
server: 10.10.0.131
path: /opt/data/vol/0
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: data-elasticsearch-0
namespace: kube-system
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
pv_pvc1.yaml
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: elklogs-pv1
namespace: kube-system
spec:
capacity:
storage: 1Gi
accessModes:
- ReadWriteMany
persistentVolumeReclaimPolicy: Recycle
nfs:
server: 10.10.0.131
path: /opt/data/vol/1
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: data-elasticsearch-1
namespace: kube-system
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: 1Gi
logstash_svc.yaml
kind: Service
apiVersion: v1
metadata:
name: logstash-service
namespace: kube-system
spec:
selector:
app: logstash
ports:
- protocol: TCP
port: 5044
targetPort: 5044
externalIPs:
- 10.10.0.82
logstash_config.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: logstash-configmap
namespace: kube-system
data:
logstash.yml: |
http.host: "0.0.0.0"
path.config: /usr/share/logstash/pipeline
logstash.conf: |
input {
beats {
port => 5044
}
}
filter {
grok {
match => { "message" => "%{COMBINEDAPACHELOG}" }
}
date {
match => [ "timestamp" , "dd/MMM/yyyy:HH:mm:ss Z" ]
}
geoip {
source => "clientip"
}
}
output {
elasticsearch {
hosts => ["http://10.10.0.82:9200"]
}
}
logstash deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: logstash-deployment
namespace: kube-system
spec:
replicas: 1
selector:
matchLabels:
app: logstash
template:
metadata:
labels:
app: logstash
spec:
containers:
- name: logstash
image: docker.elastic.co/logstash/logstash:6.3.0
ports:
- containerPort: 5044
volumeMounts:
- name: config-volume
mountPath: /usr/share/logstash/config
- name: logstash-pipeline-volume
mountPath: /usr/share/logstash/pipeline
volumes:
- name: config-volume
configMap:
name: logstash-configmap
items:
- key: logstash.yml
path: logstash.yml
- name: logstash-pipeline-volume
configMap:
name: logstash-configmap
items:
- key: logstash.conf
path: logstash.conf
filebeat.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: filebeat
namespace: kube-system
labels:
k8s-app: filebeat
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: filebeat
labels:
k8s-app: filebeat
rules:
- apiGroups: [""] # "" indicates the core API group
resources:
- namespaces
- pods
verbs:
- get
- watch
- list
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: filebeat
subjects:
- kind: ServiceAccount
name: filebeat
namespace: kube-system
roleRef:
kind: ClusterRole
name: filebeat
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ConfigMap
metadata:
name: filebeat-config
namespace: kube-system
labels:
k8s-app: filebeat
data:
filebeat.yml: |-
filebeat.config:
prospectors:
# Mounted `filebeat-prospectors` configmap:
path: ${path.config}/prospectors.d/*.yml
# Reload prospectors configs as they change:
reload.enabled: false
modules:
path: ${path.config}/modules.d/*.yml
# Reload module configs as they change:
reload.enabled: false
output.logstash:
hosts: ["http://10.10.0.82:5044"]
---
apiVersion: v1
kind: ConfigMap
metadata:
name: filebeat-prospectors
namespace: kube-system
labels:
k8s-app: filebeat
data:
kubernetes.yml: |-
- type: docker
containers.ids:
- "*"
processors:
- add_kubernetes_metadata:
in_cluster: true
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: filebeat
namespace: kube-system
labels:
k8s-app: filebeat
spec:
selector:
matchLabels:
k8s-app: filebeat
template:
metadata:
labels:
k8s-app: filebeat
spec:
serviceAccountName: filebeat
terminationGracePeriodSeconds: 30
containers:
- name: filebeat
image: docker.elastic.co/beats/filebeat:6.8.4
args: [
"-c", "/etc/filebeat.yml",
"-e",
]
securityContext:
runAsUser: 0
volumeMounts:
- name: config
mountPath: /etc/filebeat.yml
readOnly: true
subPath: filebeat.yml
- name: prospectors
mountPath: /usr/share/filebeat/prospectors.d
readOnly: true
- name: data
mountPath: /usr/share/filebeat/data
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
volumes:
- name: config
configMap:
defaultMode: 0600
name: filebeat-config
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: prospectors
configMap:
defaultMode: 0600
name: filebeat-prospectors
- name: data
emptyDir: {}
kibana.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
name: kibana-logging
namespace: kube-system
labels:
k8s-app: kibana-logging
spec:
replicas: 3
selector:
matchLabels:
k8s-app: kibana-logging
template:
metadata:
labels:
k8s-app: kibana-logging
spec:
containers:
- name: kibana-logging
image: docker.elastic.co/kibana/kibana-oss:6.8.4
env:
- name: ELASTICSEARCH_URL
value: http://10.10.0.82:9200
ports:
- containerPort: 5601
name: ui
protocol: TCP
---
apiVersion: v1
kind: Service
metadata:
name: kibana-logging
namespace: kube-system
labels:
k8s-app: kibana-logging
kubernetes.io/name: "Kibana"
spec:
type: NodePort
ports:
- port: 5601
protocol: TCP
targetPort: ui
nodePort: 32010
selector:
k8s-app: kibana-logging
kubectl get svc -n kube-system
elasticsearch ClusterIP 10.43.50.63 10.10.0.82 9200/TCP 31m
kibana-logging NodePort 10.43.58.127 10.10.0.82 5601:32010/TCP 4m4s
kube-dns ClusterIP 10.43.0.10 <none> 53/UDP,53/TCP,9153/TCP 23d
logstash-service ClusterIP 10.43.130.36 10.10.0.82 5044/TCP 30m
Filebeat pod logs:
2020-11-04T16:42:22.857Z INFO log/harvester.go:255 Harvester started for file: /var/lib/docker/containers/011d24d334bba573ffbb466b0f3f70ae5ddc986f233e683076eaae7394801203/011d24d334bba573ffbb466b0f3f70ae5ddc986f233e683076eaae7394801203-json.log
2020-11-04T16:42:22.983Z INFO pipeline/output.go:95 Connecting to backoff(async(tcp://logstash-service:9600))
2020-11-04T16:42:52.412Z INFO [monitoring] log/log.go:144 Non-zero metrics in the last 30s {"monitoring": {"metrics": {"beat":{"cpu":{"system":{"ticks":270,"time":{"ms":271}},"total":{"ticks":740,"time":{"ms":745},"value":740},"user":{"ticks":470,"time":{"ms":474}}},"handles":{"limit":{"hard":1048576,"soft":1048576},"open":97},"info":{"ephemeral_id":"6584086a-eff4-46b5-9be0-93892dad9d97","uptime":{"ms":30191}},"memstats":{"gc_next":36421840,"memory_alloc":32140904,"memory_total":55133048,"rss":65593344}},"filebeat":{"events":{"active":4214,"added":4219,"done":5},"harvester":{"open_files":89,"running":88,"started":88}},"libbeat":{"config":{"module":{"running":0},"reloads":2},"output":{"type":"logstash"},"pipeline":{"clients":2,"events":{"active":4117,"filtered":88,"published":4116,"total":4205}}},"registrar":{"states":{"current":5,"update":5},"writes":{"success":6,"total":6}},"system":{"cpu":{"cores":8},"load":{"1":1.9,"15":0.61,"5":0.9,"norm":{"1":0.2375,"15":0.0763,"5":0.1125}}}}}}
2020-11-04T16:42:54.289Z ERROR pipeline/output.go:100 Failed to connect to backoff(async(tcp://logstash-service:5044)): dial tcp 10.43.145.162:5044: i/o timeout
2020-11-04T16:42:54.289Z INFO pipeline/output.go:93 Attempting to reconnect to backoff(async(tcp://logstash-service:5044)) with 1 reconnect attempt(s)
Logstash pod logs:
[WARN ] 2020-11-04 15:45:04.648 [Ruby-0-Thread-4: /usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-output-elasticsearch-9.1.1-java/lib/logstash/outputs/elasticsearch/http_client/pool.rb:232] elasticsearch - Attempted to resurrect connection to dead ES instance, but got an error. {:url=>"http://elasticsearch:9200/", :error_type=>LogStash::Outputs::ElasticSearch::HttpClient::Pool::HostUnreachableError, :error=>"Elasticsearch Unreachable: [http://elasticsearch:9200/][Manticore::ResolutionFailure] elasticsearch"}

From what I understand of your architecture, you are using Filebeat >> Logstash >> Elasticsearch >> Kibana.
So in filebeat.yml you have selected Logstash as the output, but you have given the wrong port for the Logstash output in filebeat.yml.
It should be:
output.logstash:
  hosts: ['http://195.134.187.25:5044']
As you can see in logstash_config.yaml, 5044 is the port of the beats input, so make that change in the output.logstash section of filebeat.yml.
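Since Filebeat, Logstash, and Elasticsearch all run in the kube-system namespace here, another option worth mentioning is to point the outputs at the in-cluster Service DNS names rather than an external IP. This is only a sketch based on the Service names in the manifests above (logstash-service on 5044, elasticsearch on 9200), not a confirmed fix; note that the Beats logstash output expects plain host:port entries rather than an http:// URL:
output.logstash:
  # sketch: in-cluster DNS name of the logstash-service defined above
  hosts: ["logstash-service.kube-system.svc.cluster.local:5044"]
The elasticsearch output in logstash.conf could similarly use http://elasticsearch.kube-system.svc.cluster.local:9200, which avoids depending on the externalIPs entries of the Services.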

Related

GKE | Statefulset gets deleted along with a service when deployed in the kube-system namespace

I have a GKE cluster of 5 nodes in the same zone. I'm trying to deploy an Elasticsearch StatefulSet of 3 nodes in the kube-system namespace, but every time I do, the StatefulSet gets deleted and the pods go into the Terminating state immediately after the creation of the second pod.
I tried checking the pod logs and describing the pod for any information, but found nothing useful.
I even checked the GKE cluster logs, where I found the deletion request, but with no extra information about who initiated it or why it is happening.
When I changed the namespace to default, everything was fine and the pods reached the Ready state.
Below is the manifest file I'm using for this deployment.
# RBAC authn and authz
apiVersion: v1
kind: ServiceAccount
metadata:
name: elasticsearch-logging
namespace: kube-system
labels:
k8s-app: elasticsearch-logging
kubernetes.io/cluster-service: "true"
# addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: elasticsearch-logging
labels:
k8s-app: elasticsearch-logging
kubernetes.io/cluster-service: "true"
# addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- ""
resources:
- "services"
- "namespaces"
- "endpoints"
verbs:
- "get"
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: kube-system
name: elasticsearch-logging
labels:
k8s-app: elasticsearch-logging
kubernetes.io/cluster-service: "true"
# addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: elasticsearch-logging
namespace: kube-system
apiGroup: ""
roleRef:
kind: ClusterRole
name: elasticsearch-logging
apiGroup: ""
---
# Elasticsearch deployment itself
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: elasticsearch-logging
namespace: kube-system
labels:
k8s-app: elasticsearch-logging
version: 7.16.2
kubernetes.io/cluster-service: "true"
# addonmanager.kubernetes.io/mode: Reconcile
spec:
serviceName: elasticsearch-logging
replicas: 2
updateStrategy:
type: RollingUpdate
selector:
matchLabels:
k8s-app: elasticsearch-logging
version: 7.16.2
template:
metadata:
labels:
k8s-app: elasticsearch-logging
version: 7.16.2
kubernetes.io/cluster-service: "true"
spec:
serviceAccountName: elasticsearch-logging
containers:
- image: docker.elastic.co/elasticsearch/elasticsearch:7.16.2
name: elasticsearch-logging
resources:
# need more cpu upon initialization, therefore burstable class
limits:
cpu: 1000m
requests:
cpu: 100m
ports:
- containerPort: 9200
name: db
protocol: TCP
- containerPort: 9300
name: transport
protocol: TCP
volumeMounts:
- name: elasticsearch-logging
mountPath: /data
env:
#Added by Nour
- name: discovery.seed_hosts
value: elasticsearch-master-headless
- name: "NAMESPACE"
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumes:
- name: elasticsearch-logging
# emptyDir: {}
# Elasticsearch requires vm.max_map_count to be at least 262144.
# If your OS already sets up this number to a higher value, feel free
# to remove this init container.
initContainers:
- image: alpine:3.6
command: ["/sbin/sysctl", "-w", "vm.max_map_count=262144"]
name: elasticsearch-logging-init
securityContext:
privileged: true
volumeClaimTemplates:
- metadata:
name: elasticsearch-logging
spec:
storageClassName: "standard"
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 30Gi
---
apiVersion: v1
kind: Service
metadata:
name: elasticsearch-logging
namespace: kube-system
labels:
k8s-app: elasticsearch-logging
kubernetes.io/cluster-service: "true"
# addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "Elasticsearch"
spec:
type: NodePort
ports:
- port: 9200
protocol: TCP
targetPort: db
nodePort: 31335
selector:
k8s-app: elasticsearch-logging
#Added by Nour
---
apiVersion: v1
kind: Service
metadata:
labels:
app: elasticsearch-master
name: elasticsearch-master
namespace: kube-system
spec:
ports:
- name: http
port: 9200
protocol: TCP
targetPort: 9200
- name: transport
port: 9300
protocol: TCP
targetPort: 9300
selector:
app: elasticsearch-master
sessionAffinity: None
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
labels:
app: elasticsearch-master
name: elasticsearch-master-headless
namespace: kube-system
spec:
ports:
- name: http
port: 9200
protocol: TCP
targetPort: 9200
- name: transport
port: 9300
protocol: TCP
targetPort: 9300
clusterIP: None
selector:
app: elasticsearch-master
Below are the available namespaces
$ kubectl get ns
NAME STATUS AGE
default Active 4d15h
kube-node-lease Active 4d15h
kube-public Active 4d15h
kube-system Active 4d15h
Am I using any old API version that might cause the issue?
Thank you.
To close, I think it makes sense to paste the final answer here.
I understand your curiosity. I guess GCP just started preventing people from deploying things into the kube-system namespace, since that risks interfering with GKE itself. I had never tried to deploy to the kube-system namespace before, so I'm not sure whether it was always like this or whether it changed recently.
Overall, I recommend avoiding deploying your own workloads into the kube-system namespace in GKE.
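If it helps, the smallest way to follow that recommendation is to create a dedicated namespace and point the manifests at it instead of kube-system. The name logging below is just an example, not something prescribed by GKE:
apiVersion: v1
kind: Namespace
metadata:
  name: logging   # example name; any non-system namespace works
After creating it, replace namespace: kube-system with namespace: logging in the ServiceAccount, StatefulSet, and Service manifests above (and in the ClusterRoleBinding subject), which keeps the deployment out of the namespace GKE manages.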

Kubernetes PostgreSQL StatefulSet problem

I have 1 master and 2 nodes configured with Istio and MetalLB, but I can't get a PostgreSQL StatefulSet to work.
I have also configured a DNS record to redirect the request.
All pods and the StatefulSet are Running, but I get this error when I try to connect:
"Expected authentication request from server, but received H"
What am I doing wrong? (See the note after the config below.)
Config
apiVersion: v1
kind: Namespace
metadata:
name: awx
labels:
istio-injection: enabled
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: postgresql-db
namespace: awx
spec:
serviceName: postgresql-db
selector:
matchLabels:
app: postgresql-db
replicas: 2
template:
metadata:
labels:
app: postgresql-db
spec:
containers:
- name: postgresql-db
image: postgres:12.7
volumeMounts:
- name: postgresql-db-disk
mountPath: /data
# Config from Secret
envFrom:
- secretRef:
name: postgres-db-secret
ports:
- containerPort: 5432
name: postgresdb
# Volume Claim
volumeClaimTemplates:
- metadata:
name: postgresql-db-disk
namespace: awx
spec:
accessModes: ["ReadWriteOnce"]
storageClassName: "local-storage"
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
name: postgres-db
namespace: awx
labels:
app: postgresql-db
spec:
selector:
app: postgresql-db
type: ClusterIP
ports:
- name: tcp
port: 5432
protocol: TCP
targetPort: 5432
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
name: postgresql-db
namespace: awx
spec:
host: postgresql-db
trafficPolicy:
tls:
mode: DISABLE
---
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: postgresql-db-gateway
namespace: awx
spec:
selector:
istio: ingressgateway # use istio default controller
servers:
- port:
number: 80
name: http-postgresql-db
protocol: HTTP
hosts:
- "*"
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: postgresql-db-vs
namespace: awx
spec:
hosts:
- "postgresql.awx.k8s"
gateways:
- postgresql-db-gateway
http:
- route:
- destination:
host: postgresql-db
port:
number: 5432
---
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: local-storage
namespace: awx
provisioner: kubernetes.io/no-provisioner
volumeBindingMode: WaitForFirstConsumer
#volumeBindingMode: Immediate
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: postgres-pv-1
namespace: awx
spec:
capacity:
storage: 1Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
storageClassName: local-storage
local:
path: /home/xxx/storage/postgres
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- k8snode01
---
apiVersion: v1
kind: PersistentVolume
metadata:
name: postgres-pv-2
namespace: awx
spec:
capacity:
storage: 1Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Delete
storageClassName: local-storage
local:
path: /home/xxx/storage/postgres
nodeAffinity:
required:
nodeSelectorTerms:
- matchExpressions:
- key: kubernetes.io/hostname
operator: In
values:
- k8snode02
---
apiVersion: v1
kind: Secret
metadata:
name: postgres-db-secret
namespace: awx
type: Opaque
stringData:
POSTGRES_DB: awxDB
POSTGRES_USER: awxUSER
POSTGRES_PASSWORD: awxPASSWORD
PGDATA: /data/pgdata
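This question has no answer in the thread, but one hedged observation: a PostgreSQL client reporting "Expected authentication request from server, but received H" is usually reading an HTTP response (the leading "H" of "HTTP/1.1 ...") instead of the PostgreSQL wire protocol, and the Gateway above exposes the database through an HTTP server on port 80. If the intention is to reach Postgres through the Istio ingress gateway, a TCP server and a tcp route are the usual shape; the sketch below is only an illustration under that assumption (the destination host matches the postgres-db Service defined above, and the istio-ingressgateway Service would also need to expose port 5432):
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  name: postgresql-db-gateway
  namespace: awx
spec:
  selector:
    istio: ingressgateway
  servers:
  - port:
      number: 5432
      name: tcp-postgres
      protocol: TCP        # TCP, not HTTP, for the PostgreSQL protocol
    hosts:
    - "*"
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
  name: postgresql-db-vs
  namespace: awx
spec:
  hosts:
  - "*"
  gateways:
  - postgresql-db-gateway
  tcp:
  - match:
    - port: 5432
    route:
    - destination:
        host: postgres-db   # Service name from the manifest above
        port:
          number: 5432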

Why doesn't traffic spread evenly across the 3 pods of a Kubernetes StatefulSet?

I deployed Logstash as a StatefulSet with 3 replicas in k8s and use Filebeat to send data to it.
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: logstash-nginx
spec:
serviceName: "logstash"
selector:
matchLabels:
app: logstash
updateStrategy:
type: RollingUpdate
replicas: 3
template:
metadata:
labels:
app: logstash
spec:
containers:
- name: logstash
image: docker.elastic.co/logstash/logstash:7.10.0
resources:
limits:
memory: 2Gi
ports:
- containerPort: 5044
volumeMounts:
- name: config-volume
mountPath: /usr/share/logstash/config
- name: logstash-pipeline-volume
mountPath: /usr/share/logstash/pipeline
command: ["/bin/sh","-c"]
args:
- bin/logstash -f /usr/share/logstash/pipeline/logstash.conf;
volumes:
- name: config-volume
configMap:
name: logstash-configmap
items:
- key: logstash.yml
path: logstash.yml
- name: logstash-pipeline-volume
configMap:
name: logstash-configmap
items:
- key: logstash.conf
path: logstash.conf
Logstash's service
---
apiVersion: v1
kind: Service
metadata:
labels:
app: logstash
name: logstash
spec:
ports:
- name: "5044"
port: 5044
targetPort: 5044
selector:
app: logstash
Filebeat's daemonset configmap
---
apiVersion: v1
kind: ConfigMap
metadata:
name: filebeat-config
labels:
k8s-app: filebeat
data:
filebeat.yml: |-
...
output.logstash:
hosts: ["logstash.default.svc.cluster.local:5044"]
loadbalance: true
bulk_max_size: 1024
When running real data, most of it went to the second Logstash pod. Sometimes data also reaches the first and third pods, but only rarely.
Another way to set up load balancing from Filebeat:
---
apiVersion: v1
kind: ConfigMap
metadata:
name: filebeat-config
labels:
k8s-app: filebeat
data:
filebeat.yml: |-
...
output.logstash:
hosts: ["logstash-nginx-0.logstash.default.svc.cluster.local:5044", "logstash-nginx-1.logstash.default.svc.cluster.local:5044", "logstash-nginx-2.logstash.default.svc.cluster.local:5044"]
loadbalance: true
bulk_max_size: 1024
Logstash's configmap
---
apiVersion: v1
kind: ConfigMap
metadata:
name: logstash-configmap
data:
logstash.yml: |
http.host: "0.0.0.0"
path.config: /usr/share/logstash/pipeline
xpack.monitoring.enabled: false
From the Filebeat pod, these URIs are reachable with curl:
logstash-nginx-0.logstash.default.svc.cluster.local:5044
logstash-nginx-1.logstash.default.svc.cluster.local:5044
logstash-nginx-2.logstash.default.svc.cluster.local:5044
But Filebeat can't send data to the 3 Logstash pods at all; there is no traffic in Logstash's output logs. Where is it going wrong?
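Again no answer in the thread, but one detail that may matter: per-pod DNS names such as logstash-nginx-0.logstash.default.svc.cluster.local are only published when the governing Service is headless, and the logstash Service shown above has a normal ClusterIP (if those names really do resolve with curl as described, a headless Service may already exist and this check can be skipped). With a single ClusterIP host, Filebeat also tends to hold one long-lived connection, so kube-proxy pins most traffic to whichever pod that connection landed on. A sketch of the governing Service as headless, assuming the same labels and namespace as above (it would replace the ClusterIP Service, since the StatefulSet's serviceName points at logstash):
apiVersion: v1
kind: Service
metadata:
  name: logstash
  labels:
    app: logstash
spec:
  clusterIP: None        # headless: one DNS record per StatefulSet pod
  ports:
  - name: beats
    port: 5044
    targetPort: 5044
  selector:
    app: logstash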

Error: pod has unbound immediate PersistentVolumeClaims

I am trying to run Kafka with Kubeless, but I get the error "pod has unbound immediate PersistentVolumeClaims". I have created a persistent volume using Rook and Ceph and am trying to use this persistent volume with Kubeless Kafka. However, when I run the code I get "pod has unbound persistent volume claims".
What am I doing wrong here?
Persistent volume claim for Kafka
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: datadir
labels:
kubeless: kafka
spec:
storageClassName: rook-block
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
Persistent volume claim for Zookeeper
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: zookeeper
labels:
kubeless: zookeeper
spec:
storageClassName: rook-block
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
Kubeless Kafka
apiVersion: v1
kind: Service
metadata:
name: kafka
namespace: kubeless
spec:
ports:
- port: 9092
selector:
kubeless: kafka
---
apiVersion: v1
kind: Service
metadata:
name: zoo
namespace: kubeless
spec:
clusterIP: None
ports:
- name: peer
port: 9092
- name: leader-election
port: 3888
selector:
kubeless: zookeeper
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
labels:
kubeless: kafka-trigger-controller
name: kafka-trigger-controller
namespace: kubeless
spec:
selector:
matchLabels:
kubeless: kafka-trigger-controller
template:
metadata:
labels:
kubeless: kafka-trigger-controller
spec:
containers:
- env:
- name: KUBELESS_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: KUBELESS_CONFIG
value: kubeless-config
image: kubeless/kafka-trigger-controller:v1.0.2
imagePullPolicy: IfNotPresent
name: kafka-trigger-controller
serviceAccountName: controller-acct
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: kafka-controller-deployer
rules:
- apiGroups:
- ""
resources:
- services
- configmaps
verbs:
- get
- list
- apiGroups:
- kubeless.io
resources:
- functions
- kafkatriggers
verbs:
- get
- list
- watch
- update
- delete
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: kafka-controller-deployer
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kafka-controller-deployer
subjects:
- kind: ServiceAccount
name: controller-acct
namespace: kubeless
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: kafkatriggers.kubeless.io
spec:
group: kubeless.io
names:
kind: KafkaTrigger
plural: kafkatriggers
singular: kafkatrigger
scope: Namespaced
version: v1beta1
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: kafka
namespace: kubeless
spec:
serviceName: broker
template:
metadata:
labels:
kubeless: kafka
spec:
containers:
- env:
- name: KAFKA_ADVERTISED_HOST_NAME
value: broker.kubeless
- name: KAFKA_ADVERTISED_PORT
value: "9092"
- name: KAFKA_PORT
value: "9092"
- name: KAFKA_DELETE_TOPIC_ENABLE
value: "true"
- name: KAFKA_ZOOKEEPER_CONNECT
value: zookeeper.kubeless:2181
- name: ALLOW_PLAINTEXT_LISTENER
value: "yes"
image: bitnami/kafka:1.1.0-r0
imagePullPolicy: IfNotPresent
livenessProbe:
initialDelaySeconds: 30
tcpSocket:
port: 9092
name: broker
ports:
- containerPort: 9092
volumeMounts:
- mountPath: /bitnami/kafka/data
name: datadir
initContainers:
- command:
- sh
- -c
- chmod -R g+rwX /bitnami
image: busybox
imagePullPolicy: IfNotPresent
name: volume-permissions
volumeMounts:
- mountPath: /bitnami/kafka/data
name: datadir
volumeClaimTemplates:
- metadata:
name: datadir
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
name: broker
namespace: kubeless
spec:
clusterIP: None
ports:
- port: 9092
selector:
kubeless: kafka
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: zoo
namespace: kubeless
spec:
serviceName: zoo
template:
metadata:
labels:
kubeless: zookeeper
spec:
containers:
- env:
- name: ZOO_SERVERS
value: server.1=zoo-0.zoo:2888:3888:participant
- name: ALLOW_ANONYMOUS_LOGIN
value: "yes"
image: bitnami/zookeeper:3.4.10-r12
imagePullPolicy: IfNotPresent
name: zookeeper
ports:
- containerPort: 2181
name: client
- containerPort: 2888
name: peer
- containerPort: 3888
name: leader-election
volumeMounts:
- mountPath: /bitnami/zookeeper
name: zookeeper
initContainers:
- command:
- sh
- -c
- chmod -R g+rwX /bitnami
image: busybox
imagePullPolicy: IfNotPresent
name: volume-permissions
volumeMounts:
- mountPath: /bitnami/zookeeper
name: zookeeper
volumeClaimTemplates:
- metadata:
name: zookeeper
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
name: zookeeper
namespace: kubeless
spec:
ports:
- name: client
port: 2181
selector:
kubeless: zookeeper
ERROR
vagrant#ubuntu-xenial:~/infra/ansible/scripts/kubeless-kafka-trigger$ kubectl get pod -n kubeless
NAME READY STATUS RESTARTS AGE
kafka-0 0/1 Pending 0 8m44s
kafka-trigger-controller-7cbd54b458-pccpn 1/1 Running 0 8m47s
kubeless-controller-manager-5bcb6757d9-nlksd 3/3 Running 0 3h34m
zoo-0 0/1 Pending 0 8m42s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling 45s (x10 over 10m) default-scheduler pod has unbound immediate PersistentVolumeClaims (repeated 2 times
kubectl describe pod kafka-0 -n kubeless
Name: kafka-0
Namespace: kubeless
Priority: 0
Node: <none>
Labels: controller-revision-hash=kafka-c498d7f6
kubeless=kafka
statefulset.kubernetes.io/pod-name=kafka-0
Annotations: <none>
Status: Pending
IP:
Controlled By: StatefulSet/kafka
Init Containers:
volume-permissions:
Image: busybox
Port: <none>
Host Port: <none>
Command:
sh
-c
chmod -R g+rwX /bitnami
Environment: <none>
Mounts:
/bitnami/kafka/data from datadir (rw)
/var/run/secrets/kubernetes.io/serviceaccount from default-token-wj8vx (ro)
Containers:
broker:
Image: bitnami/kafka:1.1.0-r0
Port: 9092/TCP
Host Port: 0/TCP
Liveness: tcp-socket :9092 delay=30s timeout=1s period=10s #success=1 #failure=3
Environment:
KAFKA_ADVERTISED_HOST_NAME: broker.kubeless
KAFKA_ADVERTISED_PORT: 9092
KAFKA_PORT: 9092
KAFKA_DELETE_TOPIC_ENABLE: true
KAFKA_ZOOKEEPER_CONNECT: zookeeper.kubeless:2181
ALLOW_PLAINTEXT_LISTENER: yes
Mounts:
/bitnami/kafka/data from datadir (rw)
/var/run/secrets/kubernetes.io/serviceaccount from default-token-wj8vx (ro)
Conditions:
Type Status
PodScheduled False
Volumes:
datadir:
Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace)
ClaimName: datadir-kafka-0
ReadOnly: false
default-token-wj8vx:
Type: Secret (a volume populated by a Secret)
SecretName: default-token-wj8vx
Optional: false
QoS Class: BestEffort
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute for 300s
node.kubernetes.io/unreachable:NoExecute for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Warning FailedScheduling 45s (x10 over 10m) default-scheduler pod has unbound immediate PersistentVolumeClaims (repeated 2 times)
I got it working. For anyone who faces the same problem, this may be useful.
This uses rook-ceph storage with Kubeless Kafka (a quick verification check follows the manifests below).
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: kafka
namespace: kubeless
labels:
kubeless: kafka
spec:
storageClassName: rook-block
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
kind: PersistentVolumeClaim
apiVersion: v1
metadata:
name: zookeeper
namespace: kubeless
labels:
kubeless: zookeeper
spec:
storageClassName: rook-block
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
---
apiVersion: v1
kind: Service
metadata:
name: kafka
namespace: kubeless
spec:
ports:
- port: 9092
selector:
kubeless: kafka
---
apiVersion: v1
kind: Service
metadata:
name: zoo
namespace: kubeless
spec:
clusterIP: None
ports:
- name: peer
port: 9092
- name: leader-election
port: 3888
selector:
kubeless: zookeeper
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
labels:
kubeless: kafka-trigger-controller
name: kafka-trigger-controller
namespace: kubeless
spec:
selector:
matchLabels:
kubeless: kafka-trigger-controller
template:
metadata:
labels:
kubeless: kafka-trigger-controller
spec:
containers:
- env:
- name: KUBELESS_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: KUBELESS_CONFIG
value: kubeless-config
image: kubeless/kafka-trigger-controller:v1.0.2
imagePullPolicy: IfNotPresent
name: kafka-trigger-controller
serviceAccountName: controller-acct
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: kafka-controller-deployer
rules:
- apiGroups:
- ""
resources:
- services
- configmaps
verbs:
- get
- list
- apiGroups:
- kubeless.io
resources:
- functions
- kafkatriggers
verbs:
- get
- list
- watch
- update
- delete
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: kafka-controller-deployer
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kafka-controller-deployer
subjects:
- kind: ServiceAccount
name: controller-acct
namespace: kubeless
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: kafkatriggers.kubeless.io
spec:
group: kubeless.io
names:
kind: KafkaTrigger
plural: kafkatriggers
singular: kafkatrigger
scope: Namespaced
version: v1beta1
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: kafka
namespace: kubeless
spec:
serviceName: broker
template:
metadata:
labels:
kubeless: kafka
spec:
containers:
- env:
- name: KAFKA_ADVERTISED_HOST_NAME
value: broker.kubeless
- name: KAFKA_ADVERTISED_PORT
value: "9092"
- name: KAFKA_PORT
value: "9092"
- name: KAFKA_DELETE_TOPIC_ENABLE
value: "true"
- name: KAFKA_ZOOKEEPER_CONNECT
value: zookeeper.kubeless:2181
- name: ALLOW_PLAINTEXT_LISTENER
value: "yes"
image: bitnami/kafka:1.1.0-r0
imagePullPolicy: IfNotPresent
livenessProbe:
initialDelaySeconds: 30
tcpSocket:
port: 9092
name: broker
ports:
- containerPort: 9092
volumeMounts:
- mountPath: /bitnami/kafka/data
name: kafka
initContainers:
- command:
- sh
- -c
- chmod -R g+rwX /bitnami
image: busybox
imagePullPolicy: IfNotPresent
name: volume-permissions
volumeMounts:
- mountPath: /bitnami/kafka/data
name: kafka
volumes:
- name: kafka
persistentVolumeClaim:
claimName: kafka
---
apiVersion: v1
kind: Service
metadata:
name: broker
namespace: kubeless
spec:
clusterIP: None
ports:
- port: 9092
selector:
kubeless: kafka
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: zoo
namespace: kubeless
spec:
serviceName: zoo
template:
metadata:
labels:
kubeless: zookeeper
spec:
containers:
- env:
- name: ZOO_SERVERS
value: server.1=zoo-0.zoo:2888:3888:participant
- name: ALLOW_ANONYMOUS_LOGIN
value: "yes"
image: bitnami/zookeeper:3.4.10-r12
imagePullPolicy: IfNotPresent
name: zookeeper
ports:
- containerPort: 2181
name: client
- containerPort: 2888
name: peer
- containerPort: 3888
name: leader-election
volumeMounts:
- mountPath: /bitnami/zookeeper
name: zookeeper
initContainers:
- command:
- sh
- -c
- chmod -R g+rwX /bitnami
image: busybox
imagePullPolicy: IfNotPresent
name: volume-permissions
volumeMounts:
- mountPath: /bitnami/zookeeper
name: zookeeper
volumes:
- name: zookeeper
persistentVolumeClaim:
claimName: zookeeper
---
apiVersion: v1
kind: Service
metadata:
name: zookeeper
namespace: kubeless
spec:
ports:
- name: client
port: 2181
selector:
kubeless: zookeeper
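Before starting the pods, it is worth confirming that the claims above actually bind; assuming the kubeless namespace and the PVC names from these manifests, something like:
kubectl get pvc -n kubeless
kubectl get pod -n kubeless
Both the kafka and zookeeper claims should show STATUS Bound; if they stay Pending, the rook-block StorageClass is not provisioning volumes and the pods will keep reporting unbound PersistentVolumeClaims.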
I got the same error in minikube: I had forgotten to create volumes for my StatefulSets.
I created a PVC. Pay attention to the storageClassName and check which ones are available (I did it in the dashboard; a kubectl alternative is shown after the snippets below).
{
"kind": "PersistentVolumeClaim",
"apiVersion": "v1",
"metadata": {
"name": "XXXX",
"namespace": "kube-public",
"labels": {
"kubeless": "XXXX"
}
},
"spec": {
"storageClassName": "hostpath",
"accessModes": [
"ReadWriteOnce"
],
"resources": {
"requests": {
"storage": "1Gi"
}
}
}
}
That gave me persistent volumes.
Then I edited the StatefulSet:
"volumes": [
{
"name": "XXX",
"persistentVolumeClaim": {
"claimName": "XXX"
}
}
Added "persistentVolumeClaim" attribute, dropped pod, waited until new pod created.

argo workflow-controller can't connect to Kubernetes APIServer

I have installed Argo in my own namespace in a central Kubernetes cluster in my organization.
After installation, when the Argo "workflow-controller" tries to fetch its configmap from the API server, I get a timeout error.
time="2018-08-15T01:24:40Z" level=fatal msg="Get https://192.168.0.1:443/api/v1/namespaces/2304613691/configmaps/workflow-controller-configmap: dial tcp 192.168.0.1:443: i/o timeout\ngithub.com/argoproj/argo/errors.Wrap\n\t/root/go/src/github.com/argoproj/argo/errors/errors.go:87\ngithub.com/argoproj/argo/errors.InternalWrapError\n\t/root/go/src/github.com/argoproj/argo/errors/errors.go:70\ngithub.com/argoproj/argo/workflow/controller.(*WorkflowController).ResyncConfig\n\t/root/go/src/github.com/argoproj/argo/workflow/controller/controller.go:295\nmain.Run\n\t/root/go/src/github.com/argoproj/argo/cmd/workflow-controller/main.go:96\ngithub.com/argoproj/argo/vendor/github.com/spf13/cobra.(*Command).execute\n\t/root/go/src/github.com/argoproj/argo/vendor/github.com/spf13/cobra/command.go:750\ngithub.com/argoproj/argo/vendor/github.com/spf13/cobra.(*Command).ExecuteC\n\t/root/go/src/github.com/argoproj/argo/vendor/github.com/spf13/cobra/command.go:831\ngithub.com/argoproj/argo/vendor/github.com/spf13/cobra.(*Command).Execute\n\t/root/go/src/github.com/argoproj/argo/vendor/github.com/spf13/cobra/command.go:784\nmain.main\n\t/root/go/src/github.com/argoproj/argo/cmd/workflow-controller/main.go:68\nruntime.main\n\t/usr/local/go/src/runtime/proc.go:195\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:2337"
It is trying to access the following URL: https://192.168.0.1:443/api/v1/namespaces/2304613691/configmaps/workflow-controller-configmap from within the pod container.
I have also modified the Kubernetes host config to use kubernetes.default and added an allow-all ingress and egress network policy (see the note after the manifests below).
But the exception is still there.
time="2018-08-16T18:23:55Z" level=fatal msg="Get https://kubernetes.default:443/api/v1/namespaces/2304613691/configmaps/workflow-controller-configmap: dial tcp: i/o timeout\ngithub.com/argoproj/argo/errors.Wrap\n\t/root/go/src/github.com/argoproj/argo/errors/errors.go:87\ngithub.com/argoproj/argo/errors.InternalWrapError\n\t/root/go/src/github.com/argoproj/argo/errors/errors.go:70\ngithub.com/argoproj/argo/workflow/controller.(*WorkflowController).ResyncConfig\n\t/root/go/src/github.com/argoproj/argo/workflow/controller/controller.go:295\nmain.Run\n\t/root/go/src/github.com/argoproj/argo/cmd/workflow-controller/main.go:96\ngithub.com/argoproj/argo/vendor/github.com/spf13/cobra.(*Command).execute\n\t/root/go/src/github.com/argoproj/argo/vendor/github.com/spf13/cobra/command.go:750\ngithub.com/argoproj/argo/vendor/github.com/spf13/cobra.(*Command).ExecuteC\n\t/root/go/src/github.com/argoproj/argo/vendor/github.com/spf13/cobra/command.go:831\ngithub.com/argoproj/argo/vendor/github.com/spf13/cobra.(*Command).Execute\n\t/root/go/src/github.com/argoproj/argo/vendor/github.com/spf13/cobra/command.go:784\nmain.main\n\t/root/go/src/github.com/argoproj/argo/cmd/workflow-controller/main.go:68\nruntime.main\n\t/usr/local/go/src/runtime/proc.go:195\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:2337"
apiVersion: v1
items:
- apiVersion: v1
kind: ServiceAccount
metadata:
name: argo
namespace: 2304613691
- apiVersion: v1
kind: ServiceAccount
metadata:
name: argo-ui
namespace: 2304613691
kind: List
---
apiVersion: v1
items:
- apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: argo-role
namespace: 2304613691
rules:
- apiGroups:
- ""
resources:
- pods
- pods/exec
verbs:
- create
- get
- list
- watch
- update
- patch
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- watch
- list
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- create
- delete
- apiGroups:
- argoproj.io
resources:
- workflows
verbs:
- get
- list
- watch
- update
- patch
- apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: argo-ui-role
namespace: 2304613691
rules:
- apiGroups:
- ""
resources:
- pods
- pods/exec
- pods/log
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- apiGroups:
- argoproj.io
resources:
- workflows
verbs:
- get
- list
- watch
kind: List
---
apiVersion: v1
items:
- apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: argo-binding
namespace: "2304613691"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: argo-role
subjects:
- kind: ServiceAccount
name: argo
namespace: "2304613691"
- apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: argo-ui-binding
namespace: "2304613691"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: argo-ui-role
subjects:
- kind: ServiceAccount
name: argo-ui
namespace: "2304613691"
kind: List
---
apiVersion: v1
items:
- apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
generation: 1
name: workflow-controller
namespace: 2304613691
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: workflow-controller
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
labels:
app: workflow-controller
spec:
containers:
- args:
- --configmap
- workflow-controller-configmap
command:
- workflow-controller
env:
- name: ARGO_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
image: <our repo>/sample-agupta34/workflow-controller:v2.1.1
imagePullPolicy: IfNotPresent
name: workflow-controller
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: argo
serviceAccountName: argo
terminationGracePeriodSeconds: 30
- apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
generation: 1
name: argo-ui
namespace: 2304613691
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: argo-ui
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
labels:
app: argo-ui
spec:
containers:
- env:
- name: ARGO_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: IN_CLUSTER
value: "true"
- name: ENABLE_WEB_CONSOLE
value: "false"
- name: BASE_HREF
value: /
image: <our repo>/sample-agupta34/argoui:v2.1.1
imagePullPolicy: IfNotPresent
name: argo-ui
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: argo-ui
serviceAccountName: argo-ui
terminationGracePeriodSeconds: 30
kind: List
---
apiVersion: v1
data:
config: |
artifactRepository: {}
executorImage: <our repo>/sample-agupta34/argoexec:v2.1.1
kind: ConfigMap
metadata:
name: workflow-controller-configmap
namespace: 2304613691
---
apiVersion: v1
kind: Service
metadata:
name: argo-ui
namespace: 2304613691
labels:
app: argo-ui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 8001
selector:
app: argo-ui
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: argo-ui
namespace: 2304613691
annotations:
kubernetes.io/ingress.class: "netscaler.v2"
netscaler.applecloud.io/insecure-backend: "true"
spec:
backend:
serviceName: argo-ui
servicePort: 80
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: argo-and-argo-ui-netpol
spec:
podSelector:
matchLabels:
app: workflow-controller
app: argo-ui
ingress:
- {}
egress:
- {}
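One hedged observation on the NetworkPolicy at the end: matchLabels is a plain YAML map, so giving the app key twice (workflow-controller and argo-ui) means only one of the values is kept (typically the last), and the policy then only selects the argo-ui pods. An explicitly allow-all policy for every pod in the namespace might look like the sketch below (same namespace as the other manifests; an illustration, not a confirmed fix for the timeout, since the block could also come from an upstream firewall or another policy in the cluster):
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: allow-all
  namespace: "2304613691"
spec:
  podSelector: {}            # selects every pod in the namespace
  policyTypes:
  - Ingress
  - Egress
  ingress:
  - {}
  egress:
  - {}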