How to change the kube-dns service IP - Kubernetes

After installing the Kubernetes dashboard with this command:
kubectl apply -f https://raw.githubusercontent.com/kubernetes/dashboard/v1.10.1/src/deploy/recommended/kubernetes-dashboard.yaml
I checked the kube-system events like this:
kubectl get events --namespace=kube-system
It gives me these warnings:
LAST SEEN TYPE REASON OBJECT MESSAGE
2m38s Warning ClusterIPOutOfRange service/kube-dns Cluster IP 10.43.0.10 is not within the service CIDR 10.254.0.0/16; please recreate service
2m38s Warning ClusterIPOutOfRange service/metrics-server Cluster IP 10.43.96.112 is not within the service CIDR 10.254.0.0/16; please recreate service
Is there any way to change my kube-dns service IP so that it falls within the CIDR range? (I searched the internet and found no one changing the IP by hand; should I tweak the kube-dns component config?) This is the version info:
[root@iZuf63refzweg1d9dh94t8Z ~]# kubectl version
Client Version: version.Info{Major:"1", Minor:"15", GitVersion:"v1.15.2", GitCommit:"f6278300bebbb750328ac16ee6dd3aa7d3549568", GitTreeState:"clean", BuildDate:"2019-08-05T09:23:26Z", GoVersion:"go1.12.5", Compiler:"gc", Platform:"linux/amd64"}
Server Version: version.Info{Major:"1", Minor:"15", GitVersion:"v1.15.2", GitCommit:"f6278300bebbb750328ac16ee6dd3aa7d3549568", GitTreeState:"clean", BuildDate:"2019-08-05T09:15:22Z", GoVersion:"go1.12.5", Compiler:"gc", Platform:"linux/amd64"}
This is the cluster dump:
{
"kind": "NodeList",
"apiVersion": "v1",
"metadata": {
"selfLink": "/api/v1/nodes",
"resourceVersion": "73105"
},
"items": []
}
{
"kind": "EventList",
"apiVersion": "v1",
"metadata": {
"selfLink": "/api/v1/namespaces/kube-system/events",
"resourceVersion": "8044226"
},
"items": [
{
"metadata": {
"name": "kube-dns.15bf7a181c6f8459",
"namespace": "kube-system",
"selfLink": "/api/v1/namespaces/kube-system/events/kube-dns.15bf7a181c6f8459",
"uid": "3adfac28-ca39-4cc0-a516-08220a4be8b5",
"resourceVersion": "8044216",
"creationTimestamp": "2019-08-29T19:03:59Z"
},
"involvedObject": {
"kind": "Service",
"namespace": "kube-system",
"name": "kube-dns",
"uid": "2256b0f5-80f5-11e9-b3ce-00163e086f0c",
"apiVersion": "v1",
"resourceVersion": "379"
},
"reason": "ClusterIPOutOfRange",
"message": "Cluster IP 10.43.0.10 is not within the service CIDR 10.254.0.0/16; please recreate service",
"source": {
"component": "ipallocator-repair-controller"
},
"firstTimestamp": "2019-08-29T19:03:59Z",
"lastTimestamp": "2019-09-05T15:10:58Z",
"count": 3283,
"type": "Warning",
"eventTime": null,
"reportingComponent": "",
"reportingInstance": ""
},
{
"metadata": {
"name": "metrics-server.15bf7a181c8012e4",
"namespace": "kube-system",
"selfLink": "/api/v1/namespaces/kube-system/events/metrics-server.15bf7a181c8012e4",
"uid": "0be9374b-b497-4a9d-86d2-2b75da47b659",
"resourceVersion": "8044217",
"creationTimestamp": "2019-08-29T19:03:59Z"
},
"involvedObject": {
"kind": "Service",
"namespace": "kube-system",
"name": "metrics-server",
"uid": "255955e6-80f5-11e9-b3ce-00163e086f0c",
"apiVersion": "v1",
"resourceVersion": "427"
},
"reason": "ClusterIPOutOfRange",
"message": "Cluster IP 10.43.96.112 is not within the service CIDR 10.254.0.0/16; please recreate service",
"source": {
"component": "ipallocator-repair-controller"
},
"firstTimestamp": "2019-08-29T19:03:59Z",
"lastTimestamp": "2019-09-05T15:10:58Z",
"count": 3283,
"type": "Warning",
"eventTime": null,
"reportingComponent": "",
"reportingInstance": ""
}
]
}
{
"kind": "ReplicationControllerList",
"apiVersion": "v1",
"metadata": {
"selfLink": "/api/v1/namespaces/kube-system/replicationcontrollers",
"resourceVersion": "73105"
},
"items": []
}
{
"kind": "ServiceList",
"apiVersion": "v1",
"metadata": {
"selfLink": "/api/v1/namespaces/kube-system/services",
"resourceVersion": "8044226"
},
"items": [
{
"metadata": {
"name": "kube-dns",
"namespace": "kube-system",
"selfLink": "/api/v1/namespaces/kube-system/services/kube-dns",
"uid": "2256b0f5-80f5-11e9-b3ce-00163e086f0c",
"resourceVersion": "379",
"creationTimestamp": "2019-05-28T03:03:14Z",
"labels": {
"addonmanager.kubernetes.io/mode": "Reconcile",
"k8s-app": "kube-dns",
"kubernetes.io/cluster-service": "true",
"kubernetes.io/name": "KubeDNS"
},
"annotations": {
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"labels\":{\"addonmanager.kubernetes.io/mode\":\"Reconcile\",\"k8s-app\":\"kube-dns\",\"kubernetes.io/cluster-service\":\"true\",\"kubernetes.io/name\":\"KubeDNS\"},\"name\":\"kube-dns\",\"namespace\":\"kube-system\"},\"spec\":{\"clusterIP\":\"10.43.0.10\",\"ports\":[{\"name\":\"dns\",\"port\":53,\"protocol\":\"UDP\"},{\"name\":\"dns-tcp\",\"port\":53,\"protocol\":\"TCP\"}],\"selector\":{\"k8s-app\":\"kube-dns\"}}}\n"
}
},
"spec": {
"ports": [
{
"name": "dns",
"protocol": "UDP",
"port": 53,
"targetPort": 53
},
{
"name": "dns-tcp",
"protocol": "TCP",
"port": 53,
"targetPort": 53
}
],
"selector": {
"k8s-app": "kube-dns"
},
"clusterIP": "10.43.0.10",
"type": "ClusterIP",
"sessionAffinity": "None"
},
"status": {
"loadBalancer": {}
}
},
{
"metadata": {
"name": "metrics-server",
"namespace": "kube-system",
"selfLink": "/api/v1/namespaces/kube-system/services/metrics-server",
"uid": "255955e6-80f5-11e9-b3ce-00163e086f0c",
"resourceVersion": "427",
"creationTimestamp": "2019-05-28T03:03:19Z",
"labels": {
"kubernetes.io/name": "Metrics-server"
},
"annotations": {
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{},\"labels\":{\"kubernetes.io/name\":\"Metrics-server\"},\"name\":\"metrics-server\",\"namespace\":\"kube-system\"},\"spec\":{\"ports\":[{\"port\":443,\"protocol\":\"TCP\",\"targetPort\":443}],\"selector\":{\"k8s-app\":\"metrics-server\"}}}\n"
}
},
"spec": {
"ports": [
{
"protocol": "TCP",
"port": 443,
"targetPort": 443
}
],
"selector": {
"k8s-app": "metrics-server"
},
"clusterIP": "10.43.96.112",
"type": "ClusterIP",
"sessionAffinity": "None"
},
"status": {
"loadBalancer": {}
}
}
]
}
{
"kind": "DaemonSetList",
"apiVersion": "apps/v1",
"metadata": {
"selfLink": "/apis/apps/v1/namespaces/kube-system/daemonsets",
"resourceVersion": "73105"
},
"items": []
}
{
"kind": "DeploymentList",
"apiVersion": "apps/v1",
"metadata": {
"selfLink": "/apis/apps/v1/namespaces/kube-system/deployments",
"resourceVersion": "73105"
},
"items": []
}
{
"kind": "ReplicaSetList",
"apiVersion": "apps/v1",
"metadata": {
"selfLink": "/apis/apps/v1/namespaces/kube-system/replicasets",
"resourceVersion": "73105"
},
"items": [
{
"metadata": {
"name": "kubernetes-dashboard-7d75c474bb",
"namespace": "kube-system",
"selfLink": "/apis/apps/v1/namespaces/kube-system/replicasets/kubernetes-dashboard-7d75c474bb",
"uid": "1b426257-5d74-4f50-b368-45f65d926fdf",
"resourceVersion": "57629",
"generation": 1,
"creationTimestamp": "2019-08-29T15:11:10Z",
"labels": {
"k8s-app": "kubernetes-dashboard",
"pod-template-hash": "7d75c474bb"
},
"annotations": {
"deployment.kubernetes.io/desired-replicas": "1",
"deployment.kubernetes.io/max-replicas": "2",
"deployment.kubernetes.io/revision": "1"
},
"ownerReferences": [
{
"apiVersion": "apps/v1",
"kind": "Deployment",
"name": "kubernetes-dashboard",
"uid": "c5b2daaa-d306-43b1-ab0a-9745beb865de",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"replicas": 1,
"selector": {
"matchLabels": {
"k8s-app": "kubernetes-dashboard",
"pod-template-hash": "7d75c474bb"
}
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"k8s-app": "kubernetes-dashboard",
"pod-template-hash": "7d75c474bb"
}
},
"spec": {
"volumes": [
{
"name": "kubernetes-dashboard-certs",
"secret": {
"secretName": "kubernetes-dashboard-certs",
"defaultMode": 420
}
},
{
"name": "tmp-volume",
"emptyDir": {}
}
],
"containers": [
{
"name": "kubernetes-dashboard",
"image": "k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1",
"args": [
"--auto-generate-certificates"
],
"ports": [
{
"containerPort": 8443,
"protocol": "TCP"
}
],
"resources": {},
"volumeMounts": [
{
"name": "kubernetes-dashboard-certs",
"mountPath": "/certs"
},
{
"name": "tmp-volume",
"mountPath": "/tmp"
}
],
"livenessProbe": {
"httpGet": {
"path": "/",
"port": 8443,
"scheme": "HTTPS"
},
"initialDelaySeconds": 30,
"timeoutSeconds": 30,
"periodSeconds": 10,
"successThreshold": 1,
"failureThreshold": 3
},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "IfNotPresent"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"serviceAccountName": "kubernetes-dashboard",
"serviceAccount": "kubernetes-dashboard",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "node-role.kubernetes.io/master",
"effect": "NoSchedule"
}
]
}
}
},
"status": {
"replicas": 1,
"fullyLabeledReplicas": 1,
"observedGeneration": 1
}
}
]
}
{
"kind": "PodList",
"apiVersion": "v1",
"metadata": {
"selfLink": "/api/v1/namespaces/kube-system/pods",
"resourceVersion": "73105"
},
"items": [
{
"metadata": {
"name": "kubernetes-dashboard-7d75c474bb-b2lwd",
"generateName": "kubernetes-dashboard-7d75c474bb-",
"namespace": "kube-system",
"selfLink": "/api/v1/namespaces/kube-system/pods/kubernetes-dashboard-7d75c474bb-b2lwd",
"uid": "d4013a3c-7688-4a00-8acf-e5e49c10c772",
"resourceVersion": "57626",
"creationTimestamp": "2019-08-29T15:11:10Z",
"labels": {
"k8s-app": "kubernetes-dashboard",
"pod-template-hash": "7d75c474bb"
},
"ownerReferences": [
{
"apiVersion": "apps/v1",
"kind": "ReplicaSet",
"name": "kubernetes-dashboard-7d75c474bb",
"uid": "1b426257-5d74-4f50-b368-45f65d926fdf",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"volumes": [
{
"name": "kubernetes-dashboard-certs",
"secret": {
"secretName": "kubernetes-dashboard-certs",
"defaultMode": 420
}
},
{
"name": "tmp-volume",
"emptyDir": {}
},
{
"name": "kubernetes-dashboard-token-7k8wl",
"secret": {
"secretName": "kubernetes-dashboard-token-7k8wl",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "kubernetes-dashboard",
"image": "k8s.gcr.io/kubernetes-dashboard-amd64:v1.10.1",
"args": [
"--auto-generate-certificates"
],
"ports": [
{
"containerPort": 8443,
"protocol": "TCP"
}
],
"resources": {},
"volumeMounts": [
{
"name": "kubernetes-dashboard-certs",
"mountPath": "/certs"
},
{
"name": "tmp-volume",
"mountPath": "/tmp"
},
{
"name": "kubernetes-dashboard-token-7k8wl",
"readOnly": true,
"mountPath": "/var/run/secrets/kubernetes.io/serviceaccount"
}
],
"livenessProbe": {
"httpGet": {
"path": "/",
"port": 8443,
"scheme": "HTTPS"
},
"initialDelaySeconds": 30,
"timeoutSeconds": 30,
"periodSeconds": 10,
"successThreshold": 1,
"failureThreshold": 3
},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "IfNotPresent"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"serviceAccountName": "kubernetes-dashboard",
"serviceAccount": "kubernetes-dashboard",
"securityContext": {},
"schedulerName": "default-scheduler",
"tolerations": [
{
"key": "node-role.kubernetes.io/master",
"effect": "NoSchedule"
}
],
"enableServiceLinks": true
},
"status": {
"phase": "Pending",
"conditions": [
{
"type": "PodScheduled",
"status": "False",
"lastProbeTime": null,
"lastTransitionTime": "2019-08-29T15:11:10Z",
"reason": "Unschedulable",
"message": "no nodes available to schedule pods"
}
],
"qosClass": "BestEffort"
}
}
]
}
==== START logs for container kubernetes-dashboard of pod kube-system/kubernetes-dashboard-7d75c474bb-b2lwd ====
==== END logs for container kubernetes-dashboard of pod kube-system/kubernetes-dashboard-7d75c474bb-b2lwd ====
{
"kind": "EventList",
"apiVersion": "v1",
"metadata": {
"selfLink": "/api/v1/namespaces/default/events",
"resourceVersion": "8044226"
},
"items": [
{
"metadata": {
"name": "kubernetes.15bf7a181c3cf6a8",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/events/kubernetes.15bf7a181c3cf6a8",
"uid": "4502f2e2-efd1-4707-b402-bfb16194cd52",
"resourceVersion": "8044213",
"creationTimestamp": "2019-08-29T19:03:59Z"
},
"involvedObject": {
"kind": "Service",
"namespace": "default",
"name": "kubernetes",
"uid": "089121d4-80f5-11e9-b3ce-00163e086f0c",
"apiVersion": "v1",
"resourceVersion": "6"
},
"reason": "ClusterIPOutOfRange",
"message": "Cluster IP 10.43.0.1 is not within the service CIDR 10.254.0.0/16; please recreate service",
"source": {
"component": "ipallocator-repair-controller"
},
"firstTimestamp": "2019-08-29T19:03:59Z",
"lastTimestamp": "2019-09-05T15:10:58Z",
"count": 3283,
"type": "Warning",
"eventTime": null,
"reportingComponent": "",
"reportingInstance": ""
}
]
}
{
"kind": "ReplicationControllerList",
"apiVersion": "v1",
"metadata": {
"selfLink": "/api/v1/namespaces/default/replicationcontrollers",
"resourceVersion": "73105"
},
"items": []
}
{
"kind": "ServiceList",
"apiVersion": "v1",
"metadata": {
"selfLink": "/api/v1/namespaces/default/services",
"resourceVersion": "8044226"
},
"items": [
{
"metadata": {
"name": "kubernetes",
"namespace": "default",
"selfLink": "/api/v1/namespaces/default/services/kubernetes",
"uid": "089121d4-80f5-11e9-b3ce-00163e086f0c",
"resourceVersion": "6",
"creationTimestamp": "2019-05-28T03:02:31Z",
"labels": {
"component": "apiserver",
"provider": "kubernetes"
}
},
"spec": {
"ports": [
{
"name": "https",
"protocol": "TCP",
"port": 443,
"targetPort": 6443
}
],
"clusterIP": "10.43.0.1",
"type": "ClusterIP",
"sessionAffinity": "None"
},
"status": {
"loadBalancer": {}
}
}
]
}
{
"kind": "DaemonSetList",
"apiVersion": "apps/v1",
"metadata": {
"selfLink": "/apis/apps/v1/namespaces/default/daemonsets",
"resourceVersion": "73105"
},
"items": []
}
{
"kind": "DeploymentList",
"apiVersion": "apps/v1",
"metadata": {
"selfLink": "/apis/apps/v1/namespaces/default/deployments",
"resourceVersion": "73105"
},
"items": []
}
{
"kind": "ReplicaSetList",
"apiVersion": "apps/v1",
"metadata": {
"selfLink": "/apis/apps/v1/namespaces/default/replicasets",
"resourceVersion": "73105"
},
"items": []
}
{
"kind": "PodList",
"apiVersion": "v1",
"metadata": {
"selfLink": "/api/v1/namespaces/default/pods",
"resourceVersion": "73105"
},
"items": []
}
Cluster info dumped to standard output
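Since spec.clusterIP is immutable on an existing Service, the fix the events hint at ("please recreate service") is to delete the Service and re-apply it with an address inside 10.254.0.0/16. Below is a minimal sketch based on the kube-dns spec in the dump above; it assumes 10.254.0.10 is unallocated and that the kubelets' --cluster-dns setting is updated to the same address.
# Sketch: recreate kube-dns with a clusterIP inside the configured service CIDR (10.254.0.0/16).
# Assumes 10.254.0.10 is free and kubelet --cluster-dns is pointed at the same address.
kubectl -n kube-system delete service kube-dns
cat <<'EOF' | kubectl apply -f -
apiVersion: v1
kind: Service
metadata:
  name: kube-dns
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
    k8s-app: kube-dns
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: KubeDNS
spec:
  clusterIP: 10.254.0.10   # must fall inside the apiserver's --service-cluster-ip-range
  selector:
    k8s-app: kube-dns
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP
EOF
The metrics-server Service (10.43.96.112) can be recreated the same way, since its last-applied configuration carries no fixed clusterIP.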

Related

How to get the output via jsonpath with kubectl

I run kubectl get events to get the event details; now I'd like to do a fuzzy search to get only the pods with the prefix nginx-*.
Suppose I have the output below:
$ kubectl get events -o json
{
"apiVersion": "v1",
"items": [
{
"apiVersion": "v1",
"count": 1,
"eventTime": null,
"firstTimestamp": "2020-03-12T06:18:58Z",
"involvedObject": {
"apiVersion": "v1",
"kind": "Pod",
"name": "nginx-6db489d4b7-99xmd",
"namespace": "default",
"resourceVersion": "9683",
"uid": "64f6eeb1-c267-4ee1-b34d-14e65573d63f"
},
"kind": "Event",
"lastTimestamp": "2020-03-12T06:18:58Z",
"message": "Successfully assigned default/nginx-6db489d4b7-99xmd to kind-worker3",
"metadata": {
"creationTimestamp": "2020-03-12T06:18:58Z",
"name": "nginx-6db489d4b7-99xmd.15fb7a182197a184",
"namespace": "default",
"resourceVersion": "9703",
"selfLink": "/api/v1/namespaces/default/events/nginx-6db489d4b7-99xmd.15fb7a182197a184",
"uid": "de0ff737-e4d6-4218-b441-26c68a1ee8bd"
},
"reason": "Scheduled",
"reportingComponent": "",
"reportingInstance": "",
"source": {
"component": "default-scheduler"
},
"type": "Normal"
},
{
"apiVersion": "v1",
"count": 1,
"eventTime": null,
"firstTimestamp": "2020-03-12T06:18:59Z",
"involvedObject": {
"apiVersion": "v1",
"fieldPath": "spec.containers{nginx}",
"kind": "Pod",
"name": "nginx-6db489d4b7-99xmd",
"namespace": "default",
"resourceVersion": "9693",
"uid": "64f6eeb1-c267-4ee1-b34d-14e65573d63f"
},
"kind": "Event",
"lastTimestamp": "2020-03-12T06:18:59Z",
"message": "Pulling image \"nginx\"",
"metadata": {
"creationTimestamp": "2020-03-12T06:18:59Z",
"name": "nginx-6db489d4b7-99xmd.15fb7a18754d0bfc",
"namespace": "default",
"resourceVersion": "9709",
"selfLink": "/api/v1/namespaces/default/events/nginx-6db489d4b7-99xmd.15fb7a18754d0bfc",
"uid": "d541f134-5e9c-4b7f-b035-ae4d49a3745f"
},
"reason": "Pulling",
"reportingComponent": "",
"reportingInstance": "",
"source": {
"component": "kubelet",
"host": "kind-worker3"
},
"type": "Normal"
},
{
"apiVersion": "v1",
"count": 1,
"eventTime": null,
"firstTimestamp": "2020-03-12T06:18:26Z",
"involvedObject": {
"apiVersion": "v1",
"fieldPath": "spec.containers{nginx}",
"kind": "Pod",
"name": "nginx",
"namespace": "default",
"resourceVersion": "9555",
"uid": "f9d0ae86-4d7d-4553-91c2-efc0c3f8144f"
},
"kind": "Event",
"lastTimestamp": "2020-03-12T06:18:26Z",
"message": "Pulling image \"nginx\"",
"metadata": {
"creationTimestamp": "2020-03-12T06:18:26Z",
"name": "nginx.15fb7a10b4975ae0",
"namespace": "default",
"resourceVersion": "9565",
"selfLink": "/api/v1/namespaces/default/events/nginx.15fb7a10b4975ae0",
"uid": "f66cf712-1284-4f65-895a-5fbfa974e317"
},
"reason": "Pulling",
"reportingComponent": "",
"reportingInstance": "",
"source": {
"component": "kubelet",
"host": "kind-worker"
},
"type": "Normal"
},
{
"apiVersion": "v1",
"count": 1,
"eventTime": null,
"firstTimestamp": "2020-03-12T06:18:38Z",
"involvedObject": {
"apiVersion": "v1",
"fieldPath": "spec.containers{nginx}",
"kind": "Pod",
"name": "nginx",
"namespace": "default",
"resourceVersion": "9555",
"uid": "f9d0ae86-4d7d-4553-91c2-efc0c3f8144f"
},
"kind": "Event",
"lastTimestamp": "2020-03-12T06:18:38Z",
"message": "Successfully pulled image \"nginx\"",
"metadata": {
"creationTimestamp": "2020-03-12T06:18:38Z",
"name": "nginx.15fb7a13a4aed9fc",
"namespace": "default",
"resourceVersion": "9613",
"selfLink": "/api/v1/namespaces/default/events/nginx.15fb7a13a4aed9fc",
"uid": "55a4a512-d5c0-41da-ae9c-c1654b6bbdfe"
},
"reason": "Pulled",
"reportingComponent": "",
"reportingInstance": "",
"source": {
"component": "kubelet",
"host": "kind-worker"
},
"type": "Normal"
}
],
"kind": "List",
"metadata": {
"resourceVersion": "",
"selfLink": ""
}
}
I'd like to get the messages from the nginx-* pods only.
$ kubectl get events -o=jsonpath='{.items[*].involvedObject}'
But I am not sure how to check whether the name matches nginx-* and then extract its message:
"involvedObject": {
"apiVersion": "v1",
"kind": "Pod",
"name": "nginx-6db489d4b7-99xmd",
"namespace": "default",
"resourceVersion": "9683",
"uid": "64f6eeb1-c267-4ee1-b34d-14e65573d63f"
},
"message": "Successfully assigned default/nginx-6db489d4b7-99xmd to kind-worker3",
kubectl's jsonpath implementation doesn't support regexp matching, so this isn't possible using that tool alone (take a look at this GitHub issue). Fortunately, you can always use jq to filter the events; take a look at the example below.
kubectl get events -ojson | jq '.items[] | select(.involvedObject.name | test("^nginx-")) | .message'
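As a small variation on the same jq approach (not part of the original answer), you can keep the pod name next to each message, which helps when several nginx-* pods emit events:
# Print "pod-name: message" for every event whose involved object name starts with nginx-.
kubectl get events -o json | jq -r '.items[] | select(.involvedObject.name | test("^nginx-")) | "\(.involvedObject.name): \(.message)"'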

Prometheus does not pull data from traefik service in Kubernetes cluster

I am using Prometheus (quay.azk8s.cn/prometheus/prometheus:v2.15.2) to monitor traefik 2.1.6 in the Kubernetes monitoring namespace. I have made traefik expose metrics, and I can fetch them with curl from http://traefik-ip:8080/metrics, but Prometheus does not pull the data. I already added the annotations to the traefik service YAML in the kube-system namespace. This is the Prometheus StatefulSet config:
{
"kind": "StatefulSet",
"apiVersion": "apps/v1beta2",
"metadata": {
"name": "prometheus-k8s",
"namespace": "monitoring",
"selfLink": "/apis/apps/v1beta2/namespaces/monitoring/statefulsets/prometheus-k8s",
"uid": "4190d704-aa3b-40da-ab99-bac3cb10f186",
"resourceVersion": "18281285",
"generation": 7,
"creationTimestamp": "2020-03-04T16:31:01Z",
"labels": {
"prometheus": "k8s"
},
"annotations": {
"prometheus-operator-input-hash": "4895445337133709592"
},
"ownerReferences": [
{
"apiVersion": "monitoring.coreos.com/v1",
"kind": "Prometheus",
"name": "k8s",
"uid": "ddf7e48d-f982-4881-9312-0d50466870a9",
"controller": true,
"blockOwnerDeletion": true
}
]
},
"spec": {
"replicas": 2,
"selector": {
"matchLabels": {
"app": "prometheus",
"prometheus": "k8s"
}
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"app": "prometheus",
"prometheus": "k8s"
}
},
"spec": {
"volumes": [
{
"name": "config",
"secret": {
"secretName": "prometheus-k8s",
"defaultMode": 420
}
},
{
"name": "tls-assets",
"secret": {
"secretName": "prometheus-k8s-tls-assets",
"defaultMode": 420
}
},
{
"name": "config-out",
"emptyDir": {}
},
{
"name": "prometheus-k8s-rulefiles-0",
"configMap": {
"name": "prometheus-k8s-rulefiles-0",
"defaultMode": 420
}
},
{
"name": "prometheus-k8s-db",
"emptyDir": {}
}
],
"containers": [
{
"name": "prometheus",
"image": "quay.azk8s.cn/prometheus/prometheus:v2.15.2",
"args": [
"--web.console.templates=/etc/prometheus/consoles",
"--web.console.libraries=/etc/prometheus/console_libraries",
"--config.file=/etc/prometheus/config_out/prometheus.env.yaml",
"--storage.tsdb.path=/prometheus",
"--storage.tsdb.retention.time=24h",
"--web.enable-lifecycle",
"--storage.tsdb.no-lockfile",
"--web.route-prefix=/"
],
"ports": [
{
"name": "web",
"containerPort": 9090,
"protocol": "TCP"
}
],
"resources": {
"requests": {
"memory": "400Mi"
}
},
"volumeMounts": [
{
"name": "config-out",
"readOnly": true,
"mountPath": "/etc/prometheus/config_out"
},
{
"name": "tls-assets",
"readOnly": true,
"mountPath": "/etc/prometheus/certs"
},
{
"name": "prometheus-k8s-db",
"mountPath": "/prometheus"
},
{
"name": "prometheus-k8s-rulefiles-0",
"mountPath": "/etc/prometheus/rules/prometheus-k8s-rulefiles-0"
}
],
"livenessProbe": {
"httpGet": {
"path": "/-/healthy",
"port": "web",
"scheme": "HTTP"
},
"timeoutSeconds": 3,
"periodSeconds": 5,
"successThreshold": 1,
"failureThreshold": 6
},
"readinessProbe": {
"httpGet": {
"path": "/-/ready",
"port": "web",
"scheme": "HTTP"
},
"timeoutSeconds": 3,
"periodSeconds": 5,
"successThreshold": 1,
"failureThreshold": 120
},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "FallbackToLogsOnError",
"imagePullPolicy": "IfNotPresent"
},
{
"name": "prometheus-config-reloader",
"image": "quay.azk8s.cn/coreos/prometheus-config-reloader:v0.37.0",
"command": [
"/bin/prometheus-config-reloader"
],
"args": [
"--log-format=logfmt",
"--reload-url=http://localhost:9090/-/reload",
"--config-file=/etc/prometheus/config/prometheus.yaml.gz",
"--config-envsubst-file=/etc/prometheus/config_out/prometheus.env.yaml"
],
"env": [
{
"name": "POD_NAME",
"valueFrom": {
"fieldRef": {
"apiVersion": "v1",
"fieldPath": "metadata.name"
}
}
}
],
"resources": {
"limits": {
"cpu": "100m",
"memory": "25Mi"
},
"requests": {
"cpu": "100m",
"memory": "25Mi"
}
},
"volumeMounts": [
{
"name": "config",
"mountPath": "/etc/prometheus/config"
},
{
"name": "config-out",
"mountPath": "/etc/prometheus/config_out"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "FallbackToLogsOnError",
"imagePullPolicy": "IfNotPresent"
},
{
"name": "rules-configmap-reloader",
"image": "jimmidyson/configmap-reload:v0.3.0",
"args": [
"--webhook-url=http://localhost:9090/-/reload",
"--volume-dir=/etc/prometheus/rules/prometheus-k8s-rulefiles-0"
],
"resources": {
"limits": {
"cpu": "100m",
"memory": "25Mi"
},
"requests": {
"cpu": "100m",
"memory": "25Mi"
}
},
"volumeMounts": [
{
"name": "prometheus-k8s-rulefiles-0",
"mountPath": "/etc/prometheus/rules/prometheus-k8s-rulefiles-0"
}
],
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "FallbackToLogsOnError",
"imagePullPolicy": "IfNotPresent"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 600,
"dnsPolicy": "ClusterFirst",
"nodeSelector": {
"kubernetes.io/os": "linux"
},
"serviceAccountName": "prometheus-k8s",
"serviceAccount": "prometheus-k8s",
"securityContext": {
"runAsUser": 1000,
"runAsNonRoot": true,
"fsGroup": 2000
},
"schedulerName": "default-scheduler"
}
},
"serviceName": "prometheus-operated",
"podManagementPolicy": "Parallel",
"updateStrategy": {
"type": "RollingUpdate"
},
"revisionHistoryLimit": 10
},
"status": {
"observedGeneration": 7,
"replicas": 2,
"readyReplicas": 2,
"currentReplicas": 2,
"updatedReplicas": 2,
"currentRevision": "prometheus-k8s-6f76f69569",
"updateRevision": "prometheus-k8s-6f76f69569",
"collisionCount": 0
}
}
This is the traefik Service config:
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "traefik",
"namespace": "kube-system",
"selfLink": "/api/v1/namespaces/kube-system/services/traefik",
"uid": "b2695279-2467-4480-aab5-a720a43951c1",
"resourceVersion": "18280221",
"creationTimestamp": "2020-01-29T10:26:34Z",
"annotations": {
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"v1\",\"kind\":\"Service\",\"metadata\":{\"annotations\":{\"prometheus.io/port\":\"8080\",\"prometheus.io/scrape\":\"true\"},\"name\":\"traefik\",\"namespace\":\"kube-system\"},\"spec\":{\"ports\":[{\"name\":\"web\",\"port\":80},{\"name\":\"websecure\",\"port\":443},{\"name\":\"metrics\",\"port\":8080}],\"selector\":{\"app\":\"traefik\"}}}\n",
"prometheus.io/port": "8080",
"prometheus.io/scrape": "true"
}
},
"spec": {
"ports": [
{
"name": "web",
"protocol": "TCP",
"port": 80,
"targetPort": 80
},
{
"name": "websecure",
"protocol": "TCP",
"port": 443,
"targetPort": 443
},
{
"name": "metrics",
"protocol": "TCP",
"port": 8080,
"targetPort": 8080
}
],
"selector": {
"app": "traefik"
},
"clusterIP": "10.254.169.66",
"type": "ClusterIP",
"sessionAffinity": "None"
},
"status": {
"loadBalancer": {}
}
}
I read some docs suggesting that I should configure the scrape job in a Kubernetes (v1.15.2) ConfigMap like this:
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-config
  namespace: kube-ops
data:
  prometheus.yml: |
    global:
      scrape_interval: 30s
      scrape_timeout: 30s
    scrape_configs:
    - job_name: 'prometheus'
      static_configs:
      - targets: ['localhost:9090']
    - job_name: 'traefik'
      static_configs:
      - targets: ['traefik-ingress-service.kube-system.svc.cluster.local:8080']
and I added that config to my Prometheus YAML. Am I missing something? I did these steps:
expose the traefik metrics URL (success)
add the annotations to my traefik service (success)
but the metrics data is not collected. I have been stuck on this problem for 2 days; what should I do to make it work? This is my Prometheus service discovery dashboard:
But when I query data from Prometheus I find nothing:
http_requests_total{job="traefik"}
Note that with the new version (v2.1.6) of traefik, the query to check the pulled data is:
traefik_entrypoint_requests_total{job="traefik"}
With that query you can see that Prometheus pulled the data successfully.
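One quick way to confirm which metric names the traefik 2.x endpoint actually exposes before querying Prometheus is to hit the /metrics port directly; a sketch, with the Service name, namespace, and port taken from the config above:
# Forward the traefik metrics port locally, then list the entrypoint counters.
kubectl -n kube-system port-forward svc/traefik 8080:8080 &
PF_PID=$!
sleep 2   # give the port-forward a moment to establish
curl -s http://localhost:8080/metrics | grep '^traefik_entrypoint_requests_total'
kill "$PF_PID"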

Kubernetes metrics-server unable to add metric-resolution flag

I am using Kubernetes v1.9.7-gke.6. I am trying to edit the metrics-server deployment YAML and add the --metric-resolution flag. When I add the flag and save the change, the terminal reports that the edit was successful, but when I edit the metrics-server deployment again the flag I added is gone. Is there any way to edit the metrics-server deployment YAML?
Here is the deployment; it is the default one created when I create a new Kubernetes cluster on Google Cloud.
{
"apiVersion": "extensions/v1beta1",
"kind": "Deployment",
"metadata": {
"annotations": {
"deployment.kubernetes.io/revision": "12",
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"extensions/v1beta1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"labels\":{\"addonmanager.kubernetes.io/mode\":\"Reconcile\",\"k8s-app\":\"metrics-server\",\"kubernetes.io/cluster-service\":\"true\",\"version\":\"v0.2.1\"},\"name\":\"metrics-server-v0.2.1\",\"namespace\":\"kube-system\"},\"spec\":{\"selector\":{\"matchLabels\":{\"k8s-app\":\"metrics-server\",\"version\":\"v0.2.1\"}},\"template\":{\"metadata\":{\"annotations\":{\"scheduler.alpha.kubernetes.io/critical-pod\":\"\"},\"labels\":{\"k8s-app\":\"metrics-server\",\"version\":\"v0.2.1\"},\"name\":\"metrics-server\"},\"spec\":{\"containers\":[{\"command\":[\"/metrics-server\",\"--source=kubernetes.summary_api:''\"],\"image\":\"gcr.io/google_containers/metrics-server-amd64:v0.2.1\",\"name\":\"metrics-server\",\"ports\":[{\"containerPort\":443,\"name\":\"https\",\"protocol\":\"TCP\"}]},{\"command\":[\"/pod_nanny\",\"--config-dir=/etc/config\",\"--cpu=40m\",\"--extra-cpu=0.5m\",\"--memory=40Mi\",\"--extra-memory=4Mi\",\"--threshold=5\",\"--deployment=metrics-server-v0.2.1\",\"--container=metrics-server\",\"--poll-period=300000\",\"--estimator=exponential\"],\"env\":[{\"name\":\"MY_POD_NAME\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.name\"}}},{\"name\":\"MY_POD_NAMESPACE\",\"valueFrom\":{\"fieldRef\":{\"fieldPath\":\"metadata.namespace\"}}}],\"image\":\"gcr.io/google_containers/addon-resizer:1.8.1\",\"name\":\"metrics-server-nanny\",\"resources\":{\"limits\":{\"cpu\":\"100m\",\"memory\":\"300Mi\"},\"requests\":{\"cpu\":\"5m\",\"memory\":\"50Mi\"}},\"volumeMounts\":[{\"mountPath\":\"/etc/config\",\"name\":\"metrics-server-config-volume\"}]}],\"serviceAccountName\":\"metrics-server\",\"tolerations\":[{\"key\":\"CriticalAddonsOnly\",\"operator\":\"Exists\"}],\"volumes\":[{\"configMap\":{\"name\":\"metrics-server-config\"},\"name\":\"metrics-server-config-volume\"}]}}}}\n"
},
"creationTimestamp": "2018-09-20T13:04:03Z",
"generation": 14,
"labels": {
"addonmanager.kubernetes.io/mode": "Reconcile",
"k8s-app": "metrics-server",
"kubernetes.io/cluster-service": "true",
"version": "v0.2.1"
},
"name": "metrics-server-v0.2.1",
"namespace": "kube-system",
"resourceVersion": "822513",
"selfLink": "/apis/extensions/v1beta1/namespaces/kube-system/deployments/metrics-server-v0.2.1",
"uid": "a5cd1f4c-bcd5-11e8-9313-42010a80005f"
},
"spec": {
"replicas": 1,
"selector": {
"matchLabels": {
"k8s-app": "metrics-server",
"version": "v0.2.1"
}
},
"strategy": {
"rollingUpdate": {
"maxSurge": 1,
"maxUnavailable": 1
},
"type": "RollingUpdate"
},
"template": {
"metadata": {
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
},
"creationTimestamp": null,
"labels": {
"k8s-app": "metrics-server",
"version": "v0.2.1"
},
"name": "metrics-server"
},
"spec": {
"containers": [
{
"command": [
"/metrics-server",
"--source=kubernetes.summary_api:''"
],
"image": "gcr.io/google_containers/metrics-server-amd64:v0.2.1",
"imagePullPolicy": "IfNotPresent",
"name": "metrics-server",
"ports": [
{
"containerPort": 443,
"name": "https",
"protocol": "TCP"
}
],
"resources": {
"limits": {
"cpu": "48m",
"memory": "104Mi"
},
"requests": {
"cpu": "48m",
"memory": "104Mi"
}
},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File"
},
{
"command": [
"/pod_nanny",
"--config-dir=/etc/config",
"--cpu=40m",
"--extra-cpu=0.5m",
"--memory=40Mi",
"--extra-memory=4Mi",
"--threshold=5",
"--deployment=metrics-server-v0.2.1",
"--container=metrics-server",
"--poll-period=300000",
"--estimator=exponential"
],
"env": [
{
"name": "MY_POD_NAME",
"valueFrom": {
"fieldRef": {
"apiVersion": "v1",
"fieldPath": "metadata.name"
}
}
},
{
"name": "MY_POD_NAMESPACE",
"valueFrom": {
"fieldRef": {
"apiVersion": "v1",
"fieldPath": "metadata.namespace"
}
}
}
],
"image": "gcr.io/google_containers/addon-resizer:1.8.1",
"imagePullPolicy": "IfNotPresent",
"name": "metrics-server-nanny",
"resources": {
"limits": {
"cpu": "100m",
"memory": "300Mi"
},
"requests": {
"cpu": "5m",
"memory": "50Mi"
}
},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"volumeMounts": [
{
"mountPath": "/etc/config",
"name": "metrics-server-config-volume"
}
]
}
],
"dnsPolicy": "ClusterFirst",
"restartPolicy": "Always",
"schedulerName": "default-scheduler",
"securityContext": {},
"serviceAccount": "metrics-server",
"serviceAccountName": "metrics-server",
"terminationGracePeriodSeconds": 30,
"tolerations": [
{
"key": "CriticalAddonsOnly",
"operator": "Exists"
}
],
"volumes": [
{
"configMap": {
"defaultMode": 420,
"name": "metrics-server-config"
},
"name": "metrics-server-config-volume"
}
]
}
}
},
"status": {
"availableReplicas": 1,
"conditions": [
{
"lastTransitionTime": "2018-09-20T13:04:03Z",
"lastUpdateTime": "2018-09-20T13:04:03Z",
"message": "Deployment has minimum availability.",
"reason": "MinimumReplicasAvailable",
"status": "True",
"type": "Available"
}
],
"observedGeneration": 14,
"readyReplicas": 1,
"replicas": 1,
"updatedReplicas": 1
}
}
Editing the YAML/flags of anything in kube-system on GKE (Google Kubernetes Engine) will not work, as it will get reverted by the master. So that part is working as intended.
It looks like fluentd, which is auto-managed by GKE for logging, is what is causing the changes to get reverted. The only option I can think of would be to disable the GKE addons (i.e. cloud logging), roll your own fluentd DaemonSet, and then configure things yourself. I recommend visiting this discussion for more information.
Additionally, take a look at this guide if you'd like to roll your own fluentd on your cluster as well.
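For completeness, this is roughly what adding the flag looks like on a cluster where the addon manager does not revert edits (a sketch only; on GKE the change is expected to disappear again as described above, and the 30s value is just an example):
# Append --metric-resolution to the metrics-server container's command list (container index 0 in this deployment).
kubectl -n kube-system patch deployment metrics-server-v0.2.1 --type=json \
  -p='[{"op": "add", "path": "/spec/template/spec/containers/0/command/-", "value": "--metric-resolution=30s"}]'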

Kubernetes rolling update not working

I have 2 Kubernetes installs for different projects that, as best as I can see, have equivalent configuration in the areas that matter, yet the two perform rolling updates differently.
Both were installed on AWS using kops.
System 1 (k8s v1.7.0) - Killing a pod in a deployment using the k8s web GUI creates the new pod first and, once it is running, terminates the old pod. No downtime.
System 2 (k8s v1.8.4) - Killing a pod in a deployment using the k8s web GUI kills the old pod instantly and then creates the new pod. Causes brief downtime.
Any suggestions or ideas as to why they behave differently, and how I can get System 2 to create the new pod before terminating the old one?
System 1 deployment
{
"kind": "Deployment",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "proxy-deployment",
"namespace": "namespace",
"selfLink": "/apis/extensions/v1beta1/namespaces/namespace/deployments/proxy-deployment",
"uid": "d12778ba-8950-11e7-9e69-12f38e55b21a",
"resourceVersion": "31538492",
"generation": 7,
"creationTimestamp": "2017-08-25T04:49:45Z",
"labels": {
"app": "proxy"
},
"annotations": {
"deployment.kubernetes.io/revision": "6",
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"apps/v1beta1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"name\":\"proxy-deployment\",\"namespace\":\"namespace\"},\"spec\":{\"replicas\":2,\"template\":{\"metadata\":{\"labels\":{\"app\":\"proxy\"}},\"spec\":{\"containers\":[{\"image\":\"xxxxxxxxxxx.dkr.ecr.us-east-1.amazonaws.com/nginx-proxy-xxxxxx:latest\",\"name\":\"proxy-ctr\",\"ports\":[{\"containerPort\":80},{\"containerPort\":8080}]}]}}}}\n"
}
},
"spec": {
"replicas": 1,
"selector": {
"matchLabels": {
"app": "proxy"
}
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"app": "proxy",
"date": "1522386390"
}
},
"spec": {
"containers": [
{
"name": "proxy-ctr",
"image": "xxxxxxxxxxx.dkr.ecr.us-east-1.amazonaws.com/nginx-proxy-xxxxxx:latest",
"ports": [
{
"containerPort": 80,
"protocol": "TCP"
},
{
"containerPort": 8080,
"protocol": "TCP"
}
],
"resources": {},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"securityContext": {},
"schedulerName": "default-scheduler"
}
},
"strategy": {
"type": "RollingUpdate",
"rollingUpdate": {
"maxUnavailable": "25%",
"maxSurge": "25%"
}
},
"revisionHistoryLimit": 2,
"progressDeadlineSeconds": 600
},
"status": {
"observedGeneration": 7,
"replicas": 1,
"updatedReplicas": 1,
"readyReplicas": 1,
"availableReplicas": 1,
"conditions": [
{
"type": "Progressing",
"status": "True",
"lastUpdateTime": "2018-03-30T05:03:01Z",
"lastTransitionTime": "2017-08-25T04:49:45Z",
"reason": "NewReplicaSetAvailable",
"message": "ReplicaSet \"proxy-deployment-1457650622\" has successfully progressed."
},
{
"type": "Available",
"status": "True",
"lastUpdateTime": "2018-06-01T06:55:12Z",
"lastTransitionTime": "2018-06-01T06:55:12Z",
"reason": "MinimumReplicasAvailable",
"message": "Deployment has minimum availability."
}
]
}
}
System 2 Deployment
{
"kind": "Deployment",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "prodefault-deployment",
"namespace": "namespace",
"selfLink": "/apis/extensions/v1beta1/namespaces/namespace/deployments/prodefault-deployment",
"uid": "a80528c8-eb79-11e7-9364-068125440f70",
"resourceVersion": "25203392",
"generation": 10,
"creationTimestamp": "2017-12-28T02:49:00Z",
"labels": {
"app": "prodefault"
},
"annotations": {
"deployment.kubernetes.io/revision": "7",
"kubectl.kubernetes.io/last-applied-configuration": "{\"apiVersion\":\"apps/v1beta1\",\"kind\":\"Deployment\",\"metadata\":{\"annotations\":{},\"name\":\"prodefault-deployment\",\"namespace\":\"namespace\"},\"spec\":{\"replicas\":1,\"strategy\":{\"rollingUpdate\":{\"maxSurge\":\"25%\",\"maxUnavailable\":\"25%\"},\"type\":\"RollingUpdate\"},\"template\":{\"metadata\":{\"labels\":{\"app\":\"prodefault\"}},\"spec\":{\"containers\":[{\"image\":\"xxxxxxxxxxxx.dkr.ecr.us-west-2.amazonaws.com/xxxxxxxxxxx-pro-default:latest\",\"livenessProbe\":{\"httpGet\":{\"path\":\"/healthchk\",\"port\":80},\"initialDelaySeconds\":120,\"periodSeconds\":15,\"timeoutSeconds\":1},\"name\":\"prodefault-ctr\",\"ports\":[{\"containerPort\":80}],\"readinessProbe\":{\"httpGet\":{\"path\":\"/healthchk\",\"port\":80},\"initialDelaySeconds\":5,\"periodSeconds\":2,\"timeoutSeconds\":3},\"resources\":{\"limits\":{\"cpu\":\"1\",\"memory\":\"1024Mi\"},\"requests\":{\"cpu\":\"150m\",\"memory\":\"256Mi\"}},\"volumeMounts\":[{\"mountPath\":\"/var/www/html/homes\",\"name\":\"efs-pvc\"},{\"mountPath\":\"/var/xero\",\"name\":\"xero-key\",\"readOnly\":true},{\"mountPath\":\"/var/gcal\",\"name\":\"gcal-json\",\"readOnly\":true}]}],\"volumes\":[{\"name\":\"efs-pvc\",\"persistentVolumeClaim\":{\"claimName\":\"tio-pv-claim-homes\"}},{\"name\":\"xero-key\",\"secret\":{\"secretName\":\"xero-key\"}},{\"name\":\"gcal-json\",\"secret\":{\"secretName\":\"gcaljson\"}}]}}}}\n"
}
},
"spec": {
"replicas": 1,
"selector": {
"matchLabels": {
"app": "prodefault"
}
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"app": "prodefault"
}
},
"spec": {
"volumes": [
{
"name": "efs-pvc",
"persistentVolumeClaim": {
"claimName": "tio-pv-claim-homes"
}
},
{
"name": "xero-key",
"secret": {
"secretName": "xero-key",
"defaultMode": 420
}
},
{
"name": "gcal-json",
"secret": {
"secretName": "gcaljson",
"defaultMode": 420
}
}
],
"containers": [
{
"name": "prodefault-ctr",
"image": "xxxxxxxxxxxx.dkr.ecr.us-west-2.amazonaws.com/xxxxxxxxxxx-pro-default:latest",
"ports": [
{
"containerPort": 80,
"protocol": "TCP"
}
],
"resources": {
"limits": {
"cpu": "1",
"memory": "1Gi"
},
"requests": {
"cpu": "150m",
"memory": "256Mi"
}
},
"volumeMounts": [
{
"name": "efs-pvc",
"mountPath": "/var/www/html/homes"
},
{
"name": "xero-key",
"readOnly": true,
"mountPath": "/var/xero"
},
{
"name": "gcal-json",
"readOnly": true,
"mountPath": "/var/gcal"
}
],
"livenessProbe": {
"httpGet": {
"path": "/healthchk",
"port": 80,
"scheme": "HTTP"
},
"initialDelaySeconds": 120,
"timeoutSeconds": 1,
"periodSeconds": 15,
"successThreshold": 1,
"failureThreshold": 3
},
"readinessProbe": {
"httpGet": {
"path": "/healthchk",
"port": 80,
"scheme": "HTTP"
},
"initialDelaySeconds": 5,
"timeoutSeconds": 3,
"periodSeconds": 2,
"successThreshold": 1,
"failureThreshold": 3
},
"terminationMessagePath": "/dev/termination-log",
"terminationMessagePolicy": "File",
"imagePullPolicy": "Always"
}
],
"restartPolicy": "Always",
"terminationGracePeriodSeconds": 30,
"dnsPolicy": "ClusterFirst",
"securityContext": {},
"schedulerName": "default-scheduler"
}
},
"strategy": {
"type": "RollingUpdate",
"rollingUpdate": {
"maxUnavailable": "25%",
"maxSurge": "25%"
}
},
"revisionHistoryLimit": 2,
"progressDeadlineSeconds": 600
},
"status": {
"observedGeneration": 10,
"replicas": 1,
"updatedReplicas": 1,
"readyReplicas": 1,
"availableReplicas": 1,
"conditions": [
{
"type": "Progressing",
"status": "True",
"lastUpdateTime": "2018-01-15T06:07:52Z",
"lastTransitionTime": "2017-12-28T03:00:16Z",
"reason": "NewReplicaSetAvailable",
"message": "ReplicaSet \"prodefault-deployment-9685f46d4\" has successfully progressed."
},
{
"type": "Available",
"status": "True",
"lastUpdateTime": "2018-06-13T07:12:41Z",
"lastTransitionTime": "2018-06-13T07:12:41Z",
"reason": "MinimumReplicasAvailable",
"message": "Deployment has minimum availability."
}
]
}
}
I noticed both deployments have the following rolling update strategy defined:
"strategy": {
"type": "RollingUpdate",
"rollingUpdate": {
"maxUnavailable": "25%",
"maxSurge": "25%"
}
},
With this strategy, a normal rolling update triggered through 'set image' or 'kubectl apply' should only terminate the old pod after the new pod is created.
So the different behavior between the two systems may come from the dashboard. I guess you are running different dashboard versions in your two systems: according to the dashboard compatibility matrix, Kubernetes v1.7 needs dashboard 1.7, while Kubernetes v1.8 needs dashboard 1.8. Different dashboard versions may treat 'kill pod' as a different action; I don't know.
Or, if you are running dashboard 1.7 on your v1.8 system, try upgrading your dashboard first.
And lastly, don't use 'kill pod' to do a rolling update.
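As a concrete example of driving the rolling update the intended way on System 2 (a sketch; substitute a real image tag for NEW_TAG):
# Changing the image triggers a rolling update; with maxSurge 25% and one replica the new pod
# is created before the old one is terminated. Then watch the rollout complete.
kubectl set image deployment/prodefault-deployment prodefault-ctr=xxxxxxxxxxxx.dkr.ecr.us-west-2.amazonaws.com/xxxxxxxxxxx-pro-default:NEW_TAG
kubectl rollout status deployment/prodefault-deployment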

How do I do this deployment by command line

I can do a deploy like this, but cannot do it via the command line.
I was looking at doing it like this:
kubectl create -f kubernetes-rc.json
{
"kind": "ReplicationController",
"apiVersion": "v1",
"metadata": {
"name": "foo-frontend-rc",
"labels": {
"www": true
},
"namespace": "foo"
},
"spec": {
"replicas": 1,
"template": {
"metadata": {
"labels": {
"app": "foo-frontend"
}
},
"spec": {
"containers": [
{
"name": "foo-frontend",
"image": "gcr.io/atomic-griffin-130023/foo-frontend:b3fc862",
"ports": [
{
"containerPort": 3009,
"protocol": "TCP"
}
],
"imagePullPolicy": "IfNotPresent"
}
],
"restartPolicy": "Always",
"dnsPolicy": "ClusterFirst"
}
}
}
}
and
kubectl create -f kubernetes-service.json
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "foo-frontend-service"
},
"spec": {
"selector": {
"app": "foo-frontend-rc"
},
"ports": [
{
"protocol": "TCP",
"port": 80,
"targetPort": 3009
}
]
}
}
To no avail: it creates the RC, but it won't expose the service externally.
Your service's selector is wrong. It should be selecting a label from the pod template, not a label on the RC itself.
If you change the following in your service:
"selector": {
"app": "foo-frontend-rc"
},
to:
"selector": {
"app": "foo-frontend"
},
It should fix it.
Update
Change your service definition to
{
"kind": "Service",
"apiVersion": "v1",
"metadata": {
"name": "foo-frontend-service"
},
"spec": {
"selector": {
"app": "foo-frontend"
},
"ports": [
{
"protocol": "TCP",
"port": 80,
"targetPort": 3009,
"nodePort": 30009
}
],
"type": "LoadBalancer"
}
}
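After fixing the selector, re-applying the definition and checking the exposed endpoint would look roughly like this (file name taken from the question):
kubectl apply -f kubernetes-service.json
# With type LoadBalancer the EXTERNAL-IP column fills in once the cloud provider provisions it;
# the service is also reachable on any node at nodePort 30009.
kubectl get service foo-frontend-service -o wide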