I have set up an Ingress:
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: {{ template "mychart.fullname" . }}-app
annotations:
# type of authentication [basic|digest]
nginx.ingress.kubernetes.io/auth-type: basic
# name of the secret that contains the user/password definitions
nginx.ingress.kubernetes.io/auth-secret: {{ template "mychart.fullname" . }}-myauthsecret
# message to display with an appropriate context why the authentication is required
nginx.ingress.kubernetes.io/auth-realm: "Authentication Required - foo"
spec:
rules:
- host: "test.example.com"
http:
paths:
- path: /
backend:
serviceName: {{ template "mychart.fullname" . }}-app
servicePort: 80
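For completeness, the basic-auth secret referenced by the auth-secret annotation is created roughly like this (a sketch; the secret name must match whatever the fullname template renders to, and the file key has to be named auth):
htpasswd -c auth myuser
kubectl create secret generic <fullname>-myauthsecret --from-file=auth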
But when I test it, I get connection refused:
curl -H 'Host: test.example.com' http://{public ip}/
When I test it on the machine where the cluster runs, it works properly:
curl -H 'Host: test.example.com' https://10.96.183.247/
10.96.183.247 is the local cluster IP.
Thank you for the comments. I hadn't noticed that I had no NGINX ingress controller installed on the new bare-metal node.
Here is the missing part, the ingress controller deployed with hostPort:
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
name: nginx-configuration
namespace: ingress-nginx
---
# tcp-services-configmap
kind: ConfigMap
apiVersion: v1
metadata:
name: tcp-services
namespace: ingress-nginx
---
# udp-services-configmap
kind: ConfigMap
apiVersion: v1
metadata:
name: udp-services
namespace: ingress-nginx
# rbac start
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: nginx-ingress-clusterrole
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- "extensions"
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- "extensions"
resources:
- ingresses/status
verbs:
- update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: nginx-ingress-role
namespace: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
# Defaults to "<election-id>-<ingress-class>"
# Here: "<ingress-controller-leader>-<nginx>"
# This has to be adapted if you change either parameter
# when launching the nginx-ingress-controller.
- "ingress-controller-leader-nginx"
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: nginx-ingress-role-nisa-binding
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: nginx-ingress-role
subjects:
- kind: ServiceAccount
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: nginx-ingress-clusterrole-nisa-binding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: nginx-ingress-clusterrole
subjects:
- kind: ServiceAccount
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
---
# rbac end
# with-rbac start
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-ingress-controller
namespace: ingress-nginx
spec:
replicas: 1
selector:
matchLabels:
app: ingress-nginx
template:
metadata:
labels:
app: ingress-nginx
annotations:
prometheus.io/port: '10254'
prometheus.io/scrape: 'true'
spec:
serviceAccountName: nginx-ingress-serviceaccount
containers:
- name: nginx-ingress-controller
image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.14.0
args:
- /nginx-ingress-controller
- --default-backend-service=$(POD_NAMESPACE)/default-http-backend
- --configmap=$(POD_NAMESPACE)/nginx-configuration
- --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
- --udp-services-configmap=$(POD_NAMESPACE)/udp-services
- --annotations-prefix=nginx.ingress.kubernetes.io
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- name: http
containerPort: 80
hostPort: 80 # !!!!!!
- name: https
containerPort: 443
hostPort: 443 # !!!!!!
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
securityContext:
runAsNonRoot: false
---
# with-rbac end
# default-backend start
apiVersion: apps/v1
kind: Deployment
metadata:
name: default-http-backend
namespace: ingress-nginx
spec:
selector:
matchLabels:
app: default-http-backend
template:
metadata:
labels:
app: default-http-backend
spec:
terminationGracePeriodSeconds: 60
containers:
- name: default-http-backend
# Any image is permissible as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: gcr.io/google_containers/defaultbackend:1.4
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
ports:
- containerPort: 8080
resources:
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
name: default-http-backend
namespace: ingress-nginx
spec:
selector:
app: default-http-backend
ports:
- port: 80
targetPort: 8080
---
# default-backend end
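With the manifests above applied, a quick way to confirm the controller is actually serving on the host ports (commands are a sketch; replace the address with the node's public IP):
kubectl -n ingress-nginx get pods -o wide
curl -H 'Host: test.example.com' http://<node-public-ip>/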
Related
We are working with the Akka Kubernetes API discovery to connect to other pods in the Kubernetes cluster (AWS EKS) and form a cluster, but we are getting the following error when the token is read:
[ERROR] 2022-12-01 12:05:53.635+0000 [cloud-poi-ha-akka.actor.default-dispatcher-11] a.d.k.KubernetesApiServiceDiscovery - Error reading api-token from /var/run/secrets/kubernetes.io/serviceaccount/token
java.nio.file.AccessDeniedException: /var/run/secrets/kubernetes.io/serviceaccount/token
When we checked the token that gets mounted on the pod, we observed that it is not the token from the service account we intend to use but the default token. This causes the discovery of other pods to fail with a 403 error.
[INFO ] 2022-12-01 12:07:19.999+0000 [cloud-poi-ha-akka.actor.default-dispatcher-14] a.d.k.KubernetesApiServiceDiscovery - Querying for pods with label selector: [app=cloud-poi]. Namespace: [fdnbug-cloud-poi-ha-bugfix-def-85666]. Port: [None]
[WARN ] 2022-12-01 12:07:20.011+0000 [cloud-poi-ha-akka.actor.default-dispatcher-16] a.d.k.KubernetesApiServiceDiscovery - Forbidden to communicate with Kubernetes API server; check RBAC settings. Response: [{"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"pods is forbidden: User \"system:anonymous\" cannot list resource \"pods\" in API group \"\" in the namespace \"fdnbug-cloud-poi-ha-bugfix-def-85666\"","reason":"Forbidden","details":{"kind":"pods"},"code":403}
]
[WARN ] 2022-12-01 12:07:20.013+0000 [cloud-poi-ha-akka.actor.default-dispatcher-12] a.m.c.b.i.BootstrapCoordinator - Resolve attempt failed! Cause: akka.discovery.kubernetes.KubernetesApiServiceDiscovery$KubernetesApiException: Forbidden when communicating with the Kubernetes API. Check RBAC settings.
Our StatefulSet and ServiceAccount manifests look like this:
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: sa-ne-ha
namespace: somenamespace
labels:
componentName: ne
app: some-app
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: ne-r-cluster-managers
namespace: somenamespace
labels:
componentName: ne
app: some-app
rules:
- apiGroups:
- ""
resources:
- pods
- pods/status
- pods/log
- statefulsets
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ne-rb-cluster-managers
namespace: somenamespace
labels:
componentName: ne
app: someapp
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ne-r-cluster-managers
subjects:
- kind: ServiceAccount
name: sa-ne-ha
namespace: somenamespace
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: ne-ha
namespace: somenamespace
labels:
componentName: ne-ha
app: someapp
version: someversion
annotations:
prometheus.io/scrape: "true"
prometheus.io/path: "/metrics"
prometheus.io/port: "9252"
spec:
serviceName: ne-headless
replicas: 2
selector:
matchLabels:
componentName: ne-ha
app: someapp
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
componentName: ne-ha
app: someapp
spec:
serviceAccountName: sa-ne-ha
containers:
- name: ne-ha
image: ${DOCKER_IMAGE}
imagePullPolicy: Always
resources:
requests:
cpu: ${CONTAINERS_RESOURCES_CPU_REQUESTS}
memory: ${CONTAINERS_RESOURCES_MEMORY_REQUESTS}
limits:
cpu: ${CONTAINERS_RESOURCES_CPU_LIMITS}
memory: ${CONTAINERS_RESOURCES_MEMORY_LIMITS}
command:
- "/microservice/bin"
env:
ports:
- name: http
containerPort: 8080
protocol: TCP
readinessProbe:
httpGet:
path: /ready
port: 8080
initialDelaySeconds: 15
periodSeconds: 30
failureThreshold: 3
timeoutSeconds: 30
livenessProbe:
httpGet:
path: /alive
port: 8080
initialDelaySeconds: 130
periodSeconds: 30
failureThreshold: 3
timeoutSeconds: 5
---
Has someone encountered a similar issue and been able to resolve it?
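For reference, two quick checks that can narrow this down (a sketch; the pod name is assumed from the StatefulSet above): confirm which service account the pod actually runs under, and confirm the Role really grants the pod list to that account:
kubectl -n somenamespace get pod ne-ha-0 -o jsonpath='{.spec.serviceAccountName}'
kubectl auth can-i list pods --as=system:serviceaccount:somenamespace:sa-ne-ha -n somenamespace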
So I'm trying to run three instances of RavenDB and I can't access them, although the logs show they are running fine and the certificate is loaded for running in secured mode.
The deployment is done on GKE.
The certificate was generated using Let's Encrypt and is bound to the external IP of HAProxy.
I have no idea what the issue is.
Result of the kubectl describe ingress command:
kubectl describe ingress
Name: ravendb
Labels: app=ravendb
Namespace: default
Address: 34.111.56.107
Default backend: default-http-backend:80 (10.80.1.5:8080)
Rules:
Host Path Backends
---- ---- --------
a.example.development.run
/ ravendb-0:443 (10.80.0.14:443)
tcp-a.example.development.run
/ ravendb-0:38888 (10.80.0.14:38888)
b.example.development.run
/ ravendb-1:443 (10.80.0.12:443)
tcp-b.example.development.run
/ ravendb-1:38888 (10.80.0.12:38888)
c.example.development.run
/ ravendb-2:443 (10.80.0.13:443)
tcp-c.example.development.run
/ ravendb-2:38888 (10.80.0.13:38888)
Annotations: ingress.kubernetes.io/backends:
{"k8s-be-32116--bad4c61c2f1d097c":"HEALTHY","k8s1-bad4c61c-default-ravendb-0-38888-31a8aae1":"UNHEALTHY","k8s1-bad4c61c-default-ravendb-0-...
ingress.kubernetes.io/forwarding-rule: k8s2-fr-pocrmcsc-default-ravendb-gtrvt7cq
ingress.kubernetes.io/ssl-passthrough: true
ingress.kubernetes.io/target-proxy: k8s2-tp-pocrmcsc-default-ravendb-gtrvt7cq
ingress.kubernetes.io/url-map: k8s2-um-pocrmcsc-default-ravendb-gtrvt7cq
These are the YAML files:
HAPROXY
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: ingress-controller
namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ingress-controller
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- "extensions"
- "networking.k8s.io"
resources:
- ingresses
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- "extensions"
- "networking.k8s.io"
resources:
- ingresses/status
verbs:
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: ingress-controller
namespace: default
rules:
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
- create
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-controller
subjects:
- kind: ServiceAccount
name: ingress-controller
namespace: default
- apiGroup: rbac.authorization.k8s.io
kind: User
name: ingress-controller
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ingress-controller
namespace: default
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-controller
subjects:
- kind: ServiceAccount
name: ingress-controller
namespace: default
- apiGroup: rbac.authorization.k8s.io
kind: User
name: ingress-controller
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
run: ingress-default-backend
name: ingress-default-backend
namespace: default
spec:
selector:
matchLabels:
run: ingress-default-backend
template:
metadata:
labels:
run: ingress-default-backend
spec:
containers:
- name: ingress-default-backend
image: gcr.io/google_containers/defaultbackend:1.0
ports:
- containerPort: 8080
resources:
limits:
cpu: 10m
memory: 20Mi
---
apiVersion: v1
kind: Service
metadata:
name: ingress-default-backend
namespace: default
spec:
ports:
- port: 8080
selector:
run: ingress-default-backend
---
apiVersion: v1
kind: ConfigMap
metadata:
name: haproxy-ingress
namespace: default
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
run: haproxy-ingress
name: haproxy-ingress
spec:
selector:
matchLabels:
run: haproxy-ingress
template:
metadata:
labels:
run: haproxy-ingress
spec:
serviceAccountName: ingress-controller
containers:
- name: haproxy-ingress
image: quay.io/jcmoraisjr/haproxy-ingress
args:
- --default-backend-service=$(POD_NAMESPACE)/ingress-default-backend
- --configmap=$(POD_NAMESPACE)/haproxy-ingress
- --reload-strategy=reusesocket
ports:
- name: https
containerPort: 443
- name: stat
containerPort: 1936
livenessProbe:
httpGet:
path: /healthz
port: 10253
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
---
apiVersion: v1
kind: Service
metadata:
labels:
run: haproxy-ingress
name: haproxy-ingress
namespace: default
spec:
type: LoadBalancer
selector:
app: ingress-controller
ports:
- name: http
protocol: TCP
port: 80
targetPort: 80
- name: https
protocol: TCP
port: 443
targetPort: 443
- name: stat
port: 1936
RAVENDB
---
apiVersion: v1
kind: ConfigMap
metadata:
name: ravendb-settings
namespace: default
labels:
app: ravendb
data:
ravendb-0: >
{
"Setup.Mode": "None",
"DataDir": "/data/RavenData",
"Security.Certificate.Path": "/ssl/ssl",
"ServerUrl": "https://0.0.0.0",
"ServerUrl.Tcp": "tcp://0.0.0.0:38888",
"PublicServerUrl": "https://a.example.development.run",
"PublicServerUrl.Tcp": "tcp://tcp-a.example.development.run:38888",
"License.Path": "/license/license.json",
"License.Eula.Accepted": "true"
}
ravendb-1: >
{
"Setup.Mode": "None",
"DataDir": "/data/RavenData",
"Security.Certificate.Path": "/ssl/ssl",
"ServerUrl": "https://0.0.0.0",
"ServerUrl.Tcp": "tcp://0.0.0.0:38888",
"PublicServerUrl": "https://b.example.development.run",
"PublicServerUrl.Tcp": "tcp://tcp-b.example.development.run:38888",
"License.Path": "/license/license.json",
"License.Eula.Accepted": "true"
}
ravendb-2: >
{
"Setup.Mode": "None",
"DataDir": "/data/RavenData",
"Security.Certificate.Path": "/ssl/ssl",
"ServerUrl": "https://0.0.0.0",
"ServerUrl.Tcp": "tcp://0.0.0.0:38888",
"PublicServerUrl": "https://c.example.development.run",
"PublicServerUrl.Tcp": "tcp://tcp-c.example.development.run:38888",
"License.Path": "/license/license.json",
"License.Eula.Accepted": "true"
}
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: ravendb
namespace: default
labels:
app: ravendb
spec:
serviceName: ravendb
template:
metadata:
labels:
app: ravendb
spec:
containers:
- command:
- /bin/sh
- -c
- /opt/RavenDB/Server/Raven.Server --config-path /config/$HOSTNAME
image: ravendb/ravendb:latest
imagePullPolicy: Always
name: ravendb
ports:
- containerPort: 443
name: http-api
protocol: TCP
- containerPort: 38888
name: tcp-server
protocol: TCP
- containerPort: 161
name: snmp
protocol: TCP
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /data
name: data
- mountPath: /ssl
name: ssl
- mountPath: /license
name: license
- mountPath: /config
name: config
dnsPolicy: ClusterFirst
restartPolicy: Always
terminationGracePeriodSeconds: 120
volumes:
- name: ssl
secret:
defaultMode: 420
secretName: ravendb-ssl
- configMap:
defaultMode: 420
name: ravendb-settings
name: config
- name: license
secret:
defaultMode: 420
secretName: ravendb-license
updateStrategy:
type: RollingUpdate
podManagementPolicy: OrderedReady
replicas: 3
selector:
matchLabels:
app: ravendb
volumeClaimTemplates:
- metadata:
labels:
app: ravendb
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ravendb
namespace: default
labels:
app: ravendb
annotations:
ingress.kubernetes.io/ssl-passthrough: "true"
spec:
rules:
- host: a.example.development.run
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: ravendb-0
port:
number: 443
- host: tcp-a.example.development.run
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: ravendb-0
port:
number: 38888
- host: b.example.development.run
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: ravendb-1
port:
number: 443
- host: tcp-b.example.development.run
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: ravendb-1
port:
number: 38888
- host: c.example.development.run
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: ravendb-2
port:
number: 443
- host: tcp-c.example.development.run
http:
paths:
- path: /
pathType: ImplementationSpecific
backend:
service:
name: ravendb-2
port:
number: 38888
---
apiVersion: v1
kind: Service
metadata:
name: ravendb-0
namespace: default
labels:
app: ravendb
node: "0"
spec:
ports:
- name: http
protocol: TCP
port: 80
targetPort: 80
- name: https
protocol: TCP
port: 443
targetPort: 443
- name: http-api
port: 443
protocol: TCP
targetPort: 443
- name: tcp-server
port: 38888
protocol: TCP
targetPort: 38888
- name: snmp
port: 161
protocol: TCP
targetPort: 161
selector:
app: ravendb
statefulset.kubernetes.io/pod-name: ravendb-0
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: ravendb-1
namespace: default
labels:
app: ravendb
node: "1"
spec:
ports:
- name: http-api
port: 443
protocol: TCP
targetPort: 443
- name: tcp-server
port: 38888
protocol: TCP
targetPort: 38888
- name: snmp
port: 161
protocol: TCP
targetPort: 161
selector:
app: ravendb
statefulset.kubernetes.io/pod-name: ravendb-1
type: ClusterIP
---
apiVersion: v1
kind: Service
metadata:
name: ravendb-2
namespace: default
labels:
app: ravendb
node: "2"
spec:
ports:
- name: http-api
port: 443
protocol: TCP
targetPort: 443
- name: tcp-server
port: 38888
protocol: TCP
targetPort: 38888
- name: snmp
port: 161
protocol: TCP
targetPort: 161
selector:
app: ravendb
statefulset.kubernetes.io/pod-name: ravendb-2
type: ClusterIP
SECRETS
apiVersion: v1
kind: Secret
metadata:
name: ravendb-license
namespace: default
labels:
app: ravendb
type: Opaque
data:
license.json: >
---
apiVersion: v1
kind: Secret
metadata:
name: ravendb-ssl
namespace: default
labels:
app: ravendb
type: Opaque
data:
ssl: >
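One observation: the annotations in the kubectl describe ingress output (url-map, target-proxy, forwarding-rule) look like they come from the GKE built-in ingress controller rather than from haproxy-ingress, so HAProxy may not be serving this Ingress at all. A sketch of pinning the Ingress to the HAProxy controller (class name as conventionally used by haproxy-ingress; verify against the controller version in use):
metadata:
  annotations:
    kubernetes.io/ingress.class: "haproxy"
    ingress.kubernetes.io/ssl-passthrough: "true"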
I'm studying Helm 3 and k8s (microk8s).
While trying the following command:
helm install traefik traefik/traefik -n traefik --values traefik-values.yaml
and traefik-values.yaml has the following values:
additionalArguments:
- "--certificatesresolvers.letsencrypt.acme.email=<my-email>"
- "--certificatesresolvers.letsencrypt.acme.storage=/data/acme.json"
- "--certificatesresolvers.letsencrypt.acme.caserver=https://acme-v02.api.letsencrypt.org/directory"
- "--certificatesResolvers.letsencrypt.acme.tlschallenge=true"
- "--api.insecure=true"
- "--accesslog=true"
- "--log.level=INFO"
hostNetwork: true
ipaddress: <my-ip>
service:
type: ClusterIP
ports:
web:
port: 80
websecure:
port: 443
I receive this bind-permission error:
traefik.go:76: command traefik error: error while building entryPoint web: error preparing server: error opening listener: listen tcp :80: bind: permission denied
On the other hand, I can install Traefik on the same ports (80 and 443) using the following YAML file (approximately the example from Traefik's site):
---
apiVersion: v1
kind: Namespace
metadata:
name: traefik
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: traefik-ingress-controller
namespace: traefik
---
kind: DaemonSet
apiVersion: apps/v1
metadata:
name: traefik-ingress-controller
namespace: traefik
labels:
k8s-app: traefik-ingress-lb
spec:
selector:
matchLabels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
template:
metadata:
labels:
k8s-app: traefik-ingress-lb
name: traefik-ingress-lb
spec:
tolerations:
- effect: NoSchedule
operator: Exists
serviceAccountName: traefik-ingress-controller
terminationGracePeriodSeconds: 60
hostNetwork: true
containers:
- image: traefik:2.4
name: traefik-ingress-lb
ports:
- name: http
containerPort: 80
hostPort: 80
# - name: admin
# containerPort: 8080
# hostPort: 8080
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
args:
- --providers.kubernetesingress=true
# you need to manually set this IP to the incoming public IP
# that your ingress resources would use. Note it only affects
# status and kubectl UI, and doesn't really do anything
# It could even be left out https://github.com/containous/traefik/issues/6303
- --providers.kubernetesingress.ingressendpoint.ip=<my-server-ip>
## uncomment these and the ports above and below to enable
## the web UI on the host NIC port 8080 in **insecure** mode
- --api.dashboard=true
- --api.insecure=true
- --log=true
- --log.level=INFO
- --accesslog=true
- --entrypoints.web.address=:80
- --entrypoints.websecure.address=:443
- --certificatesresolvers.leresolver.acme.tlschallenge=true # <== Enable TLS-ALPN-01 to generate and renew ACME certs
- --certificatesresolvers.leresolver.acme.email=<email> # <== Setting email for certs
- --certificatesresolvers.leresolver.acme.storage=/data/acme.json # <== Defining acme file to store cert information
---
kind: Service
apiVersion: v1
metadata:
name: traefik-ingress-service
namespace: traefik
spec:
selector:
k8s-app: traefik-ingress-lb
ports:
- protocol: TCP
port: 80
name: web
# - protocol: TCP
# port: 8080
# name: admin
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
rules:
- apiGroups:
- ""
resources:
- services
- endpoints
- secrets
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- ingresses
verbs:
- get
- list
- watch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: traefik-ingress-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: traefik-ingress-controller
subjects:
- kind: ServiceAccount
name: traefik-ingress-controller
namespace: traefik
The two specs are not identical but quite similar as far as I can understand. They both create a ServiceAccount in the 'traefik' namespace and grant a ClusterRole.
What part determines the permission on port 80?
There's an open issue on the Traefik helm chart where Jasper Ben suggests a working solution:
hostNetwork: true
ports:
web:
port: 80
redirectTo: websecure
websecure:
port: 443
securityContext:
capabilities:
drop: [ALL]
add: [NET_BIND_SERVICE]
readOnlyRootFilesystem: true
runAsGroup: 0
runAsNonRoot: false
runAsUser: 0
The missing part in the Helm chart values is the NET_BIND_SERVICE capability in the securityContext.
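After applying those values, one way to confirm the capability actually landed on the running container (a sketch; the deployment name is assumed to follow the release name traefik):
helm upgrade --install traefik traefik/traefik -n traefik --values traefik-values.yaml
kubectl -n traefik get deploy traefik -o jsonpath='{.spec.template.spec.containers[0].securityContext}'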
Environment: Ubuntu 18.06 bare metal, set up the cluster with kubeadm (single node)
I want to access the cluster via port 80. Currently I am able to access it via the NodePort (domain.com:31668/) but not via port 80. I am using MetalLB. Do I need something else to handle incoming traffic?
So the current topology would be:
LoadBalancer > Ingress Controller > Ingress > Service
kubectl -n ingress-nginx describe service/ingress-nginx:
Name: ingress-nginx
Namespace: ingress-nginx
Labels: app.kubernetes.io/name=ingress-nginx
app.kubernetes.io/part-of=ingress-nginx
Annotations: <none>
Selector: app.kubernetes.io/name=ingress-nginx,app.kubernetes.io/part-of=ingress-nginx
Type: LoadBalancer
IP: 10.99.6.137
LoadBalancer Ingress: 192.168.1.240
Port: http 80/TCP
TargetPort: 80/TCP
NodePort: http 31668/TCP
Endpoints: 192.168.0.8:80
Port: https 443/TCP
TargetPort: 443/TCP
NodePort: https 30632/TCP
Endpoints: 192.168.0.8:443
Session Affinity: None
External Traffic Policy: Cluster
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal IPAllocated 35m metallb-controller Assigned IP "192.168.1.240"
As I am in a bare-metal environment, I am using MetalLB.
MetalLB config:
apiVersion: v1
kind: ConfigMap
metadata:
namespace: metallb-system
name: config
data:
config: |
address-pools:
- name: default
protocol: layer2
addresses:
- 192.168.1.240-192.168.1.250
Ingress controller YAMLs:
apiVersion: v1
kind: Namespace
metadata:
  name: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: nginx-configuration
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: tcp-services
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
---
kind: ConfigMap
apiVersion: v1
metadata:
  name: udp-services
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nginx-ingress-serviceaccount
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: nginx-ingress-clusterrole
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
- apiGroups:
- ""
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- "extensions"
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- create
- patch
- apiGroups:
- "extensions"
resources:
- ingresses/status
verbs:
- update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
  name: nginx-ingress-role
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
rules:
- apiGroups:
- ""
resources:
- configmaps
- pods
- secrets
- namespaces
verbs:
- get
- apiGroups:
- ""
resources:
- configmaps
resourceNames:
# Defaults to "<election-id>-<ingress-class>"
# Here: "<ingress-controller-leader>-<nginx>"
# This has to be adapted if you change either parameter
# when launching the nginx-ingress-controller.
- "ingress-controller-leader-nginx"
verbs:
- get
- update
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- apiGroups:
- ""
resources:
- endpoints
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
  name: nginx-ingress-role-nisa-binding
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: nginx-ingress-role
subjects:
- kind: ServiceAccount
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: nginx-ingress-clusterrole-nisa-binding
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nginx-ingress-clusterrole
subjects:
- kind: ServiceAccount
name: nginx-ingress-serviceaccount
namespace: ingress-nginx
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: nginx-ingress-controller
  namespace: ingress-nginx
  labels:
    app.kubernetes.io/name: ingress-nginx
    app.kubernetes.io/part-of: ingress-nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: ingress-nginx
      app.kubernetes.io/part-of: ingress-nginx
  template:
    metadata:
      labels:
        app.kubernetes.io/name: ingress-nginx
        app.kubernetes.io/part-of: ingress-nginx
annotations:
prometheus.io/port: "10254"
prometheus.io/scrape: "true"
spec:
serviceAccountName: nginx-ingress-serviceaccount
containers:
- name: nginx-ingress-controller
image: quay.io/kubernetes-ingress-controller/nginx-ingress-controller:0.21.0
args:
- /nginx-ingress-controller
- --configmap=$(POD_NAMESPACE)/nginx-configuration
- --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
- --udp-services-configmap=$(POD_NAMESPACE)/udp-services
- --publish-service=$(POD_NAMESPACE)/ingress-nginx
- --annotations-prefix=nginx.ingress.kubernetes.io
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
# www-data -> 33
runAsUser: 33
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
ports:
- name: http
containerPort: 80
- name: https
containerPort: 443
livenessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
---
Output of curl -v http://192.168.1.240 (executed on the server):
* Rebuilt URL to: http://192.168.1.240/
* Trying 192.168.1.240...
* TCP_NODELAY set
* Connected to 192.168.1.240 (192.168.1.240) port 80 (#0)
> GET / HTTP/1.1
> Host: 192.168.1.240
> User-Agent: curl/7.61.0
> Accept: */*
>
< HTTP/1.1 404 Not Found
< Server: nginx/1.15.6
< Date: Thu, 27 Dec 2018 19:03:28 GMT
< Content-Type: text/html
< Content-Length: 153
< Connection: keep-alive
<
<html>
<head><title>404 Not Found</title></head>
<body>
<center><h1>404 Not Found</h1></center>
<hr><center>nginx/1.15.6</center>
</body>
</html>
* Connection #0 to host 192.168.1.240 left intact
kubectl describe ingress articleservice-ingress
Name: articleservice-ingress
Namespace: default
Address: 192.168.1.240
Default backend: default-http-backend:80 (<none>)
Rules:
Host Path Backends
---- ---- --------
host.com
/articleservice articleservice:31001 (<none>)
Annotations:
nginx.ingress.kubernetes.io/rewrite-target: /
Events: <none>
curl -vH 'host: elpsit.com' http://192.168.1.240/articleservice/system/ipaddr
I can reach the ingress as expected from inside the server.
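For what it's worth, the 404 from nginx earlier only means the request reached the controller but matched no Ingress rule; a request against the LoadBalancer IP has to carry a Host header matching the rule (hostname taken from the describe output above), for example:
curl -H 'Host: host.com' http://192.168.1.240/articleservice/system/ipaddr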
I have installed Argo in my own namespace in a central Kubernetes cluster in my organization.
After installation, when the Argo "workflow-controller" tries to fetch its ConfigMap from the API server, I get a timeout error:
time="2018-08-15T01:24:40Z" level=fatal msg="Get https://192.168.0.1:443/api/v1/namespaces/2304613691/configmaps/workflow-controller-configmap: dial tcp 192.168.0.1:443: i/o timeout\ngithub.com/argoproj/argo/errors.Wrap\n\t/root/go/src/github.com/argoproj/argo/errors/errors.go:87\ngithub.com/argoproj/argo/errors.InternalWrapError\n\t/root/go/src/github.com/argoproj/argo/errors/errors.go:70\ngithub.com/argoproj/argo/workflow/controller.(*WorkflowController).ResyncConfig\n\t/root/go/src/github.com/argoproj/argo/workflow/controller/controller.go:295\nmain.Run\n\t/root/go/src/github.com/argoproj/argo/cmd/workflow-controller/main.go:96\ngithub.com/argoproj/argo/vendor/github.com/spf13/cobra.(*Command).execute\n\t/root/go/src/github.com/argoproj/argo/vendor/github.com/spf13/cobra/command.go:750\ngithub.com/argoproj/argo/vendor/github.com/spf13/cobra.(*Command).ExecuteC\n\t/root/go/src/github.com/argoproj/argo/vendor/github.com/spf13/cobra/command.go:831\ngithub.com/argoproj/argo/vendor/github.com/spf13/cobra.(*Command).Execute\n\t/root/go/src/github.com/argoproj/argo/vendor/github.com/spf13/cobra/command.go:784\nmain.main\n\t/root/go/src/github.com/argoproj/argo/cmd/workflow-controller/main.go:68\nruntime.main\n\t/usr/local/go/src/runtime/proc.go:195\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:2337"
It is trying to access the following URL from within the pod container: https://192.168.0.1:443/api/v1/namespaces/2304613691/configmaps/workflow-controller-configmap
I have also modified the Kubernetes host config to use kubernetes.default and added an allow-all ingress and egress network policy.
But the exception is still there:
time="2018-08-16T18:23:55Z" level=fatal msg="Get https://kubernetes.default:443/api/v1/namespaces/2304613691/configmaps/workflow-controller-configmap: dial tcp: i/o timeout\ngithub.com/argoproj/argo/errors.Wrap\n\t/root/go/src/github.com/argoproj/argo/errors/errors.go:87\ngithub.com/argoproj/argo/errors.InternalWrapError\n\t/root/go/src/github.com/argoproj/argo/errors/errors.go:70\ngithub.com/argoproj/argo/workflow/controller.(*WorkflowController).ResyncConfig\n\t/root/go/src/github.com/argoproj/argo/workflow/controller/controller.go:295\nmain.Run\n\t/root/go/src/github.com/argoproj/argo/cmd/workflow-controller/main.go:96\ngithub.com/argoproj/argo/vendor/github.com/spf13/cobra.(*Command).execute\n\t/root/go/src/github.com/argoproj/argo/vendor/github.com/spf13/cobra/command.go:750\ngithub.com/argoproj/argo/vendor/github.com/spf13/cobra.(*Command).ExecuteC\n\t/root/go/src/github.com/argoproj/argo/vendor/github.com/spf13/cobra/command.go:831\ngithub.com/argoproj/argo/vendor/github.com/spf13/cobra.(*Command).Execute\n\t/root/go/src/github.com/argoproj/argo/vendor/github.com/spf13/cobra/command.go:784\nmain.main\n\t/root/go/src/github.com/argoproj/argo/cmd/workflow-controller/main.go:68\nruntime.main\n\t/usr/local/go/src/runtime/proc.go:195\nruntime.goexit\n\t/usr/local/go/src/runtime/asm_amd64.s:2337"
apiVersion: v1
items:
- apiVersion: v1
kind: ServiceAccount
metadata:
name: argo
namespace: 2304613691
- apiVersion: v1
kind: ServiceAccount
metadata:
name: argo-ui
namespace: 2304613691
kind: List
---
apiVersion: v1
items:
- apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: argo-role
namespace: 2304613691
rules:
- apiGroups:
- ""
resources:
- pods
- pods/exec
verbs:
- create
- get
- list
- watch
- update
- patch
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- watch
- list
- apiGroups:
- ""
resources:
- persistentvolumeclaims
verbs:
- create
- delete
- apiGroups:
- argoproj.io
resources:
- workflows
verbs:
- get
- list
- watch
- update
- patch
- apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: argo-ui-role
namespace: 2304613691
rules:
- apiGroups:
- ""
resources:
- pods
- pods/exec
- pods/log
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- secrets
verbs:
- get
- apiGroups:
- argoproj.io
resources:
- workflows
verbs:
- get
- list
- watch
kind: List
---
apiVersion: v1
items:
- apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: argo-binding
namespace: "2304613691"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: argo-role
subjects:
- kind: ServiceAccount
name: argo
namespace: "2304613691"
- apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: argo-ui-binding
namespace: "2304613691"
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: argo-ui-role
subjects:
- kind: ServiceAccount
name: argo-ui
namespace: "2304613691"
kind: List
---
apiVersion: v1
items:
- apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
generation: 1
name: workflow-controller
namespace: 2304613691
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: workflow-controller
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
labels:
app: workflow-controller
spec:
containers:
- args:
- --configmap
- workflow-controller-configmap
command:
- workflow-controller
env:
- name: ARGO_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
image: <our repo>/sample-agupta34/workflow-controller:v2.1.1
imagePullPolicy: IfNotPresent
name: workflow-controller
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: argo
serviceAccountName: argo
terminationGracePeriodSeconds: 30
- apiVersion: extensions/v1beta1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: "1"
generation: 1
name: argo-ui
namespace: 2304613691
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: argo-ui
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
labels:
app: argo-ui
spec:
containers:
- env:
- name: ARGO_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: IN_CLUSTER
value: "true"
- name: ENABLE_WEB_CONSOLE
value: "false"
- name: BASE_HREF
value: /
image: <our repo>/sample-agupta34/argoui:v2.1.1
imagePullPolicy: IfNotPresent
name: argo-ui
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
serviceAccount: argo-ui
serviceAccountName: argo-ui
terminationGracePeriodSeconds: 30
kind: List
---
apiVersion: v1
data:
config: |
artifactRepository: {}
executorImage: <our repo>/sample-agupta34/argoexec:v2.1.1
kind: ConfigMap
metadata:
name: workflow-controller-configmap
namespace: 2304613691
---
apiVersion: v1
kind: Service
metadata:
name: argo-ui
namespace: 2304613691
labels:
app: argo-ui
spec:
ports:
- port: 80
protocol: TCP
targetPort: 8001
selector:
app: argo-ui
---
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: argo-ui
namespace: 2304613691
annotations:
kubernetes.io/ingress.class: "netscaler.v2"
netscaler.applecloud.io/insecure-backend: "true"
spec:
backend:
serviceName: argo-ui
servicePort: 80
---
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: argo-and-argo-ui-netpol
spec:
podSelector:
matchLabels:
app: workflow-controller
app: argo-ui
ingress:
- {}
egress:
- {}
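One note on the NetworkPolicy above: matchLabels is a plain map, so the two app keys collapse into one and the policy ends up selecting only one of the two deployments. A selector covering both pods could be written with matchExpressions instead (a sketch; whether network policy is the actual cause of the i/o timeout is a separate question):
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
  name: argo-and-argo-ui-netpol
spec:
  podSelector:
    matchExpressions:
      - key: app
        operator: In
        values:
          - workflow-controller
          - argo-ui
  policyTypes:
    - Ingress
    - Egress
  ingress:
    - {}
  egress:
    - {}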