apiVersion: getambassador.io/v3alpha1
kind: TLSContext
metadata:
name: example-context
spec:
hosts: ["*.example.stackoverflow.com"]
secret: example-acme-secret
alpn_protocols: h2,http/1.1
min_tls_version: v1.2
redirect_cleartext_from: 8080
---
apiVersion: getambassador.io/v3alpha1
kind: Host
metadata:
name: foo-host
spec:
  hostname: "*.example.stackoverflow.com"
tlsContext:
name: example-context
tlsSecret:
name: example-acme-secret
---
apiVersion: getambassador.io/v3alpha1
kind: TLSContext
metadata:
name: example-context-test2
spec:
hosts: ["*.api.stackoverflow.com"]
secret: example-stackoverflow-secret
alpn_protocols: h2,http/1.1
min_tls_version: v1.2
redirect_cleartext_from: 8080
---
apiVersion: getambassador.io/v3alpha1
kind: Host
metadata:
name: example-context-test2-host
spec:
  hostname: "*.api.stackoverflow.com"
tlsContext:
name: example-context-test2
tlsSecret:
name: example-stackoverflow-secret
kubectl get namespace
NAME              STATUS   AGE
default           Active   3h33m
ingress-nginx Active 3h11m
kube-node-lease Active 3h33m
kube-public Active 3h33m
kube-system Active 3h33m
kubectl get services -n ingress-nginx
NAME                                 TYPE           CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE
ingress-nginx-controller LoadBalancer 10.102.205.190 localhost 80:31378/TCP,443:31888/TCP 3h12m
ingress-nginx-controller-admission ClusterIP 10.103.97.209 <none> 443/TCP 3h12m
When I make the request from Next.js getInitialProps to http://ingress-nginx-controller.ingress-nginx.svc.cluster.local/api/users/currentuser, it throws this error: Error: connect ECONNREFUSED 127.0.0.1:443.
LandingPage.getInitialProps = async () => {
if (typeof window === "undefined") {
const { data } = await axios.get(
"http://ingress-nginx-controller.ingress-nginx.svc.cluster.local/api/users/currentuser",
{
headers: {
Host: "ticketing.dev",
},
}
);
return data;
} else {
const { data } = await axios.get("/api/users/currentuser");
return data;
}
};
My auth-depl.yaml:
apiVersion: apps/v1
kind: Deployment
metadata:
name: auth-depl
spec:
replicas: 1
selector:
matchLabels:
app: auth
template:
metadata:
labels:
app: auth
spec:
containers:
- name: auth
image: sajeebxn/auth
env:
- name: MONGO_URI
value: 'mongodb://tickets-mongo-srv:27017/auth'
- name: JWT_KEY
valueFrom:
secretKeyRef:
name: jwt-secret
key: JWT_KEY
---
apiVersion: v1
kind: Service
metadata:
name: auth-srv
spec:
selector:
app: auth
ports:
- name: auth
protocol: TCP
port: 3000
targetPort: 3000
And my ingress-srv.yaml:
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ingress-service
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/use-regex: "true"
spec:
tls:
- hosts:
- ticketing.dev
# secretName: e-ticket-secret
rules:
- host: ticketing.dev
http:
paths:
- path: /api/users/?(.*)
pathType: Prefix
backend:
service:
name: auth-srv
port:
number: 3000
- path: /?(.*)
pathType: Prefix
backend:
service:
name: client-srv
port:
number: 3000
Try using http://ingress-nginx-controller.ingress-nginx/api/users/currentuser instead.
This worked for me.
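For illustration, the getInitialProps from the question would then look like this (a sketch; the Host header is kept so the ingress can still match the ticketing.dev rule):
LandingPage.getInitialProps = async () => {
  if (typeof window === "undefined") {
    // Server side: reach the ingress controller through its in-cluster
    // service name (service.namespace is enough; no .svc.cluster.local suffix)
    const { data } = await axios.get(
      "http://ingress-nginx-controller.ingress-nginx/api/users/currentuser",
      { headers: { Host: "ticketing.dev" } }
    );
    return data;
  }
  // Client side: the browser is already on ticketing.dev
  const { data } = await axios.get("/api/users/currentuser");
  return data;
};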
I have currently written the below auth manifest for Istio:
apiVersion: security.istio.io/v1beta1
kind: RequestAuthentication
metadata:
name: "jwt-validation"
namespace: some-namespace
spec:
selector:
matchLabels:
auth: required
jwtRules:
- issuer: "https://you.auth0.com/"
jwksUri: "https://you.auth0.com/.well-known/jwks.json"
---
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
name: jwt-auth-policy
namespace: some-namespace
spec:
selector:
matchLabels:
auth: required
action: DENY
rules:
- from:
- source:
notRequestPrincipals: ["*"]
for which I am getting the below response in the browser:
RBAC: access denied
But instead of this I want to get a JSON response saying
{
"status": "failure",
"message": "Not Authorised"
}
with status code 403.
Now I have tried the below Lua filter:
apiVersion: networking.istio.io/v1alpha3
kind: EnvoyFilter
metadata:
name: custom-filter-response-code
namespace: istio-system
spec:
workloadSelector:
labels:
istio: ingressgateway
configPatches:
- applyTo: HTTP_FILTER
match:
context: GATEWAY
listener:
filterChain:
filter:
name: "envoy.filters.network.http_connection_manager"
subFilter:
name: "envoy.extAuthz"
patch:
operation: INSERT_AFTER
value:
name: envoy.custom-resp
typed_config:
"#type": "type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua"
inlineCode: |
function envoy_on_response(response_handle)
  if response_handle:headers():get(":status") == "401" then
    response_handle:headers():replace(":status", "403")
  else
    local body = response_handle:body()
    local jsonString = tostring(body:getBytes(0, body:length()))
    jsonString = jsonString:gsub("(status|failur)", "(message|Not Authorised)")
    response_handle:body():set(jsonString)
  end
end
Please guide me with the correct snippet.
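For what it's worth, a minimal sketch of one approach (not verified against your Istio/Envoy version): the AuthorizationPolicy denial already arrives as a 403, since Istio implements DENY policies with Envoy's RBAC filter rather than ext_authz, so a Lua filter only has to rewrite the body and content type of 403 responses. This assumes your Envoy supports body():setBytes(); the filter name is a placeholder:
apiVersion: networking.istio.io/v1alpha3
kind: EnvoyFilter
metadata:
  name: custom-403-body
  namespace: istio-system
spec:
  workloadSelector:
    labels:
      istio: ingressgateway
  configPatches:
    - applyTo: HTTP_FILTER
      match:
        context: GATEWAY
        listener:
          filterChain:
            filter:
              name: "envoy.filters.network.http_connection_manager"
              subFilter:
                name: "envoy.filters.http.router"
      patch:
        operation: INSERT_BEFORE
        value:
          name: envoy.lua.custom-403
          typed_config:
            "@type": "type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua"
            inlineCode: |
              function envoy_on_response(response_handle)
                if response_handle:headers():get(":status") == "403" then
                  response_handle:headers():replace("content-type", "application/json")
                  -- calling body() buffers the full response so it can be replaced
                  response_handle:body():setBytes('{"status": "failure", "message": "Not Authorised"}')
                end
              end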
I am trying to deploy Consul using a Kubernetes StatefulSet with the following manifest:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: consul
labels:
app: consul
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: consul
subjects:
- kind: ServiceAccount
name: consul
namespace: dev-ethernet
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
namespace: dev-ethernet
labels:
app: consul
---
apiVersion: v1
kind: Secret
metadata:
name: consul-secret
namespace: dev-ethernet
data:
consul-gossip-encryption-key: "aIRpNkHT/8Tkvf757sj2m5AcRlorWNgzcLI4yLEMx7M="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: consul-config
namespace: dev-ethernet
data:
server.json: |
{
"bind_addr": "0.0.0.0",
"client_addr": "0.0.0.0",
"disable_host_node_id": true,
"data_dir": "/consul/data",
"log_level": "INFO",
"datacenter": "us-west-2",
"domain": "cluster.local",
"ports": {
"http": 8500
},
"retry_join": [
"provider=k8s label_selector=\"app=consul,component=server\""
],
"server": true,
"telemetry": {
"prometheus_retention_time": "5m"
},
"ui": true
}
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
namespace: dev-ethernet
spec:
selector:
matchLabels:
app: consul
component: server
serviceName: consul
podManagementPolicy: Parallel
replicas: 3
updateStrategy:
rollingUpdate:
partition: 0
type: RollingUpdate
template:
metadata:
labels:
app: consul
component: server
annotations:
consul.hashicorp.com/connect-inject: "false"
spec:
serviceAccountName: consul
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- consul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
securityContext:
fsGroup: 1000
containers:
- name: consul
image: "consul:1.8"
args:
- "agent"
- "-advertise=$(POD_IP)"
- "-bootstrap-expect=3"
- "-config-file=/etc/consul/config/server.json"
- "-encrypt=$(GOSSIP_ENCRYPTION_KEY)"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: GOSSIP_ENCRYPTION_KEY
valueFrom:
secretKeyRef:
name: consul-secret
key: consul-gossip-encryption-key
volumeMounts:
- name: data
mountPath: /consul/data
- name: config
mountPath: /etc/consul/config
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- consul leave
ports:
- containerPort: 8500
name: ui-port
- containerPort: 8400
name: alt-port
- containerPort: 53
name: udp-port
- containerPort: 8080
name: http-port
- containerPort: 8301
name: serflan
- containerPort: 8302
name: serfwan
- containerPort: 8600
name: consuldns
- containerPort: 8300
name: server
volumes:
- name: config
configMap:
name: consul-config
volumeClaimTemplates:
- metadata:
name: data
labels:
app: consul
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: aws-gp2
resources:
requests:
storage: 3Gi
But I get ==> encrypt has invalid key: illegal base64 data at input byte 1 when the container starts.
I have generated consul-gossip-encryption-key locally using docker run -i -t consul keygen.
Does anyone know what's wrong here?
secret.data must be a base64-encoded string.
Try
kubectl create secret generic consul-gossip-encryption-key --from-literal=key="$(docker run -i -t consul keygen)" --dry-run=client -o yaml
and replace your Secret with the generated one:
apiVersion: v1
kind: Secret
metadata:
name: consul-secret
namespace: dev-ethernet
data:
consul-gossip-encryption-key: "aIRpNkHT/8Tkvf757sj2m5AcRlorWNgzcLI4yLEMx7M="
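The catch is that the keygen output is itself already base64: Kubernetes base64-decodes every value under data once, so Consul receives raw bytes that are no longer valid base64 and rejects the key. A sketch of the alternative, using stringData so the API server does the encoding for you (same key as in the question):
apiVersion: v1
kind: Secret
metadata:
  name: consul-secret
  namespace: dev-ethernet
stringData:
  # raw value; the API server base64-encodes it into .data on write
  consul-gossip-encryption-key: "aIRpNkHT/8Tkvf757sj2m5AcRlorWNgzcLI4yLEMx7M="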
ref: https://www.consul.io/docs/k8s/helm#v-global-gossipencryption
I want to create a custom 403 error page.
Currently I already have an Ingress created and in the annotations I have something like this:
"nginx.ingress.kubernetes.io/whitelist-source-range": "100.01.128.0/20,88.100.01.01"
So any attempt to access my web app outside that IP range receives a 403 error.
In order to create a custom page I tried adding the following annotations:
"nginx.ingress.kubernetes.io/custom-http-errors": "403",
"nginx.ingress.kubernetes.io/default-backend": "default-http-backend"
where default-http-backend is the name of an app already deployed.
The ingress has this:
{
"kind": "Ingress",
"apiVersion": "extensions/v1beta1",
"metadata": {
"name": "my-app-ingress",
"namespace": "my-app-test",
"selfLink": "/apis/extensions/v1beta1/namespaces/my-app-test/ingresses/my-app-ingress",
"uid": "8f31f2b4-428d-11ea-b15a-ee0dcf00d5a8",
"resourceVersion": "129105581",
"generation": 3,
"creationTimestamp": "2020-01-29T11:50:34Z",
"annotations": {
"kubernetes.io/ingress.class": "nginx",
"nginx.ingress.kubernetes.io/custom-http-errors": "403",
"nginx.ingress.kubernetes.io/default-backend": "default-http-backend",
"nginx.ingress.kubernetes.io/rewrite-target": "/",
"nginx.ingress.kubernetes.io/whitelist-source-range": "100.01.128.0/20,90.108.01.012"
}
},
"spec": {
"tls": [
{
"hosts": [
"my-app-test.retail-azure.js-devops.co.uk"
],
"secretName": "ssl-secret"
}
],
"rules": [
{
"host": "my-app-test.retail-azure.js-devops.co.uk",
"http": {
"paths": [
{
"path": "/api",
"backend": {
"serviceName": "my-app-backend",
"servicePort": 80
}
},
{
"path": "/",
"backend": {
"serviceName": "my-app-frontend",
"servicePort": 80
}
}
]
}
}
]
},
"status": {
"loadBalancer": {
"ingress": [
{}
]
}
}
}
Yet I always get the default 403.
What am I missing?
I've reproduced your scenario and it worked for me.
I will guide you through the steps I followed.
Cloud provider: GKE
Kubernetes Version: v1.15.3
Namespace: default
I'm using 2 deployments of 2 images with a service for each one.
Service 1: default-http-backend - with the nginx image; it will be our default backend.
Service 2: custom-http-backend - with the inanimate/echo-server image; this service will be served if the request comes from a whitelisted IP.
Ingress: Nginx ingress with annotations.
Expected behavior: The ingress will be configured to use the default-backend, custom-http-errors and whitelist-source-range annotations. If the request comes from a whitelisted IP, the ingress will route it to custom-http-backend; if not, it will be routed to default-http-backend.
Deployment 1: default-http-backend
Create a file default-http-backend.yaml with this content:
apiVersion: apps/v1
kind: Deployment
metadata:
name: default-http-backend
spec:
selector:
matchLabels:
app: default-http-backend
template:
metadata:
labels:
app: default-http-backend
spec:
containers:
- name: default-http-backend
image: nginx
ports:
- name: http
containerPort: 80
imagePullPolicy: IfNotPresent
---
apiVersion: v1
kind: Service
metadata:
name: default-http-backend
spec:
selector:
app: default-http-backend
ports:
- protocol: TCP
port: 80
targetPort: 80
Apply the yaml file: k apply -f default-http-backend.yaml
Deployment 2: custom-http-backend
Create a file custom-http-backend.yaml with this content:
apiVersion: apps/v1
kind: Deployment
metadata:
name: custom-http-backend
spec:
selector:
matchLabels:
app: custom-http-backend
template:
metadata:
labels:
app: custom-http-backend
spec:
containers:
- name: custom-http-backend
image: inanimate/echo-server
ports:
- name: http
containerPort: 8080
imagePullPolicy: IfNotPresent
---
apiVersion: v1
kind: Service
metadata:
name: custom-http-backend
spec:
selector:
app: custom-http-backend
ports:
- protocol: TCP
port: 80
targetPort: 8080
Apply the yaml file: k apply -f custom-http-backend.yaml
Check if the services are up and running
I'm using the alias k for kubectl
➜ ~ k get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
custom-http-backend ClusterIP 10.125.5.227 <none> 80/TCP 73s
default-http-backend ClusterIP 10.125.9.218 <none> 80/TCP 5m41s
...
➜ ~ k get pods
NAME READY STATUS RESTARTS AGE
custom-http-backend-67844fb65d-k2mwl 1/1 Running 0 2m10s
default-http-backend-5485f569bd-fkd6f 1/1 Running 0 6m39s
...
You could test the service using port-forward:
default-http-backend
k port-forward svc/default-http-backend 8080:80
Try to access http://localhost:8080 in your browser to see the nginx default page.
custom-http-backend
k port-forward svc/custom-http-backend 8080:80
Try to access http://localhost:8080 in your browser to see the custom page provided by the echo-server image.
Ingress configuration
At this point we have both services up and running, and we need to install and configure the nginx ingress. You can follow the official documentation; that part will not be covered here.
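For reference, one common way to install it is via the official Helm chart (per the ingress-nginx docs; adjust the namespace and chart version to your cluster):
helm upgrade --install ingress-nginx ingress-nginx \
  --repo https://kubernetes.github.io/ingress-nginx \
  --namespace ingress-nginx --create-namespace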
After it is installed, let's deploy the ingress. Based on the code you posted, I made some modifications: removed tls, used another domain, removed the /api path (for test purposes only), and added my home IP to the whitelist.
Create a file my-app-ingress.yaml with the content:
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: my-app-ingress
namespace: default
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/rewrite-target: "/"
nginx.ingress.kubernetes.io/custom-http-errors: '403'
nginx.ingress.kubernetes.io/default-backend: default-http-backend
nginx.ingress.kubernetes.io/whitelist-source-range: 207.34.xxx.xx/32
spec:
rules:
- host: myapp.rabello.me
http:
paths:
- path: "/"
backend:
serviceName: custom-http-backend
servicePort: 80
Apply the spec: k apply -f my-app-ingress.yaml
Check the ingress with the command:
➜ ~ k get ing
NAME HOSTS ADDRESS PORTS AGE
my-app-ingress myapp.rabello.me 146.148.xx.xxx 80 36m
That's all!
If I test from home with my whitelisted IP, the custom page is shown, but if I try to access it from my cellphone on a 4G network, the nginx default page is displayed.
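The same check can be done from a shell (assuming myapp.rabello.me resolves to the ingress address); a non-whitelisted source should see the 403 answered by default-http-backend:
curl -i http://myapp.rabello.me/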
Note: I'm using the ingress and services in the same namespace; if you need to work across different namespaces, you need to use an ExternalName service.
I hope that helps!
References:
kubernetes deployments
kubernetes service
nginx ingress
nginx annotations
I want to create a custom 403 error page. Currently I already have an Ingress created, with a whitelist annotation configured.
So any attempt to access my web app outside that IP range receives a 403 error.
In order to create a custom page I tried adding the following annotations:
kind: Ingress
metadata:
name: my-app-ingress
namespace: default
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/rewrite-target: "/"
nginx.ingress.kubernetes.io/custom-http-errors: '403'
nginx.ingress.kubernetes.io/default-backend: default-http-backend
nginx.ingress.kubernetes.io/whitelist-source-range: 125.10.156.36/32
spec:
rules:
- host: venkat.dev.vboffice.com
http:
paths:
- path: "/"
backend:
serviceName: custom-http-backend
servicePort: 80
where default-http-backend is the name of an app already deployed with the default nginx page.
If I test from home with my whitelisted IP, the custom page is shown, but if I try to access it from my cellphone on a 4G network, it displays default backend - 404.
Do I need to add any nginx config change to the custom-http-backend pod?
Deployment 1: default-http-backend
apiVersion: apps/v1
kind: Deployment
metadata:
name: default-http-backend
spec:
selector:
matchLabels:
app: default-http-backend
template:
metadata:
labels:
app: default-http-backend
spec:
containers:
- name: default-http-backend
image: nginx
ports:
- name: http
containerPort: 80
imagePullPolicy: IfNotPresent
---
apiVersion: v1
kind: Service
metadata:
name: default-http-backend
spec:
selector:
app: default-http-backend
ports:
- protocol: TCP
port: 80
targetPort: 80
Deployment 2: custom-http-backend
apiVersion: apps/v1
kind: Deployment
metadata:
name: custom-http-backend
spec:
selector:
matchLabels:
app: custom-http-backend
template:
metadata:
labels:
app: custom-http-backend
spec:
containers:
- name: custom-http-backend
image: inanimate/echo-server
ports:
- name: http
containerPort: 8080
imagePullPolicy: IfNotPresent
---
apiVersion: v1
kind: Service
metadata:
name: custom-http-backend
spec:
selector:
app: custom-http-backend
ports:
- protocol: TCP
port: 80
targetPort: 8080
One can customize the 403 error page for ingress-nginx (/etc/nginx/template) just by editing the nginx.tmpl file and then mounting it into the ingress-nginx controller deployment. Below is the part of nginx.tmpl that needs to be edited:
{{/* Build server redirects (from/to www) */}}
{{ range $redirect := .RedirectServers }}
## start server {{ $redirect.From }}
server {
server_name {{ $redirect.From }};
{{ buildHTTPListener $all $redirect.From }}
{{ buildHTTPSListener $all $redirect.From }}
ssl_certificate_by_lua_block {
certificate.call()
}
error_page 403 /403.html;
{{ if gt (len $cfg.BlockUserAgents) 0 }}
if ($block_ua) {
return 403;
}
{{ end }}
{{ if gt (len $cfg.BlockReferers) 0 }}
if ($block_ref) {
return 403;
}
{{ end }}
location = /403.html {
root /usr/local/nginx/html/;
internal;
}
set_by_lua_block $redirect_to {
local request_uri = ngx.var.request_uri
if string.sub(request_uri, -1) == "/" then
request_uri = string.sub(request_uri, 1, -2)
end
{{ if ne $all.ListenPorts.HTTPS 443 }}
{{ $redirect_port := (printf ":%v" $all.ListenPorts.HTTPS) }}
return string.format("%s://%s%s%s", ngx.var.scheme, "{{ $redirect.To }}", "{{ $redirect_port }}", request_uri)
{{ else }}
return string.format("%s://%s%s", ngx.var.scheme, "{{ $redirect.To }}", request_uri)
{{ end }}
}
return {{ $all.Cfg.HTTPRedirectCode }} $redirect_to;
}
## end server {{ $redirect.From }}
{{ end }}
{{ range $server := $servers }}
## start server {{ $server.Hostname }}
server {
server_name {{ buildServerName $server.Hostname }} {{range $server.Aliases }}{{ . }} {{ end }};
error_page 403 /403.html;
{{ if gt (len $cfg.BlockUserAgents) 0 }}
if ($block_ua) {
return 403;
}
{{ end }}
{{ if gt (len $cfg.BlockReferers) 0 }}
if ($block_ref) {
return 403;
}
{{ end }}
location = /403.html {
root /usr/local/nginx/html/;
internal;
}
{{ template "SERVER" serverConfig $all $server }}
{{ if not (empty $cfg.ServerSnippet) }}
# Custom code snippet configured in the configuration configmap
{{ $cfg.ServerSnippet }}
{{ end }}
{{ template "CUSTOM_ERRORS" (buildCustomErrorDeps "upstream-default-backend" $cfg.CustomHTTPErrors $all.EnableMetrics) }}
}
## end server {{ $server.Hostname }}
{{ end }}
In the above snippet, error_page 403 /403.html; is declared before we return 403. Then the location for /403.html is defined; its root is the path where one should mount the 403.html page, in this case /usr/local/nginx/html/.
The snippet below will help you mount the volume with the custom pages:
volumes:
- name: custom-errors
configMap:
# Provide the name of the ConfigMap you want to mount.
name: custom-ingress-pages
items:
- key: "404.html"
path: "404.html"
- key: "403.html"
path: "403.html"
- key: "50x.html"
path: "50x.html"
- key: "index.html"
path: "index.html"
This solution doesn't require you to spawn another/extra service or pod of any kind to work.
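For completeness, the volume above also needs a matching volumeMounts entry on the controller container (a sketch; the mountPath matches the root used in nginx.tmpl):
volumeMounts:
  - name: custom-errors
    mountPath: /usr/local/nginx/html/
and the ConfigMap itself can be built from your local files (custom-ingress-pages matches the snippet above):
kubectl create configmap custom-ingress-pages \
  --from-file=404.html --from-file=403.html \
  --from-file=50x.html --from-file=index.html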
For more info: https://engineering.zenduty.com/blog/2022/03/02/customizing-error-pages
You need to create and deploy a custom default backend which will return a custom error page. Follow the doc to deploy a custom default backend, and configure the nginx ingress controller by modifying its deployment yaml to use this custom default backend.
The deployment yaml for the custom default backend is here and the source code is here.
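As an illustration of that modification (a sketch; the namespace and service name are placeholders), the controller is pointed at the custom backend with the --default-backend-service flag in its Deployment args:
# excerpt from the ingress-nginx controller Deployment
spec:
  containers:
    - name: controller
      args:
        - /nginx-ingress-controller
        # namespace/name of the Service serving your custom error pages
        - --default-backend-service=ingress-nginx/custom-default-backend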