apiVersion: getambassador.io/v3alpha1
kind: TLSContext
metadata:
name: example-context
spec:
hosts: ["*.example.stackoverflow.com"]
secret: example-acme-secret
alpn_protocols: h2,http/1.1
min_tls_version: v1.2
redirect_cleartext_from: 8080
---
apiVersion: getambassador.io/v3alpha1
kind: Host
metadata:
name: foo-host
spec:
hostname: ["*.example.stackoverflow.com"]
tlsContext:
name: example-context
tlsSecret:
name: example-acme-secret
---
apiVersion: getambassador.io/v3alpha1
kind: TLSContext
metadata:
name: example-context-test2
spec:
hosts: ["*.api.stackoverflow.com"]
secret: example-stackoverflow-secret
alpn_protocols: h2,http/1.1
min_tls_version: v1.2
redirect_cleartext_from: 8080
---
apiVersion: getambassador.io/v3alpha1
kind: Host
metadata:
name: example-context-test2-host
spec:
hostname: ["*.api.stackoverflow.com"]
tlsContext:
name: example-context-test2
tlsSecret:
name: example-stackoverflow-secret
I'm trying to run Iceberg on Kubernetes.
Here are the files that I'm using:
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.27.0 (b0ed6a2c9)
creationTimestamp: null
labels:
io.kompose.service: mc
name: mc
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: mc
strategy: {}
template:
metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.27.0 (b0ed6a2c9)
creationTimestamp: null
labels:
io.kompose.service: mc
spec:
containers:
- command:
- /bin/sh
- -c
- ' until (/usr/bin/mc config host add minio http://minio:9000 admin password) do echo ''...waiting...'' && sleep 1; done; /usr/bin/mc rm -r --force minio/warehouse; /usr/bin/mc mb minio/warehouse; /usr/bin/mc policy set public minio/warehouse; exit 0; '
env:
- name: AWS_ACCESS_KEY_ID
value: admin
- name: AWS_REGION
value: us-east-1
- name: AWS_SECRET_ACCESS_KEY
value: password
image: minio/mc
name: mc
resources: {}
restartPolicy: Always
status: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.27.0 (b0ed6a2c9)
creationTimestamp: null
labels:
io.kompose.service: minio
name: minio
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: minio
strategy: {}
template:
metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.27.0 (b0ed6a2c9)
creationTimestamp: null
labels:
io.kompose.service: minio
spec:
containers:
- args:
- server
- /data
- --console-address
- :9001
env:
- name: MINIO_ROOT_PASSWORD
value: password
- name: MINIO_ROOT_USER
value: admin
image: minio/minio
name: minio
ports:
- containerPort: 9001
- containerPort: 9000
resources: {}
restartPolicy: Always
status: {}
---
apiVersion: v1
kind: Service
metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.27.0 (b0ed6a2c9)
creationTimestamp: null
labels:
io.kompose.service: minio
name: minio
spec:
type: LoadBalancer
ports:
- name: "9001"
port: 9001
targetPort: 9001
- name: "9000"
port: 9000
targetPort: 9000
selector:
io.kompose.service: minio
status:
loadBalancer: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.27.0 (b0ed6a2c9)
creationTimestamp: null
labels:
io.kompose.service: rest
name: rest
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: rest
strategy: {}
template:
metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.27.0 (b0ed6a2c9)
creationTimestamp: null
labels:
io.kompose.service: rest
spec:
containers:
- env:
- name: AWS_ACCESS_KEY_ID
value: admin
- name: AWS_REGION
value: us-east-1
- name: AWS_SECRET_ACCESS_KEY
value: password
- name: CATALOG_IO__IMPL
value: org.apache.iceberg.aws.s3.S3FileIO
- name: CATALOG_S3_ENDPOINT
value: http://minio:9000
- name: CATALOG_WAREHOUSE
value: s3a://warehouse/wh/
image: tabulario/iceberg-rest:0.2.0
name: rest
ports:
- containerPort: 8181
resources: {}
restartPolicy: Always
status: {}
---
apiVersion: v1
kind: Service
metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.27.0 (b0ed6a2c9)
creationTimestamp: null
labels:
io.kompose.service: rest
name: rest
spec:
type: LoadBalancer
ports:
- name: "8181"
port: 8181
targetPort: 8181
selector:
io.kompose.service: rest
status:
loadBalancer: {}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
creationTimestamp: null
labels:
io.kompose.service: spark-iceberg-claim0
name: spark-iceberg-claim0
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
status: {}
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
creationTimestamp: null
labels:
io.kompose.service: spark-iceberg-claim1
name: spark-iceberg-claim1
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 100Mi
status: {}
---
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.27.0 (b0ed6a2c9)
creationTimestamp: null
labels:
io.kompose.service: spark-iceberg
name: spark-iceberg
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: spark-iceberg
strategy:
type: Recreate
template:
metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.27.0 (b0ed6a2c9)
creationTimestamp: null
labels:
io.kompose.service: spark-iceberg
spec:
containers:
- env:
- name: AWS_ACCESS_KEY_ID
value: admin
- name: AWS_REGION
value: us-east-1
- name: AWS_SECRET_ACCESS_KEY
value: password
image: tabulario/spark-iceberg
name: spark-iceberg
ports:
- containerPort: 8888
- containerPort: 8080
resources: {}
volumeMounts:
- mountPath: /home/iceberg/warehouse
name: spark-iceberg-claim0
- mountPath: /home/iceberg/notebooks/notebooks
name: spark-iceberg-claim1
restartPolicy: Always
volumes:
- name: spark-iceberg-claim0
persistentVolumeClaim:
claimName: spark-iceberg-claim0
- name: spark-iceberg-claim1
persistentVolumeClaim:
claimName: spark-iceberg-claim1
status: {}
---
apiVersion: v1
kind: Service
metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.27.0 (b0ed6a2c9)
creationTimestamp: null
labels:
io.kompose.service: spark-iceberg
name: spark-iceberg
spec:
type: LoadBalancer
ports:
- name: "8882"
port: 8882
targetPort: 8888
- name: "8081"
port: 8081
targetPort: 8080
selector:
io.kompose.service: spark-iceberg
status:
loadBalancer: {}
After deploying, all pods are working fine except the rest pod:
kubectl get pod
When I check the log of the rest pod I get this message:
kubectl logs rest-pod
Exception in thread "main" java.lang.NumberFormatException: For input string: "tcp://10.101.184.144:8181"
How can I get the rest pod to work properly?
I tried changing the image of the rest deployment and changing some ports of the service.
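The manifests alone don't prove it, but one plausible cause (an assumption on my part) is Kubernetes' Docker-link style service environment variables: because the Service is named rest, every container in the namespace gets REST_PORT=tcp://<cluster-ip>:8181 injected at startup, and an application that expects REST_PORT to be a plain port number will fail to parse exactly the string shown in the exception. A minimal sketch of turning those injected variables off for the rest Deployment (only the relevant fields shown):

apiVersion: apps/v1
kind: Deployment
metadata:
  name: rest
spec:
  replicas: 1
  selector:
    matchLabels:
      io.kompose.service: rest
  template:
    metadata:
      labels:
        io.kompose.service: rest
    spec:
      # stop injecting REST_PORT=tcp://... style variables for Services in the namespace
      enableServiceLinks: false
      containers:
      - name: rest
        image: tabulario/iceberg-rest:0.2.0
        ports:
        - containerPort: 8181

Renaming the Service (for example to iceberg-rest) would be another way to avoid the collision, at the cost of updating anything that resolves it by name.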
I want to patch (overwrite) a list in a Kubernetes manifest with Kustomize.
I am using the patchesStrategicMerge method.
When I patch parameters that are not in a list, the patching works as expected: only the parameters addressed in patch.yaml are replaced and the rest is untouched.
When I patch a list, the whole list is replaced.
How can I replace only specific items in the list and leave the rest of the items in the list untouched?
I found these two resources:
https://github.com/kubernetes-sigs/kustomize/issues/581
https://github.com/kubernetes/community/blob/master/contributors/devel/sig-api-machinery/strategic-merge-patch.md
but wasn't able to derive the desired solution from them.
Example code:
orig-file.yaml
apiVersion: monitoring.coreos.com/v1alpha1
kind: AlertmanagerConfig
metadata:
name: alertmanager-slack-config
namespace: system-namespace
spec:
test: test
other: other-stuff
receivers:
- name: default
slackConfigs:
- name: slack
username: test-user
channel: "#alerts"
sendResolved: true
apiURL:
name: slack-webhook-url
key: address
patch.yaml:
apiVersion: monitoring.coreos.com/v1alpha1
kind: AlertmanagerConfig
metadata:
name: alertmanager-slack-config
namespace: system-namespace
spec:
test: brase-yourself
receivers:
- name: default
slackConfigs:
- name: slack
username: Karl
kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- orig-file.yaml
patchesStrategicMerge:
- patch.yaml
What I get:
apiVersion: monitoring.coreos.com/v1alpha1
kind: AlertmanagerConfig
metadata:
name: alertmanager-slack-config
namespace: system-namespace
spec:
other: other-stuff
receivers:
- name: default
slackConfigs:
- name: slack
username: Karl
test: brase-yourself
What I want:
apiVersion: monitoring.coreos.com/v1alpha1
kind: AlertmanagerConfig
metadata:
name: alertmanager-slack-config
namespace: system-namespace
spec:
other: other-stuff
receivers:
- name: default
slackConfigs:
- name: slack
username: Karl
channel: "#alerts"
sendResolved: true
apiURL:
name: slack-webhook-url
key: address
test: brase-yourself
What you can do is use a JSON patch instead of patchesStrategicMerge, so in your case:
cat <<EOF >./kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- orig-file.yaml
patches:
- path: patch.yaml
target:
group: monitoring.coreos.com
version: v1alpha1
kind: AlertmanagerConfig
name: alertmanager-slack-config
EOF
patch.yaml:
cat <<EOF >./patch.yaml
- op: replace
path: /spec/receivers/0/slackConfigs/0/username
value: Karl
EOF
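To verify the result, render the overlay (a quick sanity check; kubectl's built-in kustomize is assumed here, kustomize build . works the same way):

kubectl kustomize .

The rendered AlertmanagerConfig should keep channel, sendResolved and apiURL from orig-file.yaml, since the JSON patch only touches the path /spec/receivers/0/slackConfigs/0/username.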
I have currently written the below auth manifest for Istio.
apiVersion: security.istio.io/v1beta1
kind: RequestAuthentication
metadata:
name: "jwt-validation"
namespace: some-namespace
spec:
selector:
matchLabels:
auth: required
jwtRules:
- issuer: "https://you.auth0.com/"
jwksUri: "https://you.auth0.com/.well-known/jwks.json"
---
apiVersion: security.istio.io/v1beta1
kind: AuthorizationPolicy
metadata:
name: jwt-auth-policy
namespace: some-namespace
spec:
selector:
matchLabels:
auth: required
action: DENY
rules:
- from:
- source:
notRequestPrincipals: ["*"]
for which I am getting the below response in the browser:
RBAC: access denied
But instead of this I want to get a JSON response saying:
{
"status": "failure",
"message": "Not Authorised"
}
with status code 403.
Now I have tried the below Lua filter:
apiVersion: networking.istio.io/v1alpha3
kind: EnvoyFilter
metadata:
name: custom-filter-response-code
namespace: istio-system
spec:
workloadSelector:
labels:
istio: ingressgateway
configPatches:
- applyTo: HTTP_FILTER
match:
context: GATEWAY
listener:
filterChain:
filter:
name: "envoy.filters.network.http_connection_manager"
subFilter:
name: "envoy.extAuthz"
patch:
operation: INSERT_AFTER
value:
name: envoy.custom-resp
typed_config:
"#type": "type.googleapis.com/envoy.extensions.filters.http.lua.v3.Lua"
inlineCode: |
function envoy_on_response(response_handle)
if response_handle:headers():get(":status") == "401" then
response_handle:headers():replace(":status", "403")
else
local body = response_handle:body()
local jsonString = tostring(body:getBytes(0, body:length()))
jsonString = jsonString:gsub("(status|failur)", "(message|Not Authorised)")
response_handle:body():set(jsonString)
end
end
Please guide me with the correct snippet.
I am trying to deploy Consul using a Kubernetes StatefulSet with the following manifest:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: consul
labels:
app: consul
rules:
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: consul
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: consul
subjects:
- kind: ServiceAccount
name: consul
namespace: dev-ethernet
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: consul
namespace: dev-ethernet
labels:
app: consul
---
apiVersion: v1
kind: Secret
metadata:
name: consul-secret
namespace: dev-ethernet
data:
consul-gossip-encryption-key: "aIRpNkHT/8Tkvf757sj2m5AcRlorWNgzcLI4yLEMx7M="
---
apiVersion: v1
kind: ConfigMap
metadata:
name: consul-config
namespace: dev-ethernet
data:
server.json: |
{
"bind_addr": "0.0.0.0",
"client_addr": "0.0.0.0",
"disable_host_node_id": true,
"data_dir": "/consul/data",
"log_level": "INFO",
"datacenter": "us-west-2",
"domain": "cluster.local",
"ports": {
"http": 8500
},
"retry_join": [
"provider=k8s label_selector=\"app=consul,component=server\""
],
"server": true,
"telemetry": {
"prometheus_retention_time": "5m"
},
"ui": true
}
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: consul
namespace: dev-ethernet
spec:
selector:
matchLabels:
app: consul
component: server
serviceName: consul
podManagementPolicy: Parallel
replicas: 3
updateStrategy:
rollingUpdate:
partition: 0
type: RollingUpdate
template:
metadata:
labels:
app: consul
component: server
annotations:
consul.hashicorp.com/connect-inject: "false"
spec:
serviceAccountName: consul
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- consul
topologyKey: kubernetes.io/hostname
terminationGracePeriodSeconds: 10
securityContext:
fsGroup: 1000
containers:
- name: consul
image: "consul:1.8"
args:
- "agent"
- "-advertise=$(POD_IP)"
- "-bootstrap-expect=3"
- "-config-file=/etc/consul/config/server.json"
- "-encrypt=$(GOSSIP_ENCRYPTION_KEY)"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: GOSSIP_ENCRYPTION_KEY
valueFrom:
secretKeyRef:
name: consul-secret
key: consul-gossip-encryption-key
volumeMounts:
- name: data
mountPath: /consul/data
- name: config
mountPath: /etc/consul/config
lifecycle:
preStop:
exec:
command:
- /bin/sh
- -c
- consul leave
ports:
- containerPort: 8500
name: ui-port
- containerPort: 8400
name: alt-port
- containerPort: 53
name: udp-port
- containerPort: 8080
name: http-port
- containerPort: 8301
name: serflan
- containerPort: 8302
name: serfwan
- containerPort: 8600
name: consuldns
- containerPort: 8300
name: server
volumes:
- name: config
configMap:
name: consul-config
volumeClaimTemplates:
- metadata:
name: data
labels:
app: consul
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: aws-gp2
resources:
requests:
storage: 3Gi
But I get ==> encrypt has invalid key: illegal base64 data at input byte 1 when the container starts.
I have generated consul-gossip-encryption-key locally using docker run -i -t consul keygen.
Does anyone know what's wrong here?
secret.data values must be base64-encoded: the keygen output is already base64 text, but Kubernetes base64-decodes whatever is placed under data, so Consul receives raw bytes instead of the key and rejects it with illegal base64 data.
Try
kubectl create secret generic consul-secret -n dev-ethernet --from-literal=consul-gossip-encryption-key="$(docker run --rm consul keygen)" --dry-run -o=yaml
and replace
apiVersion: v1
kind: Secret
metadata:
name: consul-secret
namespace: dev-ethernet
data:
consul-gossip-encryption-key: "aIRpNkHT/8Tkvf757sj2m5AcRlorWNgzcLI4yLEMx7M="
ref: https://www.consul.io/docs/k8s/helm#v-global-gossipencryption
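Alternatively, if you prefer to keep the Secret as a plain manifest, you can put the raw keygen output under stringData instead of data and let Kubernetes do the base64 encoding for you; a minimal sketch reusing the value from the question:

apiVersion: v1
kind: Secret
metadata:
  name: consul-secret
  namespace: dev-ethernet
# stringData values are stored as-is and encoded by the API server
stringData:
  consul-gossip-encryption-key: "aIRpNkHT/8Tkvf757sj2m5AcRlorWNgzcLI4yLEMx7M="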
I was able to achieve load balancing with the sample Istio applications:
https://github.com/piomin/sample-istio-services
https://istio.io/docs/guides/bookinfo/
But I was not able to get Istio load balancing working with a single private service that has 2 versions, for example 2 Consul servers with different versions.
Service and pod definition:
apiVersion: v1
kind: Service
metadata:
name: consul-test
labels:
app: test
spec:
ports:
- port: 8500
name: http
selector:
app: test
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: consul-test-v1
spec:
replicas: 1
template:
metadata:
labels:
app: test
version: v1
spec:
containers:
- name: consul-test-v1
image: consul:latest
ports:
- containerPort: 8500
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: consul-test-v2
spec:
replicas: 1
template:
metadata:
labels:
app: test
version: v2
spec:
containers:
- name: consul-test-v2
image: consul:1.1.0
ports:
- containerPort: 8500
Gateway definition:
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: http-gateway
spec:
selector:
istio: ingressgateway # use istio default controller
servers:
- port:
number: 80
name: http
protocol: HTTP
hosts:
- "*"
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: con-gateway
spec:
hosts:
- "*"
gateways:
- http-gateway
http:
- match:
- uri:
exact: /catalog
route:
- destination:
host: consul-test
port:
number: 8500
Routing rules in virtual service:
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: consul-test
spec:
hosts:
- consul-test
gateways:
- con-gateway
- mesh
http:
- route:
- destination:
host: consul-test
subset: v1
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
name: consul-test
spec:
host: consul-test
subsets:
- name: v1
labels:
version: v1
- name: v2
labels:
version: v2
Although I route all traffic (HTTP requests) to Consul server version v1, my HTTP requests to the consul-test service land on v1 and v2 alternately, i.e. they follow a round-robin rule.
$ kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
consul-test ClusterIP 10.97.200.140 <none> 8500/TCP 9m
$ curl -L http://10.97.200.140:8500/v1/catalog/nodes
[
{
"ID": "ebfa341b-4557-a392-9f8a-8ee307113faa",
"Node": "consul-test-v1-765dd566dd-6cmj9",
"Address": "127.0.0.1",
"Datacenter": "dc1",
"TaggedAddresses": {
"lan": "127.0.0.1",
"wan": "127.0.0.1"
},
"Meta": {
"consul-network-segment": ""
},
"CreateIndex": 9,
"ModifyIndex": 10
}
]
$ curl -L http://10.97.200.140:8500/v1/catalog/nodes
[
{
"ID": "1b60a5bd-9a17-ff18-3a65-0ff95b3a836a",
"Node": "consul-test-v2-fffd475bc-st4mv",
"Address": "127.0.0.1",
"Datacenter": "dc1",
"TaggedAddresses": {
"lan": "127.0.0.1",
"wan": "127.0.0.1"
},
"Meta": {
"consul-network-segment": ""
},
"CreateIndex": 5,
"ModifyIndex": 6
}
]
I see the above-mentioned issue when curl is done against the service ClusterIP:ClusterPort:
$ kubectl get svc
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
consul-test ClusterIP 10.97.200.140 <none> 8500/TCP 9m
$ curl -L http://10.97.200.140:8500/v1/catalog/nodes
But load balancing works as expected when curl is done against INGRESS_HOST and INGRESS_PORT (determining INGRESS_HOST and INGRESS_PORT is described here):
$ curl -L http://$INGRESS_HOST:$INGRESS_PORT/v1/catalog/nodes --- WORKS
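For reference, the subset routing in the VirtualService/DestinationRule is applied by the Envoy proxies, so a useful check (a sketch; the pod name is a placeholder, and it assumes the exec'd container has curl and that sidecar injection is enabled for its namespace) is to call the service by name from a pod that already has an istio-proxy sidecar, rather than curling the ClusterIP directly from outside the mesh:

# <injected-pod> is a placeholder for any sidecar-injected pod whose image contains curl
kubectl exec <injected-pod> -- curl -sL http://consul-test:8500/v1/catalog/nodes

Requests sent straight to the ClusterIP from a host or from a pod without a sidecar bypass Envoy entirely and are load-balanced by kube-proxy, which would explain why the round-robin behaviour appears there but not through the ingress gateway.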