I have a SPA and a microservices architecture. I am running the app locally with skaffold dev on Kubernetes, using Google Cloud Platform (GCP). I am connecting my frontend to my backend using Ingress-NGINX. When I go to the host name mavata.dev in my browser (configured in my local hosts file), the site no longer loads: I get "Cannot GET localhost:9000/main.js" with net::ERR_CONNECTION_REFUSED. See below for my config:
Kubernetes Config:
(ingress-srv.yaml)
# RUN: kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.3.1/deploy/static/provider/cloud/deploy.yaml
# for GCP run: kubectl create clusterrolebinding cluster-admin-binding --clusterrole cluster-admin --user $(gcloud config get-value account)
# then run: kubectl apply -f https://raw.githubusercontent.com/kubernetes/ingress-nginx/controller-v1.4.0/deploy/static/provider/cloud/deploy.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ingress-srv
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/use-regex: 'true'
# certmanager.k8s.io/cluster-issuer: core-prod
# nginx.ingress.kubernetes.io/proxy-read-timeout: "1800"
# nginx.ingress.kubernetes.io/proxy-send-timeout: "1800"
# nginx.ingress.kubernetes.io/rewrite-target: /
# nginx.ingress.kubernetes.io/secure-backends: "true"
# nginx.ingress.kubernetes.io/ssl-redirect: "true"
# nginx.ingress.kubernetes.io/websocket-services: core-service
# nginx.org/websocket-services: core-service
#---
# nginx.ingress.kubernetes.io/proxy-read-timeout: "3600"
# nginx.ingress.kubernetes.io/proxy-send-timeout: "3600"
# nginx.ingress.kubernetes.io/server-snippets: |
# location / {
# proxy_set_header Upgrade $http_upgrade;
# proxy_http_version 1.1;
# proxy_set_header X-Forwarded-Host $http_host;
# proxy_set_header X-Forwarded-Proto $scheme;
# proxy_set_header X-Forwarded-For $remote_addr;
# proxy_set_header Host $host;
# proxy_set_header Connection "upgrade";
# proxy_cache_bypass $http_upgrade;
# }
spec:
rules:
- host: mavata.dev # requires an entry in the local 'hosts' file (C:\Windows\System32\drivers\etc\hosts); see the sketch after this manifest
http:
paths:
# - backend:
# pathType: Prefix
# serviceName: tornado-socket
# servicePort: 8000
# - path: /api/company/create
# pathType: Prefix
# backend:
# service:
# name: company-clusterip-srv
# port:
# number: 4000
# - path: /api/company/?(.*)
# pathType: Prefix
# backend:
# service:
# name: company-srv
# port:
# number: 4000
# - path: /api/companies
# pathType: Prefix
# backend:
# service:
# name: companies-srv
# port:
# number: 4000
- path: /api/users/?(.*)
pathType: Prefix
backend:
service:
name: auth-server-srv
port:
number: 4000
# - path: /api/permissions/?(.*)
# pathType: Prefix
# backend:
# service:
# name: permissions-srv
# port:
# number: 4000
- path: /?(.*)
pathType: Prefix
backend:
service:
name: client-srv
port:
number: 9000
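The host rule above only resolves if mavata.dev is mapped on the local machine. A minimal hosts-file entry, assuming the ingress controller is reachable on localhost (otherwise point it at the controller's external IP):

127.0.0.1 mavata.dev

On Windows the file is C:\Windows\System32\drivers\etc\hosts; on Linux/macOS it is /etc/hosts.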
(client-depl.yaml)
apiVersion: apps/v1
kind: Deployment # type of k8s object we want to create
metadata:
name: client-depl
spec:
replicas: 1
selector:
matchLabels:
app: client
template:
metadata:
labels:
app: client
spec:
containers:
- name: client-container
image: us.gcr.io/mavata/frontend
ports:
- containerPort: 9000
---
apiVersion: v1
kind: Service
metadata:
name: client-srv
spec:
selector:
app: client
ports:
- name: client-container
protocol: TCP
port: 9000
targetPort: 9000
SPA Webpack Dev Config:
(webpack.dev.config)
const { merge } = require('webpack-merge');
// const ModuleFederationPlugin = require('webpack/lib/container/ModuleFederationPlugin');
const commonConfig = require('./webpack.common');
// const packageJson = require('../package.json');
const globals = require('../src/data-variables/global');
const port = globals.port;
const devConfig = {
mode: 'development',
output: {
publicPath: `http://localhost:${port}/` // don't forget the slash at the end
},
devServer: {
host: '0.0.0.0',
port: port,
allowedHosts: ['mavata.dev'],
historyApiFallback: {
index: 'index.html',
},
},
// plugins: [
// new ModuleFederationPlugin({
// name: 'container',
// filename: 'remoteEntry.js',
// remotes: {
// marketingMfe: 'marketingMod@http://localhost:8081/remoteEntry.js',
// authMfe: 'authMod@http://localhost:8082/remoteEntry.js',
// companyMfe: 'companyMod@http://localhost:8083/remoteEntry.js',
// dataMfe: 'dataMod@http://localhost:8084/remoteEntry.js'
// },
// exposes: {
// './Functions': './src/functions/Functions',
// './Variables': './src/data-variables/Variables'
// },
// shared: {...packageJson.dependencies, ...packageJson.peerDependencies},
// }),
// ],
};
module.exports = merge(commonConfig, devConfig);
(webpack.common.config)
const HtmlWebpackPlugin = require('html-webpack-plugin');
const path = require('path');
module.exports = {
module: {
rules: [
{
test: /\.m?js$/,
exclude: /node_modules/,
use: {
loader: 'babel-loader',
options: {
presets: ['@babel/preset-react', '@babel/preset-env'],
plugins: ['@babel/plugin-transform-runtime'],
},
},
},
{
test: /\.css$/,
use: ['style-loader', 'css-loader']
},
{
test: /\.(scss)$/,
use: [
{
// Adds CSS to the DOM by injecting a `<style>` tag
loader: 'style-loader'
},
{
// Interprets `@import` and `url()` like `import/require()` and will resolve them
loader: 'css-loader'
},
{
// Loads a SASS/SCSS file and compiles it to CSS
loader: 'sass-loader'
},
]
},
// {
// test: /\.s[ac]ss$/i,
// use: [
// // Creates `style` nodes from JS strings
// "style-loader",
// // Translates CSS into CommonJS
// "css-loader",
// // Compiles Sass to CSS
// "sass-loader",
// ],
// },
{
test: /\.svg$/,
use: ['@svgr/webpack'],
},
{
test: /\.(woff(2)?|ttf|eot)(\?v=\d+\.\d+\.\d+)?$/,
exclude: /node_modules/,
use: [
{
loader: 'file-loader',
options: {
name: '[path][name].[ext]',
outputPath: 'fonts/'
}
}
]
},
{
// Load all images as base64 encoding if they are smaller than 8192 bytes
test: /\.(png|jpg|jpeg|gif|ico|svg|webp)$/,
use: [
{
// loader: 'url-loader',
loader: 'file-loader',
options: {
// On development we want to see where the file is coming from, hence we preserve the [path]
name: '[path][name].[ext]?hash=[hash:20]',
//name: '[path][name].[ext]',
limit: 8192
},
},
],
},
{
// Load all icons
test: /\.(eot|woff|woff2|svg|ttf)([\?]?.*)$/,
use: [
{
loader: 'file-loader',
}
],
},
{
test: /\.(ttf|eot|woff|woff2)$/,
loader: 'file-loader',
options: {
name: 'fonts/[name].[ext]',
},
},
],
},
plugins: [
new HtmlWebpackPlugin({
template: './public/index.html',
}),
],
resolve: {
extensions: ['', '.js', '.jsx', '.scss', '.eot', '.ttf', '.svg', '.woff'],
modules: ['node_modules', 'src', 'scripts', 'images', 'fonts'],
alias: {
Navbar: path.resolve(__dirname, '../src/components/navbar/'),
containerMfe: path.resolve(__dirname, '../src/'),
Variables: path.resolve(__dirname, '../src/data-variables/Variables.js'),
Functions: path.resolve(__dirname, '../src/functions/Functions.js')
}
},
};
Related
We are trying to deploy a logic app as a containerized workload in AKS. Following is our Dockerfile:
FROM mcr.microsoft.com/azure-functions/dotnet:3.0.14492-appservice
ENV AzureWebJobsStorage=<StorageAccount connection string>
ENV AZURE_FUNCTIONS_ENVIRONMENT Development
ENV AzureWebJobsScriptRoot=/home/site/wwwroot
ENV AzureFunctionsJobHost__Logging__Console__IsEnabled=true
ENV FUNCTIONS_V2_COMPATIBILITY_MODE=true
COPY ./bin/release/netcoreapp3.1/publish/ /home/site/wwwroot
Following is our deployment manifest file:
apiVersion: apps/v1
kind: Deployment
metadata:
name: pfna-pgt-sf-pdfextract
namespace: canary
labels:
app: pfna-pgt-sf-pdfextract
spec:
replicas: 1
selector:
matchLabels:
app: pfna-pgt-sf-pdfextract
template:
metadata:
labels:
app: pfna-pgt-sf-pdfextract
spec:
nodeSelector:
beta.kubernetes.io/os: linux
containers:
- name: pfna-pgt-sf-pdfextract
image: "image_link"
resources:
requests:
cpu: 100m
memory: 128Mi
limits:
cpu: 250m
memory: 256Mi
ports:
- containerPort: 80
env:
- name: AzureBlob_connectionString
value: <connection_string>
- name: AzureWebJobsStorage
value: <connection_string>
imagePullSecrets:
- name: sbx-acr-secret
---
apiVersion: v1
kind: Service
metadata:
name: pfna-pgt-sf-pdfextract
namespace: canary
labels:
app: pfna-pgt-sf-pdfextract
spec:
type: LoadBalancer
ports:
- port: 80
targetPort: 80
protocol: TCP
name: http-pfna-pgt-sf-pdfextract
selector:
app: pfna-pgt-sf-pdfextract
Following is connections.json:
{
"serviceProviderConnections": {
"AzureBlob": {
"parameterValues": {
"connectionString": "#appsetting('AzureWebJobsStorage')"
},
"serviceProvider": {
"id": "/serviceProviders/AzureBlob"
},
"displayName": "localAzureBlob"
}
},
"managedApiConnections": {}
}
Following is the host.json:
{
"version": "2.0",
"logging": {
"applicationInsights": {
"samplingSettings": {
"isEnabled": true,
"excludedTypes": "Request"
}
}
},
"extensionBundle": {
"id": "Microsoft.Azure.Functions.ExtensionBundle.Workflows",
"version": "[1.*, 2.0.0)"
},
"extensions": {
"workflow": {
"settings": {
"Runtime.Backend.VariableOperation.MaximumStatelessVariableSize": "5000000"
}
}
}
}
The image runs successfully in Docker Desktop, but when deployed to AKS we get 'Function host is not running'.
Please help resolve this.
You need to specify WEBSITE_HOSTNAME as well (it doesn't matter what the value is, it just needs to be set; see the sketch below).
That being said, as of today there is another issue that is causing the function host to not start (libadvapi32.dll).
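A minimal sketch of that change, added to the existing env block of the deployment manifest above (the value shown is an arbitrary placeholder, since only the presence of the variable matters):

env:
  - name: AzureBlob_connectionString
    value: <connection_string>
  - name: AzureWebJobsStorage
    value: <connection_string>
  - name: WEBSITE_HOSTNAME
    value: "localhost"  # arbitrary placeholder; the Functions host only needs the variable to be set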
I have this chart of a personal project deployed in minikube:
---
# Source: frontend/templates/service.yaml
kind: Service
apiVersion: v1
metadata:
name: xxx-app-service
spec:
selector:
app: xxx-app
ports:
- protocol: TCP
port: 3000
targetPort: 3000
---
# Source: frontend/templates/deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
deployment.kubernetes.io/revision: '3'
creationTimestamp: '2022-06-19T21:57:01Z'
generation: 3
labels:
app: xxx-app
name: xxx-app
namespace: default
resourceVersion: '43299'
uid: 7c43767a-abbd-4806-a9d2-6712847a0aad
spec:
progressDeadlineSeconds: 600
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
app: xxx-app
strategy:
rollingUpdate:
maxSurge: 25%
maxUnavailable: 25%
type: RollingUpdate
template:
metadata:
creationTimestamp: null
labels:
app: xxx-app
spec:
containers:
- image: "registry.gitlab.com/esavara/xxx/wm:staging"
name: frontend
imagePullPolicy: Always
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
ports:
- containerPort: 3000
livenessProbe:
httpGet:
path: /
port: 3000
initialDelaySeconds: 10
periodSeconds: 3
env:
- name: PORT
value: "3000"
resources: {}
dnsPolicy: ClusterFirst
imagePullSecrets:
- name: regcred
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
---
# Source: frontend/templates/ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
creationTimestamp: '2022-06-19T22:28:58Z'
generation: 1
name: xxx-app
namespace: default
resourceVersion: '44613'
uid: b58dcd17-ee1f-42e5-9dc7-d915a21f97b5
spec:
ingressClassName: nginx
rules:
- http:
paths:
- backend:
service:
name: "xxx-app-service"
port:
number: 3000
path: /
pathType: Prefix
status:
loadBalancer:
ingress:
- ip: 192.168.39.80
---
# Source: frontend/templates/gitlab-registry-sealed.json
{
"kind": "SealedSecret",
"apiVersion": "bitnami.com/v1alpha1",
"metadata": {
"name": "regcred",
"namespace": "default",
"creationTimestamp": null
},
"spec": {
"template": {
"metadata": {
"name": "regcred",
"namespace": "default",
"creationTimestamp": null
},
"type": "kubernetes.io/dockerconfigjson",
"data": null
},
"encryptedData": {
".dockerconfigjson": "AgBpHoQw1gBq0IFFYWnxlBLLYl1JC23TzbRWGgLryVzEDP8p+NAGjngLFZmtklmCEHLK63D9pp3zL7YQQYgYBZUjpEjj8YCTOhvnjQIg7g+5b/CPXWNoI5TuNexNJFKFv1mN5DzDk9np/E69ogRkDJUvUsbxvVXs6/TKGnRbnp2NuI7dTJ18QgGXdLXs7S416KE0Yid9lggw06JrDN/OSxaNyUlqFGcRJ6OfGDAHivZRV1Kw9uoX06go3o+AjVd6eKlDmmvaY/BOc52bfm7pY2ny1fsXGouQ7R7V1LK0LcsCsKdAkg6/2DU3v32mWZDKJgKkK5efhTQr1KGOBoLuuHKX6nF5oMA1e1Ii3wWe77lvWuvlpaNecCBTc7im+sGt0dyJb4aN5WMLoiPGplGqnuvNqEFa/nhkbwXm5Suke2FQGKyzDsMIBi9p8PZ7KkOJTR1s42/8QWVggTGH1wICT1RzcGzURbanc0F3huQ/2RcTmC4UvYyhCUqr3qKrbAIYTNBayfyhsBaN5wVRnV5LiPxjLbbOmObSr/8ileJgt1HMQC3V9pVLZobaJvlBjr/mmNlrF118azJOFY+a/bqzmtBWwlcvVuel/EaOb8uA8mgwfnbnShMinL1OWTHoj+D0ayFmUdsQgMYwRmStnC7x/6OXThmBgfyrLguzz4V2W8O8qbdDz+O5QoyboLUuR9WQb/ckpRio2qa5tidnKXzZzbWzFEevv9McxvCC1+ovvw4IullU8ra3FutnTentmPHKU2OPr1EhKgFKIX20u8xZaeIJYLCiZlVQohylpjkHnBZo1qw+y1CTiDwytunpmkoGGAXLx++YQSjEJEo889PmVVlSwZ8p/Rdusiz1WbgKqFt27yZaOfYzR2bT++HuB5x6zqfK6bbdV/UZndXs"
}
}
}
I'm trying to use Telepresence to redirect traffic from the deployed application to a Docker container that has my project mounted inside with hot reloading, so I can continue developing it but inside Kubernetes. However, running telepresence intercept xxx-app-service --port 3000:3000 --env-file /tmp/xxx-app-service.env fails with the following error:
telepresence: error: workload "xxx-app-service.default" not found
Why is this happening and how do I fix it?
I tried to convert the working Kubernetes manifests below from
##namespace
---
apiVersion: v1
kind: Namespace
metadata:
name: poc
##postgress
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: db
name: db
namespace: poc
spec:
replicas: 1
selector:
matchLabels:
app: db
template:
metadata:
labels:
app: db
spec:
containers:
- image: postgres
name: postgres
env:
- name: POSTGRES_USER
value: postgres
- name: POSTGRES_PASSWORD
value: postgres
ports:
- containerPort: 5432
name: postgres
---
apiVersion: v1
kind: Service
metadata:
labels:
app: db
name: db
namespace: poc
spec:
type: ClusterIP
ports:
- name: "db-service"
port: 5432
targetPort: 5432
selector:
app: db
##adminer
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: ui
name: ui
namespace: poc
spec:
replicas: 1
selector:
matchLabels:
app: ui
template:
metadata:
labels:
app: ui
spec:
containers:
- image: adminer
name: adminer
ports:
- containerPort: 8080
name: ui
---
apiVersion: v1
kind: Service
metadata:
labels:
app: ui
name: ui
namespace: poc
spec:
type: NodePort
ports:
- name: "ui-service"
port: 8080
targetPort: 8080
selector:
app: ui
to
import * as k8s from "#pulumi/kubernetes";
import * as kx from "#pulumi/kubernetesx";
//db
const dbLabels = { app: "db" };
const dbDeployment = new k8s.apps.v1.Deployment("db", {
spec: {
selector: { matchLabels: dbLabels },
replicas: 1,
template: {
metadata: { labels: dbLabels },
spec: {
containers: [
{
name: "postgres",
image: "postgres",
env: [{ name: "POSTGRES_USER", value: "postgres"},{ name: "POSTGRES_PASSWORD", value: "postgres"}],
ports: [{containerPort: 5432}]
}
]
}
}
}
});
const dbService = new k8s.core.v1.Service("db", {
metadata: { labels: dbDeployment.spec.template.metadata.labels },
spec: {
selector: dbLabels,
type: "ClusterIP",
ports: [{ port: 5432, targetPort: 5432, protocol: "TCP" }],
}
});
//adminer
const uiLabels = { app: "ui" };
const uiDeployment = new k8s.apps.v1.Deployment("ui", {
spec: {
selector: { matchLabels: uiLabels },
replicas: 1,
template: {
metadata: { labels: uiLabels },
spec: {
containers: [
{
name: "adminer",
image: "adminer",
ports: [{containerPort: 8080}],
}
]
}
}
}
});
const uiService = new k8s.core.v1.Service("ui", {
metadata: { labels: uiDeployment.spec.template.metadata.labels },
spec: {
selector: uiLabels,
type: "NodePort",
ports: [{ port: 8080, targetPort: 8080, protocol: "TCP" }]
}
});
With this, pulumi up -y succeeds without error, but the application is not fully up and running, because the adminer image expects the Postgres database hostname to be db, while Pulumi appears to be changing the service name (appending a random suffix).
My question here is: how do I make this work?
Is there a way in Pulumi to enforce strict naming?
Note: I know we can easily pass the hostname as an env variable to the adminer image, but I am wondering if there is anything that would let us keep the name unchanged.
Pulumi automatically adds random suffixes to your resource names to help with replacing resources. You can find more information about this in the FAQ.
If you'd like to disable this, you can override it using the metadata, like so:
import * as k8s from "#pulumi/kubernetes";
import * as kx from "#pulumi/kubernetesx";
//db
const dbLabels = { app: "db" };
const dbDeployment = new k8s.apps.v1.Deployment("db", {
spec: {
selector: { matchLabels: dbLabels },
replicas: 1,
template: {
metadata: { labels: dbLabels },
spec: {
containers: [
{
name: "postgres",
image: "postgres",
env: [{ name: "POSTGRES_USER", value: "postgres"},{ name: "POSTGRES_PASSWORD", value: "postgres"}],
ports: [{containerPort: 5432}]
}
]
}
}
}
});
const dbService = new k8s.core.v1.Service("db", {
metadata: {
name: "db", // explicitly set a name on the service
labels: dbDeployment.spec.template.metadata.labels
},
spec: {
selector: dbLabels,
type: "ClusterIP",
ports: [{ port: 5432, targetPort: 5432, protocol: "TCP" }],
}
});
With that said, it's not always best practice to hardcode names like this; if possible, you should reference outputs from your resources and pass them to new resources, as sketched below.
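A minimal sketch of that approach (not from the original question), reusing uiLabels and dbService from the code above: the generated Service name is passed to the adminer container as an environment variable instead of hardcoding "db". ADMINER_DEFAULT_SERVER is the variable the official adminer image reads for its default host; if your image differs, substitute whichever variable your UI reads.

const uiDeployment = new k8s.apps.v1.Deployment("ui", {
    spec: {
        selector: { matchLabels: uiLabels },
        replicas: 1,
        template: {
            metadata: { labels: uiLabels },
            spec: {
                containers: [
                    {
                        name: "adminer",
                        image: "adminer",
                        ports: [{ containerPort: 8080 }],
                        env: [
                            // dbService.metadata.name is a Pulumi Output<string>;
                            // referencing it here avoids hardcoding the suffixed name
                            { name: "ADMINER_DEFAULT_SERVER", value: dbService.metadata.name },
                        ],
                    }
                ]
            }
        }
    }
});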
I have the following manifests for deploying Istio egress gateway routing:
---
apiVersion: networking.istio.io/v1alpha3
kind: ServiceEntry
metadata:
name: REDACTED-egress-se
spec:
hosts:
- sahfpxa.REDACTED
ports:
- number: 8080
name: http-port
protocol: HTTP
resolution: DNS
---
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: sahfpxa-REDACTED-egress-gw
spec:
selector:
istio: egressgateway
servers:
- port:
number: 8080
name: http
protocol: HTTP
hosts:
- sahfpxa.REDACTED
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
name: egressgateway-for-sahfpxa-REDACTED
spec:
host: istio-egressgateway.istio-system.svc.cluster.local
subsets:
- name: sahfpxa
---
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: direct-sahfpxa-REDACTED-through-egress-gateway
spec:
hosts:
- sahfpxa.REDACTED
gateways:
- REDACTED/REDACTED-egress-gw
- mesh
http:
- match:
- gateways:
- mesh
port: 8080
route:
- destination:
host: istio-egressgateway.istio-system.svc.cluster.local
subset: sahfpxa
port:
number: 80
weight: 100
- match:
- gateways:
- REDACTED/sahfpxa-REDACTED-egress-gw
port: 8080
route:
- destination:
host: sahfpxa.REDACTED
port:
number: 8080
weight: 100
But I get a connection refused from the istio-proxy sidecar container in the affected namespace, and an HTTP 503 error from the workload container in that namespace.
Any ideas what could be wrong with the configuration or how I can debug it?
Thanks in advance.
Best regards,
rforberger
There were a few errors in your manifests; for example, the DestinationRule was not pointing at your ServiceEntry.
You can try to match yours against these manifest files:
apiVersion: networking.istio.io/v1alpha3
kind: ServiceEntry
metadata:
name: etth
spec:
hosts:
- etth.pl
ports:
- number: 8080
name: http-port
protocol: HTTP
resolution: DNS
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
name: istio-egressgateway
spec:
selector:
istio: egressgateway
servers:
- port:
number: 80
name: http
protocol: HTTP
hosts:
- etth.pl
---
apiVersion: networking.istio.io/v1alpha3
kind: DestinationRule
metadata:
name: egressgateway-for-cnn
spec:
host: istio-egressgateway.istio-system.svc.cluster.local
subsets:
- name: etth
apiVersion: networking.istio.io/v1alpha3
kind: VirtualService
metadata:
name: direct-cnn-through-egress-gateway
spec:
hosts:
- etth.pl
gateways:
- istio-egressgateway
- mesh
http:
- match:
- gateways:
- mesh
port: 80
route:
- destination:
host: istio-egressgateway.istio-system.svc.cluster.local
subset: etth
port:
number: 80
weight: 100
- match:
- gateways:
- istio-egressgateway
port: 80
route:
- destination:
host: etth.pl
port:
number: 8080
weight: 100
You can check whether the routes are present with:
$ istioctl pc routes $(kubectl get pods -l istio=egressgateway -o jsonpath='{.items[0].metadata.name}' -n istio-system).istio-system -o json
[
{
"name": "http.80",
"virtualHosts": [
{
"name": "etth.pl:80",
"domains": [
"etth.pl",
"etth.pl:80"
],
"routes": [
{
"match": {
"prefix": "/",
"caseSensitive": true
},
"route": {
"cluster": "outbound|8080||etth.pl",
"timeout": "0s",
"retryPolicy": {
"retryOn": "connect-failure,refused-stream,unavailable,cancelled,resource-exhausted,retriable-status-codes",
"numRetries": 2,
"retryHostPredicate": [
{
"name": "envoy.retry_host_predicates.previous_hosts"
}
],
"hostSelectionRetryMaxAttempts": "5",
"retriableStatusCodes": [
503
]
},
"maxGrpcTimeout": "0s"
},
"metadata": {
"filterMetadata": {
"istio": {
"config": "/apis/networking/v1alpha3/namespaces/default/virtual-service/direct-cnn-through-egress-gateway"
}
}
},
"decorator": {
"operation": "etth.pl:8080/*"
},
"typedPerFilterConfig": {
"mixer": {
"#type": "type.googleapis.com/istio.mixer.v1.config.client.ServiceConfig",
"disableCheckCalls": true,
"mixerAttributes": {
"attributes": {
"destination.service.host": {
"stringValue": "etth.pl"
},
"destination.service.name": {
"stringValue": "etth.pl"
},
"destination.service.namespace": {
"stringValue": "default"
}
}
},
"forwardAttributes": {
"attributes": {
"destination.service.host": {
"stringValue": "etth.pl"
},
"destination.service.name": {
"stringValue": "etth.pl"
},
"destination.service.namespace": {
"stringValue": "default"
}
}
}
}
}
}
]
}
],
"validateClusters": false
},
{
"virtualHosts": [
{
"name": "backend",
"domains": [
"*"
],
"routes": [
{
"match": {
"prefix": "/stats/prometheus"
},
"route": {
"cluster": "prometheus_stats"
}
}
]
}
]
}
]
I am trying to deploy MinIO in Kubernetes using the stable Helm chart,
and when I check the status of the release with
helm status minio
the desired pod capacity is 4, but the current capacity is 0.
I checked the journalctl logs for anything from the kubelet, but found nothing.
I have attached all the rendered chart manifests; can someone please point out what I am doing wrong?
---
# Source: minio/templates/secrets.yaml
apiVersion: v1
kind: Secret
metadata:
name: RELEASE-NAME-minio
labels:
app: minio
chart: minio-1.7.0
release: RELEASE-NAME
heritage: Tiller
type: Opaque
data:
accesskey: RFJMVEFEQU1DRjNUQTVVTVhOMDY=
secretkey: bHQwWk9zWmp5MFpvMmxXN3gxeHlFWmF5bXNPUkpLM1VTb3VqeEdrdw==
---
# Source: minio/templates/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: RELEASE-NAME-minio
labels:
app: minio
chart: minio-1.7.0
release: RELEASE-NAME
heritage: Tiller
data:
initialize: |-
#!/bin/sh
set -e ; # Have script exit in the event of a failed command.
# connectToMinio
# Use a check-sleep-check loop to wait for Minio service to be available
connectToMinio() {
ATTEMPTS=0 ; LIMIT=29 ; # Allow 30 attempts
set -e ; # fail if we can't read the keys.
ACCESS=$(cat /config/accesskey) ; SECRET=$(cat /config/secretkey) ;
set +e ; # The connections to minio are allowed to fail.
echo "Connecting to Minio server: http://$MINIO_ENDPOINT:$MINIO_PORT" ;
MC_COMMAND="mc config host add myminio http://$MINIO_ENDPOINT:$MINIO_PORT $ACCESS $SECRET" ;
$MC_COMMAND ;
STATUS=$? ;
until [ $STATUS = 0 ]
do
ATTEMPTS=`expr $ATTEMPTS + 1` ;
echo \"Failed attempts: $ATTEMPTS\" ;
if [ $ATTEMPTS -gt $LIMIT ]; then
exit 1 ;
fi ;
sleep 2 ; # 1 second intervals between attempts
$MC_COMMAND ;
STATUS=$? ;
done ;
set -e ; # reset `e` as active
return 0
}
# checkBucketExists ($bucket)
# Check if the bucket exists, by using the exit code of `mc ls`
checkBucketExists() {
BUCKET=$1
CMD=$(/usr/bin/mc ls myminio/$BUCKET > /dev/null 2>&1)
return $?
}
# createBucket ($bucket, $policy, $purge)
# Ensure bucket exists, purging if asked to
createBucket() {
BUCKET=$1
POLICY=$2
PURGE=$3
# Purge the bucket, if set & exists
# Since PURGE is user input, check explicitly for `true`
if [ $PURGE = true ]; then
if checkBucketExists $BUCKET ; then
echo "Purging bucket '$BUCKET'."
set +e ; # don't exit if this fails
/usr/bin/mc rm -r --force myminio/$BUCKET
set -e ; # reset `e` as active
else
echo "Bucket '$BUCKET' does not exist, skipping purge."
fi
fi
# Create the bucket if it does not exist
if ! checkBucketExists $BUCKET ; then
echo "Creating bucket '$BUCKET'"
/usr/bin/mc mb myminio/$BUCKET
else
echo "Bucket '$BUCKET' already exists."
fi
# At this point, the bucket should exist, skip checking for existence
# Set policy on the bucket
echo "Setting policy of bucket '$BUCKET' to '$POLICY'."
/usr/bin/mc policy $POLICY myminio/$BUCKET
}
# Try connecting to Minio instance
connectToMinio
# Create the bucket
createBucket bucket none false
config.json: |-
{
"version": "26",
"credential": {
"accessKey": "DR06",
"secretKey": "lt0ZxGkw"
},
"region": "us-east-1",
"browser": "on",
"worm": "off",
"domain": "",
"storageclass": {
"standard": "",
"rrs": ""
},
"cache": {
"drives": [],
"expiry": 90,
"maxuse": 80,
"exclude": []
},
"notify": {
"amqp": {
"1": {
"enable": false,
"url": "",
"exchange": "",
"routingKey": "",
"exchangeType": "",
"deliveryMode": 0,
"mandatory": false,
"immediate": false,
"durable": false,
"internal": false,
"noWait": false,
"autoDeleted": false
}
},
"nats": {
"1": {
"enable": false,
"address": "",
"subject": "",
"username": "",
"password": "",
"token": "",
"secure": false,
"pingInterval": 0,
"streaming": {
"enable": false,
"clusterID": "",
"clientID": "",
"async": false,
"maxPubAcksInflight": 0
}
}
},
"elasticsearch": {
"1": {
"enable": false,
"format": "namespace",
"url": "",
"index": ""
}
},
"redis": {
"1": {
"enable": false,
"format": "namespace",
"address": "",
"password": "",
"key": ""
}
},
"postgresql": {
"1": {
"enable": false,
"format": "namespace",
"connectionString": "",
"table": "",
"host": "",
"port": "",
"user": "",
"password": "",
"database": ""
}
},
"kafka": {
"1": {
"enable": false,
"brokers": null,
"topic": ""
}
},
"webhook": {
"1": {
"enable": false,
"endpoint": ""
}
},
"mysql": {
"1": {
"enable": false,
"format": "namespace",
"dsnString": "",
"table": "",
"host": "",
"port": "",
"user": "",
"password": "",
"database": ""
}
},
"mqtt": {
"1": {
"enable": false,
"broker": "",
"topic": "",
"qos": 0,
"clientId": "",
"username": "",
"password": "",
"reconnectInterval": 0,
"keepAliveInterval": 0
}
}
}
}
---
# Source: minio/templates/service.yaml
apiVersion: v1
kind: Service
metadata:
name: RELEASE-NAME-minio
labels:
app: minio
chart: minio-1.7.0
release: RELEASE-NAME
heritage: Tiller
spec:
type: ClusterIP
clusterIP: None
ports:
- name: service
port: 9000
targetPort: 9000
protocol: TCP
selector:
app: minio
release: RELEASE-NAME
---
# Source: minio/templates/statefulset.yaml
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: RELEASE-NAME-minio
labels:
app: minio
chart: minio-1.7.0
release: RELEASE-NAME
heritage: Tiller
spec:
serviceName: RELEASE-NAME-minio
replicas: 4
selector:
matchLabels:
app: minio
release: RELEASE-NAME
template:
metadata:
name: RELEASE-NAME-minio
labels:
app: minio
release: RELEASE-NAME
spec:
containers:
- name: minio
image: node1:5000/minio/minio:RELEASE.2018-09-01T00-38-25Z
imagePullPolicy: IfNotPresent
command: [ "/bin/sh",
"-ce",
"cp /tmp/config.json &&
/usr/bin/docker-entrypoint.sh minio -C server
http://RELEASE-NAME-minio-0.RELEASE-NAME-minio.default.svc.cluster.local/export
http://RELEASE-NAME-minio-1.RELEASE-NAME-minio.default.svc.cluster.local/export
http://RELEASE-NAME-minio-2.RELEASE-NAME-minio.default.svc.cluster.local/export
http://RELEASE-NAME-minio-3.RELEASE-NAME-minio.default.svc.cluster.local/export" ]
volumeMounts:
- name: export
mountPath: /export
- name: minio-server-config
mountPath: "/tmp/config.json"
subPath: config.json
- name: minio-config-dir
mountPath:
ports:
- name: service
containerPort: 9000
env:
- name: MINIO_ACCESS_KEY
valueFrom:
secretKeyRef:
name: RELEASE-NAME-minio
key: accesskey
- name: MINIO_SECRET_KEY
valueFrom:
secretKeyRef:
name: RELEASE-NAME-minio
key: secretkey
livenessProbe:
tcpSocket:
port: service
initialDelaySeconds: 5
periodSeconds: 30
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
readinessProbe:
tcpSocket:
port: service
periodSeconds: 15
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
resources:
requests:
cpu: 250m
memory: 256Mi
volumes:
- name: minio-user
secret:
secretName: RELEASE-NAME-minio
- name: minio-server-config
configMap:
name: RELEASE-NAME-minio
- name: minio-config-dir
emptyDir: {}
volumeClaimTemplates:
- metadata:
name: export
spec:
accessModes: [ "ReadWriteOnce" ]
storageClassName: local-fast
resources:
requests:
storage: 49Gi
---
# Source: minio/templates/ingress.yaml
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: RELEASE-NAME-minio
labels:
app: minio
chart: minio-1.7.0
release: RELEASE-NAME
heritage: Tiller
annotations:
nginx.ingress.kubernetes.io/affinity: cookie
nginx.ingress.kubernetes.io/session-cookie-hash: sha1
nginx.ingress.kubernetes.io/session-cookie-name: route
spec:
tls:
- hosts:
- minio.sample.com
secretName: tls-secret
rules:
- host: minio.sample.com
http:
paths:
- path: /
backend:
serviceName: RELEASE-NAME-minio
servicePort: 9000
I suspect you are not getting the persistent volume. Check your kube-controller-manager logs on your active master. This will vary depending on the cloud you are using: AWS, GCP, Azure, OpenStack, etc. The kube-controller-manager usually runs in a Docker container on the master, so you can do something like:
docker logs <kube-controller-manager-container>
Also, check:
kubectl get pvc
kubectl get pv
Hope it helps.
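If the claims turn out to be stuck in Pending, describing one of them usually shows the reason (for example, no StorageClass named local-fast exists). A sketch, assuming the usual StatefulSet claim naming of <template>-<pod> for the manifests above:

kubectl get pvc -n <namespace>
kubectl describe pvc export-RELEASE-NAME-minio-0 -n <namespace>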
A bit more digging gave me the answer: the StatefulSet was deployed, but the pods were not created.
kubectl describe statefulset -n <namespace> minio
The events said it was looking for a mount path, which was "" (empty in previous versions of the chart); changing it solved my issue (see the sketch below).
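For reference, the empty mount in the StatefulSet above is the minio-config-dir entry. A fixed sketch (the /root/.minio/ path is an assumption based on MinIO's default config directory; use whatever directory your chart version passes to the server):

volumeMounts:
  - name: minio-config-dir
    mountPath: /root/.minio/  # was "" in the older chart; must point at a real directory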