I read through the Karpenter documentation at https://karpenter.sh/v0.16.1/getting-started/getting-started-with-terraform/#install-karpenter-helm-chart and followed the instructions step by step, but I got errors at the end.
kubectl logs -f -n karpenter -l app.kubernetes.io/name=karpenter -c controller
DEBUG controller.provisioning Relaxing soft constraints for pod since it previously failed to schedule, removing: spec.topologySpreadConstraints = {"maxSkew":1,"topologyKey":"topology.kubernetes.io/zone","whenUnsatisfiable":"ScheduleAnyway","labelSelector":{"matchLabels":{"app.kubernetes.io/instance":"karpenter","app.kubernetes.io/name":"karpenter"}}} {"commit": "b157d45", "pod": "karpenter/karpenter-5755bb5b54-rh65t"}
2022-09-10T00:13:13.122Z ERROR controller.provisioning Could not schedule pod, incompatible with provisioner "default", incompatible requirements, key karpenter.sh/provisioner-name, karpenter.sh/provisioner-name DoesNotExist not in karpenter.sh/provisioner-name In [default] {"commit": "b157d45", "pod": "karpenter/karpenter-5755bb5b54-rh65t"}
Below is the source code:
cat main.tf
terraform {
  required_version = "~> 1.0"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 4.0"
    }
    helm = {
      source  = "hashicorp/helm"
      version = "~> 2.5"
    }
    kubectl = {
      source  = "gavinbunney/kubectl"
      version = "~> 1.14"
    }
  }
}
provider "aws" {
region = "us-east-1"
}
locals {
cluster_name = "karpenter-demo"
# Used to determine correct partition (i.e. - `aws`, `aws-gov`, `aws-cn`, etc.)
partition = data.aws_partition.current.partition
}
data "aws_partition" "current" {}
module "vpc" {
# https://registry.terraform.io/modules/terraform-aws-modules/vpc/aws/latest
source = "terraform-aws-modules/vpc/aws"
version = "3.14.4"
name = local.cluster_name
cidr = "10.0.0.0/16"
azs = ["us-east-1a", "us-east-1b", "us-east-1c"]
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.101.0/24", "10.0.102.0/24", "10.0.103.0/24"]
enable_nat_gateway = true
single_nat_gateway = true
one_nat_gateway_per_az = false
public_subnet_tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared"
"kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = 1
}
}
module "eks" {
# https://registry.terraform.io/modules/terraform-aws-modules/eks/aws/latest
source = "terraform-aws-modules/eks/aws"
version = "18.29.0"
cluster_name = local.cluster_name
cluster_version = "1.22"
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
# Required for Karpenter role below
enable_irsa = true
node_security_group_additional_rules = {
ingress_nodes_karpenter_port = {
description = "Cluster API to Node group for Karpenter webhook"
protocol = "tcp"
from_port = 8443
to_port = 8443
type = "ingress"
source_cluster_security_group = true
}
}
node_security_group_tags = {
# NOTE - if creating multiple security groups with this module, only tag the
# security group that Karpenter should utilize with the following tag
# (i.e. - at most, only one security group should have this tag in your account)
"karpenter.sh/discovery/${local.cluster_name}" = local.cluster_name
}
# Only need one node to get Karpenter up and running.
# This ensures core services such as VPC CNI, CoreDNS, etc. are up and running
# so that Karpenter can be deployed and start managing compute capacity as required
eks_managed_node_groups = {
initial = {
instance_types = ["m5.large"]
# Not required nor used - avoid tagging two security groups with same tag as well
create_security_group = false
min_size = 1
max_size = 1
desired_size = 1
iam_role_additional_policies = [
"arn:${local.partition}:iam::aws:policy/AmazonSSMManagedInstanceCore", # Required by Karpenter
"arn:${local.partition}:iam::aws:policy/AmazonEKSWorkerNodePolicy",
"arn:${local.partition}:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", #for access to ECR images
"arn:${local.partition}:iam::aws:policy/CloudWatchAgentServerPolicy"
]
tags = {
# This will tag the launch template created for use by Karpenter
"karpenter.sh/discovery/${local.cluster_name}" = local.cluster_name
}
}
}
}
# The EKS module creates an IAM role for the EKS managed node group nodes. We'll use that for Karpenter.
# We need to create an instance profile we can reference.
# Karpenter can use this instance profile to launch new EC2 instances and those instances will be able to connect to your cluster.
resource "aws_iam_instance_profile" "karpenter" {
  name = "KarpenterNodeInstanceProfile-${local.cluster_name}"
  role = module.eks.eks_managed_node_groups["initial"].iam_role_name
}
# Create the KarpenterController IAM Role
# Karpenter requires permissions like launching instances, which means it needs an IAM role that grants it access. The config
# below will create an AWS IAM Role, attach a policy, and authorize the Service Account to assume the role using IRSA. We will
# create the ServiceAccount and connect it to this role during the Helm chart install.
module "karpenter_irsa" {
  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
  version = "5.3.3"

  role_name                          = "karpenter-controller-${local.cluster_name}"
  attach_karpenter_controller_policy = true

  karpenter_tag_key               = "karpenter.sh/discovery/${local.cluster_name}"
  karpenter_controller_cluster_id = module.eks.cluster_id
  karpenter_controller_node_iam_role_arns = [
    module.eks.eks_managed_node_groups["initial"].iam_role_arn
  ]

  oidc_providers = {
    ex = {
      provider_arn               = module.eks.oidc_provider_arn
      namespace_service_accounts = ["karpenter:karpenter"]
    }
  }
}
# Install Karpenter Helm Chart
# Use helm to deploy Karpenter to the cluster. We are going to use the helm_release Terraform resource to do the deploy and pass in the
# cluster details and IAM role Karpenter needs to assume.
provider "helm" {
  kubernetes {
    host                   = module.eks.cluster_endpoint
    cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)

    exec {
      api_version = "client.authentication.k8s.io/v1beta1"
      command     = "aws"
      args        = ["eks", "get-token", "--cluster-name", local.cluster_name]
    }
  }
}
resource "helm_release" "karpenter" {
namespace = "karpenter"
create_namespace = true
name = "karpenter"
repository = "https://charts.karpenter.sh"
chart = "karpenter"
version = "v0.16.1"
set {
name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
value = module.karpenter_irsa.iam_role_arn
}
set {
name = "clusterName"
value = module.eks.cluster_id
}
set {
name = "clusterEndpoint"
value = module.eks.cluster_endpoint
}
set {
name = "aws.defaultInstanceProfile"
value = aws_iam_instance_profile.karpenter.name
}
}
# Provisioner
# Create a default provisioner using the command below. This provisioner configures instances to connect to your cluster's endpoint and
# discovers resources like subnets and security groups using the cluster's name.
# This provisioner will create capacity as long as the sum of all created capacity is less than the specified limit.
provider "kubectl" {
  apply_retry_count      = 5
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
  load_config_file       = false

  exec {
    api_version = "client.authentication.k8s.io/v1beta1"
    command     = "aws"
    args        = ["eks", "get-token", "--cluster-name", module.eks.cluster_id]
  }
}
resource "kubectl_manifest" "karpenter_provisioner" {
yaml_body = <<-YAML
apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
name: default
spec:
requirements:
- key: karpenter.sh/capacity-type
operator: In
values: ["spot"]
limits:
resources:
cpu: 1000
provider:
subnetSelector:
Name: "*private*"
securityGroupSelector:
karpenter.sh/discovery/${module.eks.cluster_id}: ${module.eks.cluster_id}
tags:
karpenter.sh/discovery/${module.eks.cluster_id}: ${module.eks.cluster_id}
ttlSecondsAfterEmpty: 30
YAML
depends_on = [
helm_release.karpenter
]
}
cat <<EOF | kubectl apply -f -
apiVersion: apps/v1
kind: Deployment
metadata:
  name: inflate
spec:
  replicas: 0
  selector:
    matchLabels:
      app: inflate
  template:
    metadata:
      labels:
        app: inflate
    spec:
      terminationGracePeriodSeconds: 0
      containers:
        - name: inflate
          image: public.ecr.aws/eks-distro/kubernetes/pause:3.2
          resources:
            requests:
              cpu: 1
EOF
kubectl scale deployment inflate --replicas 5
kubectl logs -f -n karpenter -l app.kubernetes.io/name=karpenter -c controller
DEBUG controller.provisioning Relaxing soft constraints for pod since it previously failed to schedule, removing: spec.topologySpreadConstraints = {"maxSkew":1,"topologyKey":"topology.kubernetes.io/zone","whenUnsatisfiable":"ScheduleAnyway","labelSelector":{"matchLabels":{"app.kubernetes.io/instance":"karpenter","app.kubernetes.io/name":"karpenter"}}} {"commit": "b157d45", "pod": "karpenter/karpenter-5755bb5b54-rh65t"}
2022-09-10T00:13:13.122Z ERROR controller.provisioning Could not schedule pod, incompatible with provisioner "default", incompatible requirements, key karpenter.sh/provisioner-name, karpenter.sh/provisioner-name DoesNotExist not in karpenter.sh/provisioner-name In [default] {"commit": "b157d45", "pod": "karpenter/karpenter-5755bb5b54-rh65t"}
I believe this is due to the pod topology spread constraints defined in the Karpenter deployment here:
https://github.com/aws/karpenter/blob/main/charts/karpenter/values.yaml#L73-L77
You can read further on what pod topologySpreadConstraints do here:
https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
If you increase desired_size to 2, which matches the default deployment replicas above, that should resolve the error.
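For reference, a minimal sketch of that change against the eks_managed_node_groups block in the main.tf above (bumping max_size as well, so the group can actually run two nodes, is an assumption on my part):

  eks_managed_node_groups = {
    initial = {
      instance_types        = ["m5.large"]
      create_security_group = false

      # Two nodes let the two default Karpenter replicas satisfy the chart's
      # topology spread constraints instead of staying Pending
      min_size     = 2
      max_size     = 2
      desired_size = 2

      # ... iam_role_additional_policies and tags unchanged ...
    }
  }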
I have a K8s cluster with SMB-mounted drives connected to an AWS Storage Gateway / file share. We've recently migrated that SGW to another AWS account, and in the process the IP address and password for that SGW changed.
I noticed that our existing setup has a K8s storage class that looks for a K8s secret called "smbcreds", which has the keys "username" and "password". I'm assuming this is in line with the setup guide for the Helm chart we're using, "csi-driver-smb".
I assumed changing the secret used by the storage class would update everything downstream that uses that storage class, but apparently it does not. I'm obviously a little cautious when it comes to potentially blowing away important data, so what do I need to do to update everything to use the new secret and IP config?
Here is a simple example of our setup in Terraform -
provider "kubernetes" {
config_path = "~/.kube/config"
config_context = "minikube"
}
resource "helm_release" "container_storage_interface_for_aws" {
count = 1
name = "local-filesystem-csi"
repository = "https://raw.githubusercontent.com/kubernetes-csi/csi-driver-smb/master/charts"
chart = "csi-driver-smb"
namespace = "default"
}
resource "kubernetes_storage_class" "aws_storage_gateway" {
count = 1
metadata {
name = "smbmount"
}
storage_provisioner = "smb.csi.k8s.io"
reclaim_policy = "Retain"
volume_binding_mode = "WaitForFirstConsumer"
parameters = {
source = "//1.2.3.4/old-file-share"
"csi.storage.k8s.io/node-stage-secret-name" = "smbcreds"
"csi.storage.k8s.io/node-stage-secret-namespace" = "default"
}
mount_options = ["vers=3.0", "dir_mode=0777", "file_mode=0777"]
}
resource "kubernetes_persistent_volume_claim" "aws_storage_gateway" {
count = 1
metadata {
name = "smbmount-volume-claim"
}
spec {
access_modes = ["ReadWriteMany"]
resources {
requests = {
storage = "10Gi"
}
}
storage_class_name = "smbmount"
}
}
resource "kubernetes_deployment" "main" {
metadata {
name = "sample-pod"
}
spec {
replicas = 1
selector {
match_labels = {
app = "sample-pod"
}
}
template {
metadata {
labels = {
app = "sample-pod"
}
}
spec {
volume {
name = "shared-fileshare"
persistent_volume_claim {
claim_name = "smbmount-volume-claim"
}
}
container {
name = "ubuntu"
image = "ubuntu"
command = ["sleep", "3600"]
image_pull_policy = "IfNotPresent"
volume_mount {
name = "shared-fileshare"
read_only = false
mount_path = "/data"
}
}
}
}
}
}
My original change was to update the K8s secret "smbcreds" and change source = "//1.2.3.4/old-file-share" to source = "//5.6.7.8/new-file-share".
The solution I settled on was to create a second K8s storage class and persistent volume claim that are connected to the new AWS Storage Gateway, and then switch the K8s deployments to use the new PVC.
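Roughly, that looks like the following sketch (the names smbmount-new, smbcreds-new, and smbmount-volume-claim-new are placeholders; substitute whatever you call the new secret and resources):

resource "kubernetes_storage_class" "aws_storage_gateway_new" {
  metadata {
    name = "smbmount-new"
  }

  storage_provisioner = "smb.csi.k8s.io"
  reclaim_policy      = "Retain"
  volume_binding_mode = "WaitForFirstConsumer"

  parameters = {
    # New Storage Gateway share and the secret holding its new credentials
    source                                           = "//5.6.7.8/new-file-share"
    "csi.storage.k8s.io/node-stage-secret-name"      = "smbcreds-new"
    "csi.storage.k8s.io/node-stage-secret-namespace" = "default"
  }

  mount_options = ["vers=3.0", "dir_mode=0777", "file_mode=0777"]
}

resource "kubernetes_persistent_volume_claim" "aws_storage_gateway_new" {
  metadata {
    name = "smbmount-volume-claim-new"
  }

  spec {
    access_modes = ["ReadWriteMany"]

    resources {
      requests = {
        storage = "10Gi"
      }
    }

    storage_class_name = "smbmount-new"
  }
}

The deployment's persistent_volume_claim block then points claim_name at the new claim.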
I run a Kubernetes cluster on AWS managed by Terraform. I'd like to automatically restart the pods in the cluster at some regular interval, maybe weekly. Since the entire cluster is managed by Terraform, I'd like to run the auto-restart command through Terraform as well.
At first I assumed that Kubernetes would have some kind of TTL for its pods, but that does not seem to be the case.
Elsewhere on SO I've seen the ability to run auto restarts using a cron job managed by Kubernetes (e.g. How to schedule pods restart). Terraform has a relevant resource, kubernetes_cron_job, but I can't fully understand how to set it up with the permissions necessary to actually run.
Would appreciate some feedback!
Below is what I've tried:
resource "kubernetes_cron_job" "deployment_restart" {
metadata {
name = "deployment-restart"
}
spec {
concurrency_policy = "Forbid"
schedule = "0 8 * * *"
starting_deadline_seconds = 10
successful_jobs_history_limit = 10
job_template {
metadata {}
spec {
backoff_limit = 2
active_deadline_seconds = 600
template {
metadata {}
spec {
service_account_name = var.service_account.name
container {
name = "kubectl"
image = "bitnami/kubectl"
command = ["kubectl rollout restart deploy"]
}
}
}
}
}
}
}
resource "kubernetes_role" "deployment_restart" {
metadata {
name = "deployment-restart"
}
rule {
api_groups = ["apps", "extensions"]
resources = ["deployments"]
verbs = ["get", "list", "patch", "watch"]
}
}
resource "kubernetes_role_binding" "deployment_restart" {
metadata {
name = "deployment-restart"
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "Role"
name = kubernetes_role.deployment_restart.metadata[0].name
}
subject {
kind = "ServiceAccount"
name = var.service_account.name
api_group = "rbac.authorization.k8s.io"
}
}
This was based on a combination of Granting RBAC roles in k8s cluster using terraform and How to schedule pods restart.
Currently getting the following error:
Error: RoleBinding.rbac.authorization.k8s.io "deployment-restart" is invalid: subjects[0].apiGroup: Unsupported value: "rbac.authorization.k8s.io": supported values: ""
As per the official documentation, rolebinding.subjects.apiGroup should be empty for ServiceAccount subjects.
kubectl explain rolebinding.subjects.apiGroup
KIND:     RoleBinding
VERSION:  rbac.authorization.k8s.io/v1

FIELD:    apiGroup

DESCRIPTION:
     APIGroup holds the API group of the referenced subject. Defaults to "" for
     ServiceAccount subjects. Defaults to "rbac.authorization.k8s.io" for User
     and Group subjects.
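So the fix in the Terraform above is to drop api_group from the ServiceAccount subject (or set it to an empty string). A minimal sketch of the corrected block:

  subject {
    kind = "ServiceAccount"
    name = var.service_account.name
    # api_group is omitted so it defaults to "" for ServiceAccount subjects
  }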
I would like to deploy an application where the pod does not go to Running status (it should be non-operational), and a user triggers it only when it is really required, using Infrastructure as Code (Terraform). I am aware of kubectl scale --replicas=0. Any other leads or info would be much appreciated.
You can keep the replica count at zero for the Deployment in your YAML file, if that is what you are using.
Or, if you are using Terraform:
resource "kubernetes_deployment" "example" {
metadata {
name = "terraform-example"
labels = {
test = "MyExampleApp"
}
}
spec {
replicas = 0
selector {
match_labels = {
test = "MyExampleApp"
}
}
template {
metadata {
labels = {
test = "MyExampleApp"
}
}
spec {
container {
image = "nginx:1.7.8"
name = "example"
resources {
limits = {
cpu = "0.5"
memory = "512Mi"
}
requests = {
cpu = "250m"
memory = "50Mi"
}
}
liveness_probe {
http_get {
path = "/nginx_status"
port = 80
http_header {
name = "X-Custom-Header"
value = "Awesome"
}
}
initial_delay_seconds = 3
period_seconds = 3
}
}
}
}
}
}
There is no other way around it; alternatively, you can use a Kubernetes client to do this if you don't want to use Terraform.
If you want to edit a local file using Terraform, check out local-exec:
This invokes a process on the machine running Terraform, not on the resource.
resource "aws_instance" "web" {
# ...
provisioner "local-exec" {
command = "echo ${self.private_ip} >> private_ips.txt"
}
}
Using a sed command (or any other command) in local-exec, you can update the YAML and then apply it.
https://www.terraform.io/docs/language/resources/provisioners/local-exec.html
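For example, a rough sketch of that pattern, assuming a local manifest called deployment.yaml and a var.replicas variable (both placeholders) and that kubectl is available on the machine running Terraform:

resource "null_resource" "set_replicas" {
  # Re-run whenever the desired replica count changes
  triggers = {
    replicas = var.replicas
  }

  provisioner "local-exec" {
    # Rewrite the replica count in the local manifest, then apply it
    command = "sed -i 's/replicas: .*/replicas: ${var.replicas}/' deployment.yaml && kubectl apply -f deployment.yaml"
  }
}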
On trying to create a replication controller on a cluster in GKE using Terraform, I get this error:
kubernetes_replication_controller.k8_pod: Failed to create replication controller: replicationcontrollers is forbidden: User "client" cannot create replicationcontrollers in the namespace "default"
I have kubectl configured and have tested it by creating pods and replication controllers on Google Kubernetes Engine. I have also tried
gcloud config unset container/use_client_certificate, as some resources suggest (Resource 1, Resource 2).
The resource provisioning code sample is as follows:
resource "kubernetes_replication_controller" "k8_pod" {
metadata {
name = "testing-deployment"
labels {
App = "wassup"
}
}
spec {
selector {
App = "wassup"
}
template {
container {
image = "image/name/here"
name = "testing-deployment"
port {
container_port = 8080
}
}
}
}
}