GKE node pool doesn't autoscale to 0 nodes

I created a GKE cluster using Terraform:
resource "google_container_cluster" "airflow_prd" {
name = "airflow-prd"
remove_default_node_pool = true
initial_node_count = 1
network = var.vpc
location = var.zone_prd
subnetwork = var.subnet_prd
project = "xxxxxx"
private_cluster_config {
enable_private_endpoint = false
enable_private_nodes = true
master_ipv4_cidr_block = "172.13.0.0/28"
master_global_access_config {
enabled = true
}
}
ip_allocation_policy {
cluster_secondary_range_name = ""
}
}
resource "google_container_node_pool" "default_prd" {
name = "default"
cluster = google_container_cluster.airflow_prd.name
initial_node_count = 2
location = var.zone_prd
node_config {
machine_type = "e2-small"
oauth_scopes = ["https://www.googleapis.com/auth/cloud-platform"]
service_account = "xxxxxxxxx.iam.gserviceaccount.com"
}
autoscaling {
max_node_count = 4
min_node_count = 2
}
}
resource "google_container_node_pool" "airflow_prd" {
name = "airflow"
cluster = google_container_cluster.airflow_prd.name
initial_node_count = 0
location = var.zone_prd
node_config {
machine_type = "e2-standard-8"
oauth_scopes = ["https://www.googleapis.com/auth/cloud-platform"]
service_account = "xxxxxx.iam.gserviceaccount.com"
}
autoscaling {
max_node_count = 1
min_node_count = 0
}
}
resource "google_container_node_pool" "etl_32_prd" {
name = "etl-32"
cluster = google_container_cluster.airflow_prd.name
initial_node_count = 0
location = var.zone_prd
node_config {
machine_type = "e2-standard-8"
oauth_scopes = ["https://www.googleapis.com/auth/cloud-platform"]
service_account = "xxxxxx.iam.gserviceaccount.com"
}
autoscaling {
max_node_count = 4
min_node_count = 0
}
}
The problem is with the etl-32 node pool. It automatically creates nodes when they are needed, but when they are no longer needed the pool only scales down to 1 node, not to 0, which is what I want. How can I make it go down to 0? The system pods all run in the default_prd node pool, which always has 2 nodes.
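One thing that can keep an otherwise idle pool at one node is a stray pod (for example a system pod) scheduled onto it, since the autoscaler will not remove a node that is running pods it cannot move. A minimal sketch of one common approach, not from the original post: taint the etl-32 pool so only workloads that explicitly tolerate it are scheduled there, letting the pool drain to 0 once those workloads finish (the dedicated=etl key/value is a placeholder):

  node_config {
    machine_type    = "e2-standard-8"
    oauth_scopes    = ["https://www.googleapis.com/auth/cloud-platform"]
    service_account = "xxxxxx.iam.gserviceaccount.com"

    # Hypothetical taint: only pods with a matching toleration (the ETL jobs)
    # can land here, so nothing pins the last node once the jobs are done.
    taint {
      key    = "dedicated"
      value  = "etl"
      effect = "NO_SCHEDULE"
    }
  }

The ETL pods would then need a matching toleration (and ideally a node selector) so they still target this pool.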

Related

Getting Hashicorp Provider Error while creating AKS cluster using Terraform

I am getting an error while creating an AKS cluster using Terraform.
Error:
Error: Failed to query available provider packages
Could not retrieve the list of available versions for provider hashicorp/file: provider registry registry.terraform.io does not have a provider named
registry.terraform.io/hashicorp/file
All modules should specify their required_providers so that external consumers will get the correct providers when using a module. To see which modules are currently depending on hashicorp/file, run the following command:
terraform providers
Above is the error I am facing. I have written the Terraform code shown below.
provider.tf:
============
provider "azurerm" {
features {}
}
terraform {
required_providers {
azurerm = {
source = "hashicorp/azurerm"
version = "2.39.0"
}
}
}
terraform.tfvars:
=================
resource_group_name = "a0474899701"
location = "CentralUS"
cluster_name = "aks01"
kubernetes_version = "1.24.4"
system_node_count = 2
user_node_count = 1
spot_node_count = 2
acr_name = "devops_acr_tf"
aks_network_plugin = "kubenet"
client_id = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
client_secret = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
main.tf:
========
# Create a Resource Group
resource "azurerm_resource_group" "aks-rg" {
  name     = var.resource_group_name
  location = var.location
}
# Create an ACR instance
resource "azurerm_container_registry" "acr" {
  name                = var.acr_name
  resource_group_name = azurerm_resource_group.aks-rg.name
  location            = var.location
  sku                 = "Standard"
  admin_enabled       = false
}

# Create a role assignment to allow AKS to access ACR
resource "azurerm_role_assignment" "role_acrpull" {
  scope                = azurerm_container_registry.acr.id
  role_definition_name = "AcrPull"
  # principal_id       = azurerm_kubernetes_cluster.aks.kubelet_identity.0.object_id
  principal_id                     = azurerm_kubernetes_cluster.aks.kubelet_identity.0.client_id
  skip_service_principal_aad_check = true
}

# Create a Kubernetes secret to hold the ACR credentials
# It holds the ACR credentials in a Docker config JSON format
resource "kubernetes_secret" "acr_credentials" {
  metadata {
    name = "acr-credentials"
  }
  data = {
    ".dockerconfigjson" = azurerm_container_registry.acr.docker_config_json
  }
}

# Private Key Creation
resource "tls_private_key" "aks_ssh_key" {
  algorithm = "RSA"
}

resource "tls_public_key" "aks_ssh_key" {
  private_key_pem = tls_private_key.aks_ssh_key.private_key_pem
}

resource "file" "private_key" {
  content  = tls_private_key.aks_ssh_key.private_key_pem
  filename = "aks_private_key.pem"
}

# virtual network (aks_vnet) is created in the same resource group
resource "azurerm_virtual_network" "aks_vnet" {
  name = "${var.resource_group_name}-vnet01"
  # address_space     = ["10.0.0.0/16"]
  address_space       = ["10.172.144.0/26"]
  location            = azurerm_resource_group.aks_rg.location
  resource_group_name = azurerm_resource_group.aks_rg.name
}

# subnet (aks_subnet) is created within the virtual network
resource "azurerm_subnet" "aks_subnet" {
  name                 = "${var.resource_group_name}-vnet01-subnet01"
  resource_group_name  = azurerm_resource_group.aks_rg.name
  virtual_network_name = azurerm_virtual_network.aks_vnet.name
  # address_prefix     = "10.0.1.0/24"
  address_prefix       = "10.172.144.0/27"
}

resource "azurerm_network_security_group" "azure-sg" {
  name                = "${var.resource_group_name}-nsg01"
  location            = azurerm_resource_group.aks_rg.location
  resource_group_name = azurerm_resource_group.aks_rg.name

  security_rule {
    name                       = "allow-ssh"
    priority                   = 100
    direction                  = "Inbound"
    access                     = "Allow"
    protocol                   = "Tcp"
    source_port_range          = "*"
    destination_port_range     = "22"
    source_address_prefix      = "*"
    destination_address_prefix = "*"
  }
}
resource "azurerm_kubernetes_cluster" "aks" {
name = var.cluster_name
kubernetes_version = var.kubernetes_version
location = var.location
resource_group_name = azurerm_resource_group.aks-rg.name
security_group_name = azurerm_network_security_group.azure-sg.name
dns_prefix = var.cluster_name
default_node_pool {
name = "system"
node_count = var.system_node_count
vm_size = "Standard_E4as_v4"
os_disk_size_gb = 20
os_disk_type = "Ephemeral"
vnet_subnet_id = azurerm_subnet.aks_subnet.id
os_type = "Linux"
node_image_version = "AKSUbuntu-1804gen2containerd-2023.01.10"
enable_node_public_ip = false
enable_auto_scaling = false
}
additional_node_pools {
name = "user"
node_count = var.user_node_count
vm_size = "Standard_E8as_v4"
os_disk_size_gb = 20
os_disk_type = "Ephemeral"
vnet_subnet_id = azurerm_subnet.aks_subnet.id
type = "User"
# os_type = "RedHat"
os_type = "Linux"
node_image_version = "AKSUbuntu-1804gen2containerd-2023.01.10"
enable_node_public_ip = false
enable_auto_scaling = false
}
additional_node_pools {
name = "spot"
node_count = var.spot_node_count
vm_size = "Standard_D2s_v3"
os_disk_size_gb = 20
os_disk_type = "Ephemeral"
vnet_subnet_id = azurerm_subnet.aks_subnet.id
type = "User"
# os_type = "RedHat"
os_type = "Linux"
node_image_version = "AKSUbuntu-1804gen2containerd-2023.01.10"
max_price = 0.5
enable_node_public_ip = false
enable_auto_scaling = false
eviction_policy = "Spot"
taints = ["kubernetes.azure.com/scalesetpriority=spot:NoSchedule"]
labels = {
"kubernetes.azure.com/scalesetpriority" = "spot"
}
}
kubernetes_cluster_config {
max_pods_per_node = "110"
}
identity {
type = "SystemAssigned"
}
linux_profile {
admin_username = "azureuser"
ssh_key {
key_data = tls_public_key.aks_ssh_key.public_key_openssh
}
}
network_profile {
pod_cidr = "172.32.0.0/19"
service_cidr = "172.32.0.0/19"
load_balancer_sku = "Standard"
network_plugin = var.aks_network_plugin
dns_service_ip = "172.32.0.10"
docker_bridge_cidr = "172.34.0.1/16"
}
service_principal {
client_id = var.client_id
client_secret = var.client_secret
}
tags = {
Environment = "Development"
}
}
# ACR can be attached to the AKS cluster using the "azurerm_kubernetes_cluster_container_registry_config" resource type
resource "azurerm_kubernetes_cluster_container_registry_config" "acr_config" {
cluster_name = azurerm_kubernetes_cluster.aks.name
registry_id = azurerm_container_registry.acr.id
namespace = "aks"
default_action = "Allow"
}
Above is my code; this is the error I am facing. Even after changing my provider.tf I still face the same issue. Can anyone please tell me how to solve this error?
Thanks
I tried to reproduce the same in my environment to create an AKS cluster using Terraform.
Kindly use the below Terraform code to create the AKS cluster.
Terraform Code:
provider "azurerm" {
features {}
}
resource "azurerm_resource_group" "venkatesh" {
name = "venkat-resources"
location = "West Europe"
}
resource "azurerm_container_registry" "venkatreg" {
name = "Testcontainerregistery"
resource_group_name = azurerm_resource_group.venkatesh.name
location = azurerm_resource_group.venkatesh.location
sku = "Premium"
}
resource "azurerm_kubernetes_cluster" "venkatcluster" {
name = "example-aks1"
location = azurerm_resource_group.venkatesh.location
resource_group_name = azurerm_resource_group.venkatesh.name
dns_prefix = "exampleaks1"
default_node_pool {
name = "default"
node_count = 1
vm_size = "Standard_D2_v2"
}
identity {
type = "SystemAssigned"
}
tags = {
Environment = "Production"
}
}
resource "azurerm_role_assignment" "example" {
principal_id = azurerm_kubernetes_cluster.venkatcluster.kubelet_identity[0].object_id
role_definition_name = "AcrPull"
scope = azurerm_container_registry.venkatreg.id
skip_service_principal_aad_check = true
}
Terraform apply:
Once the code ran, the resources were created successfully.
Reference:
Create a Kubernetes cluster with Azure Kubernetes Service using Terraform.

Create Pods using Rancher with Terraform

I created this simple Terraform script with Rancher to create a namespace in an imported Kubernetes cluster:
terraform {
  required_providers {
    rancher2 = {
      source  = "rancher/rancher2"
      version = "1.24.1"
    }
  }
}

provider "rancher2" {
  api_url   = "https://192.168.1.128/v3"
  token_key = "token-n4fxx:4qcgctvph7qh2sdnn762zpzg889rgw8xpd2nvcnpnr4v4wpb9zljtd"
  insecure  = true
}

resource "rancher2_namespace" "zone-1" {
  name        = "zone-1"
  project_id  = "c-m-xmhbjzdt:p-sd86v"
  description = "zone-1 namespace"

  resource_quota {
    limit {
      limits_cpu       = "100m"
      limits_memory    = "100Mi"
      requests_storage = "1Gi"
    }
  }

  container_resource_limit {
    limits_cpu      = "20m"
    limits_memory   = "20Mi"
    requests_cpu    = "1m"
    requests_memory = "1Mi"
  }
}
The question is: how can I create Pods in the Kubernetes cluster, again using a Terraform script?
Terraform offers the Kubernetes provider, which allows you to create all kinds of Kubernetes objects.
To quote the documentation of the "kubernetes_pod" resource:
resource "kubernetes_pod" "test" {
metadata {
name = "terraform-example"
}
spec {
container {
image = "nginx:1.21.6"
name = "example"
env {
name = "environment"
value = "test"
}
port {
container_port = 80
}
liveness_probe {
http_get {
path = "/"
port = 80
http_header {
name = "X-Custom-Header"
value = "Awesome"
}
}
initial_delay_seconds = 3
period_seconds = 3
}
}
dns_config {
nameservers = ["1.1.1.1", "8.8.8.8", "9.9.9.9"]
searches = ["example.com"]
option {
name = "ndots"
value = 1
}
option {
name = "use-vc"
}
}
dns_policy = "None"
}
}
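For this to actually reach the imported cluster, the kubernetes provider itself still has to be configured. A minimal sketch, assuming you point it at a kubeconfig downloaded from Rancher (the path and context name below are placeholders):

provider "kubernetes" {
  # Hypothetical kubeconfig path and context for the imported cluster.
  config_path    = "~/.kube/config"
  config_context = "my-imported-cluster"
}

Alternatively, the cluster's kubeconfig exported from Rancher can be written to a file and referenced the same way.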

I want to use the same Terraform code for both Kubernetes clusters, Oracle (OKE) and AWS (EKS)

I want to use the same Terraform code for both Kubernetes clusters, Oracle (OKE) and AWS (EKS), and install the Argo CD Helm chart on them. My code is given below for reference. The problem I am facing now: if I set k8s_cluster_type to "eks", everything works fine, but when I set k8s_cluster_type to "oke", it fails with the error below.
I get this error:
│ Error: error reading EKS Cluster (oh-appb-01): couldn't find resource
│
│ with data.aws_eks_cluster.eks,
│ on main.tf line 137, in data "aws_eks_cluster" "eks":
│ 137: data "aws_eks_cluster" "eks" {
│
What I want to do:
Even if I give a dummy eks_cluster_name or the EKS cluster doesn't exist, this code should still run, the same way it does for OKE: if I assign a dummy OKE cluster name (or the OKE cluster doesn't exist) and set the input variable k8s_cluster_type to "eks", this Terraform runs successfully. Could you suggest how I can use the same Terraform for both Kubernetes clusters to install the Argo CD Helm chart?
locals {
  argocd_ns      = "argocd"
  kubeconfig_str = var.k8s_cluster_type == "oke" ? data.oci_containerengine_cluster_kube_config.k8s.content : data.template_file.temp_kubeconfig_eks.rendered
  kubeconfig     = yamldecode(local.kubeconfig_str)
  exec_cli       = var.k8s_cluster_type == "oke" ? "oci" : "aws"

  cluster_cert     = var.k8s_cluster_type == "oke" ? base64decode(local.kubeconfig["clusters"][0]["cluster"]["certificate-authority-data"]) : base64decode(data.aws_eks_cluster.eks.certificate_authority[0].data)
  cluster_endpoint = var.k8s_cluster_type == "oke" ? local.kubeconfig["clusters"][0]["cluster"]["server"] : data.aws_eks_cluster.eks.endpoint

  exec_args_oke = [
    local.kubeconfig["users"][0]["user"]["exec"]["args"][0],
    local.kubeconfig["users"][0]["user"]["exec"]["args"][1],
    local.kubeconfig["users"][0]["user"]["exec"]["args"][2],
    local.kubeconfig["users"][0]["user"]["exec"]["args"][3],
    local.kubeconfig["users"][0]["user"]["exec"]["args"][4],
    local.kubeconfig["users"][0]["user"]["exec"]["args"][5],
    local.kubeconfig["users"][0]["user"]["exec"]["args"][6]
  ]
  exec_args_eks = ["eks", "get-token", "--cluster-name", var.eks_cluster_name]
  exec_args     = var.k8s_cluster_type == "oke" ? local.exec_args_oke : local.exec_args_eks
}
data "oci_containerengine_cluster_kube_config" "k8s" {
#count = var.k8s_cluster_type == "oke" ? 1 : 0
cluster_id = var.k8s_cluster_id
}
resource "local_file" "temp_kubeconfig" {
count = var.k8s_cluster_type == "oke" ? 1 : 0
content = local.kubeconfig_str
filename = "${path.module}/kubeconfig_temp"
}
resource "local_file" "temp_kubeconfig_eks" {
count = var.k8s_cluster_type == "oke" ? 0 : 1
content = data.template_file.temp_kubeconfig_eks.rendered
filename = "${path.module}/kubeconfig_temp"
}
resource "kubernetes_namespace" "argocd" {
metadata {
name = local.argocd_ns
}
}
resource "kubernetes_secret" "root_repo" {
depends_on = [kubernetes_namespace.argocd]
metadata {
name = var.argocd_root_repo.name
namespace = local.argocd_ns
labels = {
"argocd.argoproj.io/secret-type" = "repository"
}
}
data = {
url = var.argocd_root_repo.url
name = var.argocd_root_repo.name
password = var.argocd_root_repo_token
username = var.argocd_root_repo.name
}
}
data "template_file" "argocd-helm-values-override" {
template = file("${path.module}/templates/argocd-helm-values-override.tpl")
vars = {
argocd_ns = local.argocd_ns
repo_url = var.argocd_root_repo.url
repo_path = var.argocd_root_repo.path
repo_revision = var.argocd_root_repo.revision
}
}
resource "helm_release" "argocd" {
depends_on = [kubernetes_namespace.argocd, kubernetes_secret.root_repo ]
version = var.argocd_release.chart_version
name = var.argocd_release.release_name
chart = var.argocd_release.chart_name
repository = var.argocd_release.chart_repo
namespace = local.argocd_ns
values = [
data.template_file.argocd-helm-values-override.rendered
]
}
data "template_file" "temp_kubeconfig_eks" {
count = var.k8s_cluster_type == "oke" ? 0 : 1
template = file("${path.module}/templates/kubeconfig_eks.tpl")
vars = {
cluster_url = data.aws_eks_cluster.eks.endpoint
cluster_region = var.region
cluster_cert_authority_data = data.aws_eks_cluster.eks.certificate_authority.0.data
cluster_name = var.eks_cluster_name
}
}
provider "kubernetes" {
cluster_ca_certificate = local.cluster_cert
host = local.cluster_endpoint
exec {
api_version = "client.authentication.k8s.io/v1beta1"
command = local.exec_cli
args = local.exec_args
}
}
provider "helm" {
kubernetes {
cluster_ca_certificate = local.cluster_cert
host = local.cluster_endpoint
exec {
api_version = "client.authentication.k8s.io/v1beta1"
command = local.exec_cli
args = local.exec_args
}
}
}
data "aws_eks_cluster" "eks" {
#count = var.k8s_cluster_type == "oke" ? 0 : 1
name = var.eks_cluster_name
}
data "aws_eks_cluster_auth" "eks" {
#count = var.k8s_cluster_type == "oke" ? 0 : 1
name = var.eks_cluster_name
}
*.tfvars file:
k8s_cluster_id   = "ocid1.cluster.oc1.xxx.xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
k8s_cluster_type = "oke"
argocd_root_repo = {
  name     = "argocd-xxxx-xxxx-config",
  url      = "https://github.com/xxxxx/xxxx/argocd-xxxx-xxxx-config",
  path     = "clusters/localsand1/apps",
  revision = "master"
}
region           = "us-east-1"
eks_cluster_name = "oh-appb-01"
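The commented-out count lines on the EKS data sources hint at the usual fix: only read the EKS data sources when the cluster type is actually "eks", and reference them in a way that tolerates an empty result. A minimal sketch of that idea (only the endpoint shown; the certificate would be handled the same way):

data "aws_eks_cluster" "eks" {
  count = var.k8s_cluster_type == "eks" ? 1 : 0
  name  = var.eks_cluster_name
}

locals {
  # one() returns null when the data source was not created, so nothing is
  # read from EKS when k8s_cluster_type is "oke".
  cluster_endpoint = var.k8s_cluster_type == "oke" ? local.kubeconfig["clusters"][0]["cluster"]["server"] : one(data.aws_eks_cluster.eks[*].endpoint)
}

The template_file data source that renders the EKS kubeconfig references data.aws_eks_cluster.eks as well, so it would need the same splat/one() treatment.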

Terraform Azure create vnt-sub-db

I have some Terraform code that creates 3 VMs and a vnet, but I cannot find a working way to add 2 NSGs and 3 subnets.
I have already read a lot of the Terraform and Microsoft documentation. There are a lot of good solutions, but they are not working with my Terraform code. I would be very grateful for some help.
The main.tf:
resource "random_pet" "rg-name" {
prefix = var.resource_group_name_prefix
}
resource "azurerm_resource_group" "rg" {
name = random_pet.rg-name.id
location = var.resource_group_location
}
# Create virtual network
resource "azurerm_virtual_network" "Vnet" {
name = "Vnet"
address_space = ["10.0.0.0/16"]
location = azurerm_resource_group.rg.location
resource_group_name = azurerm_resource_group.rg.name
}
# Create subnet
resource "azurerm_subnet" "Subnet" {
name = "subnet0"
resource_group_name = azurerm_resource_group.rg.name
virtual_network_name = azurerm_virtual_network.Vnet.name
address_prefixes = ["10.0.1.0/24"]
}
resource "azurerm_subnet" "Subnet2" {
name = "subnet1"
resource_group_name = azurerm_resource_group.rg.name
virtual_network_name = azurerm_virtual_network.Vnet.name
address_prefixes = ["10.0.2.0/24"]
}
#Associate subnet to subnet_network_security_group
resource "azurerm_subnet_network_security_group_association" "public" {
subnet_id = azurerm_subnet.Subnet.id
network_security_group_id = azurerm_network_security_group.NSG.id
}
# Create public IPs
resource "azurerm_public_ip" "Pub_IP" {
name = "PublicIP"
location = azurerm_resource_group.rg.location
resource_group_name = azurerm_resource_group.rg.name
allocation_method = "Static"
}
# Create Network Security Group and rule
resource "azurerm_network_security_group" "NSG" {
name = "app_nsg_rule"
location = azurerm_resource_group.rg.location
resource_group_name = azurerm_resource_group.rg.name
}
# Create an Inbound rule for app nsg
resource "azurerm_network_security_rule" "app_nsg_rule" {
  name                        = "port8080"
  priority                    = 110
  direction                   = "Inbound"
  access                      = "Allow"
  protocol                    = "Tcp"
  source_port_range           = "*"
  destination_port_range      = "8080"
  source_address_prefix       = "*"
  destination_address_prefix  = "*"
  resource_group_name         = azurerm_resource_group.rg.name
  network_security_group_name = azurerm_network_security_group.NSG.name
}

# Create an Inbound rule for db nsg
resource "azurerm_network_security_rule" "db_nsg_rule" {
  name                        = "port5432"
  priority                    = 100
  direction                   = "Inbound"
  access                      = "Allow"
  protocol                    = "Tcp"
  source_port_range           = "*"
  destination_port_range      = "5432"
  source_address_prefix       = "*"
  destination_address_prefix  = "*"
  resource_group_name         = azurerm_resource_group.rg.name
  network_security_group_name = azurerm_network_security_group.NSG.name
}
# Create network interface
resource "azurerm_network_interface" "nic1" {
  name                = "NIC1"
  location            = azurerm_resource_group.rg.location
  resource_group_name = azurerm_resource_group.rg.name

  ip_configuration {
    name                          = "NicConf1"
    subnet_id                     = azurerm_subnet.Subnet.id
    private_ip_address_allocation = "Dynamic"
    public_ip_address_id          = azurerm_public_ip.Pub_IP.id
  }

  # ip_configuration {
  #   name                          = "NicConf2"
  #   subnet_id                     = azurerm_subnet.Subnet2.id
  #   private_ip_address_allocation = "Dynamic"
  #   public_ip_address_id          = azurerm_public_ip.Pub_IP.id
  # }
}

# Connect the security group to the network interface
resource "azurerm_network_interface_security_group_association" "NSG_Connect" {
  network_interface_id      = azurerm_network_interface.nic1.id
  network_security_group_id = azurerm_network_security_group.NSG.id
}

# Create virtual machine
resource "azurerm_virtual_machine" "vm_app" {
  count                 = 3
  name                  = "app_vm${count.index}"
  location              = azurerm_resource_group.rg.location
  resource_group_name   = azurerm_resource_group.rg.name
  network_interface_ids = [azurerm_network_interface.nic1.id, count.index]
  vm_size               = "Standard_B2s"

  # Uncomment this line to delete the OS disk automatically when deleting the VM
  # delete_os_disk_on_termination = true

  # Uncomment this line to delete the data disks automatically when deleting the VM
  # delete_data_disks_on_termination = true

  storage_image_reference {
    publisher = "Canonical"
    offer     = "0001-com-ubuntu-server-focal"
    sku       = "20_04-lts-gen2"
    version   = "latest"
  }

  storage_os_disk {
    name              = "disk${count.index}"
    caching           = "ReadWrite"
    create_option     = "FromImage"
    managed_disk_type = "Standard_LRS"
  }
}
# Import image from Image gallery
resource "azurerm_resource_group" "Image_App" {
  name     = "Weight_Tracker_application"
  location = "EastUS"
}

# Create a Load Balancer
resource "azurerm_lb" "LB" {
  name                = "loadBalancer"
  location            = azurerm_resource_group.rg.location
  resource_group_name = azurerm_resource_group.rg.name

  frontend_ip_configuration {
    name                 = "PublicIPAddress"
    public_ip_address_id = azurerm_public_ip.Pub_IP.id
  }
}

# Create a Postgres Flexible Server
resource "azurerm_postgresql_flexible_server" "PS_SQL" {
  name                   = "sqlflexserver"
  resource_group_name    = azurerm_resource_group.rg.name
  location               = azurerm_resource_group.rg.location
  version                = "12"
  administrator_login    = "postgres"
  administrator_password = "p#ssw0rd42"
  storage_mb             = 32768
  sku_name               = "GP_Standard_D4s_v3"
}

resource "azurerm_postgresql_flexible_server_database" "PS_FLEX" {
  name      = "db-server"
  server_id = azurerm_postgresql_flexible_server.PS_SQL.id
  collation = "en_US.utf8"
  charset   = "utf8"
}
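A minimal sketch of the pattern being asked about, adding one more subnet plus a second, database-specific NSG and wiring them together (the names and address ranges below are placeholders, not from the original code):

resource "azurerm_subnet" "Subnet3" {
  name                 = "subnet2"
  resource_group_name  = azurerm_resource_group.rg.name
  virtual_network_name = azurerm_virtual_network.Vnet.name
  address_prefixes     = ["10.0.3.0/24"]
}

# Second NSG, dedicated to the database tier
resource "azurerm_network_security_group" "NSG_db" {
  name                = "db_nsg"
  location            = azurerm_resource_group.rg.location
  resource_group_name = azurerm_resource_group.rg.name
}

# Each subnet gets its own association resource
resource "azurerm_subnet_network_security_group_association" "db" {
  subnet_id                 = azurerm_subnet.Subnet3.id
  network_security_group_id = azurerm_network_security_group.NSG_db.id
}

The existing db_nsg_rule could then point at azurerm_network_security_group.NSG_db.name instead of NSG, so the app and database rules live in separate security groups.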

Assign memory resources of Pods from Terraform

I have a K8S cluster on GCP where I have to run Data Science workloads.
Some of them are in status "Evicted" because
The node was low on resource: memory. Container base was using 5417924Ki, which exceeds its request of 0.
I manage my architecture with Terraform and know how to manage cluster auto-scaling, but I have no idea, even after reading the docs, how to manage this at the Pod level.
resource "google_container_cluster" "k8s_cluster" {
name = "my-cluster-name
description = ""
location = var.default_region
network = var.network
subnetwork = var.subnetwork
initial_node_count = 1
remove_default_node_pool = true
ip_allocation_policy {
# VPC-native cluster using alias IP addresses
cluster_secondary_range_name = "gke-pods"
services_secondary_range_name = "gke-services"
}
maintenance_policy {
daily_maintenance_window {
start_time = "03:00"
}
}
master_authorized_networks_config {
cidr_blocks {
display_name = var.airflow.display_name
cidr_block = var.airflow.cidr_block
}
cidr_blocks {
display_name = var.gitlab.display_name
cidr_block = var.gitlab.cidr_block
}
}
network_policy {
enabled = false
}
private_cluster_config {
enable_private_endpoint = true
enable_private_nodes = true
master_ipv4_cidr_block = var.vpc_range_k8s_master
}
resource_labels = {
zone = var.zone
role = var.role
env = var.environment
}
# Disable basic auth and client certificate
master_auth {
username = ""
password = ""
client_certificate_config {
issue_client_certificate = false
}
}
cluster_autoscaling {
enabled = true
resource_limits {
resource_type = "cpu"
minimum = 1
maximum = 4
}
resource_limits {
resource_type = "memory"
minimum = 1
maximum = 2
}
}
}
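Requests and limits are set on the workload objects themselves, not on the cluster resource. If the Data Science pods are also managed through Terraform with the kubernetes provider, a minimal sketch (kubernetes provider v2 syntax; the pod name, image, and sizes are placeholders):

resource "kubernetes_pod" "ds_job" {
  metadata {
    name = "ds-job" # hypothetical name
  }

  spec {
    container {
      name  = "base"
      image = "python:3.10" # placeholder image

      resources {
        # The eviction message shows the container using over 5Gi against a
        # request of 0, so request roughly what the workload really needs.
        requests = {
          cpu    = "1"
          memory = "6Gi"
        }
        limits = {
          cpu    = "2"
          memory = "8Gi"
        }
      }
    }
  }
}

If the pods are created outside Terraform (for example by Airflow or plain manifests), the same resources.requests / resources.limits fields belong in that pod spec instead.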