Skip to content

v2.0.1

Imported from Confluence

Content may be outdated. Verify before following any procedures. View original | Last updated: January 2024

While updating gke-core directly, I found a bug which removes max_count and min_count from node pools. I had to create a custom module from the official GKE module v29.0.0. This is what the code looked like in v29.0.0:

  # Original autoscaling block from the official GKE module v29.0.0.
  # BUG (described above): because the node-pool variable declares
  # total_min_count / total_max_count as optional(number), those keys are
  # always present on the object (with a null value when unset), so
  # contains(keys(...)) is always true and min_node_count / max_node_count
  # are forced to null even when min_count / max_count ARE set.
  dynamic "autoscaling" {
    for_each = lookup(each.value, "autoscaling", true) ? [each.value] : []
    content {
      min_node_count       = contains(keys(autoscaling.value), "total_min_count") ? null : lookup(autoscaling.value, "min_count", 1)
      max_node_count       = contains(keys(autoscaling.value), "total_max_count") ? null : lookup(autoscaling.value, "max_count", 100)
      location_policy      = lookup(autoscaling.value, "location_policy", null)
      total_min_node_count = lookup(autoscaling.value, "total_min_count", null)
      total_max_node_count = lookup(autoscaling.value, "total_max_count", null)
    }
  }

This doesn’t work with our deployment, since we define the variable for node pool parameters like this:

# Per-node-pool parameters, keyed by node pool name.
variable "node_pools_params" {
  description = "Map of node pool name to its configuration (machine shape, autoscaling bounds, label/taint overrides)."

  type = map(object({
    config = object({
      max_pods_per_node = number
      disk_size_gb      = number
      machine_type      = string
      disk_type         = string
      spot              = bool
      od                = optional(bool, false) # presumably "on-demand" — TODO confirm meaning with module callers
      auto_upgrade      = bool
      auto_repair       = bool
      # NOTE: optional(number) with no default yields null when unset, so any
      # consumer of these attributes must null-check them (this is exactly the
      # bug hit in the upstream module's autoscaling block).
      min_count       = optional(number)
      max_count       = optional(number)
      total_min_count = optional(number)
      total_max_count = optional(number)
      pod_range       = optional(string, "")
      location_policy = optional(string)
      max_surge       = optional(number)
    })
    k8s_labels_override = optional(map(string), {})
    vm_labels_override  = optional(map(string), {})
    taints_override     = optional(list(map(string)), [])
  }))
}

The issue is that `min_count = optional(number)` returns null when not specified, so the keys are always present on the object and the code above never uses min_count/max_count. To fix this, I made the following change in v29.0.0-m1:

  # Fixed autoscaling block (custom module v29.0.0-m1).
  # Replaces contains(keys(...)) with an explicit null check, so
  # min_node_count / max_node_count are only suppressed when
  # total_min_count / total_max_count are actually set to non-null values,
  # not merely declared as optional attributes.
  dynamic "autoscaling" {
    for_each = lookup(each.value, "autoscaling", true) ? [each.value] : []
    content {
      # Fall back to min_count/max_count (defaults 1/100) unless the
      # corresponding total_*_count is explicitly non-null.
      min_node_count       = lookup(autoscaling.value, "total_min_count", null) != null ? null : lookup(autoscaling.value, "min_count", 1)
      max_node_count       = lookup(autoscaling.value, "total_max_count", null) != null ? null : lookup(autoscaling.value, "max_count", 100)
      location_policy      = lookup(autoscaling.value, "location_policy", null)
      total_min_node_count = lookup(autoscaling.value, "total_min_count", null)
      total_max_node_count = lookup(autoscaling.value, "total_max_count", null)
    }
  }

As a bonus, in the modified module I added `shielded_instance_config` inside:

  # Cluster-level autoscaling (node auto-provisioning). The
  # shielded_instance_config block was added in the custom module so the
  # provider stops reporting a perpetual diff on every plan (see note below).
  cluster_autoscaling {
    enabled = var.cluster_autoscaling.enabled

    # auto_provisioning_defaults only makes sense when autoscaling is enabled.
    dynamic "auto_provisioning_defaults" {
      for_each = var.cluster_autoscaling.enabled ? [1] : []

      content {
        service_account = local.service_account
        oauth_scopes    = local.node_pools_oauth_scopes["all"]

        management {
          auto_repair  = lookup(var.cluster_autoscaling, "auto_repair", true)
          auto_upgrade = lookup(var.cluster_autoscaling, "auto_upgrade", true)
        }

        # Explicitly pin shielded VM settings (these match what GKE applies
        # server-side, which is why omitting them produced a diff — verify
        # against current provider defaults before changing).
        shielded_instance_config {
          enable_integrity_monitoring = true
          enable_secure_boot          = false
        }

        disk_size = lookup(var.cluster_autoscaling, "disk_size", 100)
        disk_type = lookup(var.cluster_autoscaling, "disk_type", "pd-standard")
      }
    }
  }

This will finally give us clean plan output when there are no changes, and it removes this annoying message:
(screenshot attachment "image (17).png" — not imported from Confluence; see the original page)

10:18
It also modifies the maintenance window to Mon–Fri at 10am.
10:18
Tested in dev; will apply to prod tomorrow.