
Cycle error when EKS managed addons update #3499

@marcofranssen

Description

When there are updates available for module.cluster.module.eks.aws_eks_addon.this, the following cycle error occurs.

$ terraform plan
│ Error: Cycle: module.cluster.module.eks.aws_eks_addon.this["coredns"], module.cluster.module.eks.aws_eks_addon.this["eks-pod-identity-agent"], module.cluster.module.eks.aws_eks_addon.this["kube-proxy"], module.cluster.module.eks.aws_eks_addon.this["aws-ebs-csi-driver"], module.cluster.module.eks.aws_eks_addon.this["vpc-cni"], module.cluster.module.eks.module.self_managed_node_group.module.user_data (close), module.cluster.module.eks.module.self_managed_node_group (close), module.cluster.module.eks.module.self_managed_node_group.module.user_data.local.nodeadm_cloudinit (expand), module.cluster.module.eks.module.self_managed_node_group.module.user_data.data.cloudinit_config.al2023_eks_managed_node_group (expand), module.cluster.module.eks.module.self_managed_node_group.var.cluster_endpoint (expand), module.cluster.module.eks.module.self_managed_node_group.module.user_data.var.cluster_endpoint (expand), module.cluster.module.eks.module.self_managed_node_group.module.user_data.local.cluster_dns_ips (expand), module.cluster.module.eks.module.self_managed_node_group.var.cluster_service_cidr (expand), module.cluster.module.eks.module.self_managed_node_group.module.user_data.var.cluster_service_cidr (expand), module.cluster.module.eks.module.self_managed_node_group.var.cluster_auth_base64 (expand), module.cluster.module.eks.module.self_managed_node_group.module.user_data.var.cluster_auth_base64 (expand), module.cluster.module.eks.time_sleep.this[0], module.cluster.module.eks.module.self_managed_node_group.var.cluster_name (expand), module.cluster.module.eks.module.self_managed_node_group.module.user_data.var.cluster_name (expand), module.cluster.module.eks.module.self_managed_node_group.module.user_data.local.user_data (expand), module.cluster.module.eks.module.self_managed_node_group.module.user_data.local.user_data_type_to_rendered (expand), module.cluster.module.eks.module.self_managed_node_group.module.user_data.output.user_data (expand), module.cluster.module.eks.module.self_managed_node_group.output.user_data (expand), module.cluster.module.eks.aws_eks_addon.this["snapshot-controller"], module.cluster.module.eks.time_sleep.this[0] (destroy deposed e8061dd8), module.cluster.module.eks.aws_iam_role_policy_attachment.this["AmazonEKSVPCResourceController"] (destroy)
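As a generic diagnostic step (not a fix), the plan graph can be rendered with the cycle highlighted, assuming Graphviz is installed locally; this may help visualize which edges participate:

$ terraform graph -draw-cycles -type=plan | dot -Tsvg > cycle.svg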


  • ✋ I have searched the open/closed issues and my issue is not listed.

Versions

  • Module version [Required]: >= 21.x
  • Terraform version: 1.5.7
  • Provider version(s): 6.10.0

Reproduction Code [Required]

locals {
  cluster_addons = {
    aws-ebs-csi-driver = {
      addon_version            = data.aws_eks_addon_version.aws_ebs_csi_driver_latest.version
      service_account_role_arn = module.aws_ebs_csi_driver_irsa.arn
    }
    vpc-cni = {
      addon_version            = data.aws_eks_addon_version.vpc_cni_latest.version
      service_account_role_arn = module.vpc_cni_irsa.arn
      configuration_values = jsonencode({
        env : {
          ENABLE_PREFIX_DELEGATION : "true"
        },
        "enableNetworkPolicy" : var.enable_network_policy ? "true" : "false"
      })
    }
  }
}

data "aws_eks_addon_version" "vpc_cni_latest" {
  addon_name         = "vpc-cni"
  kubernetes_version = var.kubernetes_version
  most_recent        = true
}

data "aws_eks_addon_version" "aws_ebs_csi_driver_latest" {
  addon_name         = "aws-ebs-csi-driver"
  kubernetes_version = var.kubernetes_version
  most_recent        = true
}

module "vpc_cni_irsa" {
  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts"
  version = "6.2.0"

  name                  = "${var.resource_prefix}-eks-vpc-cni"
  policy_name           = "${var.resource_prefix}-eks-vpc-cni"
  use_name_prefix       = false
  attach_vpc_cni_policy = true
  vpc_cni_enable_ipv4   = true

  oidc_providers = {
    main = {
      provider_arn               = module.eks.oidc_provider_arn
      namespace_service_accounts = ["kube-system:aws-node"]
    }
  }
}

module "aws_ebs_csi_driver_irsa" {
  source  = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts"
  version = "6.2.0"

  name                  = "${var.resource_prefix}-eks-aws-ebs-csi-driver"
  policy_name           = "${var.resource_prefix}-eks-aws-ebs-csi-driver"
  use_name_prefix       = false
  attach_ebs_csi_policy = true
  ebs_csi_kms_cmk_arns  = [aws_kms_key.ebs.arn]

  oidc_providers = {
    main = {
      provider_arn               = module.eks.oidc_provider_arn
      namespace_service_accounts = ["kube-system:ebs-csi-controller-sa"]
    }
  }
}

module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "21.1.5"

  name               = local.cluster_name
  kubernetes_version = var.kubernetes_version

  create_security_group      = false
  create_node_security_group = false

  enable_cluster_creator_admin_permissions = true

  endpoint_public_access       = var.enable_public_cluster_endpoint
  endpoint_private_access      = var.enable_private_cluster_endpoint
  endpoint_public_access_cidrs = var.cluster_endpoint_public_access_cidr_blocks

  enabled_log_types                      = var.enabled_log_types
  cloudwatch_log_group_retention_in_days = var.cloudwatch_log_group_retention_in_days

  create_kms_key         = var.eks_managed_kms_key_enabled
  kms_key_administrators = var.kms_admin_arns

  addons = local.cluster_addons

  encryption_config = {
    provider_key_arn = aws_kms_key.eks.arn
    resources        = ["secrets"]
  }

  vpc_id     = var.vpc.vpc.vpc_id
  subnet_ids = var.vpc.vpc.private_subnets

  # This is required to tag the cluster security group for use by Karpenter
  cluster_tags = {
    "karpenter.sh/discovery" = local.cluster_name,
  }

  fargate_profiles = local.fargate_profiles

  self_managed_node_groups = var.self_managed_node_groups

  authentication_mode = "API"
  access_entries      = var.access_entries
}

Steps to reproduce the behavior:

Downgrade one addon (e.g. pin it to an older addon_version, as sketched below) so that Terraform tries to upgrade it again on the next plan.
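A minimal sketch of that step, assuming the addons are otherwise managed as in the reproduction code above: temporarily replace the most_recent data source lookup for one addon with an explicit older release, apply, then switch back so the next plan proposes the upgrade. The version string is a placeholder, not a tested value.

locals {
  cluster_addons = {
    aws-ebs-csi-driver = {
      # Placeholder older release; anything below the version returned by
      # data.aws_eks_addon_version.aws_ebs_csi_driver_latest will do.
      addon_version            = "v1.30.0-eksbuild.1"
      service_account_role_arn = module.aws_ebs_csi_driver_irsa.arn
    }
  }
}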

Expected behavior

Terraform plan and apply should work even if there are addon updates available.

Actual behavior

A cycle error occurs during terraform plan when an addon update is available.

Terminal Output Screenshot(s)


│ Error: Cycle: module.cluster.module.eks.aws_eks_addon.this["coredns"], module.cluster.module.eks.aws_eks_addon.this["eks-pod-identity-agent"], module.cluster.module.eks.aws_eks_addon.this["kube-proxy"], module.cluster.module.eks.aws_eks_addon.this["aws-ebs-csi-driver"], module.cluster.module.eks.aws_eks_addon.this["vpc-cni"], module.cluster.module.eks.module.self_managed_node_group.module.user_data (close), module.cluster.module.eks.module.self_managed_node_group (close), module.cluster.module.eks.module.self_managed_node_group.module.user_data.local.nodeadm_cloudinit (expand), module.cluster.module.eks.module.self_managed_node_group.module.user_data.data.cloudinit_config.al2023_eks_managed_node_group (expand), module.cluster.module.eks.module.self_managed_node_group.var.cluster_endpoint (expand), module.cluster.module.eks.module.self_managed_node_group.module.user_data.var.cluster_endpoint (expand), module.cluster.module.eks.module.self_managed_node_group.module.user_data.local.cluster_dns_ips (expand), module.cluster.module.eks.module.self_managed_node_group.var.cluster_service_cidr (expand), module.cluster.module.eks.module.self_managed_node_group.module.user_data.var.cluster_service_cidr (expand), module.cluster.module.eks.module.self_managed_node_group.var.cluster_auth_base64 (expand), module.cluster.module.eks.module.self_managed_node_group.module.user_data.var.cluster_auth_base64 (expand), module.cluster.module.eks.time_sleep.this[0], module.cluster.module.eks.module.self_managed_node_group.var.cluster_name (expand), module.cluster.module.eks.module.self_managed_node_group.module.user_data.var.cluster_name (expand), module.cluster.module.eks.module.self_managed_node_group.module.user_data.local.user_data (expand), module.cluster.module.eks.module.self_managed_node_group.module.user_data.local.user_data_type_to_rendered (expand), module.cluster.module.eks.module.self_managed_node_group.module.user_data.output.user_data (expand), module.cluster.module.eks.module.self_managed_node_group.output.user_data (expand), module.cluster.module.eks.aws_eks_addon.this["snapshot-controller"], module.cluster.module.eks.time_sleep.this[0] (destroy deposed e8061dd8), module.cluster.module.eks.aws_iam_role_policy_attachment.this["AmazonEKSVPCResourceController"] (destroy)

Additional context

When I revert my codebase to the state prior to this module version and AWS provider bump, I can apply the addon changes from there. Switching back to the latest module with AWS provider 6.10 then succeeds. But as soon as a new EKS managed addon version becomes available, the plan fails again with the same cycle.
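For reference, the workaround amounts to temporarily pinning back to the pre-bump versions, applying the pending addon upgrades, and then moving forward again. A rough sketch is below; the version constraints are placeholders for whatever was in use before the bump, and module v20 inputs use the older cluster_-prefixed names (e.g. cluster_addons instead of addons).

terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "< 6.0.0" # placeholder: pre-bump provider constraint
    }
  }
}

module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 20.0" # placeholder: pre-bump module version
  # ... same inputs as above, renamed to their pre-21 equivalents where needed
}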
