diff --git a/CHANGELOG.md b/CHANGELOG.md index b167b40..e40caa4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,13 +7,15 @@ project adheres to [Semantic Versioning](http://semver.org/). ## Next release -## [[v7.?.?](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v7.0.0...HEAD)] - 2019-??-??] +## [[v8.?.?](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v7.0.0...HEAD)] - 2019-??-?? - Test against minimum versions specified in `versions.tf` (by @dpiddockcmp) +- Added flag `create_eks` to conditionally create resources (by @syst0m / @tbeijen) - Support for AWS EKS Managed Node Groups. (by @wmorgan6796) - Updated instance_profile_names and instance_profile_arns outputs to also consider launch template as well as asg (by @ankitwal) - Added a if check on `aws-auth` configmap when map_roles is empty. - **Breaking:** Configure the aws-auth configmap using the terraform kubernetes providers. Read the [docs](docs/upgrading-to-aws-auth-kubernetes-provider.md) for more info (by @sdehaes) +- Removed no longer used variable `write_aws_auth_config` (by @tbeijen) - Updated application of `aws-auth` configmap to create `kube_config.yaml` and `aws_auth_configmap.yaml` in sequence (and not parallel) to `kubectl apply` (by @knittingdev) - Exit with error code when `aws-auth` configmap is unable to be updated (by @knittingdev) - Fix deprecated interpolation-only expression (by @angelabad) diff --git a/README.md b/README.md index 76a385f..20d2501 100644 --- a/README.md +++ b/README.md @@ -52,6 +52,40 @@ module "my-cluster" { ] } ``` +## Conditional creation + +Sometimes you need to have a way to create EKS resources conditionally, but Terraform does not allow using `count` inside a `module` block, so the solution is to specify the argument `create_eks`. + +Using this feature _and_ having `manage_aws_auth=true` (the default) requires setting up the kubernetes provider in a way that allows the data sources to not exist.
+ +```hcl +data "aws_eks_cluster" "cluster" { + count = var.create_eks ? 1 : 0 + name = module.eks.cluster_id +} + +data "aws_eks_cluster_auth" "cluster" { + count = var.create_eks ? 1 : 0 + name = module.eks.cluster_id +} + +# In case of not creating the cluster, this will be an incompletely configured, unused provider, which poses no problem. +provider "kubernetes" { + host = element(concat(data.aws_eks_cluster.cluster[*].endpoint, list("")), 0) + cluster_ca_certificate = base64decode(element(concat(data.aws_eks_cluster.cluster[*].certificate_authority.0.data, list("")), 0)) + token = element(concat(data.aws_eks_cluster_auth.cluster[*].token, list("")), 0) + load_config_file = false + version = "~> 1.10" +} + +# This cluster will not be created +module "eks" { + source = "terraform-aws-modules/eks/aws" + + create_eks = false + # ... omitted +} +``` ## Other documentation @@ -131,6 +165,7 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a | cluster\_security\_group\_id | If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers | string | `""` | no | | cluster\_version | Kubernetes version to use for the EKS cluster. | string | `"1.14"` | no | | config\_output\_path | Where to save the Kubectl config file (if `write_kubeconfig = true`). Assumed to be a directory if the value ends with a forward slash `/`. | string | `"./"` | no | +| create\_eks | Controls if EKS resources should be created (it affects almost all resources) | bool | `"true"` | no | | iam\_path | If provided, all IAM roles will be created on this path. | string | `"/"` | no | | kubeconfig\_aws\_authenticator\_additional\_args | Any additional arguments to pass to the authenticator such as the role to assume. e.g. ["-r", "MyEksRole"]. | list(string) | `[]` | no | | kubeconfig\_aws\_authenticator\_command | Command to use to fetch AWS EKS credentials. 
| string | `"aws-iam-authenticator"` | no | @@ -164,7 +199,6 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a | workers\_additional\_policies | Additional policies to be added to workers | list(string) | `[]` | no | | workers\_group\_defaults | Override default values for target groups. See workers_group_defaults_defaults in local.tf for valid keys. | any | `{}` | no | | workers\_role\_name | User defined workers role name. | string | `""` | no | -| write\_aws\_auth\_config | Whether to write the aws-auth configmap file. | bool | `"true"` | no | | write\_kubeconfig | Whether to write a Kubectl config file containing the cluster configuration. Saved to `config_output_path`. | bool | `"true"` | no | ## Outputs diff --git a/aws_auth.tf b/aws_auth.tf index 7dd8089..aabf6fd 100644 --- a/aws_auth.tf +++ b/aws_auth.tf @@ -2,7 +2,7 @@ data "aws_caller_identity" "current" { } data "template_file" "launch_template_worker_role_arns" { - count = local.worker_group_launch_template_count + count = var.create_eks ? local.worker_group_launch_template_count : 0 template = file("${path.module}/templates/worker-role.tpl") vars = { @@ -22,7 +22,7 @@ data "template_file" "launch_template_worker_role_arns" { } data "template_file" "worker_role_arns" { - count = local.worker_group_count + count = var.create_eks ? local.worker_group_count : 0 template = file("${path.module}/templates/worker-role.tpl") vars = { @@ -43,7 +43,7 @@ data "template_file" "worker_role_arns" { } resource "kubernetes_config_map" "aws_auth" { - count = var.manage_aws_auth ? 1 : 0 + count = var.create_eks && var.manage_aws_auth ? 1 : 0 metadata { name = "aws-auth" diff --git a/cluster.tf b/cluster.tf index 7b8a1c9..b7ced5c 100644 --- a/cluster.tf +++ b/cluster.tf @@ -1,5 +1,5 @@ resource "aws_cloudwatch_log_group" "this" { - count = length(var.cluster_enabled_log_types) > 0 ? 1 : 0 + count = length(var.cluster_enabled_log_types) > 0 && var.create_eks ? 
1 : 0 name = "/aws/eks/${var.cluster_name}/cluster" retention_in_days = var.cluster_log_retention_in_days kms_key_id = var.cluster_log_kms_key_id @@ -7,6 +7,7 @@ resource "aws_cloudwatch_log_group" "this" { } resource "aws_eks_cluster" "this" { + count = var.create_eks ? 1 : 0 name = var.cluster_name enabled_cluster_log_types = var.cluster_enabled_log_types role_arn = local.cluster_iam_role_arn @@ -33,7 +34,7 @@ resource "aws_eks_cluster" "this" { } resource "aws_security_group" "cluster" { - count = var.cluster_create_security_group ? 1 : 0 + count = var.cluster_create_security_group && var.create_eks ? 1 : 0 name_prefix = var.cluster_name description = "EKS cluster security group." vpc_id = var.vpc_id @@ -46,7 +47,7 @@ resource "aws_security_group" "cluster" { } resource "aws_security_group_rule" "cluster_egress_internet" { - count = var.cluster_create_security_group ? 1 : 0 + count = var.cluster_create_security_group && var.create_eks ? 1 : 0 description = "Allow cluster egress access to the Internet." protocol = "-1" security_group_id = local.cluster_security_group_id @@ -57,7 +58,7 @@ resource "aws_security_group_rule" "cluster_egress_internet" { } resource "aws_security_group_rule" "cluster_https_worker_ingress" { - count = var.cluster_create_security_group ? 1 : 0 + count = var.cluster_create_security_group && var.create_eks ? 1 : 0 description = "Allow pods to communicate with the EKS cluster API." protocol = "tcp" security_group_id = local.cluster_security_group_id @@ -68,7 +69,7 @@ resource "aws_security_group_rule" "cluster_https_worker_ingress" { } resource "aws_iam_role" "cluster" { - count = var.manage_cluster_iam_resources ? 1 : 0 + count = var.manage_cluster_iam_resources && var.create_eks ? 
1 : 0 name_prefix = var.cluster_name assume_role_policy = data.aws_iam_policy_document.cluster_assume_role_policy.json permissions_boundary = var.permissions_boundary @@ -78,13 +79,13 @@ resource "aws_iam_role" "cluster" { } resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" { - count = var.manage_cluster_iam_resources ? 1 : 0 + count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0 policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy" role = local.cluster_iam_role_name } resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSServicePolicy" { - count = var.manage_cluster_iam_resources ? 1 : 0 + count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0 policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy" role = local.cluster_iam_role_name } diff --git a/data.tf b/data.tf index 20fd356..f43ac9e 100644 --- a/data.tf +++ b/data.tf @@ -66,19 +66,20 @@ data "aws_iam_policy_document" "cluster_assume_role_policy" { } data "template_file" "kubeconfig" { + count = var.create_eks ? 1 : 0 template = file("${path.module}/templates/kubeconfig.tpl") vars = { kubeconfig_name = local.kubeconfig_name - endpoint = aws_eks_cluster.this.endpoint - cluster_auth_base64 = aws_eks_cluster.this.certificate_authority[0].data + endpoint = aws_eks_cluster.this[0].endpoint + cluster_auth_base64 = aws_eks_cluster.this[0].certificate_authority[0].data aws_authenticator_command = var.kubeconfig_aws_authenticator_command aws_authenticator_command_args = length(var.kubeconfig_aws_authenticator_command_args) > 0 ? " - ${join( "\n - ", var.kubeconfig_aws_authenticator_command_args, )}" : " - ${join( "\n - ", - formatlist("\"%s\"", ["token", "-i", aws_eks_cluster.this.name]), + formatlist("\"%s\"", ["token", "-i", aws_eks_cluster.this[0].name]), )}" aws_authenticator_additional_args = length(var.kubeconfig_aws_authenticator_additional_args) > 0 ? 
" - ${join( "\n - ", @@ -107,7 +108,7 @@ EOF } data "template_file" "userdata" { - count = local.worker_group_count + count = var.create_eks ? local.worker_group_count : 0 template = lookup( var.worker_groups[count.index], "userdata_template_file", @@ -120,9 +121,9 @@ data "template_file" "userdata" { vars = merge({ platform = lookup(var.worker_groups[count.index], "platform", local.workers_group_defaults["platform"]) - cluster_name = aws_eks_cluster.this.name - endpoint = aws_eks_cluster.this.endpoint - cluster_auth_base64 = aws_eks_cluster.this.certificate_authority[0].data + cluster_name = aws_eks_cluster.this[0].name + endpoint = aws_eks_cluster.this[0].endpoint + cluster_auth_base64 = aws_eks_cluster.this[0].certificate_authority[0].data pre_userdata = lookup( var.worker_groups[count.index], "pre_userdata", @@ -153,7 +154,7 @@ data "template_file" "userdata" { } data "template_file" "launch_template_userdata" { - count = local.worker_group_launch_template_count + count = var.create_eks ? local.worker_group_launch_template_count : 0 template = lookup( var.worker_groups_launch_template[count.index], "userdata_template_file", @@ -166,9 +167,9 @@ data "template_file" "launch_template_userdata" { vars = merge({ platform = lookup(var.worker_groups_launch_template[count.index], "platform", local.workers_group_defaults["platform"]) - cluster_name = aws_eks_cluster.this.name - endpoint = aws_eks_cluster.this.endpoint - cluster_auth_base64 = aws_eks_cluster.this.certificate_authority[0].data + cluster_name = aws_eks_cluster.this[0].name + endpoint = aws_eks_cluster.this[0].endpoint + cluster_auth_base64 = aws_eks_cluster.this[0].certificate_authority[0].data pre_userdata = lookup( var.worker_groups_launch_template[count.index], "pre_userdata", diff --git a/kubectl.tf b/kubectl.tf index c7b86d4..56aba1f 100644 --- a/kubectl.tf +++ b/kubectl.tf @@ -1,6 +1,5 @@ resource "local_file" "kubeconfig" { - count = var.write_kubeconfig ? 
1 : 0 - content = data.template_file.kubeconfig.rendered + count = var.write_kubeconfig && var.create_eks ? 1 : 0 + content = data.template_file.kubeconfig[0].rendered filename = substr(var.config_output_path, -1, 1) == "/" ? "${var.config_output_path}kubeconfig_${var.cluster_name}" : var.config_output_path } - diff --git a/node_groups.tf b/node_groups.tf index c47436f..c604da6 100644 --- a/node_groups.tf +++ b/node_groups.tf @@ -1,6 +1,6 @@ resource "aws_iam_role" "node_groups" { - count = local.worker_group_managed_node_group_count > 0 ? 1 : 0 - name = "${var.workers_role_name != "" ? var.workers_role_name : aws_eks_cluster.this.name}-managed-node-groups" + count = var.create_eks && local.worker_group_managed_node_group_count > 0 ? 1 : 0 + name = "${var.workers_role_name != "" ? var.workers_role_name : aws_eks_cluster.this[0].name}-managed-node-groups" assume_role_policy = data.aws_iam_policy_document.workers_assume_role_policy.json permissions_boundary = var.permissions_boundary path = var.iam_path @@ -9,46 +9,46 @@ resource "aws_iam_role" "node_groups" { } resource "aws_iam_role_policy_attachment" "node_groups_AmazonEKSWorkerNodePolicy" { - count = local.worker_group_managed_node_group_count > 0 ? 1 : 0 + count = var.create_eks && local.worker_group_managed_node_group_count > 0 ? 1 : 0 policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy" role = aws_iam_role.node_groups[0].name } resource "aws_iam_role_policy_attachment" "node_groups_AmazonEKS_CNI_Policy" { - count = local.worker_group_managed_node_group_count > 0 ? 1 : 0 + count = var.create_eks && local.worker_group_managed_node_group_count > 0 ? 1 : 0 policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" role = aws_iam_role.node_groups[0].name } resource "aws_iam_role_policy_attachment" "node_groups_AmazonEC2ContainerRegistryReadOnly" { - count = local.worker_group_managed_node_group_count > 0 ? 1 : 0 + count = var.create_eks && local.worker_group_managed_node_group_count > 0 ? 
1 : 0 policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" role = aws_iam_role.node_groups[0].name } resource "aws_iam_role_policy_attachment" "node_groups_additional_policies" { - for_each = toset(var.workers_additional_policies) + for_each = var.create_eks && local.worker_group_managed_node_group_count > 0 ? toset(var.workers_additional_policies) : [] role = aws_iam_role.node_groups[0].name policy_arn = each.key } resource "aws_iam_role_policy_attachment" "node_groups_autoscaling" { - count = var.manage_worker_autoscaling_policy && var.attach_worker_autoscaling_policy && local.worker_group_managed_node_group_count > 0 ? 1 : 0 + count = var.create_eks && var.manage_worker_autoscaling_policy && var.attach_worker_autoscaling_policy && local.worker_group_managed_node_group_count > 0 ? 1 : 0 policy_arn = aws_iam_policy.node_groups_autoscaling[0].arn role = aws_iam_role.node_groups[0].name } resource "aws_iam_policy" "node_groups_autoscaling" { - count = var.manage_worker_autoscaling_policy && local.worker_group_managed_node_group_count > 0 ? 1 : 0 - name_prefix = "eks-worker-autoscaling-${aws_eks_cluster.this.name}" - description = "EKS worker node autoscaling policy for cluster ${aws_eks_cluster.this.name}" - policy = data.aws_iam_policy_document.worker_autoscaling.json + count = var.create_eks && var.manage_worker_autoscaling_policy && local.worker_group_managed_node_group_count > 0 ? 1 : 0 + name_prefix = "eks-worker-autoscaling-${aws_eks_cluster.this[0].name}" + description = "EKS worker node autoscaling policy for cluster ${aws_eks_cluster.this[0].name}" + policy = data.aws_iam_policy_document.worker_autoscaling[0].json path = var.iam_path } resource "random_pet" "node_groups" { - for_each = local.node_groups + for_each = var.create_eks ? 
local.node_groups : {} separator = "-" length = 2 @@ -67,7 +67,7 @@ resource "random_pet" "node_groups" { } resource "aws_eks_node_group" "workers" { - for_each = local.node_groups + for_each = var.create_eks ? local.node_groups : {} node_group_name = join("-", [var.cluster_name, each.key, random_pet.node_groups[each.key].id]) @@ -93,7 +93,7 @@ resource "aws_eks_node_group" "workers" { source_security_group_ids = lookup(each.value, "key_name", "") != "" ? lookup(each.value, "source_security_group_ids", []) : null } - version = aws_eks_cluster.this.version + version = aws_eks_cluster.this[0].version tags = lookup(each.value, "node_group_additional_tags", null) diff --git a/outputs.tf b/outputs.tf index d18dc78..2478dc2 100644 --- a/outputs.tf +++ b/outputs.tf @@ -1,26 +1,26 @@ output "cluster_id" { description = "The name/id of the EKS cluster." - value = aws_eks_cluster.this.id + value = element(concat(aws_eks_cluster.this.*.id, list("")), 0) } output "cluster_arn" { description = "The Amazon Resource Name (ARN) of the cluster." - value = aws_eks_cluster.this.arn + value = element(concat(aws_eks_cluster.this.*.arn, list("")), 0) } output "cluster_certificate_authority_data" { description = "Nested attribute containing certificate-authority-data for your cluster. This is the base64 encoded certificate data required to communicate with your cluster." - value = aws_eks_cluster.this.certificate_authority[0].data + value = element(concat(aws_eks_cluster.this[*].certificate_authority[0].data, list("")), 0) } output "cluster_endpoint" { description = "The endpoint for your EKS Kubernetes API." - value = aws_eks_cluster.this.endpoint + value = element(concat(aws_eks_cluster.this.*.endpoint, list("")), 0) } output "cluster_version" { description = "The Kubernetes server version for the EKS cluster." 
- value = aws_eks_cluster.this.version + value = element(concat(aws_eks_cluster.this[*].version, list("")), 0) } output "cluster_security_group_id" { @@ -45,17 +45,17 @@ output "cluster_iam_role_arn" { output "cluster_oidc_issuer_url" { description = "The URL on the EKS cluster OIDC Issuer" - value = concat(aws_eks_cluster.this.identity.*.oidc.0.issuer, [""])[0] + value = concat(aws_eks_cluster.this[*].identity[*].oidc.0.issuer, [""])[0] } output "cloudwatch_log_group_name" { description = "Name of cloudwatch log group created" - value = aws_cloudwatch_log_group.this.*.name + value = aws_cloudwatch_log_group.this[*].name } output "kubeconfig" { description = "kubectl config file contents for this EKS cluster." - value = data.template_file.kubeconfig.rendered + value = concat(data.template_file.kubeconfig[*].rendered, [""])[0] } output "kubeconfig_filename" { diff --git a/variables.tf b/variables.tf index 498313b..366c926 100644 --- a/variables.tf +++ b/variables.tf @@ -48,12 +48,6 @@ variable "manage_aws_auth" { default = true } -variable "write_aws_auth_config" { - description = "Whether to write the aws-auth configmap file." - type = bool - default = true -} - variable "map_accounts" { description = "Additional AWS account numbers to add to the aws-auth configmap. See examples/basic/variables.tf for example format." type = list(string) @@ -294,8 +288,14 @@ variable "attach_worker_cni_policy" { default = true } +variable "create_eks" { + description = "Controls if EKS resources should be created (it affects almost all resources)" + type = bool + default = true +} + variable "node_groups" { description = "A list of maps defining node group configurations to be defined using AWS EKS Managed Node Groups. See workers_group_defaults for valid keys." 
type = any default = [] -} \ No newline at end of file +} diff --git a/workers.tf b/workers.tf index e946e78..ce0f1e2 100644 --- a/workers.tf +++ b/workers.tf @@ -1,12 +1,12 @@ # Worker Groups using Launch Configurations resource "aws_autoscaling_group" "workers" { - count = local.worker_group_count + count = var.create_eks ? local.worker_group_count : 0 name_prefix = join( "-", compact( [ - aws_eks_cluster.this.name, + aws_eks_cluster.this[0].name, lookup(var.worker_groups[count.index], "name", count.index), lookup(var.worker_groups[count.index], "asg_recreate_on_change", local.workers_group_defaults["asg_recreate_on_change"]) ? random_pet.workers[count.index].id : "" ] @@ -91,16 +91,16 @@ resource "aws_autoscaling_group" "workers" { [ { "key" = "Name" - "value" = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}-eks_asg" + "value" = "${aws_eks_cluster.this[0].name}-${lookup(var.worker_groups[count.index], "name", count.index)}-eks_asg" "propagate_at_launch" = true }, { - "key" = "kubernetes.io/cluster/${aws_eks_cluster.this.name}" + "key" = "kubernetes.io/cluster/${aws_eks_cluster.this[0].name}" "value" = "owned" "propagate_at_launch" = true }, { - "key" = "k8s.io/cluster/${aws_eks_cluster.this.name}" + "key" = "k8s.io/cluster/${aws_eks_cluster.this[0].name}" "value" = "owned" "propagate_at_launch" = true }, @@ -114,8 +114,8 @@ resource "aws_autoscaling_group" "workers" { "propagate_at_launch" = false }, { - "key" = "k8s.io/cluster-autoscaler/${aws_eks_cluster.this.name}" - "value" = aws_eks_cluster.this.name + "key" = "k8s.io/cluster-autoscaler/${aws_eks_cluster.this[0].name}" + "value" = aws_eks_cluster.this[0].name "propagate_at_launch" = false }, { @@ -143,8 +143,8 @@ resource "aws_autoscaling_group" "workers" { } resource "aws_launch_configuration" "workers" { - count = local.worker_group_count - name_prefix = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}" + count = 
var.create_eks ? local.worker_group_count : 0 + name_prefix = "${aws_eks_cluster.this[0].name}-${lookup(var.worker_groups[count.index], "name", count.index)}" associate_public_ip_address = lookup( var.worker_groups[count.index], "public_ip", @@ -232,7 +232,7 @@ resource "aws_launch_configuration" "workers" { } resource "random_pet" "workers" { - count = local.worker_group_count + count = var.create_eks ? local.worker_group_count : 0 separator = "-" length = 2 @@ -243,21 +243,21 @@ resource "random_pet" "workers" { } resource "aws_security_group" "workers" { - count = var.worker_create_security_group ? 1 : 0 - name_prefix = aws_eks_cluster.this.name + count = var.worker_create_security_group && var.create_eks ? 1 : 0 + name_prefix = aws_eks_cluster.this[0].name description = "Security group for all nodes in the cluster." vpc_id = var.vpc_id tags = merge( var.tags, { - "Name" = "${aws_eks_cluster.this.name}-eks_worker_sg" - "kubernetes.io/cluster/${aws_eks_cluster.this.name}" = "owned" + "Name" = "${aws_eks_cluster.this[0].name}-eks_worker_sg" + "kubernetes.io/cluster/${aws_eks_cluster.this[0].name}" = "owned" }, ) } resource "aws_security_group_rule" "workers_egress_internet" { - count = var.worker_create_security_group ? 1 : 0 + count = var.worker_create_security_group && var.create_eks ? 1 : 0 description = "Allow nodes all egress to the Internet." protocol = "-1" security_group_id = local.worker_security_group_id @@ -268,7 +268,7 @@ resource "aws_security_group_rule" "workers_egress_internet" { } resource "aws_security_group_rule" "workers_ingress_self" { - count = var.worker_create_security_group ? 1 : 0 + count = var.worker_create_security_group && var.create_eks ? 1 : 0 description = "Allow node to communicate with each other." 
protocol = "-1" security_group_id = local.worker_security_group_id @@ -279,7 +279,7 @@ resource "aws_security_group_rule" "workers_ingress_self" { } resource "aws_security_group_rule" "workers_ingress_cluster" { - count = var.worker_create_security_group ? 1 : 0 + count = var.worker_create_security_group && var.create_eks ? 1 : 0 description = "Allow workers pods to receive communication from the cluster control plane." protocol = "tcp" security_group_id = local.worker_security_group_id @@ -290,7 +290,7 @@ resource "aws_security_group_rule" "workers_ingress_cluster" { } resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" { - count = var.worker_create_security_group ? var.worker_sg_ingress_from_port > 10250 ? 1 : 0 : 0 + count = var.worker_create_security_group && var.create_eks ? var.worker_sg_ingress_from_port > 10250 ? 1 : 0 : 0 description = "Allow workers Kubelets to receive communication from the cluster control plane." protocol = "tcp" security_group_id = local.worker_security_group_id @@ -301,7 +301,7 @@ resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" { } resource "aws_security_group_rule" "workers_ingress_cluster_https" { - count = var.worker_create_security_group ? 1 : 0 + count = var.worker_create_security_group && var.create_eks ? 1 : 0 description = "Allow pods running extension API servers on port 443 to receive communication from cluster control plane." protocol = "tcp" security_group_id = local.worker_security_group_id @@ -312,8 +312,8 @@ resource "aws_security_group_rule" "workers_ingress_cluster_https" { } resource "aws_iam_role" "workers" { - count = var.manage_worker_iam_resources ? 1 : 0 - name_prefix = var.workers_role_name != "" ? null : aws_eks_cluster.this.name + count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0 + name_prefix = var.workers_role_name != "" ? null : aws_eks_cluster.this[0].name name = var.workers_role_name != "" ? 
var.workers_role_name : null assume_role_policy = data.aws_iam_policy_document.workers_assume_role_policy.json permissions_boundary = var.permissions_boundary @@ -323,8 +323,8 @@ resource "aws_iam_role" "workers" { } resource "aws_iam_instance_profile" "workers" { - count = var.manage_worker_iam_resources ? local.worker_group_count : 0 - name_prefix = aws_eks_cluster.this.name + count = var.manage_worker_iam_resources && var.create_eks ? local.worker_group_count : 0 + name_prefix = aws_eks_cluster.this[0].name role = lookup( var.worker_groups[count.index], "iam_role_id", @@ -335,44 +335,45 @@ resource "aws_iam_instance_profile" "workers" { } resource "aws_iam_role_policy_attachment" "workers_AmazonEKSWorkerNodePolicy" { - count = var.manage_worker_iam_resources ? 1 : 0 + count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0 policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy" role = aws_iam_role.workers[0].name } resource "aws_iam_role_policy_attachment" "workers_AmazonEKS_CNI_Policy" { - count = var.manage_worker_iam_resources && var.attach_worker_cni_policy ? 1 : 0 + count = var.manage_worker_iam_resources && var.attach_worker_cni_policy && var.create_eks ? 1 : 0 policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy" role = aws_iam_role.workers[0].name } resource "aws_iam_role_policy_attachment" "workers_AmazonEC2ContainerRegistryReadOnly" { - count = var.manage_worker_iam_resources ? 1 : 0 + count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0 policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" role = aws_iam_role.workers[0].name } resource "aws_iam_role_policy_attachment" "workers_additional_policies" { - count = var.manage_worker_iam_resources ? length(var.workers_additional_policies) : 0 + count = var.manage_worker_iam_resources && var.create_eks ? 
length(var.workers_additional_policies) : 0 role = aws_iam_role.workers[0].name policy_arn = var.workers_additional_policies[count.index] } resource "aws_iam_role_policy_attachment" "workers_autoscaling" { - count = var.manage_worker_iam_resources && var.manage_worker_autoscaling_policy && var.attach_worker_autoscaling_policy ? 1 : 0 + count = var.manage_worker_iam_resources && var.manage_worker_autoscaling_policy && var.attach_worker_autoscaling_policy && var.create_eks ? 1 : 0 policy_arn = aws_iam_policy.worker_autoscaling[0].arn role = aws_iam_role.workers[0].name } resource "aws_iam_policy" "worker_autoscaling" { - count = var.manage_worker_iam_resources && var.manage_worker_autoscaling_policy ? 1 : 0 - name_prefix = "eks-worker-autoscaling-${aws_eks_cluster.this.name}" - description = "EKS worker node autoscaling policy for cluster ${aws_eks_cluster.this.name}" - policy = data.aws_iam_policy_document.worker_autoscaling.json + count = var.manage_worker_iam_resources && var.manage_worker_autoscaling_policy && var.create_eks ? 1 : 0 + name_prefix = "eks-worker-autoscaling-${aws_eks_cluster.this[0].name}" + description = "EKS worker node autoscaling policy for cluster ${aws_eks_cluster.this[0].name}" + policy = data.aws_iam_policy_document.worker_autoscaling[0].json path = var.iam_path } data "aws_iam_policy_document" "worker_autoscaling" { + count = var.manage_worker_iam_resources && var.manage_worker_autoscaling_policy && var.create_eks ? 
1 : 0 statement { sid = "eksWorkerAutoscalingAll" effect = "Allow" @@ -402,7 +403,7 @@ data "aws_iam_policy_document" "worker_autoscaling" { condition { test = "StringEquals" - variable = "autoscaling:ResourceTag/kubernetes.io/cluster/${aws_eks_cluster.this.name}" + variable = "autoscaling:ResourceTag/kubernetes.io/cluster/${aws_eks_cluster.this[0].name}" values = ["owned"] } diff --git a/workers_launch_template.tf b/workers_launch_template.tf index fefacd6..519a289 100644 --- a/workers_launch_template.tf +++ b/workers_launch_template.tf @@ -1,12 +1,12 @@ # Worker Groups using Launch Templates resource "aws_autoscaling_group" "workers_launch_template" { - count = local.worker_group_launch_template_count + count = var.create_eks ? local.worker_group_launch_template_count : 0 name_prefix = join( "-", compact( [ - aws_eks_cluster.this.name, + aws_eks_cluster.this[0].name, lookup(var.worker_groups_launch_template[count.index], "name", count.index), lookup(var.worker_groups_launch_template[count.index], "asg_recreate_on_change", local.workers_group_defaults["asg_recreate_on_change"]) ? 
random_pet.workers_launch_template[count.index].id : "" ] @@ -167,7 +167,7 @@ resource "aws_autoscaling_group" "workers_launch_template" { [ { "key" = "Name" - "value" = "${aws_eks_cluster.this.name}-${lookup( + "value" = "${aws_eks_cluster.this[0].name}-${lookup( var.worker_groups_launch_template[count.index], "name", count.index, @@ -175,7 +175,7 @@ resource "aws_autoscaling_group" "workers_launch_template" { "propagate_at_launch" = true }, { - "key" = "kubernetes.io/cluster/${aws_eks_cluster.this.name}" + "key" = "kubernetes.io/cluster/${aws_eks_cluster.this[0].name}" "value" = "owned" "propagate_at_launch" = true }, @@ -189,8 +189,8 @@ resource "aws_autoscaling_group" "workers_launch_template" { "propagate_at_launch" = false }, { - "key" = "k8s.io/cluster-autoscaler/${aws_eks_cluster.this.name}" - "value" = aws_eks_cluster.this.name + "key" = "k8s.io/cluster-autoscaler/${aws_eks_cluster.this[0].name}" + "value" = aws_eks_cluster.this[0].name "propagate_at_launch" = false }, { @@ -218,8 +218,8 @@ resource "aws_autoscaling_group" "workers_launch_template" { } resource "aws_launch_template" "workers_launch_template" { - count = local.worker_group_launch_template_count - name_prefix = "${aws_eks_cluster.this.name}-${lookup( + count = var.create_eks ? (local.worker_group_launch_template_count) : 0 + name_prefix = "${aws_eks_cluster.this[0].name}-${lookup( var.worker_groups_launch_template[count.index], "name", count.index, @@ -364,7 +364,7 @@ resource "aws_launch_template" "workers_launch_template" { tags = merge( { - "Name" = "${aws_eks_cluster.this.name}-${lookup( + "Name" = "${aws_eks_cluster.this[0].name}-${lookup( var.worker_groups_launch_template[count.index], "name", count.index, @@ -382,7 +382,7 @@ resource "aws_launch_template" "workers_launch_template" { } resource "random_pet" "workers_launch_template" { - count = local.worker_group_launch_template_count + count = var.create_eks ? 
local.worker_group_launch_template_count : 0 separator = "-" length = 2 @@ -401,8 +401,8 @@ resource "random_pet" "workers_launch_template" { } resource "aws_iam_instance_profile" "workers_launch_template" { - count = var.manage_worker_iam_resources ? local.worker_group_launch_template_count : 0 - name_prefix = aws_eks_cluster.this.name + count = var.manage_worker_iam_resources && var.create_eks ? local.worker_group_launch_template_count : 0 + name_prefix = aws_eks_cluster.this[0].name role = lookup( var.worker_groups_launch_template[count.index], "iam_role_id",