Add destroy time flag (#580)

* Add destroy-time flag

* Update changelog

Fix cluster count

* Fix cluster count

* Fix docs

* Fix outputs

* Fix unsupported attribute on cluster_certificate_authority_data output

Co-Authored-By: Daniel Piddock <33028589+dpiddockcmp@users.noreply.github.com>

* Remove unnecessary flatten from cluster_endpoint output

Co-Authored-By: Daniel Piddock <33028589+dpiddockcmp@users.noreply.github.com>

* Improve description of var.enabled

* Fix errors manifesting when used on an existing cluster

* Update README.md

* Renamed destroy-time flag

* Revert removal of changelog addition entry

* Update flag name in readme

* Update flag variable name

* Update cluster referencing for consistency

* Update flag name to `create_eks`
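
  For reference, a minimal sketch of how such a flag is typically declared (the description text and default shown here are illustrative, not copied from the module):

  ```hcl
  variable "create_eks" {
    description = "Controls whether the EKS cluster and related resources are created"
    type        = bool
    default     = true
  }
  ```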

* Fixed incorrect count-based reference to aws_eks_cluster.this (there's only one)

* Replaced all incorrect aws_eks_cluster.this[count.index] references (only one cluster is ever created, so '[0]' is used)

* Changelog update, explicitly mentioning flag

* Fixed interpolation deprecation warning

* Fixed outputs to support conditional cluster
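
  One common pattern for an output that must not fail when the cluster is absent, sketched here with an illustrative output name and empty-string fallback:

  ```hcl
  output "cluster_id" {
    description = "The name/id of the EKS cluster. Empty when create_eks is false."
    value       = element(concat(aws_eks_cluster.this[*].id, [""]), 0)
  }
  ```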

* Applied create_eks to aws_auth.tf
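
  In practice this means gating the aws-auth ConfigMap on the flag as well; a rough sketch, where the resource body is a simplified stand-in for the module's actual template:

  ```hcl
  resource "kubernetes_config_map" "aws_auth" {
    count = var.create_eks && var.manage_aws_auth ? 1 : 0

    metadata {
      name      = "aws-auth"
      namespace = "kube-system"
    }

    data = {
      mapRoles = yamlencode(var.map_roles)
    }
  }
  ```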

* Removed unused variable. Updated Changelog. Formatting.

* Fixed references to aws_eks_cluster.this[0] that would raise errors when setting create_eks to false whilst having launch templates or launch configurations configured.
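
  The usual trick is to avoid a hard `[0]` index on a resource that may have count 0, along these lines (the local name is hypothetical):

  ```hcl
  locals {
    # Resolves to "" when no cluster exists, so worker launch template /
    # launch configuration expressions do not fail on [0] indexing.
    cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
  }
  ```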

* Readme and example updates.

* Revert "Readme and example updates."

This reverts commit 18a0746355e136010ad54858a1b518406f6a3638.

* Updated readme section on conditional creation with a provider example.
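
  Roughly, the documented pattern wraps the kubernetes provider's data sources in the same condition, along these lines (names, arguments, and fallbacks are illustrative):

  ```hcl
  data "aws_eks_cluster" "cluster" {
    count = var.create_eks ? 1 : 0
    name  = module.eks.cluster_id
  }

  data "aws_eks_cluster_auth" "cluster" {
    count = var.create_eks ? 1 : 0
    name  = module.eks.cluster_id
  }

  provider "kubernetes" {
    host                   = element(concat(data.aws_eks_cluster.cluster[*].endpoint, [""]), 0)
    cluster_ca_certificate = base64decode(element(concat(data.aws_eks_cluster.cluster[*].certificate_authority[0].data, [""]), 0))
    token                  = element(concat(data.aws_eks_cluster_auth.cluster[*].token, [""]), 0)
    load_config_file       = false
  }
  ```

  With `create_eks = false` on the module, both data sources have count 0 and the provider falls back to empty values instead of failing.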

* Added conditions to node_groups.

* Fixed reversed map_roles check

* Update aws_auth.tf

Revert this due to https://github.com/terraform-aws-modules/terraform-aws-eks/pull/611
Author: Tomislav Tomašić
Date: 2019-12-09 09:06:10 +00:00
Committed by: Max Williams
Parent: 7c2c4a6aa5
Commit: 124ea7c151

11 changed files with 138 additions and 100 deletions


@@ -1,6 +1,6 @@
 resource "aws_iam_role" "node_groups" {
-  count = local.worker_group_managed_node_group_count > 0 ? 1 : 0
-  name = "${var.workers_role_name != "" ? var.workers_role_name : aws_eks_cluster.this.name}-managed-node-groups"
+  count = var.create_eks && local.worker_group_managed_node_group_count > 0 ? 1 : 0
+  name = "${var.workers_role_name != "" ? var.workers_role_name : aws_eks_cluster.this[0].name}-managed-node-groups"
   assume_role_policy = data.aws_iam_policy_document.workers_assume_role_policy.json
   permissions_boundary = var.permissions_boundary
   path = var.iam_path
@@ -9,46 +9,46 @@ resource "aws_iam_role" "node_groups" {
 }

 resource "aws_iam_role_policy_attachment" "node_groups_AmazonEKSWorkerNodePolicy" {
-  count = local.worker_group_managed_node_group_count > 0 ? 1 : 0
+  count = var.create_eks && local.worker_group_managed_node_group_count > 0 ? 1 : 0
   policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
   role = aws_iam_role.node_groups[0].name
 }

 resource "aws_iam_role_policy_attachment" "node_groups_AmazonEKS_CNI_Policy" {
-  count = local.worker_group_managed_node_group_count > 0 ? 1 : 0
+  count = var.create_eks && local.worker_group_managed_node_group_count > 0 ? 1 : 0
   policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
   role = aws_iam_role.node_groups[0].name
 }

 resource "aws_iam_role_policy_attachment" "node_groups_AmazonEC2ContainerRegistryReadOnly" {
-  count = local.worker_group_managed_node_group_count > 0 ? 1 : 0
+  count = var.create_eks && local.worker_group_managed_node_group_count > 0 ? 1 : 0
   policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
   role = aws_iam_role.node_groups[0].name
 }

 resource "aws_iam_role_policy_attachment" "node_groups_additional_policies" {
-  for_each = toset(var.workers_additional_policies)
+  for_each = var.create_eks && local.worker_group_managed_node_group_count > 0 ? toset(var.workers_additional_policies) : []
   role = aws_iam_role.node_groups[0].name
   policy_arn = each.key
 }

 resource "aws_iam_role_policy_attachment" "node_groups_autoscaling" {
-  count = var.manage_worker_autoscaling_policy && var.attach_worker_autoscaling_policy && local.worker_group_managed_node_group_count > 0 ? 1 : 0
+  count = var.create_eks && var.manage_worker_autoscaling_policy && var.attach_worker_autoscaling_policy && local.worker_group_managed_node_group_count > 0 ? 1 : 0
   policy_arn = aws_iam_policy.node_groups_autoscaling[0].arn
   role = aws_iam_role.node_groups[0].name
 }

 resource "aws_iam_policy" "node_groups_autoscaling" {
-  count = var.manage_worker_autoscaling_policy && local.worker_group_managed_node_group_count > 0 ? 1 : 0
-  name_prefix = "eks-worker-autoscaling-${aws_eks_cluster.this.name}"
-  description = "EKS worker node autoscaling policy for cluster ${aws_eks_cluster.this.name}"
-  policy = data.aws_iam_policy_document.worker_autoscaling.json
+  count = var.create_eks && var.manage_worker_autoscaling_policy && local.worker_group_managed_node_group_count > 0 ? 1 : 0
+  name_prefix = "eks-worker-autoscaling-${aws_eks_cluster.this[0].name}"
+  description = "EKS worker node autoscaling policy for cluster ${aws_eks_cluster.this[0].name}"
+  policy = data.aws_iam_policy_document.worker_autoscaling[0].json
   path = var.iam_path
 }

 resource "random_pet" "node_groups" {
-  for_each = local.node_groups
+  for_each = var.create_eks ? local.node_groups : {}

   separator = "-"
   length = 2
@@ -67,7 +67,7 @@ resource "random_pet" "node_groups" {
 }

 resource "aws_eks_node_group" "workers" {
-  for_each = local.node_groups
+  for_each = var.create_eks ? local.node_groups : {}

   node_group_name = join("-", [var.cluster_name, each.key, random_pet.node_groups[each.key].id])
@@ -93,7 +93,7 @@ resource "aws_eks_node_group" "workers" {
     source_security_group_ids = lookup(each.value, "key_name", "") != "" ? lookup(each.value, "source_security_group_ids", []) : null
   }

-  version = aws_eks_cluster.this.version
+  version = aws_eks_cluster.this[0].version

   tags = lookup(each.value, "node_group_additional_tags", null)