Use join and splat syntax to access conditional resources (#569)

This commit is contained in:
Miguel Ferreira
2019-10-29 13:55:21 +01:00
committed by Thierno IB. BARRY
parent 4f552891ff
commit a8e54ccf73
4 changed files with 15 additions and 14 deletions

View File

@@ -21,6 +21,7 @@ project adheres to [Semantic Versioning](http://semver.org/).
- **Breaking:** The `kubectl` configuration file can now be fully-specified using `config_output_path`. Previously it was assumed that `config_output_path` referred to a directory and always ended with a forward slash. This is a breaking change if `config_output_path` does **not** end with a forward slash (which was advised against by the documentation).
- Changed logic for setting default ebs_optimized to only require maintaining a list of instance types that don't support it (by @jeffmhastings)
- Bumped minimum terraform version to 0.12.2 to prevent an error on yamlencode function (by @toadjaune)
- Access conditional resources using the `join()` function in combination with splat syntax (by @miguelaferreira)
# History

View File

@@ -49,7 +49,7 @@ resource "aws_security_group_rule" "cluster_egress_internet" {
count = var.cluster_create_security_group ? 1 : 0
description = "Allow cluster egress access to the Internet."
protocol = "-1"
security_group_id = aws_security_group.cluster[0].id
security_group_id = local.cluster_security_group_id
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
to_port = 0
@@ -60,7 +60,7 @@ resource "aws_security_group_rule" "cluster_https_worker_ingress" {
count = var.cluster_create_security_group ? 1 : 0
description = "Allow pods to communicate with the EKS cluster API."
protocol = "tcp"
security_group_id = aws_security_group.cluster[0].id
security_group_id = local.cluster_security_group_id
source_security_group_id = local.worker_security_group_id
from_port = 443
to_port = 443
@@ -80,11 +80,11 @@ resource "aws_iam_role" "cluster" {
resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" {
count = var.manage_cluster_iam_resources ? 1 : 0
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = aws_iam_role.cluster[0].name
role = local.cluster_iam_role_name
}
resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSServicePolicy" {
count = var.manage_cluster_iam_resources ? 1 : 0
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
role = aws_iam_role.cluster[0].name
role = local.cluster_iam_role_name
}

View File

@@ -8,10 +8,10 @@ locals {
)
]
cluster_security_group_id = var.cluster_create_security_group ? aws_security_group.cluster[0].id : var.cluster_security_group_id
cluster_iam_role_name = var.manage_cluster_iam_resources ? aws_iam_role.cluster[0].name : var.cluster_iam_role_name
cluster_iam_role_arn = var.manage_cluster_iam_resources ? aws_iam_role.cluster[0].arn : data.aws_iam_role.custom_cluster_iam_role[0].arn
worker_security_group_id = var.worker_create_security_group ? aws_security_group.workers[0].id : var.worker_security_group_id
cluster_security_group_id = var.cluster_create_security_group ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id
cluster_iam_role_name = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.name) : var.cluster_iam_role_name
cluster_iam_role_arn = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.arn) : join("", data.aws_iam_role.custom_cluster_iam_role.*.arn)
worker_security_group_id = var.worker_create_security_group ? join("", aws_security_group.workers.*.id) : var.worker_security_group_id
default_iam_role_id = concat(aws_iam_role.workers.*.id, [""])[0]
kubeconfig_name = var.kubeconfig_name == "" ? "eks_${var.cluster_name}" : var.kubeconfig_name

View File

@@ -260,7 +260,7 @@ resource "aws_security_group_rule" "workers_egress_internet" {
count = var.worker_create_security_group ? 1 : 0
description = "Allow nodes all egress to the Internet."
protocol = "-1"
security_group_id = aws_security_group.workers[0].id
security_group_id = local.worker_security_group_id
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
to_port = 0
@@ -271,8 +271,8 @@ resource "aws_security_group_rule" "workers_ingress_self" {
count = var.worker_create_security_group ? 1 : 0
description = "Allow node to communicate with each other."
protocol = "-1"
security_group_id = aws_security_group.workers[0].id
source_security_group_id = aws_security_group.workers[0].id
security_group_id = local.worker_security_group_id
source_security_group_id = local.worker_security_group_id
from_port = 0
to_port = 65535
type = "ingress"
@@ -282,7 +282,7 @@ resource "aws_security_group_rule" "workers_ingress_cluster" {
count = var.worker_create_security_group ? 1 : 0
description = "Allow workers pods to receive communication from the cluster control plane."
protocol = "tcp"
security_group_id = aws_security_group.workers[0].id
security_group_id = local.worker_security_group_id
source_security_group_id = local.cluster_security_group_id
from_port = var.worker_sg_ingress_from_port
to_port = 65535
@@ -293,7 +293,7 @@ resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" {
count = var.worker_create_security_group ? var.worker_sg_ingress_from_port > 10250 ? 1 : 0 : 0
description = "Allow workers Kubelets to receive communication from the cluster control plane."
protocol = "tcp"
security_group_id = aws_security_group.workers[0].id
security_group_id = local.worker_security_group_id
source_security_group_id = local.cluster_security_group_id
from_port = 10250
to_port = 10250
@@ -304,7 +304,7 @@ resource "aws_security_group_rule" "workers_ingress_cluster_https" {
count = var.worker_create_security_group ? 1 : 0
description = "Allow pods running extension API servers on port 443 to receive communication from cluster control plane."
protocol = "tcp"
security_group_id = aws_security_group.workers[0].id
security_group_id = local.worker_security_group_id
source_security_group_id = local.cluster_security_group_id
from_port = 443
to_port = 443