feat: Allow communication between pods on workers and pods using the primary cluster security group (optional) (#892)

NOTES: New variable `worker_create_cluster_primary_security_group_rules` to allow communication between pods on workers and pods using the primary cluster security group (Managed Node Groups or Fargate). It defaults to `false` to avoid potential conflicts with existing security group rules users may have implemented.
Simon Gurcke
2020-05-31 06:43:26 +10:00
committed by GitHub
parent 7ffe5fa88f
commit 3fefc2a66c
5 changed files with 35 additions and 5 deletions
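For module consumers, opting in is a single extra argument on the module block. A minimal sketch of root-module usage, assuming a release that contains this change; the version pin, names and network inputs are illustrative, not part of this commit:

module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 12.0" # hypothetical pin; use a release that includes this change

  cluster_name    = "example"
  cluster_version = "1.16"
  vpc_id          = var.vpc_id
  subnets         = var.subnets

  # New in this commit: create the rule pair between the worker security group
  # and the cluster primary security group (defaults to false).
  worker_create_cluster_primary_security_group_rules = true
}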

View File

@@ -200,6 +200,7 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a
| worker\_ami\_name\_filter\_windows | Name filter for AWS EKS Windows worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used. | `string` | `""` | no |
| worker\_ami\_owner\_id | The ID of the owner for the AMI to use for the AWS EKS workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft'). | `string` | `"602401143452"` | no |
| worker\_ami\_owner\_id\_windows | The ID of the owner for the AMI to use for the AWS EKS Windows workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft'). | `string` | `"801119661308"` | no |
+| worker\_create\_cluster\_primary\_security\_group\_rules | Whether to create security group rules to allow communication between pods on workers and pods using the primary cluster security group. | `bool` | `false` | no |
| worker\_create\_initial\_lifecycle\_hooks | Whether to create initial lifecycle hooks provided in worker groups. | `bool` | `false` | no |
| worker\_create\_security\_group | Whether to create a security group for the workers or attach the workers to `worker_security_group_id`. | `bool` | `true` | no |
| worker\_groups | A list of maps defining worker group configurations to be defined using AWS Launch Configurations. See workers\_group\_defaults for valid keys. | `any` | `[]` | no |

View File

@@ -8,10 +8,11 @@ locals {
    )
  ]
-  cluster_security_group_id = var.cluster_create_security_group ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id
-  cluster_iam_role_name     = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.name) : var.cluster_iam_role_name
-  cluster_iam_role_arn      = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.arn) : join("", data.aws_iam_role.custom_cluster_iam_role.*.arn)
-  worker_security_group_id  = var.worker_create_security_group ? join("", aws_security_group.workers.*.id) : var.worker_security_group_id
+  cluster_security_group_id         = var.cluster_create_security_group ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id
+  cluster_primary_security_group_id = var.cluster_version >= 1.14 ? element(concat(aws_eks_cluster.this[*].vpc_config[0].cluster_security_group_id, list("")), 0) : null
+  cluster_iam_role_name             = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.name) : var.cluster_iam_role_name
+  cluster_iam_role_arn              = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.arn) : join("", data.aws_iam_role.custom_cluster_iam_role.*.arn)
+  worker_security_group_id          = var.worker_create_security_group ? join("", aws_security_group.workers.*.id) : var.worker_security_group_id
  default_iam_role_id = concat(aws_iam_role.workers.*.id, [""])[0]
  default_ami_id_linux = coalesce(local.workers_group_defaults.ami_id, data.aws_ami.eks_worker.id)
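The `element(concat(..., list("")), 0)` expression in the new local is the usual Terraform 0.12 idiom for reading an attribute from a resource whose splat may be empty (here, when `create_eks` is false and `aws_eks_cluster.this` has count 0): padding the list with an empty string lets the lookup return "" instead of failing. A stand-alone sketch of the idiom with placeholder values:

locals {
  # ids stands in for a resource splat such as aws_eks_cluster.this[*].id,
  # which evaluates to an empty list when the resource has count = 0.
  ids = []

  # element() on an empty list would error; concat-ing an empty-string
  # fallback makes the expression yield "" instead.
  maybe_id = element(concat(local.ids, list("")), 0)
}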

View File

@@ -53,7 +53,7 @@ output "cluster_oidc_issuer_url" {
output "cluster_primary_security_group_id" {
description = "The cluster primary security group ID created by the EKS cluster on 1.14 or later. Referred to as 'Cluster security group' in the EKS console."
value = var.cluster_version >= 1.14 ? element(concat(aws_eks_cluster.this[*].vpc_config[0].cluster_security_group_id, list("")), 0) : null
value = local.cluster_primary_security_group_id
}
output "cloudwatch_log_group_name" {

View File

@@ -228,6 +228,12 @@ variable "worker_create_initial_lifecycle_hooks" {
  default     = false
}

+variable "worker_create_cluster_primary_security_group_rules" {
+  description = "Whether to create security group rules to allow communication between pods on workers and pods using the primary cluster security group."
+  type        = bool
+  default     = false
+}
+
variable "permissions_boundary" {
  description = "If provided, all IAM roles will be created with this permissions boundary attached."
  type        = string

View File

@@ -338,6 +338,28 @@ resource "aws_security_group_rule" "workers_ingress_cluster_https" {
type = "ingress"
}
resource "aws_security_group_rule" "workers_ingress_cluster_primary" {
count = var.worker_create_security_group && var.worker_create_cluster_primary_security_group_rules && var.cluster_version >= 1.14 && var.create_eks ? 1 : 0
description = "Allow pods running on workers to receive communication from cluster primary security group (e.g. Fargate pods)."
protocol = "all"
security_group_id = local.worker_security_group_id
source_security_group_id = local.cluster_primary_security_group_id
from_port = 0
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "cluster_primary_ingress_workers" {
count = var.worker_create_security_group && var.worker_create_cluster_primary_security_group_rules && var.cluster_version >= 1.14 && var.create_eks ? 1 : 0
description = "Allow pods running on workers to send communication to cluster primary security group (e.g. Fargate pods)."
protocol = "all"
security_group_id = local.cluster_primary_security_group_id
source_security_group_id = local.worker_security_group_id
from_port = 0
to_port = 65535
type = "ingress"
}
resource "aws_iam_role" "workers" {
count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0
name_prefix = var.workers_role_name != "" ? null : aws_eks_cluster.this[0].name
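For users who leave `worker_create_cluster_primary_security_group_rules` at `false`, for example because similar rules already exist, an equivalent pair can be declared outside the module from its outputs. A sketch assuming the module instance is named `module.eks` and that its `worker_security_group_id` and `cluster_primary_security_group_id` outputs are available; this is illustrative, not part of this commit:

# Mirrors the in-module rule pair using module outputs; rename or drop either
# rule if it would collide with rules you already manage.
resource "aws_security_group_rule" "workers_from_cluster_primary" {
  description              = "Pods on workers receive traffic from the cluster primary security group"
  type                     = "ingress"
  protocol                 = "all"
  from_port                = 0
  to_port                  = 65535
  security_group_id        = module.eks.worker_security_group_id
  source_security_group_id = module.eks.cluster_primary_security_group_id
}

resource "aws_security_group_rule" "cluster_primary_from_workers" {
  description              = "Cluster primary security group receives traffic from pods on workers"
  type                     = "ingress"
  protocol                 = "all"
  from_port                = 0
  to_port                  = 65535
  security_group_id        = module.eks.cluster_primary_security_group_id
  source_security_group_id = module.eks.worker_security_group_id
}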