Control plane security group always whitelists worker security group; revert #186 (#631)

* Updates and revert #186

* update readme

* update changelog

* update changelog
Ryan Ooi
2019-12-20 23:45:01 +08:00
committed by Max Williams
parent 583c32d286
commit b7ffc1b591
6 changed files with 18 additions and 25 deletions

CHANGELOG.md

@@ -21,6 +21,7 @@ project adheres to [Semantic Versioning](http://semver.org/).
 - Fix cluster_oidc_issuer_url output from list to string (by @chewvader)
 - Fix idempotency issues for node groups with no remote_access configuration (by @jeffmhastings)
 - Added support to create IAM OpenID Connect Identity Provider to enable EKS Identity Roles for Service Accounts (IRSA). (by @alaa)
+- **Breaking:** Change the security group whitelisting logic: the worker security group is now always whitelisted on the control plane security group, whether you provide an existing control plane security group or let the module create a new one. See Important notes below for upgrade notes (by @ryanooi)
 
 #### Important notes
@@ -35,6 +36,12 @@ terraform import module.cluster1.kubernetes_config_map.aws_auth[0] kube-system/a
 You could also delete the aws-auth config map before doing an apply, but this means you need to run the apply with the **same user/role that created the cluster**.
 
+For the security group whitelisting change: after upgrading, remove the `cluster_create_security_group` and `worker_create_security_group` variables from your configuration. If you had whitelisted the worker security group on the control plane security group yourself before, you will have to either delete that rule (and apply again) or import it (a configuration sketch follows this diff):
+
+```
+terraform import module.eks.aws_security_group_rule.cluster_https_worker_ingress <CONTROL_PLANE_SECURITY_GROUP_ID>_ingress_tcp_443_443_<WORKER_SECURITY_GROUP_ID>
+```
+
 # History
 
 ## [[v7.0.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v7.0.0...v7.0.1)] - 2019-12-11
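For readers upgrading, here is a minimal sketch of what the upgrade note above means for a module call; the module name, cluster name, VPC, subnet, and security group IDs are all hypothetical:

```hcl
module "eks" {
  source       = "terraform-aws-modules/eks/aws"
  cluster_name = "example-cluster"   # hypothetical
  vpc_id       = "vpc-0example"      # hypothetical
  subnets      = ["subnet-0example"] # hypothetical

  # Remove these two inputs after upgrading; they no longer exist:
  # cluster_create_security_group = true
  # worker_create_security_group  = true

  # Creation is now inferred from the ID inputs: leave them unset
  # (empty string) to have the module create the security groups, or
  # pass an existing ID to attach to it instead.
  # cluster_security_group_id = "sg-0examplecluster"
  # worker_security_group_id  = "sg-0exampleworkers"
}
```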

README.md

@@ -152,7 +152,6 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a
 |------|-------------|:----:|:-----:|:-----:|
 | attach\_worker\_autoscaling\_policy | Whether to attach the module managed cluster autoscaling iam policy to the default worker IAM role. This requires `manage_worker_autoscaling_policy = true` | bool | `"true"` | no |
 | attach\_worker\_cni\_policy | Whether to attach the Amazon managed `AmazonEKS_CNI_Policy` IAM policy to the default worker IAM role. WARNING: If set `false` the permissions must be assigned to the `aws-node` DaemonSet pods via another method or nodes will not be able to join the cluster. | bool | `"true"` | no |
-| cluster\_create\_security\_group | Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id`. | bool | `"true"` | no |
 | cluster\_create\_timeout | Timeout value when creating the EKS cluster. | string | `"15m"` | no |
 | cluster\_delete\_timeout | Timeout value when deleting the EKS cluster. | string | `"15m"` | no |
 | cluster\_enabled\_log\_types | A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html) | list(string) | `[]` | no |
@@ -193,7 +192,6 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a
 | worker\_ami\_owner\_id | The ID of the owner for the AMI to use for the AWS EKS workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft'). | string | `"602401143452"` | no |
 | worker\_ami\_owner\_id\_windows | The ID of the owner for the AMI to use for the AWS EKS Windows workers. Valid values are an AWS account ID, 'self' (the current account), or an AWS owner alias (e.g. 'amazon', 'aws-marketplace', 'microsoft'). | string | `"801119661308"` | no |
 | worker\_create\_initial\_lifecycle\_hooks | Whether to create initial lifecycle hooks provided in worker groups. | bool | `"false"` | no |
-| worker\_create\_security\_group | Whether to create a security group for the workers or attach the workers to `worker_security_group_id`. | bool | `"true"` | no |
 | worker\_groups | A list of maps defining worker group configurations to be defined using AWS Launch Configurations. See workers_group_defaults for valid keys. | any | `[]` | no |
 | worker\_groups\_launch\_template | A list of maps defining worker group configurations to be defined using AWS Launch Templates. See workers_group_defaults for valid keys. | any | `[]` | no |
 | worker\_security\_group\_id | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster. | string | `""` | no |

cluster.tf

@@ -34,7 +34,7 @@ resource "aws_eks_cluster" "this" {
 }
 
 resource "aws_security_group" "cluster" {
-  count       = var.cluster_create_security_group && var.create_eks ? 1 : 0
+  count       = var.cluster_security_group_id == "" && var.create_eks ? 1 : 0
   name_prefix = var.cluster_name
   description = "EKS cluster security group."
   vpc_id      = var.vpc_id
@@ -47,7 +47,7 @@ resource "aws_security_group" "cluster" {
 }
 
 resource "aws_security_group_rule" "cluster_egress_internet" {
-  count             = var.cluster_create_security_group && var.create_eks ? 1 : 0
+  count             = var.cluster_security_group_id == "" && var.create_eks ? 1 : 0
   description       = "Allow cluster egress access to the Internet."
   protocol          = "-1"
   security_group_id = local.cluster_security_group_id
@@ -58,7 +58,7 @@ resource "aws_security_group_rule" "cluster_egress_internet" {
 }
 
 resource "aws_security_group_rule" "cluster_https_worker_ingress" {
-  count             = var.cluster_create_security_group && var.create_eks ? 1 : 0
+  count             = var.create_eks ? 1 : 0
   description       = "Allow pods to communicate with the EKS cluster API."
   protocol          = "tcp"
   security_group_id = local.cluster_security_group_id
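The last hunk above is the heart of #631: the workers-to-control-plane HTTPS rule is now gated only on `var.create_eks`, so it is created even when the control plane security group is user-supplied. Below is a standalone sketch of the resulting rule; port 443 matches the import address given in the changelog, while `type`, `from_port`, and `to_port` are filled in as assumptions because the diff truncates the resource body:

```hcl
resource "aws_security_group_rule" "cluster_https_worker_ingress" {
  # Only the cluster toggle remains; worker SG whitelisting is unconditional.
  count = var.create_eks ? 1 : 0

  description              = "Allow pods to communicate with the EKS cluster API."
  type                     = "ingress"
  protocol                 = "tcp"
  from_port                = 443
  to_port                  = 443
  security_group_id        = local.cluster_security_group_id
  source_security_group_id = local.worker_security_group_id
}
```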

local.tf

@@ -8,10 +8,10 @@ locals {
     )
   ]
 
-  cluster_security_group_id = var.cluster_create_security_group ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id
+  cluster_security_group_id = var.cluster_security_group_id == "" ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id
   cluster_iam_role_name     = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.name) : var.cluster_iam_role_name
   cluster_iam_role_arn      = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.arn) : join("", data.aws_iam_role.custom_cluster_iam_role.*.arn)
 
-  worker_security_group_id = var.worker_create_security_group ? join("", aws_security_group.workers.*.id) : var.worker_security_group_id
+  worker_security_group_id = var.worker_security_group_id == "" ? join("", aws_security_group.workers.*.id) : var.worker_security_group_id
   default_iam_role_id      = concat(aws_iam_role.workers.*.id, [""])[0]
   kubeconfig_name          = var.kubeconfig_name == "" ? "eks_${var.cluster_name}" : var.kubeconfig_name
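These locals rely on a common Terraform 0.12 fallback idiom: `join("", resource.*.id)` collapses the zero-or-one-element list produced by a counted resource into either an empty string or the single ID. A self-contained sketch of the pattern, with an illustrative resource name and VPC ID:

```hcl
variable "security_group_id" {
  description = "Existing security group ID; leave empty to create one."
  type        = string
  default     = ""
}

resource "aws_security_group" "this" {
  # Created only when no existing ID was supplied.
  count  = var.security_group_id == "" ? 1 : 0
  name   = "example"
  vpc_id = "vpc-0example" # hypothetical
}

locals {
  # join("", <splat>) turns the 0- or 1-element list into a plain string,
  # so downstream references never deal with lists or conditionals.
  security_group_id = var.security_group_id == "" ? join("", aws_security_group.this.*.id) : var.security_group_id
}
```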

variables.tf

@@ -204,18 +204,6 @@ variable "local_exec_interpreter" {
   default     = ["/bin/sh", "-c"]
 }
 
-variable "cluster_create_security_group" {
-  description = "Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id`."
-  type        = bool
-  default     = true
-}
-
-variable "worker_create_security_group" {
-  description = "Whether to create a security group for the workers or attach the workers to `worker_security_group_id`."
-  type        = bool
-  default     = true
-}
-
 variable "worker_create_initial_lifecycle_hooks" {
   description = "Whether to create initial lifecycle hooks provided in worker groups."
   type        = bool

workers.tf

@@ -243,7 +243,7 @@ resource "random_pet" "workers" {
 }
 
 resource "aws_security_group" "workers" {
-  count       = var.worker_create_security_group && var.create_eks ? 1 : 0
+  count       = var.worker_security_group_id == "" && var.create_eks ? 1 : 0
   name_prefix = aws_eks_cluster.this[0].name
   description = "Security group for all nodes in the cluster."
   vpc_id      = var.vpc_id
@@ -257,7 +257,7 @@ resource "aws_security_group" "workers" {
 }
 
 resource "aws_security_group_rule" "workers_egress_internet" {
-  count             = var.worker_create_security_group && var.create_eks ? 1 : 0
+  count             = var.worker_security_group_id == "" && var.create_eks ? 1 : 0
   description       = "Allow nodes all egress to the Internet."
   protocol          = "-1"
   security_group_id = local.worker_security_group_id
@@ -268,7 +268,7 @@ resource "aws_security_group_rule" "workers_egress_internet" {
 }
 
 resource "aws_security_group_rule" "workers_ingress_self" {
-  count             = var.worker_create_security_group && var.create_eks ? 1 : 0
+  count             = var.worker_security_group_id == "" && var.create_eks ? 1 : 0
   description       = "Allow node to communicate with each other."
   protocol          = "-1"
   security_group_id = local.worker_security_group_id
@@ -279,7 +279,7 @@ resource "aws_security_group_rule" "workers_ingress_self" {
 }
 
 resource "aws_security_group_rule" "workers_ingress_cluster" {
-  count             = var.worker_create_security_group && var.create_eks ? 1 : 0
+  count             = var.worker_security_group_id == "" && var.create_eks ? 1 : 0
   description       = "Allow workers pods to receive communication from the cluster control plane."
   protocol          = "tcp"
   security_group_id = local.worker_security_group_id
@@ -290,7 +290,7 @@ resource "aws_security_group_rule" "workers_ingress_cluster" {
 }
 
 resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" {
-  count             = var.worker_create_security_group && var.create_eks ? var.worker_sg_ingress_from_port > 10250 ? 1 : 0 : 0
+  count             = var.worker_security_group_id == "" && var.create_eks ? var.worker_sg_ingress_from_port > 10250 ? 1 : 0 : 0
   description       = "Allow workers Kubelets to receive communication from the cluster control plane."
   protocol          = "tcp"
   security_group_id = local.worker_security_group_id
@@ -301,7 +301,7 @@ resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" {
 }
 
 resource "aws_security_group_rule" "workers_ingress_cluster_https" {
-  count             = var.worker_create_security_group && var.create_eks ? 1 : 0
+  count             = var.worker_security_group_id == "" && var.create_eks ? 1 : 0
   description       = "Allow pods running extension API servers on port 443 to receive communication from cluster control plane."
   protocol          = "tcp"
   security_group_id = local.worker_security_group_id
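One subtlety in the kubelet rule above: its `count` nests a second conditional, so the rule is created only when a worker security group is being created and the general cluster-to-worker ingress range starts above port 10250, which would leave the kubelet port uncovered. Here is a hedged rewrite of that logic into named locals, with the variables declared for self-containedness (the `1025` default is an assumption about the module's value):

```hcl
variable "create_eks" {
  type    = bool
  default = true
}

variable "worker_security_group_id" {
  type    = string
  default = ""
}

variable "worker_sg_ingress_from_port" {
  description = "First port opened by the general cluster-to-worker ingress rule."
  type        = number
  default     = 1025 # assumed module default
}

locals {
  # Equivalent to the nested form: a && b ? (c ? 1 : 0) : 0  ==  a && b && c ? 1 : 0
  create_worker_sg    = var.worker_security_group_id == "" && var.create_eks
  kubelet_not_covered = var.worker_sg_ingress_from_port > 10250
  kubelet_rule_count  = local.create_worker_sg && local.kubelet_not_covered ? 1 : 0
}
```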