fix: Disable creation of cluster security group rules that map to node security group when create_node_security_group = false (#2274)

* fix: Disable creation of cluster security group rules that map to node security group when `create_node_security_group` = `false`

* feat: Update Fargate example to run only Fargate and show disabling of both cluster and node security groups

* fix: Ensure CoreDNS changes are made ahead of install
Author: Bryant Biggs (committed by GitHub)
Date: 2022-10-14 09:16:57 -04:00
parent 8dc5ad4478
commit 28ccecefe2
5 changed files with 178 additions and 60 deletions
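In practice, the fix means a Fargate-only cluster can opt out of both module-managed security groups without the module still emitting cluster rules that point at a node security group it never created. A minimal sketch of such a module call (the `source` pin, cluster name, and profile are illustrative; the two `create_*` flags are the module inputs exercised by this change):

```hcl
module "eks" {
  source = "terraform-aws-modules/eks/aws"

  cluster_name    = "fargate-only-example" # illustrative
  cluster_version = "1.22"

  # Fargate pods use the cluster primary security group, so neither
  # module-managed group is needed; with this fix, disabling the node
  # security group also disables the cluster rules that reference it
  create_cluster_security_group = false
  create_node_security_group    = false

  fargate_profiles = {
    example = {
      selectors = [{ namespace = "default" }]
    }
  }
}
```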


@@ -26,7 +26,7 @@ By default, EKS creates a cluster primary security group that is created outside
   attach_cluster_primary_security_group = true # default is false
 
   node_security_group_tags = {
-    "kubernetes.io/cluster/<CLUSTER_NAME>" = "" # or any other value other than "owned"
+    "kubernetes.io/cluster/<CLUSTER_NAME>" = null # or any other value other than "owned"
   }
 ```
@@ -36,7 +36,7 @@ By default, EKS creates a cluster primary security group that is created outside
   attach_cluster_primary_security_group = true # default is false
 
   cluster_tags = {
-    "kubernetes.io/cluster/<CLUSTER_NAME>" = "" # or any other value other than "owned"
+    "kubernetes.io/cluster/<CLUSTER_NAME>" = null # or any other value other than "owned"
   }
 ```
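Both hunks make the same point: the value of the shared `kubernetes.io/cluster/<CLUSTER_NAME>` tag only needs to differ from `"owned"`, and `null` states that intent more plainly than an empty string. Put together, the README guidance reads roughly as follows (a sketch assembled from the surrounding README context; `<CLUSTER_NAME>` stays a placeholder):

```hcl
module "eks" {
  # ...

  attach_cluster_primary_security_group = true # default is false

  node_security_group_tags = {
    # Any value other than "owned" works; null avoids marking the node
    # security group as if it were the cluster-owned primary group
    "kubernetes.io/cluster/<CLUSTER_NAME>" = null
  }
}
```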


@@ -21,13 +21,16 @@ Note that this example may create resources which cost money. Run `terraform des
 |------|---------|
 | <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
 | <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.72 |
-| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 2.10 |
+| <a name="requirement_helm"></a> [helm](#requirement\_helm) | >= 2.7 |
+| <a name="requirement_null"></a> [null](#requirement\_null) | >= 3.0 |
 
 ## Providers
 
 | Name | Version |
 |------|---------|
 | <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.72 |
+| <a name="provider_helm"></a> [helm](#provider\_helm) | >= 2.7 |
+| <a name="provider_null"></a> [null](#provider\_null) | >= 3.0 |
 
 ## Modules
@@ -41,6 +44,11 @@ Note that this example may create resources which cost money. Run `terraform des
 | Name | Type |
 |------|------|
 | [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
+| [helm_release.coredns](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
+| [null_resource.modify_kube_dns](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [null_resource.remove_default_coredns_deployment](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
+| [aws_eks_addon_version.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_addon_version) | data source |
+| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
 
 ## Inputs


@@ -2,6 +2,20 @@ provider "aws" {
   region = local.region
 }
 
+provider "helm" {
+  kubernetes {
+    host                   = module.eks.cluster_endpoint
+    cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
+
+    exec {
+      api_version = "client.authentication.k8s.io/v1beta1"
+      command     = "aws"
+      # This requires the awscli to be installed locally where Terraform is executed
+      args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id]
+    }
+  }
+}
+
 locals {
   name            = "ex-${replace(basename(path.cwd), "_", "-")}"
   cluster_version = "1.22"
@@ -27,14 +41,8 @@ module "eks" {
   cluster_endpoint_public_access = true
 
   cluster_addons = {
-    # Note: https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html#fargate-gs-coredns
-    coredns = {
-      resolve_conflicts = "OVERWRITE"
-    }
     kube-proxy = {}
-    vpc-cni = {
-      resolve_conflicts = "OVERWRITE"
-    }
+    vpc-cni    = {}
   }
 
   cluster_encryption_config = [{
@@ -45,28 +53,13 @@ module "eks" {
   vpc_id     = module.vpc.vpc_id
   subnet_ids = module.vpc.private_subnets
 
-  # You require a node group to schedule coredns which is critical for running correctly internal DNS.
-  # If you want to use only fargate you must follow docs `(Optional) Update CoreDNS`
-  # available under https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html
-  eks_managed_node_groups = {
-    example = {
-      desired_size   = 1
-      instance_types = ["t3.large"]
-
-      labels = {
-        Example    = "managed_node_groups"
-        GithubRepo = "terraform-aws-eks"
-        GithubOrg  = "terraform-aws-modules"
-      }
-
-      tags = {
-        ExtraTag = "example"
-      }
-    }
-  }
+  # Fargate profiles use the cluster primary security group so these are not utilized
+  create_cluster_security_group = false
+  create_node_security_group    = false
 
   fargate_profiles = {
-    default = {
-      name = "default"
+    example = {
+      name = "example"
       selectors = [
         {
           namespace = "backend"
@@ -75,32 +68,9 @@ module "eks" {
           }
         },
         {
-          namespace = "default"
+          namespace = "app-*"
           labels = {
-            WorkerType = "fargate"
-          }
-        }
-      ]
-
-      tags = {
-        Owner = "default"
-      }
-
-      timeouts = {
-        create = "20m"
-        delete = "20m"
-      }
-    }
-
-    secondary = {
-      name = "secondary"
-      selectors = [
-        {
-          namespace = "default"
-          labels = {
-            Environment = "test"
-            GithubRepo  = "terraform-aws-eks"
-            GithubOrg   = "terraform-aws-modules"
+            Application = "app-wildcard"
           }
         }
       ]
@@ -111,12 +81,147 @@ module "eks" {
       tags = {
         Owner = "secondary"
       }
-
-      timeouts = {
-        create = "20m"
-        delete = "20m"
-      }
     }
+
+    kube_system = {
+      name = "kube-system"
+      selectors = [
+        { namespace = "kube-system" }
+      ]
+    }
   }
 
   tags = local.tags
 }
+################################################################################
+# Modify EKS CoreDNS Deployment
+################################################################################
+
+data "aws_eks_cluster_auth" "this" {
+  name = module.eks.cluster_id
+}
+
+locals {
+  kubeconfig = yamlencode({
+    apiVersion      = "v1"
+    kind            = "Config"
+    current-context = "terraform"
+    clusters = [{
+      name = module.eks.cluster_id
+      cluster = {
+        certificate-authority-data = module.eks.cluster_certificate_authority_data
+        server                     = module.eks.cluster_endpoint
+      }
+    }]
+    contexts = [{
+      name = "terraform"
+      context = {
+        cluster = module.eks.cluster_id
+        user    = "terraform"
+      }
+    }]
+    users = [{
+      name = "terraform"
+      user = {
+        token = data.aws_eks_cluster_auth.this.token
+      }
+    }]
+  })
+}
+
+# Separate resource so that this is only ever executed once
+resource "null_resource" "remove_default_coredns_deployment" {
+  triggers = {}
+
+  provisioner "local-exec" {
+    interpreter = ["/bin/bash", "-c"]
+    environment = {
+      KUBECONFIG = base64encode(local.kubeconfig)
+    }
+
+    # We are removing the deployment provided by the EKS service and replacing it
+    # with the self-managed CoreDNS Helm addon. However, we are maintaining the
+    # existing kube-dns service and annotating it for Helm to assume control
+    command = <<-EOT
+      kubectl --namespace kube-system delete deployment coredns --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+    EOT
+  }
+}
+
+resource "null_resource" "modify_kube_dns" {
+  triggers = {}
+
+  provisioner "local-exec" {
+    interpreter = ["/bin/bash", "-c"]
+    environment = {
+      KUBECONFIG = base64encode(local.kubeconfig)
+    }
+
+    # We are maintaining the existing kube-dns service and annotating it for Helm to assume control
+    command = <<-EOT
+      echo "Setting implicit dependency on ${module.eks.fargate_profiles["kube_system"].fargate_profile_pod_execution_role_arn}"
+      kubectl --namespace kube-system annotate --overwrite service kube-dns meta.helm.sh/release-name=coredns --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+      kubectl --namespace kube-system annotate --overwrite service kube-dns meta.helm.sh/release-namespace=kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+      kubectl --namespace kube-system label --overwrite service kube-dns app.kubernetes.io/managed-by=Helm --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+    EOT
+  }
+
+  depends_on = [
+    null_resource.remove_default_coredns_deployment
+  ]
+}
+
+################################################################################
+# CoreDNS Helm Chart (self-managed)
+################################################################################
+
+data "aws_eks_addon_version" "this" {
+  for_each = toset(["coredns"])
+
+  addon_name         = each.value
+  kubernetes_version = module.eks.cluster_version
+  most_recent        = true
+}
+
+resource "helm_release" "coredns" {
+  name             = "coredns"
+  namespace        = "kube-system"
+  create_namespace = false
+  description      = "CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services"
+  chart            = "coredns"
+  version          = "1.19.4"
+  repository       = "https://coredns.github.io/helm"
+
+  # For EKS image repositories see https://docs.aws.amazon.com/eks/latest/userguide/add-ons-images.html
+  values = [
+    <<-EOT
+      image:
+        repository: 602401143452.dkr.ecr.eu-west-1.amazonaws.com/eks/coredns
+        tag: ${data.aws_eks_addon_version.this["coredns"].version}
+      deployment:
+        name: coredns
+        annotations:
+          eks.amazonaws.com/compute-type: fargate
+      service:
+        name: kube-dns
+        annotations:
+          eks.amazonaws.com/compute-type: fargate
+      podAnnotations:
+        eks.amazonaws.com/compute-type: fargate
+    EOT
+  ]
+
+  depends_on = [
+    # Need to ensure the CoreDNS updates are performed before provisioning
+    null_resource.modify_kube_dns
+  ]
+}
+
 ################################################################################
 # Supporting Resources
 ################################################################################
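One design note on the two `null_resource`s above: `triggers = {}` means each provisioner runs exactly once per state, which matches the "only ever executed once" comment. If the cluster could be destroyed and recreated within the same state, a variation (hypothetical, not part of this commit) would key the triggers to the cluster identity so the kubectl steps re-run against the new cluster:

```hcl
resource "null_resource" "remove_default_coredns_deployment" {
  # Hypothetical variation: re-run whenever the cluster is replaced,
  # rather than only once per state as in the commit above
  triggers = {
    cluster_id = module.eks.cluster_id
  }

  # provisioner "local-exec" { ... } unchanged from the diff above
}
```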


@@ -6,9 +6,13 @@ terraform {
       source  = "hashicorp/aws"
       version = ">= 3.72"
     }
-    kubernetes = {
-      source  = "hashicorp/kubernetes"
-      version = ">= 2.10"
+    helm = {
+      source  = "hashicorp/helm"
+      version = ">= 2.7"
+    }
+    null = {
+      source  = "hashicorp/null"
+      version = ">= 3.0"
     }
   }
 }


@@ -128,7 +128,8 @@ locals {
   cluster_security_group_id = local.create_cluster_sg ? aws_security_group.cluster[0].id : var.cluster_security_group_id
 
-  cluster_security_group_rules = {
+  # Do not add rules to node security group if the module is not creating it
+  cluster_security_group_rules = local.create_node_sg ? {
     ingress_nodes_443 = {
       description                = "Node groups to cluster API"
       protocol                   = "tcp"
@@ -153,7 +154,7 @@ locals {
       type                       = "egress"
       source_node_security_group = true
     }
-  }
+  } : {}
 }
 
 resource "aws_security_group" "cluster" {