feat: Add support for managing aws-auth configmap using new kubernetes_config_map_v1_data resource (#1999)

Bryant Biggs, committed by GitHub on 2022-04-09 03:15:46 -04:00
parent 3ff17205a4
commit da3d54cde7
16 changed files with 263 additions and 208 deletions

View File

@@ -180,6 +180,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple
|------|---------|
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.72 |
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 2.10 |
| <a name="requirement_tls"></a> [tls](#requirement\_tls) | >= 3.0 |
## Providers
@@ -187,6 +188,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple
| Name | Version |
|------|---------|
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.72 |
| <a name="provider_kubernetes"></a> [kubernetes](#provider\_kubernetes) | >= 2.10 |
| <a name="provider_tls"></a> [tls](#provider\_tls) | >= 3.0 |
## Modules
@@ -216,6 +218,8 @@ We are grateful to the community for contributing bugfixes and improvements! Ple
| [aws_security_group.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_security_group_rule.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [aws_security_group_rule.node](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group_rule) | resource |
| [kubernetes_config_map.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map) | resource |
| [kubernetes_config_map_v1_data.aws_auth](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/config_map_v1_data) | resource |
| [aws_iam_policy_document.assume_role_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.cni_ipv6_policy](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source |
@@ -226,6 +230,12 @@ We are grateful to the community for contributing bugfixes and improvements! Ple
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| <a name="input_attach_cluster_encryption_policy"></a> [attach\_cluster\_encryption\_policy](#input\_attach\_cluster\_encryption\_policy) | Indicates whether or not to attach an additional policy for the cluster IAM role to utilize the encryption key provided | `bool` | `true` | no |
| <a name="input_aws_auth_accounts"></a> [aws\_auth\_accounts](#input\_aws\_auth\_accounts) | List of account maps to add to the aws-auth configmap | `list(any)` | `[]` | no |
| <a name="input_aws_auth_fargate_profile_pod_execution_role_arns"></a> [aws\_auth\_fargate\_profile\_pod\_execution\_role\_arns](#input\_aws\_auth\_fargate\_profile\_pod\_execution\_role\_arns) | List of Fargate profile pod execution role ARNs to add to the aws-auth configmap | `list(string)` | `[]` | no |
| <a name="input_aws_auth_node_iam_role_arns_non_windows"></a> [aws\_auth\_node\_iam\_role\_arns\_non\_windows](#input\_aws\_auth\_node\_iam\_role\_arns\_non\_windows) | List of non-Windows based node IAM role ARNs to add to the aws-auth configmap | `list(string)` | `[]` | no |
| <a name="input_aws_auth_node_iam_role_arns_windows"></a> [aws\_auth\_node\_iam\_role\_arns\_windows](#input\_aws\_auth\_node\_iam\_role\_arns\_windows) | List of Windows based node IAM role ARNs to add to the aws-auth configmap | `list(string)` | `[]` | no |
| <a name="input_aws_auth_roles"></a> [aws\_auth\_roles](#input\_aws\_auth\_roles) | List of role maps to add to the aws-auth configmap | `list(any)` | `[]` | no |
| <a name="input_aws_auth_users"></a> [aws\_auth\_users](#input\_aws\_auth\_users) | List of user maps to add to the aws-auth configmap | `list(any)` | `[]` | no |
| <a name="input_cloudwatch_log_group_kms_key_id"></a> [cloudwatch\_log\_group\_kms\_key\_id](#input\_cloudwatch\_log\_group\_kms\_key\_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `null` | no |
| <a name="input_cloudwatch_log_group_retention_in_days"></a> [cloudwatch\_log\_group\_retention\_in\_days](#input\_cloudwatch\_log\_group\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days | `number` | `90` | no |
| <a name="input_cluster_additional_security_group_ids"></a> [cluster\_additional\_security\_group\_ids](#input\_cluster\_additional\_security\_group\_ids) | List of additional, externally created security group IDs to attach to the cluster control plane | `list(string)` | `[]` | no |
@@ -255,6 +265,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple
| <a name="input_cluster_timeouts"></a> [cluster\_timeouts](#input\_cluster\_timeouts) | Create, update, and delete timeout configurations for the cluster | `map(string)` | `{}` | no |
| <a name="input_cluster_version"></a> [cluster\_version](#input\_cluster\_version) | Kubernetes `<major>.<minor>` version to use for the EKS cluster (i.e.: `1.21`) | `string` | `null` | no |
| <a name="input_create"></a> [create](#input\_create) | Controls if EKS resources should be created (affects nearly all resources) | `bool` | `true` | no |
| <a name="input_create_aws_auth_configmap"></a> [create\_aws\_auth\_configmap](#input\_create\_aws\_auth\_configmap) | Determines whether to create the aws-auth configmap. NOTE - this is only intended for scenarios where the confgimap does not exist (i.e. - when using only self-managed node groups). Most users should use `manage_aws_auth_configmap` | `bool` | `false` | no |
| <a name="input_create_cloudwatch_log_group"></a> [create\_cloudwatch\_log\_group](#input\_create\_cloudwatch\_log\_group) | Determines whether a log group is created by this module for the cluster logs. If not, AWS will automatically create one if logging is enabled | `bool` | `true` | no |
| <a name="input_create_cluster_security_group"></a> [create\_cluster\_security\_group](#input\_create\_cluster\_security\_group) | Determines if a security group is created for the cluster or use the existing `cluster_security_group_id` | `bool` | `true` | no |
| <a name="input_create_cni_ipv6_iam_policy"></a> [create\_cni\_ipv6\_iam\_policy](#input\_create\_cni\_ipv6\_iam\_policy) | Determines whether to create an [`AmazonEKS_CNI_IPv6_Policy`](https://docs.aws.amazon.com/eks/latest/userguide/cni-iam-role.html#cni-iam-role-create-ipv6-policy) | `bool` | `false` | no |
@@ -274,6 +285,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple
| <a name="input_iam_role_permissions_boundary"></a> [iam\_role\_permissions\_boundary](#input\_iam\_role\_permissions\_boundary) | ARN of the policy that is used to set the permissions boundary for the IAM role | `string` | `null` | no |
| <a name="input_iam_role_tags"></a> [iam\_role\_tags](#input\_iam\_role\_tags) | A map of additional tags to add to the IAM role created | `map(string)` | `{}` | no |
| <a name="input_iam_role_use_name_prefix"></a> [iam\_role\_use\_name\_prefix](#input\_iam\_role\_use\_name\_prefix) | Determines whether the IAM role name (`iam_role_name`) is used as a prefix | `string` | `true` | no |
| <a name="input_manage_aws_auth_configmap"></a> [manage\_aws\_auth\_configmap](#input\_manage\_aws\_auth\_configmap) | Determines whether to manage the aws-auth configmap | `bool` | `false` | no |
| <a name="input_node_security_group_additional_rules"></a> [node\_security\_group\_additional\_rules](#input\_node\_security\_group\_additional\_rules) | List of additional security group rules to add to the node security group created. Set `source_cluster_security_group = true` inside rules to set the `cluster_security_group` as source | `any` | `{}` | no |
| <a name="input_node_security_group_description"></a> [node\_security\_group\_description](#input\_node\_security\_group\_description) | Description of the node security group created | `string` | `"EKS node shared security group"` | no |
| <a name="input_node_security_group_id"></a> [node\_security\_group\_id](#input\_node\_security\_group\_id) | ID of an existing security group to attach to the node groups created | `string` | `""` | no |
@@ -293,7 +305,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple
| Name | Description |
|------|-------------|
| <a name="output_aws_auth_configmap_yaml"></a> [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
| <a name="output_aws_auth_configmap_yaml"></a> [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | [DEPRECATED - use `var.manage_aws_auth_configmap`] Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
| <a name="output_cloudwatch_log_group_arn"></a> [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
| <a name="output_cloudwatch_log_group_name"></a> [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
| <a name="output_cluster_addons"></a> [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |

View File

@@ -35,14 +35,13 @@ Note that this example may create resources which cost money. Run `terraform des
|------|---------|
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.72 |
| <a name="requirement_null"></a> [null](#requirement\_null) | >= 3.0 |
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 2.10 |
## Providers
| Name | Version |
|------|---------|
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.72 |
| <a name="provider_null"></a> [null](#provider\_null) | >= 3.0 |
## Modules
@@ -64,8 +63,6 @@ Note that this example may create resources which cost money. Run `terraform des
|------|------|
| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [null_resource.patch](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
## Inputs

View File

@@ -8,6 +8,18 @@ provider "aws" {
}
}
provider "kubernetes" {
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
exec {
api_version = "client.authentication.k8s.io/v1alpha1"
command = "aws"
# This requires the awscli to be installed locally where Terraform is executed
args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id]
}
}
locals {
name = "ex-${replace(basename(path.cwd), "_", "-")}"
region = "eu-west-1"
@@ -180,6 +192,43 @@ module "eks" {
}
}
# aws-auth configmap
manage_aws_auth_configmap = true
aws_auth_node_iam_role_arns_non_windows = [
module.eks_managed_node_group.iam_role_arn,
module.self_managed_node_group.iam_role_arn,
]
aws_auth_fargate_profile_pod_execution_role_arns = [
module.fargate_profile.fargate_profile_pod_execution_role_arn
]
aws_auth_roles = [
{
rolearn = "arn:aws:iam::66666666666:role/role1"
username = "role1"
groups = ["system:masters"]
},
]
aws_auth_users = [
{
userarn = "arn:aws:iam::66666666666:user/user1"
username = "user1"
groups = ["system:masters"]
},
{
userarn = "arn:aws:iam::66666666666:user/user2"
username = "user2"
groups = ["system:masters"]
},
]
aws_auth_accounts = [
"777777777777",
"888888888888",
]
tags = local.tags
}
@@ -269,80 +318,6 @@ module "disabled_self_managed_node_group" {
create = false
}
################################################################################
# aws-auth configmap
# Only EKS managed node groups automatically add roles to aws-auth configmap
# so we need to ensure fargate profiles and self-managed node roles are added
################################################################################
data "aws_eks_cluster_auth" "this" {
name = module.eks.cluster_id
}
locals {
kubeconfig = yamlencode({
apiVersion = "v1"
kind = "Config"
current-context = "terraform"
clusters = [{
name = module.eks.cluster_id
cluster = {
certificate-authority-data = module.eks.cluster_certificate_authority_data
server = module.eks.cluster_endpoint
}
}]
contexts = [{
name = "terraform"
context = {
cluster = module.eks.cluster_id
user = "terraform"
}
}]
users = [{
name = "terraform"
user = {
token = data.aws_eks_cluster_auth.this.token
}
}]
})
# we have to combine the configmap created by the eks module with the externally created node group/profile sub-modules
aws_auth_configmap_yaml = <<-EOT
${chomp(module.eks.aws_auth_configmap_yaml)}
- rolearn: ${module.eks_managed_node_group.iam_role_arn}
username: system:node:{{EC2PrivateDNSName}}
groups:
- system:bootstrappers
- system:nodes
- rolearn: ${module.self_managed_node_group.iam_role_arn}
username: system:node:{{EC2PrivateDNSName}}
groups:
- system:bootstrappers
- system:nodes
- rolearn: ${module.fargate_profile.fargate_profile_pod_execution_role_arn}
username: system:node:{{SessionName}}
groups:
- system:bootstrappers
- system:nodes
- system:node-proxier
EOT
}
resource "null_resource" "patch" {
triggers = {
kubeconfig = base64encode(local.kubeconfig)
cmd_patch = "kubectl patch configmap/aws-auth --patch \"${local.aws_auth_configmap_yaml}\" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)"
}
provisioner "local-exec" {
interpreter = ["/bin/bash", "-c"]
environment = {
KUBECONFIG = self.triggers.kubeconfig
}
command = self.triggers.cmd_patch
}
}
################################################################################
# Supporting resources
################################################################################

View File

@@ -6,9 +6,9 @@ terraform {
source = "hashicorp/aws"
version = ">= 3.72"
}
null = {
source = "hashicorp/null"
version = ">= 3.0"
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.10"
}
}
}

View File

@@ -58,7 +58,7 @@ Note that this example may create resources which cost money. Run `terraform des
|------|---------|
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.72 |
| <a name="requirement_null"></a> [null](#requirement\_null) | >= 3.0 |
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 2.10 |
| <a name="requirement_tls"></a> [tls](#requirement\_tls) | >= 2.2 |
## Providers
@@ -66,7 +66,6 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.72 |
| <a name="provider_null"></a> [null](#provider\_null) | >= 3.0 |
| <a name="provider_tls"></a> [tls](#provider\_tls) | >= 2.2 |
## Modules
@@ -89,13 +88,11 @@ Note that this example may create resources which cost money. Run `terraform des
| [aws_launch_template.external](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_security_group.remote_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [null_resource.patch](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [tls_private_key.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource |
| [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_ami.eks_default_arm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_ami.eks_default_bottlerocket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
| [aws_iam_policy_document.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
## Inputs

View File

@@ -2,6 +2,18 @@ provider "aws" {
region = local.region
}
provider "kubernetes" {
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
exec {
api_version = "client.authentication.k8s.io/v1alpha1"
command = "aws"
# This requires the awscli to be installed locally where Terraform is executed
args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id]
}
}
locals {
name = "ex-${replace(basename(path.cwd), "_", "-")}"
cluster_version = "1.22"
@@ -58,6 +70,8 @@ module "eks" {
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
manage_aws_auth_configmap = true
# Extend cluster security group rules
cluster_security_group_additional_rules = {
egress_nodes_ephemeral_ports_tcp = {
@@ -340,59 +354,6 @@ resource "aws_iam_role_policy_attachment" "additional" {
role = each.value.iam_role_name
}
################################################################################
# aws-auth configmap
# Only EKS managed node groups automatically add roles to aws-auth configmap
# so we need to ensure fargate profiles and self-managed node roles are added
################################################################################
data "aws_eks_cluster_auth" "this" {
name = module.eks.cluster_id
}
locals {
kubeconfig = yamlencode({
apiVersion = "v1"
kind = "Config"
current-context = "terraform"
clusters = [{
name = module.eks.cluster_id
cluster = {
certificate-authority-data = module.eks.cluster_certificate_authority_data
server = module.eks.cluster_endpoint
}
}]
contexts = [{
name = "terraform"
context = {
cluster = module.eks.cluster_id
user = "terraform"
}
}]
users = [{
name = "terraform"
user = {
token = data.aws_eks_cluster_auth.this.token
}
}]
})
}
resource "null_resource" "patch" {
triggers = {
kubeconfig = base64encode(local.kubeconfig)
cmd_patch = "kubectl patch configmap/aws-auth --patch \"${module.eks.aws_auth_configmap_yaml}\" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)"
}
provisioner "local-exec" {
interpreter = ["/bin/bash", "-c"]
environment = {
KUBECONFIG = self.triggers.kubeconfig
}
command = self.triggers.cmd_patch
}
}
################################################################################
# Supporting Resources
################################################################################

View File

@@ -6,13 +6,13 @@ terraform {
source = "hashicorp/aws"
version = ">= 3.72"
}
null = {
source = "hashicorp/null"
version = ">= 3.0"
}
tls = {
source = "hashicorp/tls"
version = ">= 2.2"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.10"
}
}
}

View File

@@ -21,6 +21,7 @@ Note that this example may create resources which cost money. Run `terraform des
|------|---------|
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.72 |
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 2.10 |
## Providers

View File

@@ -6,5 +6,9 @@ terraform {
source = "hashicorp/aws"
version = ">= 3.72"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.10"
}
}
}

View File

@@ -27,7 +27,7 @@ Note that this example may create resources which cost money. Run `terraform des
|------|---------|
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.72 |
| <a name="requirement_null"></a> [null](#requirement\_null) | >= 3.0 |
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 2.10 |
| <a name="requirement_tls"></a> [tls](#requirement\_tls) | >= 2.2 |
## Providers
@@ -35,7 +35,6 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.72 |
| <a name="provider_null"></a> [null](#provider\_null) | >= 3.0 |
| <a name="provider_tls"></a> [tls](#provider\_tls) | >= 2.2 |
## Modules
@@ -54,12 +53,10 @@ Note that this example may create resources which cost money. Run `terraform des
| [aws_kms_key.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [null_resource.apply](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [tls_private_key.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource |
| [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_ami.eks_default_bottlerocket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
| [aws_iam_policy_document.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
## Inputs

View File

@@ -2,6 +2,18 @@ provider "aws" {
region = local.region
}
provider "kubernetes" {
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
exec {
api_version = "client.authentication.k8s.io/v1alpha1"
command = "aws"
# This requires the awscli to be installed locally where Terraform is executed
args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id]
}
}
locals {
name = "ex-${replace(basename(path.cwd), "_", "-")}"
cluster_version = "1.22"
@@ -46,6 +58,10 @@ module "eks" {
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
# Self-managed node groups will not automatically create the aws-auth configmap, so we need to create and manage it here
create_aws_auth_configmap = true
manage_aws_auth_configmap = true
# Extend cluster security group rules
cluster_security_group_additional_rules = {
egress_nodes_ephemeral_ports_tcp = {
@@ -291,62 +307,6 @@ module "eks" {
tags = local.tags
}
################################################################################
# aws-auth configmap
# Only EKS managed node groups automatically add roles to aws-auth configmap
# so we need to ensure fargate profiles and self-managed node roles are added
################################################################################
data "aws_eks_cluster_auth" "this" {
name = module.eks.cluster_id
}
locals {
kubeconfig = yamlencode({
apiVersion = "v1"
kind = "Config"
current-context = "terraform"
clusters = [{
name = module.eks.cluster_id
cluster = {
certificate-authority-data = module.eks.cluster_certificate_authority_data
server = module.eks.cluster_endpoint
}
}]
contexts = [{
name = "terraform"
context = {
cluster = module.eks.cluster_id
user = "terraform"
}
}]
users = [{
name = "terraform"
user = {
token = data.aws_eks_cluster_auth.this.token
}
}]
})
}
resource "null_resource" "apply" {
triggers = {
kubeconfig = base64encode(local.kubeconfig)
cmd_patch = <<-EOT
kubectl create configmap aws-auth -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
kubectl patch configmap/aws-auth --patch "${module.eks.aws_auth_configmap_yaml}" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
EOT
}
provisioner "local-exec" {
interpreter = ["/bin/bash", "-c"]
environment = {
KUBECONFIG = self.triggers.kubeconfig
}
command = self.triggers.cmd_patch
}
}
################################################################################
# Supporting Resources
################################################################################
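
Worth calling out in this example: EKS does not create the aws-auth configmap for self-managed node groups, so a cluster built solely from them must create the configmap before managing it. A minimal sketch of that flag combination (all other configuration elided):

module "eks" {
  source = "terraform-aws-modules/eks/aws"

  # ... existing cluster configuration ...

  # The configmap does not exist yet, so create it first, then manage its data
  create_aws_auth_configmap = true
  manage_aws_auth_configmap = true
}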

View File

@@ -6,13 +6,13 @@ terraform {
source = "hashicorp/aws"
version = ">= 3.72"
}
null = {
source = "hashicorp/null"
version = ">= 3.0"
}
tls = {
source = "hashicorp/tls"
version = ">= 2.2"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.10"
}
}
}

main.tf
View File

@@ -347,3 +347,98 @@ resource "aws_eks_identity_provider_config" "this" {
tags = var.tags
}
################################################################################
# aws-auth configmap
################################################################################
locals {
node_iam_role_arns_non_windows = compact(concat(
[for group in module.eks_managed_node_group : group.iam_role_arn],
[for group in module.self_managed_node_group : group.iam_role_arn if group.platform != "windows"],
var.aws_auth_node_iam_role_arns_non_windows,
))
node_iam_role_arns_windows = compact(concat(
[for group in module.self_managed_node_group : group.iam_role_arn if group.platform == "windows"],
var.aws_auth_node_iam_role_arns_windows,
))
fargate_profile_pod_execution_role_arns = compact(concat(
[for group in module.fargate_profile : group.fargate_profile_pod_execution_role_arn],
var.aws_auth_fargate_profile_pod_execution_role_arns,
))
aws_auth_configmap_data = {
mapRoles = yamlencode(concat(
[for role_arn in local.node_iam_role_arns_non_windows : {
rolearn = role_arn
username = "system:node:{{EC2PrivateDNSName}}"
groups = [
"system:bootstrappers",
"system:nodes",
]
}
],
[for role_arn in local.node_iam_role_arns_windows : {
rolearn = role_arn
username = "system:node:{{EC2PrivateDNSName}}"
groups = [
"eks:kube-proxy-windows",
"system:bootstrappers",
"system:nodes",
]
}
],
# Fargate profile
[for role_arn in local.fargate_profile_pod_execution_role_arns : {
rolearn = role_arn
username = "system:node:{{SessionName}}"
groups = [
"system:bootstrappers",
"system:nodes",
"system:node-proxier",
]
}
],
var.aws_auth_roles
))
mapUsers = yamlencode(var.aws_auth_users)
mapAccounts = yamlencode(var.aws_auth_accounts)
}
}
resource "kubernetes_config_map" "aws_auth" {
count = var.create && var.create_aws_auth_configmap ? 1 : 0
metadata {
name = "aws-auth"
namespace = "kube-system"
}
data = local.aws_auth_configmap_data
lifecycle {
# We are ignoring the data here since we will manage it with the resource below
# This is only intended to be used in scenarios where the configmap does not exist
ignore_changes = [data]
}
}
resource "kubernetes_config_map_v1_data" "aws_auth" {
count = var.create && var.manage_aws_auth_configmap ? 1 : 0
force = true
metadata {
name = "aws-auth"
namespace = "kube-system"
}
data = local.aws_auth_configmap_data
depends_on = [
# Required for instances where the configmap does not exist yet, to avoid a race condition
kubernetes_config_map.aws_auth,
]
}
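
The `mapRoles`/`mapUsers`/`mapAccounts` values above are YAML documents encoded as strings, which is the shape the aws-auth configmap expects. An illustrative snippet (not part of the module; the ARN is hypothetical) showing the string that `yamlencode` produces for `data.mapRoles`:

# Illustrative only - renders the YAML string stored under data.mapRoles
output "aws_auth_map_roles_example" {
  value = yamlencode([
    {
      rolearn  = "arn:aws:iam::111122223333:role/example-node" # hypothetical ARN
      username = "system:node:{{EC2PrivateDNSName}}"
      groups   = ["system:bootstrappers", "system:nodes"]
    }
  ])
}

Note that `kubernetes_config_map_v1_data` manages only the `data` fields it is given, and `force = true` lets Terraform take ownership of fields that were written by EKS when the configmap was originally created.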

View File

@@ -182,7 +182,7 @@ output "self_managed_node_groups_autoscaling_group_names" {
################################################################################
output "aws_auth_configmap_yaml" {
description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
description = "[DEPRECATED - use `var.manage_aws_auth_configmap`] Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
value = templatefile("${path.module}/templates/aws_auth_cm.tpl",
{
eks_managed_role_arns = [for group in module.eks_managed_node_group : group.iam_role_arn]

View File

@@ -428,3 +428,55 @@ variable "putin_khuylo" {
type = bool
default = true
}
################################################################################
# aws-auth configmap
################################################################################
variable "manage_aws_auth_configmap" {
description = "Determines whether to manage the aws-auth configmap"
type = bool
default = false
}
variable "create_aws_auth_configmap" {
description = "Determines whether to create the aws-auth configmap. NOTE - this is only intended for scenarios where the confgimap does not exist (i.e. - when using only self-managed node groups). Most users should use `manage_aws_auth_configmap`"
type = bool
default = false
}
variable "aws_auth_node_iam_role_arns_non_windows" {
description = "List of non-Windows based node IAM role ARNs to add to the aws-auth configmap"
type = list(string)
default = []
}
variable "aws_auth_node_iam_role_arns_windows" {
description = "List of Windows based node IAM role ARNs to add to the aws-auth configmap"
type = list(string)
default = []
}
variable "aws_auth_fargate_profile_pod_execution_role_arns" {
description = "List of Fargate profile pod execution role ARNs to add to the aws-auth configmap"
type = list(string)
default = []
}
variable "aws_auth_roles" {
description = "List of role maps to add to the aws-auth configmap"
type = list(any)
default = []
}
variable "aws_auth_users" {
description = "List of user maps to add to the aws-auth configmap"
type = list(any)
default = []
}
variable "aws_auth_accounts" {
description = "List of account maps to add to the aws-auth configmap"
type = list(any)
default = []
}
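
Since these role/user/account variables are typed `list(any)`, Terraform does not enforce their entry shape; the shapes consumed by `local.aws_auth_configmap_data` look like the following (ARNs and names are hypothetical):

aws_auth_roles = [{
  rolearn  = "arn:aws:iam::111122223333:role/role1" # hypothetical
  username = "role1"
  groups   = ["system:masters"]
}]

aws_auth_users = [{
  userarn  = "arn:aws:iam::111122223333:user/user1" # hypothetical
  username = "user1"
  groups   = ["system:masters"]
}]

# Despite the list(any) type, account entries are plain account ID strings
aws_auth_accounts = ["111122223333"]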

View File

@@ -10,5 +10,9 @@ terraform {
source = "hashicorp/tls"
version = ">= 3.0"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.10"
}
}
}