fix: Correct cluster access entry to create multiple policy associations per access entry (#2892)

Bryant Biggs
2024-02-03 06:47:15 -05:00
committed by GitHub
parent a68aac6cf2
commit 417791374c
4 changed files with 104 additions and 10 deletions


@@ -14,10 +14,12 @@ Terraform module which creates AWS EKS (Kubernetes) resources
- [Upgrade to v17.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-17.0.md)
- [Upgrade to v18.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-18.0.md)
- [Upgrade to v19.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-19.0.md)
- [Upgrade to v20.x](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/UPGRADE-20.0.md)
### External Documentation
Please note that we strive to provide a comprehensive suite of documentation for __*configuring and utilizing the module(s)*__ defined here; documentation regarding EKS (including EKS managed node groups, self-managed node groups, and Fargate profiles) and/or Kubernetes features, usage, etc. is better left to their respective sources:
- [AWS EKS Documentation](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html)
- [Kubernetes Documentation](https://kubernetes.io/docs/home/)
@@ -72,6 +74,28 @@ module "eks" {
    }
  }

  # Cluster access entry
  # To add the current caller identity as an administrator
  enable_cluster_creator_admin_permissions = true

  access_entries = {
    # One access entry with a policy associated
    example = {
      kubernetes_groups = []
      principal_arn     = "arn:aws:iam::123456789012:role/something"

      policy_associations = {
        example = {
          policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy"
          access_scope = {
            namespaces = ["default"]
            type       = "namespace"
          }
        }
      }
    }
  }

  tags = {
    Environment = "dev"
    Terraform   = "true"
@@ -79,6 +103,16 @@ module "eks" {
}
```
### Cluster Access Entry
When enabling `authentication_mode = "API_AND_CONFIG_MAP"`, EKS will automatically create an access entry for the IAM role(s) used by EKS managed node group(s) and Fargate profile(s); no additional action is required by users. For self-managed node groups and the Karpenter sub-module, this project adds the access entry on behalf of users, so again no additional action is required.
On clusters that were created prior to cluster access management (CAM) support, there will be an existing access entry for the cluster creator. This was previously not visible when using the `aws-auth` ConfigMap, but it becomes visible once cluster access management is enabled.
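As a minimal sketch of this behavior (a v20.x module block with other required inputs such as VPC/subnet IDs omitted; all names and versions are placeholders):

```hcl
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 20.0"

  cluster_name    = "example"
  cluster_version = "1.29"

  # Keeps the aws-auth ConfigMap working while access entries are adopted
  authentication_mode = "API_AND_CONFIG_MAP"

  eks_managed_node_groups = {
    # No access entry needs to be declared for this node group's IAM role;
    # EKS creates its EC2_LINUX access entry automatically
    example = {}
  }
}
```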
### Bootstrap Cluster Creator Admin Permissions
Setting `bootstrap_cluster_creator_admin_permissions` is a one-time operation when the cluster is created; it cannot be modified later through the EKS API. This project hardcodes it to `false`. Users who want the same functionality can achieve it through an access entry, which can be enabled or disabled at any time of their choosing via the `enable_cluster_creator_admin_permissions` variable.
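A minimal sketch of that toggle (assuming an existing module block elsewhere in the configuration):

```hcl
module "eks" {
  # ... other configuration ...

  # The EKS bootstrap flag is hardcoded to false by the module; this variable
  # instead grants the current caller identity cluster-admin access via an
  # access entry, and can be flipped off later without recreating the cluster
  enable_cluster_creator_admin_permissions = true
}
```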
## Examples
- [EKS Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_managed_node_group): EKS Cluster using EKS managed node groups


@@ -55,6 +55,7 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
| [aws_iam_policy.node_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [aws_iam_role.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role) | resource |
| [aws_security_group.remote_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_ami.eks_default_arm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |


@@ -35,6 +35,8 @@ module "eks" {
  cluster_ip_family          = "ipv6"
  create_cni_ipv6_iam_policy = true

  enable_cluster_creator_admin_permissions = true

  cluster_addons = {
    coredns = {
      most_recent = true
@@ -241,6 +243,46 @@ module "eks" {
    }
  }

  access_entries = {
    # One access entry with a policy associated
    ex-single = {
      kubernetes_groups = []
      principal_arn     = aws_iam_role.this["single"].arn

      policy_associations = {
        single = {
          policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy"
          access_scope = {
            namespaces = ["default"]
            type       = "namespace"
          }
        }
      }
    }

    # Example of adding multiple policies to a single access entry
    ex-multiple = {
      kubernetes_groups = []
      principal_arn     = aws_iam_role.this["multiple"].arn

      policy_associations = {
        ex-one = {
          policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSEditPolicy"
          access_scope = {
            namespaces = ["default"]
            type       = "namespace"
          }
        }
        ex-two = {
          policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy"
          access_scope = {
            type = "cluster"
          }
        }
      }
    }
  }

  tags = local.tags
}
@@ -436,3 +478,26 @@ data "aws_ami" "eks_default_bottlerocket" {
values = ["bottlerocket-aws-k8s-${local.cluster_version}-x86_64-*"]
}
}
resource "aws_iam_role" "this" {
for_each = toset(["single", "multiple"])
name = "ex-${each.key}"
# Just using for this example
assume_role_policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = "sts:AssumeRole"
Effect = "Allow"
Sid = "Example"
Principal = {
Service = "ec2.amazonaws.com"
}
},
]
})
tags = local.tags
}

main.tf

@@ -168,28 +168,22 @@ locals {
      for pol_key, pol_val in lookup(entry_val, "policy_associations", {}) :
      merge(
        {
-         principal_arn     = entry_val.principal_arn
-         kubernetes_groups = lookup(entry_val, "kubernetes_groups", [])
-         tags              = lookup(entry_val, "tags", {})
-         type              = lookup(entry_val, "type", "STANDARD")
-         user_name         = lookup(entry_val, "user_name", null)
+         principal_arn = entry_val.principal_arn
+         entry_key     = entry_key
+         pol_key       = pol_key
        },
        { for k, v in {
          association_policy_arn              = pol_val.policy_arn
          association_access_scope_type       = pol_val.access_scope.type
          association_access_scope_namespaces = lookup(pol_val.access_scope, "namespaces", [])
        } : k => v if !contains(["EC2_LINUX", "EC2_WINDOWS", "FARGATE_LINUX"], lookup(entry_val, "type", "STANDARD")) },
-       {
-         entry_key = entry_key
-         pol_key   = pol_key
-       }
      )
    ]
  ])
}

resource "aws_eks_access_entry" "this" {
- for_each = { for k, v in local.flattened_access_entries : "${v.entry_key}_${v.pol_key}" => v if local.create }
+ for_each = { for k, v in local.merged_access_entries : k => v if local.create }

  cluster_name      = aws_eks_cluster.this[0].name
  kubernetes_groups = try(each.value.kubernetes_groups, [])
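
For context, the flattened collection above is presumably consumed by a companion `aws_eks_access_policy_association` resource keyed per entry/policy pair (that resource is not part of this hunk). A sketch of that shape, not necessarily the module's exact code:

```hcl
# Sketch: one association resource per entry/policy pair, which is what lets a
# single access entry carry several policy associations
resource "aws_eks_access_policy_association" "this" {
  for_each = { for k, v in local.flattened_access_entries : "${v.entry_key}_${v.pol_key}" => v if local.create }

  access_scope {
    namespaces = try(each.value.association_access_scope_namespaces, [])
    type       = each.value.association_access_scope_type
  }

  cluster_name  = aws_eks_cluster.this[0].name
  policy_arn    = each.value.association_policy_arn
  principal_arn = each.value.principal_arn

  depends_on = [
    aws_eks_access_entry.this,
  ]
}
```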