Use kubernetes provider to manage aws auth (#355)

This commit changes the way aws auth is managed. Previously, a local file
was used to generate the template and a null resource was used to apply it.
This has now been switched to the Terraform Kubernetes provider.
This commit is contained in:
Stijn De Haes
2019-11-28 10:25:13 +01:00
committed by Max Williams
parent b69c8fb759
commit 9363662574
10 changed files with 108 additions and 82 deletions

View File

@@ -7,7 +7,7 @@ project adheres to [Semantic Versioning](http://semver.org/).
## Next release ## Next release
## [[v7.?.?](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v6.1.0...HEAD)] - 2019-??-??] ## [[v7.?.?](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v7.0.0...HEAD)] - 2019-??-??]
### Added ### Added
@@ -16,6 +16,7 @@ project adheres to [Semantic Versioning](http://semver.org/).
### Changed ### Changed
- Updated instance_profile_names and instance_profile_arns outputs to also consider launch template as well as asg (by @ankitwal) - Updated instance_profile_names and instance_profile_arns outputs to also consider launch template as well as asg (by @ankitwal)
- **Breaking:** Configure the aws-auth configmap using the terraform kubernetes providers. Read the [docs](docs/upgrading-to-aws-auth-kubernetes-provider.md) for more info (by @sdehaes)
- Updated application of `aws-auth` configmap to create `kube_config.yaml` and `aws_auth_configmap.yaml` in sequence (and not parallel) to `kubectl apply` (by @knittingdev) - Updated application of `aws-auth` configmap to create `kube_config.yaml` and `aws_auth_configmap.yaml` in sequence (and not parallel) to `kubectl apply` (by @knittingdev)
- Exit with error code when `aws-auth` configmap is unable to be updated (by @knittingdev) - Exit with error code when `aws-auth` configmap is unable to be updated (by @knittingdev)
- Fix deprecated interpolation-only expression (by @angelabad) - Fix deprecated interpolation-only expression (by @angelabad)

View File

@@ -18,9 +18,29 @@ Read the [AWS docs on EKS to get connected to the k8s dashboard](https://docs.aw
## Usage example ## Usage example
A full example leveraging other community modules is contained in the [examples/basic directory](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/basic). Here's the gist of using it via the Terraform registry: A full example leveraging other community modules is contained in the [examples/basic directory](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/basic).
Please do not forget to configure the Kubernetes provider to point to the EKS cluster. This is needed to provision the
aws-auth configmap in kube-system. You can also use this same provider to create your own Kubernetes resources with
Terraform.
Here's the gist of using it via the Terraform registry:
```hcl ```hcl
data "aws_eks_cluster" "cluster" {
name = module.eks.cluster_id
}
data "aws_eks_cluster_auth" "cluster" {
name = module.eks.cluster_id
}
# Kubernetes provider configured against the EKS cluster; required so the
# module can manage the aws-auth configmap in kube-system.
provider "kubernetes" {
  host                   = data.aws_eks_cluster.cluster.endpoint
  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
  token                  = data.aws_eks_cluster_auth.cluster.token
  load_config_file       = false
  # Pinned to the same minor series as the examples/ configurations (~> 1.10)
  # to keep the documented usage consistent across the repository.
  version                = "~> 1.10"
}
module "my-cluster" { module "my-cluster" {
source = "terraform-aws-modules/eks/aws" source = "terraform-aws-modules/eks/aws"
cluster_name = "my-cluster" cluster_name = "my-cluster"

View File

@@ -1,40 +1,3 @@
resource "local_file" "config_map_aws_auth" {
count = var.write_aws_auth_config ? 1 : 0
content = data.template_file.config_map_aws_auth.rendered
filename = "${var.config_output_path}config-map-aws-auth_${var.cluster_name}.yaml"
}
resource "null_resource" "update_config_map_aws_auth" {
count = var.manage_aws_auth ? 1 : 0
depends_on = [aws_eks_cluster.this]
provisioner "local-exec" {
working_dir = path.module
command = <<EOS
completed_apply=0
for i in `seq 1 10`; do \
echo "${null_resource.update_config_map_aws_auth[0].triggers.kube_config_map_rendered}" > kube_config.yaml && \
echo "${null_resource.update_config_map_aws_auth[0].triggers.config_map_rendered}" > aws_auth_configmap.yaml && \
kubectl apply -f aws_auth_configmap.yaml --kubeconfig kube_config.yaml && \
completed_apply=1 && break || \
sleep 10; \
done; \
rm aws_auth_configmap.yaml kube_config.yaml;
if [ "$completed_apply" = "0" ]; then exit 1; fi;
EOS
interpreter = var.local_exec_interpreter
}
triggers = {
kube_config_map_rendered = data.template_file.kubeconfig.rendered
config_map_rendered = data.template_file.config_map_aws_auth.rendered
endpoint = aws_eks_cluster.this.endpoint
}
}
data "aws_caller_identity" "current" { data "aws_caller_identity" "current" {
} }
@@ -79,21 +42,20 @@ data "template_file" "worker_role_arns" {
} }
} }
data "template_file" "config_map_aws_auth" { resource "kubernetes_config_map" "aws_auth" {
template = file("${path.module}/templates/config-map-aws-auth.yaml.tpl") count = var.manage_aws_auth ? 1 : 0
vars = { metadata {
worker_role_arn = join( name = "aws-auth"
"", namespace = "kube-system"
distinct( }
concat(
data.template_file.launch_template_worker_role_arns.*.rendered, data = {
data.template_file.worker_role_arns.*.rendered, mapRoles = <<EOF
), ${join("", distinct(concat(data.template_file.launch_template_worker_role_arns.*.rendered, data.template_file.worker_role_arns.*.rendered)))}
), ${yamlencode(var.map_roles)}
) EOF
map_users = yamlencode(var.map_users), mapUsers = yamlencode(var.map_users)
map_roles = yamlencode(var.map_roles), mapAccounts = yamlencode(var.map_accounts)
map_accounts = yamlencode(var.map_accounts)
} }
} }

View File

@@ -0,0 +1,14 @@
# Upgrading from version <= 7.x to 8.0.0
In version 8.0.0 the way the aws-auth config map in the kube-system namespace is managed has been changed.
Previously it was managed via kubectl using a null resource. This has been changed so that it is managed by the
Terraform Kubernetes provider.
To upgrade you have to add the kubernetes provider to the place you are calling the module. You can see examples in
the [examples](../examples) folder.
You also have to delete the aws-auth config map before doing an apply.
**This means you need to run the apply with the same user/role that created the cluster**, since this user will be
the only one with admin access on the k8s cluster. After that the resource is managed through the Terraform
Kubernetes provider.

View File

@@ -23,6 +23,22 @@ provider "template" {
version = "~> 2.1" version = "~> 2.1"
} }
data "aws_eks_cluster" "cluster" {
name = module.eks.cluster_id
}
data "aws_eks_cluster_auth" "cluster" {
name = module.eks.cluster_id
}
provider "kubernetes" {
host = data.aws_eks_cluster.cluster.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
token = data.aws_eks_cluster_auth.cluster.token
load_config_file = false
version = "~> 1.10"
}
data "aws_availability_zones" "available" { data "aws_availability_zones" "available" {
} }

View File

@@ -23,6 +23,22 @@ provider "template" {
version = "~> 2.1" version = "~> 2.1"
} }
data "aws_eks_cluster" "cluster" {
name = module.eks.cluster_id
}
data "aws_eks_cluster_auth" "cluster" {
name = module.eks.cluster_id
}
provider "kubernetes" {
host = data.aws_eks_cluster.cluster.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
token = data.aws_eks_cluster_auth.cluster.token
load_config_file = false
version = "~> 1.10"
}
data "aws_availability_zones" "available" { data "aws_availability_zones" "available" {
} }

View File

@@ -23,6 +23,22 @@ provider "template" {
version = "~> 2.1" version = "~> 2.1"
} }
data "aws_eks_cluster" "cluster" {
name = module.eks.cluster_id
}
data "aws_eks_cluster_auth" "cluster" {
name = module.eks.cluster_id
}
provider "kubernetes" {
host = data.aws_eks_cluster.cluster.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
token = data.aws_eks_cluster_auth.cluster.token
load_config_file = false
version = "~> 1.10"
}
data "aws_availability_zones" "available" { data "aws_availability_zones" "available" {
} }

View File

@@ -30,7 +30,7 @@ output "cluster_security_group_id" {
output "config_map_aws_auth" { output "config_map_aws_auth" {
description = "A kubernetes configuration to authenticate to this EKS cluster." description = "A kubernetes configuration to authenticate to this EKS cluster."
value = data.template_file.config_map_aws_auth.rendered value = kubernetes_config_map.aws_auth.*
} }
output "cluster_iam_role_name" { output "cluster_iam_role_name" {

View File

@@ -1,19 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: aws-auth
namespace: kube-system
data:
mapRoles: |
${worker_role_arn}
%{if chomp(map_roles) != "[]" }
${indent(4, map_roles)}
%{ endif }
%{if chomp(map_users) != "[]" }
mapUsers: |
${indent(4, map_users)}
%{ endif }
%{if chomp(map_accounts) != "[]" }
mapAccounts: |
${indent(4, map_accounts)}
%{ endif }

View File

@@ -1,8 +1,8 @@
- rolearn: ${worker_role_arn} - rolearn: ${worker_role_arn}
username: system:node:{{EC2PrivateDNSName}} username: system:node:{{EC2PrivateDNSName}}
groups: groups:
- system:bootstrappers - system:bootstrappers
- system:nodes - system:nodes
%{~ if platform == "windows" ~} %{~ if platform == "windows" ~}
- eks:kube-proxy-windows - eks:kube-proxy-windows
%{~ endif ~} %{~ endif ~}