allow specifying an IAM role for each worker group (#137)

* allow creating an IAM role for each worker group

* move CHANGELOG entry from 'Changed' to 'Added'

* create multiple roles not just profiles

* fix config_map_aws_auth generation

* don't duplicate worker-role templating

* specify ARNs for worker groups individually

TODO: fix aws_auth configmap

* fixed AWS auth

* fix aws_iam_instance_profile.workers name
fix iam_instance_profile fallback

* fix outputs

* fix iam_instance_profile calculation

* hopefully fix aws auth configmap generation

* manually fill out remainder of arn

* remove depends_on in worker_role_arns template file

this was causing resources to be recreated every time

* fmt

* fix typo, move iam_role_id default to defaults map
Andrew Lavery authored on 2018-09-24 07:08:35 -07:00; committed by Max Williams
parent b6f6a82352, commit b623bc234a
8 changed files with 27 additions and 12 deletions
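
What this buys module users: each map in worker_groups can now carry its own iam_role_id, and groups that omit it keep using the module-managed worker role. A minimal usage sketch with abridged module arguments; the group names and the pre-existing aws_iam_role.restricted_workers are hypothetical, not part of this commit:

module "eks" {
  source             = "terraform-aws-modules/eks/aws"
  cluster_name       = "example"
  subnets            = "${var.subnets}"
  vpc_id             = "${var.vpc_id}"
  worker_group_count = 2

  worker_groups = [
    {
      # No iam_role_id here: this group falls back to the module-managed role.
      name = "general"
    },
    {
      # Per-group override introduced by this change.
      name        = "restricted"
      iam_role_id = "${aws_iam_role.restricted_workers.id}"
    },
  ]
}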

CHANGELOG.md

@@ -10,6 +10,7 @@ project adheres to [Semantic Versioning](http://semver.org/).
 ### Added

 - A useful addition (slam dunk, @self 🔥)
+- Worker groups can be created with a specified IAM role. (from @laverya)

 ### Changed

README.md

@@ -133,8 +133,8 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a
 | cluster_version | The Kubernetes server version for the EKS cluster. |
 | config_map_aws_auth | A kubernetes configuration to authenticate to this EKS cluster. |
 | kubeconfig | kubectl config file contents for this EKS cluster. |
-| worker_iam_role_arn | IAM role ID attached to EKS workers |
-| worker_iam_role_name | IAM role name attached to EKS workers |
+| worker_iam_role_arn | default IAM role ARN for EKS worker groups |
+| worker_iam_role_name | default IAM role name for EKS worker groups |
 | worker_security_group_id | Security group ID attached to the EKS workers. |
 | workers_asg_arns | IDs of the autoscaling groups containing workers. |
 | workers_asg_names | Names of the autoscaling groups containing workers. |
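
Note the narrowed meaning: worker_iam_role_arn and worker_iam_role_name now describe only the default role, not necessarily the role of every group. Consumers that hang extra policies off these outputs keep working for groups on the default role. A hedged consumer sketch (the attachment name and managed policy are illustrative, not from this commit):

# Attach an additional managed policy to the module's default worker role.
resource "aws_iam_role_policy_attachment" "workers_ssm" {
  role       = "${module.eks.worker_iam_role_name}"
  policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
}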

aws_auth.tf

@@ -16,11 +16,22 @@ resource "null_resource" "update_config_map_aws_auth" {
   count = "${var.manage_aws_auth ? 1 : 0}"
 }

+data "aws_caller_identity" "current" {}
+
+data "template_file" "worker_role_arns" {
+  count    = "${var.worker_group_count}"
+  template = "${file("${path.module}/templates/worker-role.tpl")}"
+
+  vars {
+    worker_role_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/${element(aws_iam_instance_profile.workers.*.role, count.index)}"
+  }
+}
+
 data "template_file" "config_map_aws_auth" {
   template = "${file("${path.module}/templates/config-map-aws-auth.yaml.tpl")}"

   vars {
-    worker_role_arn = "${aws_iam_role.workers.arn}"
+    worker_role_arn = "${join("", distinct(data.template_file.worker_role_arns.*.rendered))}"
     map_users    = "${join("", data.template_file.map_users.*.rendered)}"
     map_roles    = "${join("", data.template_file.map_roles.*.rendered)}"
     map_accounts = "${join("", data.template_file.map_accounts.*.rendered)}"
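
The mechanics here: worker-role.tpl is rendered once per worker group, the role ARN is assembled by hand from the caller's account ID because aws_iam_instance_profile only exposes the role name, and distinct() collapses the duplicates that appear when several groups share the default role, so each role lands in the aws-auth ConfigMap exactly once. A standalone sketch of the dedup step, with made-up ARNs:

locals {
  # Two groups on the default role, one on a custom role.
  rendered_blocks = [
    "    - rolearn: arn:aws:iam::111111111111:role/eks-workers\n",
    "    - rolearn: arn:aws:iam::111111111111:role/eks-workers\n",
    "    - rolearn: arn:aws:iam::111111111111:role/restricted-workers\n",
  ]

  # distinct() keeps the first occurrence of each block, so the joined
  # mapRoles snippet names each role once.
  map_roles_snippet = "${join("", distinct(local.rendered_blocks))}"
}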

local.tf

@@ -30,6 +30,7 @@ locals {
     autoscaling_enabled           = false                        # Sets whether policy and matching tags will be added to allow autoscaling.
     additional_security_group_ids = ""                           # A comma-delimited list of additional security group ids to include in worker launch config
     protect_from_scale_in         = false                        # Prevent AWS from scaling in, so that cluster-autoscaler is solely responsible.
+    iam_role_id                   = "${aws_iam_role.workers.id}" # Use the specified IAM role if set.
   }

   workers_group_defaults = "${merge(local.workers_group_defaults_defaults, var.workers_group_defaults)}"
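
Because the fallback lives in workers_group_defaults_defaults and is merged with var.workers_group_defaults, a caller can also swap the role for every worker group at once rather than per group. A sketch, assuming a hypothetical pre-existing aws_iam_role.all_workers:

module "eks" {
  # ...other arguments as in the earlier sketch...
  source = "terraform-aws-modules/eks/aws"

  workers_group_defaults = {
    iam_role_id = "${aws_iam_role.all_workers.id}"
  }
}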

outputs.tf

@@ -55,11 +55,11 @@ output "worker_security_group_id" {
 }

 output "worker_iam_role_name" {
-  description = "IAM role name attached to EKS workers"
+  description = "default IAM role name for EKS worker groups"
   value       = "${aws_iam_role.workers.name}"
 }

 output "worker_iam_role_arn" {
-  description = "IAM role ID attached to EKS workers"
+  description = "default IAM role ARN for EKS worker groups"
   value       = "${aws_iam_role.workers.arn}"
 }

templates/config-map-aws-auth.yaml.tpl

@@ -5,11 +5,7 @@ metadata:
   namespace: kube-system
 data:
   mapRoles: |
-    - rolearn: ${worker_role_arn}
-      username: system:node:{{EC2PrivateDNSName}}
-      groups:
-        - system:bootstrappers
-        - system:nodes
+${worker_role_arn}
 ${map_roles}
   mapUsers: |
 ${map_users}

templates/worker-role.tpl

@@ -0,0 +1,5 @@
+    - rolearn: ${worker_role_arn}
+      username: system:node:{{EC2PrivateDNSName}}
+      groups:
+        - system:bootstrappers
+        - system:nodes

workers.tf

@@ -26,7 +26,7 @@ resource "aws_launch_configuration" "workers" {
   name_prefix                 = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}"
   associate_public_ip_address = "${lookup(var.worker_groups[count.index], "public_ip", lookup(local.workers_group_defaults, "public_ip"))}"
   security_groups             = ["${local.worker_security_group_id}", "${var.worker_additional_security_group_ids}", "${compact(split(",",lookup(var.worker_groups[count.index],"additional_security_group_ids",lookup(local.workers_group_defaults, "additional_security_group_ids"))))}"]
-  iam_instance_profile        = "${aws_iam_instance_profile.workers.id}"
+  iam_instance_profile        = "${element(aws_iam_instance_profile.workers.*.id, count.index)}"
   image_id                    = "${lookup(var.worker_groups[count.index], "ami_id", lookup(local.workers_group_defaults, "ami_id"))}"
   instance_type               = "${lookup(var.worker_groups[count.index], "instance_type", lookup(local.workers_group_defaults, "instance_type"))}"
   key_name                    = "${lookup(var.worker_groups[count.index], "key_name", lookup(local.workers_group_defaults, "key_name"))}"

@@ -97,7 +97,8 @@ resource "aws_iam_role" "workers" {

 resource "aws_iam_instance_profile" "workers" {
   name_prefix = "${aws_eks_cluster.this.name}"
-  role        = "${aws_iam_role.workers.name}"
+  role        = "${lookup(var.worker_groups[count.index], "iam_role_id", lookup(local.workers_group_defaults, "iam_role_id"))}"
+  count       = "${var.worker_group_count}"
 }

 resource "aws_iam_role_policy_attachment" "workers_AmazonEKSWorkerNodePolicy" {