mirror of https://github.com/ysoftdevs/terraform-aws-eks.git (synced 2026-01-16 16:47:20 +01:00)
Upgrade to Terraform 0.12 (#394)
* Run the terraform 0.12 upgrade tool
* Fix post-upgrade TODOs
* Use strict typing for variables
* Upgrade examples; point them at the VPC module's tf 0.12 PR
* Remove unnecessary `coalesce()` calls: `coalesce(lookup(map, key, ""), default)` -> `lookup(map, key, default)`
* Fix broken `autoscaling_enabled` (#1)
* Always set a value for tags; fix `coalescelist()` calls
* Always set a value for these tags
* Fix tag value
* Fix tag value
* Default element available
* Add a default value
* Add a general default; without this default, Terraform throws an error when running a destroy
* Fix CI
* Change the vpc module back to `terraform-aws-modules/vpc/aws` in the example
* Update CHANGELOG.md
* Change the type of variable `cluster_log_retention_in_days` to number
* Remove the `xx_count` variables
* Use actual lists instead of comma-separated strings
* Remove the `xx_count` variables from the docs
* Replace `element()` with list indexing
* Change variable `worker_group_tags` to an attribute of worker_group
* Fix `workers_launch_template_mixed` tags
* Change the `override_instance_type_x` variables to a list
* Update CHANGELOG.md
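The `coalesce()` cleanup listed above is a mechanical rewrite; a minimal standalone sketch of the before/after (the variable and key names here are illustrative, not from the module):

```hcl
variable "worker" {
  type    = map(string)
  default = {}
}

locals {
  # Terraform 0.11 style: lookup() was wrapped in coalesce() with a "" sentinel
  # instance_type = "${coalesce(lookup(var.worker, "instance_type", ""), "m4.large")}"

  # Terraform 0.12 style: pass the default straight to lookup()
  instance_type = lookup(var.worker, "instance_type", "m4.large")
}
```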
.travis.yml

@@ -23,7 +23,7 @@ install:
   - bundle install
 
 before_script:
-  - export TERRAFORM_VERSION=0.11.14
+  - export TERRAFORM_VERSION=0.12.2
   - curl --silent --output terraform.zip "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip"
   - unzip terraform.zip ; rm -f terraform.zip; chmod +x terraform
   - mkdir -p ${HOME}/bin ; export PATH=${PATH}:${HOME}/bin; mv terraform ${HOME}/bin/
CHANGELOG.md

@@ -7,7 +7,7 @@ project adheres to [Semantic Versioning](http://semver.org/).
 
 ## Next release
 
-## [[v4.?.?](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v4.0.0...HEAD)] - 2019-06-??]
+## [[v5.0.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v4.0.2...HEAD)] - 2019-06-??]
 
 ### Added
@@ -18,6 +18,11 @@ project adheres to [Semantic Versioning](http://semver.org/).
 
 ### Changed
 
+- Finally, Terraform 0.12 support, [Upgrade Guide](https://github.com/terraform-aws-modules/terraform-aws-eks/pull/394) (by @alex-goncharov @nauxliu @timboven)
+- All the xx_count variables have been removed (by @nauxliu on behalf of RightCapital)
+- Use actual lists in the workers group maps instead of strings with commas (by @nauxliu on behalf of RightCapital)
+- Move variable `worker_group_tags` to workers group's attribute `tags` (by @nauxliu on behalf of RightCapital)
+- Change override instance_types to list (by @nauxliu on behalf of RightCapital)
 - Fix toggle for IAM instance profile creation for mixed launch templates (by @jnozo)
 
 # History
README.md

@@ -32,6 +32,11 @@ module "my-cluster" {
     {
       instance_type = "m4.large"
       asg_max_size  = 5
+      tags = {
+        key                 = "foo"
+        value               = "bar"
+        propagate_at_launch = true
+      }
     }
   ]
 
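The `tags` attribute shown above replaces the removed `worker_group_tags` variable. A sketch of the migration, hedged: the prior shape is inferred from the removed variable's `{ "default": [] }` default, and the list-of-maps form matches the `tags = []` default that local.tf declares further down in this diff:

```hcl
# Before (Terraform 0.11): tags lived in a separate map variable keyed by group name
# worker_group_tags = {
#   default = [
#     {
#       key                 = "foo"
#       value               = "bar"
#       propagate_at_launch = true
#     },
#   ]
# }

# After (Terraform 0.12): tags are an attribute of each worker group map
worker_groups = [
  {
    name          = "default"
    instance_type = "m4.large"
    asg_max_size  = 5
    tags = [
      {
        key                 = "foo"
        value               = "bar"
        propagate_at_launch = true
      },
    ]
  },
]
```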
@@ -130,11 +135,8 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a
 | manage\_cluster\_iam\_resources | Whether to let the module manage cluster IAM resources. If set to false, cluster_iam_role_name must be specified. | string | `"true"` | no |
 | manage\_worker\_iam\_resources | Whether to let the module manage worker IAM resources. If set to false, iam_instance_profile_name must be specified for workers. | string | `"true"` | no |
 | map\_accounts | Additional AWS account numbers to add to the aws-auth configmap. See examples/basic/variables.tf for example format. | list | `[]` | no |
-| map\_accounts\_count | The count of accounts in the map_accounts list. | string | `"0"` | no |
 | map\_roles | Additional IAM roles to add to the aws-auth configmap. See examples/basic/variables.tf for example format. | list | `[]` | no |
-| map\_roles\_count | The count of roles in the map_roles list. | string | `"0"` | no |
 | map\_users | Additional IAM users to add to the aws-auth configmap. See examples/basic/variables.tf for example format. | list | `[]` | no |
-| map\_users\_count | The count of roles in the map_users list. | string | `"0"` | no |
 | permissions\_boundary | If provided, all IAM roles will be created with this permissions boundary attached. | string | `""` | no |
 | subnets | A list of subnets to place the EKS cluster and workers within. | list | n/a | yes |
 | tags | A map of tags to add to all resources. | map | `{}` | no |
@@ -142,17 +144,12 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a
 | worker\_additional\_security\_group\_ids | A list of additional security group ids to attach to worker instances | list | `[]` | no |
 | worker\_ami\_name\_filter | Additional name filter for AWS EKS worker AMI. Default behaviour will get latest for the cluster_version but could be set to a release from amazon-eks-ami, e.g. "v20190220" | string | `"v*"` | no |
 | worker\_create\_security\_group | Whether to create a security group for the workers or attach the workers to `worker_security_group_id`. | string | `"true"` | no |
-| worker\_group\_count | The number of maps contained within the worker_groups list. | string | `"1"` | no |
-| worker\_group\_launch\_template\_count | The number of maps contained within the worker_groups_launch_template list. | string | `"0"` | no |
-| worker\_group\_launch\_template\_mixed\_count | The number of maps contained within the worker_groups_launch_template_mixed list. | string | `"0"` | no |
-| worker\_group\_tags | A map defining extra tags to be applied to the worker group ASG. | map | `{ "default": [] }` | no |
 | worker\_groups | A list of maps defining worker group configurations to be defined using AWS Launch Configurations. See workers_group_defaults for valid keys. | list | `[ { "name": "default" } ]` | no |
 | worker\_groups\_launch\_template | A list of maps defining worker group configurations to be defined using AWS Launch Templates. See workers_group_defaults for valid keys. | list | `[ { "name": "default" } ]` | no |
 | worker\_groups\_launch\_template\_mixed | A list of maps defining worker group configurations to be defined using AWS Launch Templates. See workers_group_defaults for valid keys. | list | `[ { "name": "default" } ]` | no |
 | worker\_security\_group\_id | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingres/egress to work with the EKS cluster. | string | `""` | no |
 | worker\_sg\_ingress\_from\_port | Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443). | string | `"1025"` | no |
 | workers\_additional\_policies | Additional policies to be added to workers | list | `[]` | no |
-| workers\_additional\_policies\_count | | string | `"0"` | no |
 | workers\_group\_defaults | Override default values for target groups. See workers_group_defaults_defaults in local.tf for valid keys. | map | `{}` | no |
 | write\_aws\_auth\_config | Whether to write the aws-auth configmap file. | string | `"true"` | no |
 | write\_kubeconfig | Whether to write a Kubectl config file containing the cluster configuration. Saved to `config_output_path`. | string | `"true"` | no |
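For module users, the removed `*_count` rows above mean every paired count argument simply disappears from the call; the lists themselves are enough. A minimal sketch (account numbers and ids are illustrative):

```hcl
module "eks" {
  source       = "../.."
  cluster_name = "my-cluster"
  subnets      = ["subnet-11111111", "subnet-22222222"]
  vpc_id       = "vpc-33333333"

  # Terraform 0.11 required a parallel count for each list:
  # map_accounts       = ["111111111111"]
  # map_accounts_count = 1

  # Terraform 0.12: the module derives the count with length() internally
  map_accounts = ["111111111111"]
}
```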
aws_auth.tf

@@ -1,103 +1,140 @@
 resource "local_file" "config_map_aws_auth" {
-  count    = "${var.write_aws_auth_config ? 1 : 0}"
-  content  = "${data.template_file.config_map_aws_auth.rendered}"
+  count    = var.write_aws_auth_config ? 1 : 0
+  content  = data.template_file.config_map_aws_auth.rendered
   filename = "${var.config_output_path}config-map-aws-auth_${var.cluster_name}.yaml"
 }
 
 resource "null_resource" "update_config_map_aws_auth" {
-  count      = "${var.manage_aws_auth ? 1 : 0}"
-  depends_on = ["aws_eks_cluster.this"]
+  count      = var.manage_aws_auth ? 1 : 0
+  depends_on = [aws_eks_cluster.this]
 
   provisioner "local-exec" {
-    working_dir = "${path.module}"
+    working_dir = path.module
 
     command = <<EOS
 for i in `seq 1 10`; do \
-echo "${null_resource.update_config_map_aws_auth.triggers.kube_config_map_rendered}" > kube_config.yaml & \
-echo "${null_resource.update_config_map_aws_auth.triggers.config_map_rendered}" > aws_auth_configmap.yaml & \
+echo "${null_resource.update_config_map_aws_auth[0].triggers.kube_config_map_rendered}" > kube_config.yaml & \
+echo "${null_resource.update_config_map_aws_auth[0].triggers.config_map_rendered}" > aws_auth_configmap.yaml & \
 kubectl apply -f aws_auth_configmap.yaml --kubeconfig kube_config.yaml && break || \
 sleep 10; \
 done; \
 rm aws_auth_configmap.yaml kube_config.yaml;
 EOS
 
-    interpreter = ["${var.local_exec_interpreter}"]
+    interpreter = var.local_exec_interpreter
   }
 
-  triggers {
-    kube_config_map_rendered = "${data.template_file.kubeconfig.rendered}"
-    config_map_rendered      = "${data.template_file.config_map_aws_auth.rendered}"
-    endpoint                 = "${aws_eks_cluster.this.endpoint}"
+  triggers = {
+    kube_config_map_rendered = data.template_file.kubeconfig.rendered
+    config_map_rendered      = data.template_file.config_map_aws_auth.rendered
+    endpoint                 = aws_eks_cluster.this.endpoint
   }
 }
 
-data "aws_caller_identity" "current" {}
+data "aws_caller_identity" "current" {
+}
 
 data "template_file" "launch_template_mixed_worker_role_arns" {
-  count    = "${var.worker_group_launch_template_mixed_count}"
-  template = "${file("${path.module}/templates/worker-role.tpl")}"
+  count    = local.worker_group_launch_template_mixed_count
+  template = file("${path.module}/templates/worker-role.tpl")
 
-  vars {
-    worker_role_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/${element(coalescelist(aws_iam_instance_profile.workers_launch_template_mixed.*.role, data.aws_iam_instance_profile.custom_worker_group_launch_template_mixed_iam_instance_profile.*.role_name), count.index)}"
+  vars = {
+    worker_role_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/${element(
+      coalescelist(
+        aws_iam_instance_profile.workers_launch_template_mixed.*.role,
+        data.aws_iam_instance_profile.custom_worker_group_launch_template_mixed_iam_instance_profile.*.role_name,
+      ),
+      count.index,
+    )}"
   }
 }
 
 data "template_file" "launch_template_worker_role_arns" {
-  count    = "${var.worker_group_launch_template_count}"
-  template = "${file("${path.module}/templates/worker-role.tpl")}"
+  count    = local.worker_group_launch_template_count
+  template = file("${path.module}/templates/worker-role.tpl")
 
-  vars {
-    worker_role_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/${element(coalescelist(aws_iam_instance_profile.workers_launch_template.*.role, data.aws_iam_instance_profile.custom_worker_group_launch_template_iam_instance_profile.*.role_name), count.index)}"
+  vars = {
+    worker_role_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/${element(
+      coalescelist(
+        aws_iam_instance_profile.workers_launch_template.*.role,
+        data.aws_iam_instance_profile.custom_worker_group_launch_template_iam_instance_profile.*.role_name,
+      ),
+      count.index,
+    )}"
   }
 }
 
 data "template_file" "worker_role_arns" {
-  count    = "${var.worker_group_count}"
-  template = "${file("${path.module}/templates/worker-role.tpl")}"
+  count    = local.worker_group_count
+  template = file("${path.module}/templates/worker-role.tpl")
 
-  vars {
-    worker_role_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/${element(coalescelist(aws_iam_instance_profile.workers.*.role, data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.role_name), count.index)}"
+  vars = {
+    worker_role_arn = "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/${element(
+      coalescelist(
+        aws_iam_instance_profile.workers.*.role,
+        data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.role_name,
+        [""]
+      ),
+      count.index,
+    )}"
   }
 }
 
 data "template_file" "config_map_aws_auth" {
-  template = "${file("${path.module}/templates/config-map-aws-auth.yaml.tpl")}"
+  template = file("${path.module}/templates/config-map-aws-auth.yaml.tpl")
 
-  vars {
-    worker_role_arn = "${join("", distinct(concat(data.template_file.launch_template_worker_role_arns.*.rendered, data.template_file.worker_role_arns.*.rendered, data.template_file.launch_template_mixed_worker_role_arns.*.rendered)))}"
-    map_users       = "${join("", data.template_file.map_users.*.rendered)}"
-    map_roles       = "${join("", data.template_file.map_roles.*.rendered)}"
-    map_accounts    = "${join("", data.template_file.map_accounts.*.rendered)}"
+  vars = {
+    worker_role_arn = join(
+      "",
+      distinct(
+        concat(
+          data.template_file.launch_template_worker_role_arns.*.rendered,
+          data.template_file.worker_role_arns.*.rendered,
+          data.template_file.launch_template_mixed_worker_role_arns.*.rendered,
+        ),
+      ),
+    )
+    map_users    = join("", data.template_file.map_users.*.rendered)
+    map_roles    = join("", data.template_file.map_roles.*.rendered)
+    map_accounts = join("", data.template_file.map_accounts.*.rendered)
   }
 }
 
 data "template_file" "map_users" {
-  count    = "${var.map_users_count}"
-  template = "${file("${path.module}/templates/config-map-aws-auth-map_users.yaml.tpl")}"
+  count = length(var.map_users)
+  template = file(
+    "${path.module}/templates/config-map-aws-auth-map_users.yaml.tpl",
+  )
 
-  vars {
-    user_arn = "${lookup(var.map_users[count.index], "user_arn")}"
-    username = "${lookup(var.map_users[count.index], "username")}"
-    group    = "${lookup(var.map_users[count.index], "group")}"
+  vars = {
+    user_arn = var.map_users[count.index]["user_arn"]
+    username = var.map_users[count.index]["username"]
+    group    = var.map_users[count.index]["group"]
   }
 }
 
 data "template_file" "map_roles" {
-  count    = "${var.map_roles_count}"
-  template = "${file("${path.module}/templates/config-map-aws-auth-map_roles.yaml.tpl")}"
+  count = length(var.map_roles)
+  template = file(
+    "${path.module}/templates/config-map-aws-auth-map_roles.yaml.tpl",
+  )
 
-  vars {
-    role_arn = "${lookup(var.map_roles[count.index], "role_arn")}"
-    username = "${lookup(var.map_roles[count.index], "username")}"
-    group    = "${lookup(var.map_roles[count.index], "group")}"
+  vars = {
+    role_arn = var.map_roles[count.index]["role_arn"]
+    username = var.map_roles[count.index]["username"]
+    group    = var.map_roles[count.index]["group"]
   }
 }
 
 data "template_file" "map_accounts" {
-  count    = "${var.map_accounts_count}"
-  template = "${file("${path.module}/templates/config-map-aws-auth-map_accounts.yaml.tpl")}"
+  count = length(var.map_accounts)
+  template = file(
+    "${path.module}/templates/config-map-aws-auth-map_accounts.yaml.tpl",
+  )
 
-  vars {
-    account_number = "${element(var.map_accounts, count.index)}"
+  vars = {
+    account_number = var.map_accounts[count.index]
   }
 }
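The `count = length(var.map_users)` pattern above is what makes the `*_count` variables redundant: Terraform 0.12 can size a resource directly from the list it iterates, and index the list instead of calling `lookup()`/`element()`. A standalone sketch of the same idea (names and the example ARN are illustrative):

```hcl
variable "map_users" {
  type = list(map(string))
  default = [
    {
      user_arn = "arn:aws:iam::111111111111:user/alice"
      username = "alice"
      group    = "system:masters"
    },
  ]
}

# One value per entry, driven by the list length rather than a count variable
output "usernames" {
  value = [for u in var.map_users : u["username"]]
}
```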
cluster.tf

@@ -1,48 +1,53 @@
 resource "aws_cloudwatch_log_group" "this" {
   name              = "/aws/eks/${var.cluster_name}/cluster"
-  retention_in_days = "${var.cluster_log_retention_in_days}"
+  retention_in_days = var.cluster_log_retention_in_days
 
-  count = "${length(var.cluster_enabled_log_types) > 0 ? 1 : 0}"
+  count = length(var.cluster_enabled_log_types) > 0 ? 1 : 0
 }
 
 resource "aws_eks_cluster" "this" {
-  name                      = "${var.cluster_name}"
-  enabled_cluster_log_types = "${var.cluster_enabled_log_types}"
-  role_arn                  = "${local.cluster_iam_role_arn}"
-  version                   = "${var.cluster_version}"
+  name                      = var.cluster_name
+  enabled_cluster_log_types = var.cluster_enabled_log_types
+  role_arn                  = local.cluster_iam_role_arn
+  version                   = var.cluster_version
 
   vpc_config {
-    security_group_ids      = ["${local.cluster_security_group_id}"]
-    subnet_ids              = ["${var.subnets}"]
-    endpoint_private_access = "${var.cluster_endpoint_private_access}"
-    endpoint_public_access  = "${var.cluster_endpoint_public_access}"
+    security_group_ids      = [local.cluster_security_group_id]
+    subnet_ids              = var.subnets
+    endpoint_private_access = var.cluster_endpoint_private_access
+    endpoint_public_access  = var.cluster_endpoint_public_access
   }
 
   timeouts {
-    create = "${var.cluster_create_timeout}"
-    delete = "${var.cluster_delete_timeout}"
+    create = var.cluster_create_timeout
+    delete = var.cluster_delete_timeout
   }
 
   depends_on = [
-    "aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy",
-    "aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy",
-    "aws_cloudwatch_log_group.this",
+    aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy,
+    aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy,
+    aws_cloudwatch_log_group.this
   ]
 }
 
 resource "aws_security_group" "cluster" {
-  count       = "${var.cluster_create_security_group ? 1 : 0}"
-  name_prefix = "${var.cluster_name}"
+  count       = var.cluster_create_security_group ? 1 : 0
+  name_prefix = var.cluster_name
   description = "EKS cluster security group."
-  vpc_id      = "${var.vpc_id}"
-  tags        = "${merge(var.tags, map("Name", "${var.cluster_name}-eks_cluster_sg"))}"
+  vpc_id      = var.vpc_id
+  tags = merge(
+    var.tags,
+    {
+      "Name" = "${var.cluster_name}-eks_cluster_sg"
+    },
+  )
 }
 
 resource "aws_security_group_rule" "cluster_egress_internet" {
-  count             = "${var.cluster_create_security_group ? 1 : 0}"
+  count             = var.cluster_create_security_group ? 1 : 0
   description       = "Allow cluster egress access to the Internet."
   protocol          = "-1"
-  security_group_id = "${aws_security_group.cluster.id}"
+  security_group_id = aws_security_group.cluster[0].id
   cidr_blocks       = ["0.0.0.0/0"]
   from_port         = 0
   to_port           = 0
@@ -50,34 +55,35 @@ resource "aws_security_group_rule" "cluster_egress_internet" {
 }
 
 resource "aws_security_group_rule" "cluster_https_worker_ingress" {
-  count                    = "${var.cluster_create_security_group ? 1 : 0}"
+  count                    = var.cluster_create_security_group ? 1 : 0
   description              = "Allow pods to communicate with the EKS cluster API."
   protocol                 = "tcp"
-  security_group_id        = "${aws_security_group.cluster.id}"
-  source_security_group_id = "${local.worker_security_group_id}"
+  security_group_id        = aws_security_group.cluster[0].id
+  source_security_group_id = local.worker_security_group_id
   from_port                = 443
   to_port                  = 443
   type                     = "ingress"
 }
 
 resource "aws_iam_role" "cluster" {
-  count                 = "${var.manage_cluster_iam_resources ? 1 : 0}"
-  name_prefix           = "${var.cluster_name}"
-  assume_role_policy    = "${data.aws_iam_policy_document.cluster_assume_role_policy.json}"
-  permissions_boundary  = "${var.permissions_boundary}"
-  path                  = "${var.iam_path}"
+  count                 = var.manage_cluster_iam_resources ? 1 : 0
+  name_prefix           = var.cluster_name
+  assume_role_policy    = data.aws_iam_policy_document.cluster_assume_role_policy.json
+  permissions_boundary  = var.permissions_boundary
+  path                  = var.iam_path
   force_detach_policies = true
-  tags                  = "${var.tags}"
+  tags                  = var.tags
 }
 
 resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" {
-  count      = "${var.manage_cluster_iam_resources ? 1 : 0}"
+  count      = var.manage_cluster_iam_resources ? 1 : 0
   policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
-  role       = "${aws_iam_role.cluster.name}"
+  role       = aws_iam_role.cluster[0].name
 }
 
 resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSServicePolicy" {
-  count      = "${var.manage_cluster_iam_resources ? 1 : 0}"
+  count      = var.manage_cluster_iam_resources ? 1 : 0
   policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
-  role       = "${aws_iam_role.cluster.name}"
+  role       = aws_iam_role.cluster[0].name
 }
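Two 0.12 changes in cluster.tf are worth calling out: counted resources are now plain lists, so single-instance references gain an explicit index (`aws_security_group.cluster[0].id`), and the `map()` function is replaced by map constructor syntax inside `merge()`. A minimal sketch of the latter (variable defaults are illustrative):

```hcl
variable "tags" {
  type    = map(string)
  default = { Environment = "test" }
}

variable "cluster_name" {
  type    = string
  default = "example"
}

output "cluster_sg_tags" {
  # 0.11: "${merge(var.tags, map("Name", "${var.cluster_name}-eks_cluster_sg"))}"
  # 0.12: a map literal replaces the removed map() function
  value = merge(
    var.tags,
    {
      "Name" = "${var.cluster_name}-eks_cluster_sg"
    },
  )
}
```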
data.tf

@@ -1,4 +1,5 @@
-data "aws_region" "current" {}
+data "aws_region" "current" {
+}
 
 data "aws_iam_policy_document" "workers_assume_role_policy" {
   statement {
@@ -43,95 +44,169 @@ data "aws_iam_policy_document" "cluster_assume_role_policy" {
 }
 
 data "template_file" "kubeconfig" {
-  template = "${file("${path.module}/templates/kubeconfig.tpl")}"
+  template = file("${path.module}/templates/kubeconfig.tpl")
 
-  vars {
-    kubeconfig_name                   = "${local.kubeconfig_name}"
-    endpoint                          = "${aws_eks_cluster.this.endpoint}"
-    region                            = "${data.aws_region.current.name}"
-    cluster_auth_base64               = "${aws_eks_cluster.this.certificate_authority.0.data}"
-    aws_authenticator_command         = "${var.kubeconfig_aws_authenticator_command}"
-    aws_authenticator_command_args    = "${length(var.kubeconfig_aws_authenticator_command_args) > 0 ? " - ${join("\n - ", var.kubeconfig_aws_authenticator_command_args)}" : " - ${join("\n - ", formatlist("\"%s\"", list("token", "-i", aws_eks_cluster.this.name)))}"}"
-    aws_authenticator_additional_args = "${length(var.kubeconfig_aws_authenticator_additional_args) > 0 ? " - ${join("\n - ", var.kubeconfig_aws_authenticator_additional_args)}" : ""}"
-    aws_authenticator_env_variables   = "${length(var.kubeconfig_aws_authenticator_env_variables) > 0 ? " env:\n${join("\n", data.template_file.aws_authenticator_env_variables.*.rendered)}" : ""}"
+  vars = {
+    kubeconfig_name           = local.kubeconfig_name
+    endpoint                  = aws_eks_cluster.this.endpoint
+    region                    = data.aws_region.current.name
+    cluster_auth_base64       = aws_eks_cluster.this.certificate_authority[0].data
+    aws_authenticator_command = var.kubeconfig_aws_authenticator_command
+    aws_authenticator_command_args = length(var.kubeconfig_aws_authenticator_command_args) > 0 ? " - ${join(
+      "\n - ",
+      var.kubeconfig_aws_authenticator_command_args,
+    )}" : " - ${join(
+      "\n - ",
+      formatlist("\"%s\"", ["token", "-i", aws_eks_cluster.this.name]),
+    )}"
+    aws_authenticator_additional_args = length(var.kubeconfig_aws_authenticator_additional_args) > 0 ? " - ${join(
+      "\n - ",
+      var.kubeconfig_aws_authenticator_additional_args,
+    )}" : ""
+    aws_authenticator_env_variables = length(var.kubeconfig_aws_authenticator_env_variables) > 0 ? " env:\n${join(
+      "\n",
+      data.template_file.aws_authenticator_env_variables.*.rendered,
+    )}" : ""
   }
 }
 
 data "template_file" "aws_authenticator_env_variables" {
-  count = "${length(var.kubeconfig_aws_authenticator_env_variables)}"
+  count = length(var.kubeconfig_aws_authenticator_env_variables)
 
   template = <<EOF
 - name: $${key}
   value: $${value}
 EOF
 
-  vars {
-    value = "${element(values(var.kubeconfig_aws_authenticator_env_variables), count.index)}"
-    key   = "${element(keys(var.kubeconfig_aws_authenticator_env_variables), count.index)}"
+  vars = {
+    value = values(var.kubeconfig_aws_authenticator_env_variables)[count.index]
+    key   = keys(var.kubeconfig_aws_authenticator_env_variables)[count.index]
   }
 }
 
 data "template_file" "userdata" {
-  count    = "${var.worker_group_count}"
-  template = "${file("${path.module}/templates/userdata.sh.tpl")}"
+  count    = local.worker_group_count
+  template = file("${path.module}/templates/userdata.sh.tpl")
 
-  vars {
-    cluster_name         = "${aws_eks_cluster.this.name}"
-    endpoint             = "${aws_eks_cluster.this.endpoint}"
-    cluster_auth_base64  = "${aws_eks_cluster.this.certificate_authority.0.data}"
-    pre_userdata         = "${lookup(var.worker_groups[count.index], "pre_userdata", local.workers_group_defaults["pre_userdata"])}"
-    additional_userdata  = "${lookup(var.worker_groups[count.index], "additional_userdata", local.workers_group_defaults["additional_userdata"])}"
-    bootstrap_extra_args = "${lookup(var.worker_groups[count.index], "bootstrap_extra_args", local.workers_group_defaults["bootstrap_extra_args"])}"
-    kubelet_extra_args   = "${lookup(var.worker_groups[count.index], "kubelet_extra_args", local.workers_group_defaults["kubelet_extra_args"])}"
+  vars = {
+    cluster_name        = aws_eks_cluster.this.name
+    endpoint            = aws_eks_cluster.this.endpoint
+    cluster_auth_base64 = aws_eks_cluster.this.certificate_authority[0].data
+    pre_userdata = lookup(
+      var.worker_groups[count.index],
+      "pre_userdata",
+      local.workers_group_defaults["pre_userdata"],
+    )
+    additional_userdata = lookup(
+      var.worker_groups[count.index],
+      "additional_userdata",
+      local.workers_group_defaults["additional_userdata"],
+    )
+    bootstrap_extra_args = lookup(
+      var.worker_groups[count.index],
+      "bootstrap_extra_args",
+      local.workers_group_defaults["bootstrap_extra_args"],
+    )
+    kubelet_extra_args = lookup(
+      var.worker_groups[count.index],
+      "kubelet_extra_args",
+      local.workers_group_defaults["kubelet_extra_args"],
+    )
   }
 }
 
 data "template_file" "launch_template_userdata" {
-  count    = "${var.worker_group_launch_template_count}"
-  template = "${file("${path.module}/templates/userdata.sh.tpl")}"
+  count    = local.worker_group_launch_template_count
+  template = file("${path.module}/templates/userdata.sh.tpl")
 
-  vars {
-    cluster_name         = "${aws_eks_cluster.this.name}"
-    endpoint             = "${aws_eks_cluster.this.endpoint}"
-    cluster_auth_base64  = "${aws_eks_cluster.this.certificate_authority.0.data}"
-    pre_userdata         = "${lookup(var.worker_groups_launch_template[count.index], "pre_userdata", local.workers_group_defaults["pre_userdata"])}"
-    additional_userdata  = "${lookup(var.worker_groups_launch_template[count.index], "additional_userdata", local.workers_group_defaults["additional_userdata"])}"
-    bootstrap_extra_args = "${lookup(var.worker_groups_launch_template[count.index], "bootstrap_extra_args", local.workers_group_defaults["bootstrap_extra_args"])}"
-    kubelet_extra_args   = "${lookup(var.worker_groups_launch_template[count.index], "kubelet_extra_args", local.workers_group_defaults["kubelet_extra_args"])}"
+  vars = {
+    cluster_name        = aws_eks_cluster.this.name
+    endpoint            = aws_eks_cluster.this.endpoint
+    cluster_auth_base64 = aws_eks_cluster.this.certificate_authority[0].data
+    pre_userdata = lookup(
+      var.worker_groups_launch_template[count.index],
+      "pre_userdata",
+      local.workers_group_defaults["pre_userdata"],
+    )
+    additional_userdata = lookup(
+      var.worker_groups_launch_template[count.index],
+      "additional_userdata",
+      local.workers_group_defaults["additional_userdata"],
+    )
+    bootstrap_extra_args = lookup(
+      var.worker_groups_launch_template[count.index],
+      "bootstrap_extra_args",
+      local.workers_group_defaults["bootstrap_extra_args"],
+    )
+    kubelet_extra_args = lookup(
+      var.worker_groups_launch_template[count.index],
+      "kubelet_extra_args",
+      local.workers_group_defaults["kubelet_extra_args"],
+    )
   }
 }
 
 data "template_file" "workers_launch_template_mixed" {
-  count    = "${var.worker_group_launch_template_mixed_count}"
-  template = "${file("${path.module}/templates/userdata.sh.tpl")}"
+  count    = local.worker_group_launch_template_mixed_count
+  template = file("${path.module}/templates/userdata.sh.tpl")
 
-  vars {
-    cluster_name         = "${aws_eks_cluster.this.name}"
-    endpoint             = "${aws_eks_cluster.this.endpoint}"
-    cluster_auth_base64  = "${aws_eks_cluster.this.certificate_authority.0.data}"
-    pre_userdata         = "${lookup(var.worker_groups_launch_template_mixed[count.index], "pre_userdata", local.workers_group_defaults["pre_userdata"])}"
-    additional_userdata  = "${lookup(var.worker_groups_launch_template_mixed[count.index], "additional_userdata", local.workers_group_defaults["additional_userdata"])}"
-    bootstrap_extra_args = "${lookup(var.worker_groups_launch_template_mixed[count.index], "bootstrap_extra_args", local.workers_group_defaults["bootstrap_extra_args"])}"
-    kubelet_extra_args   = "${lookup(var.worker_groups_launch_template_mixed[count.index], "kubelet_extra_args", local.workers_group_defaults["kubelet_extra_args"])}"
+  vars = {
+    cluster_name        = aws_eks_cluster.this.name
+    endpoint            = aws_eks_cluster.this.endpoint
+    cluster_auth_base64 = aws_eks_cluster.this.certificate_authority[0].data
+    pre_userdata = lookup(
+      var.worker_groups_launch_template_mixed[count.index],
+      "pre_userdata",
+      local.workers_group_defaults["pre_userdata"],
+    )
+    additional_userdata = lookup(
+      var.worker_groups_launch_template_mixed[count.index],
+      "additional_userdata",
+      local.workers_group_defaults["additional_userdata"],
+    )
+    bootstrap_extra_args = lookup(
+      var.worker_groups_launch_template_mixed[count.index],
+      "bootstrap_extra_args",
+      local.workers_group_defaults["bootstrap_extra_args"],
+    )
+    kubelet_extra_args = lookup(
+      var.worker_groups_launch_template_mixed[count.index],
+      "kubelet_extra_args",
+      local.workers_group_defaults["kubelet_extra_args"],
+    )
   }
 }
 
 data "aws_iam_role" "custom_cluster_iam_role" {
-  count = "${var.manage_cluster_iam_resources ? 0 : 1}"
-  name  = "${var.cluster_iam_role_name}"
+  count = var.manage_cluster_iam_resources ? 0 : 1
+  name  = var.cluster_iam_role_name
 }
 
 data "aws_iam_instance_profile" "custom_worker_group_iam_instance_profile" {
-  count = "${var.manage_worker_iam_resources ? 0 : var.worker_group_count}"
-  name  = "${lookup(var.worker_groups[count.index], "iam_instance_profile_name", local.workers_group_defaults["iam_instance_profile_name"])}"
+  count = var.manage_worker_iam_resources ? 0 : local.worker_group_count
+  name = lookup(
+    var.worker_groups[count.index],
+    "iam_instance_profile_name",
+    local.workers_group_defaults["iam_instance_profile_name"],
+  )
 }
 
 data "aws_iam_instance_profile" "custom_worker_group_launch_template_iam_instance_profile" {
-  count = "${var.manage_worker_iam_resources ? 0 : var.worker_group_launch_template_count}"
-  name  = "${lookup(var.worker_groups_launch_template[count.index], "iam_instance_profile_name", local.workers_group_defaults["iam_instance_profile_name"])}"
+  count = var.manage_worker_iam_resources ? 0 : local.worker_group_launch_template_count
+  name = lookup(
+    var.worker_groups_launch_template[count.index],
+    "iam_instance_profile_name",
+    local.workers_group_defaults["iam_instance_profile_name"],
+  )
 }
 
 data "aws_iam_instance_profile" "custom_worker_group_launch_template_mixed_iam_instance_profile" {
-  count = "${var.manage_worker_iam_resources ? 0 : var.worker_group_launch_template_mixed_count}"
-  name  = "${lookup(var.worker_groups_launch_template_mixed[count.index], "iam_instance_profile_name", local.workers_group_defaults["iam_instance_profile_name"])}"
+  count = var.manage_worker_iam_resources ? 0 : local.worker_group_launch_template_mixed_count
+  name = lookup(
+    var.worker_groups_launch_template_mixed[count.index],
+    "iam_instance_profile_name",
+    local.workers_group_defaults["iam_instance_profile_name"],
+  )
 }
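The repeated `lookup(var.worker_groups[count.index], key, local.workers_group_defaults[key])` calls in data.tf implement per-group overrides over module-wide defaults. A standalone sketch of the pattern (group names and values are illustrative):

```hcl
variable "worker_groups" {
  type = list(map(string))
  default = [
    { name = "default" },
    { name = "big", kubelet_extra_args = "--node-labels=size=big" },
  ]
}

locals {
  workers_group_defaults = {
    kubelet_extra_args = ""
  }
}

output "kubelet_extra_args" {
  # Each group may override a key; otherwise the shared default applies
  value = [
    for g in var.worker_groups :
    lookup(g, "kubelet_extra_args", local.workers_group_defaults["kubelet_extra_args"])
  ]
}
```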
@@ -27,8 +27,6 @@ Notes:
 Example worker group configuration that uses an ASG with launch configuration for each worker group:
 
 ```hcl
-worker_group_count = 3
-
 worker_groups = [
   {
     name = "on-demand-1"
@@ -64,8 +62,6 @@ Example worker group configuration that uses an ASG with launch configuration fo
 Launch Template support is a recent addition to both AWS and this module. It might not be as tried and tested but it's more suitable for spot instances as it allowed multiple instance types in the same worker group:
 
 ```hcl
-worker_group_count = 1
-
 worker_groups = [
   {
     name = "on-demand-1"
@@ -77,15 +73,10 @@ Launch Template support is a recent addition to both AWS and this module. It mig
   }
 ]
 
-worker_group_launch_template_mixed_count = 1
-
 worker_groups_launch_template_mixed = [
   {
     name = "spot-1"
-    override_instance_type_1 = "m5.large"
-    override_instance_type_2 = "c5.large"
-    override_instance_type_3 = "t3.large"
-    override_instance_type_4 = "r5.large"
+    override_instance_types = ["m5.large", "c5.large", "t3.large", "r5.large"]
     spot_instance_pools = 3
     asg_max_size = 5
     asg_desired_size = 5
@@ -1,17 +1,18 @@
 terraform {
-  required_version = ">= 0.11.8"
+  required_version = ">= 0.12.0"
 }
 
 provider "aws" {
-  version = ">= 2.6.0"
-  region  = "${var.region}"
+  version = ">= 2.11"
+  region  = var.region
 }
 
 provider "random" {
-  version = "= 1.3.1"
+  version = "~> 2.1"
 }
 
-data "aws_availability_zones" "available" {}
+data "aws_availability_zones" "available" {
+}
 
 locals {
   cluster_name = "test-eks-${random_string.suffix.result}"
@@ -24,7 +25,7 @@ resource "random_string" "suffix" {
 
 resource "aws_security_group" "worker_group_mgmt_one" {
   name_prefix = "worker_group_mgmt_one"
-  vpc_id      = "${module.vpc.vpc_id}"
+  vpc_id      = module.vpc.vpc_id
 
   ingress {
     from_port = 22
@@ -39,7 +40,7 @@ resource "aws_security_group" "worker_group_mgmt_one" {
 
 resource "aws_security_group" "worker_group_mgmt_two" {
   name_prefix = "worker_group_mgmt_two"
-  vpc_id      = "${module.vpc.vpc_id}"
+  vpc_id      = module.vpc.vpc_id
 
   ingress {
     from_port = 22
@@ -54,7 +55,7 @@ resource "aws_security_group" "worker_group_mgmt_two" {
 
 resource "aws_security_group" "all_worker_mgmt" {
   name_prefix = "all_worker_management"
-  vpc_id      = "${module.vpc.vpc_id}"
+  vpc_id      = module.vpc.vpc_id
 
   ingress {
     from_port = 22
@@ -71,10 +72,11 @@ resource "aws_security_group" "all_worker_mgmt" {
 
 module "vpc" {
   source  = "terraform-aws-modules/vpc/aws"
-  version = "1.60.0"
+  version = "2.6.0"
+
   name               = "test-vpc"
   cidr               = "10.0.0.0/16"
-  azs                = ["${data.aws_availability_zones.available.names}"]
+  azs                = data.aws_availability_zones.available.names
   private_subnets    = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
   public_subnets     = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
   enable_nat_gateway = true
@@ -96,8 +98,8 @@ module "vpc" {
 
 module "eks" {
   source       = "../.."
-  cluster_name = "${local.cluster_name}"
-  subnets      = ["${module.vpc.private_subnets}"]
+  cluster_name = local.cluster_name
+  subnets      = module.vpc.private_subnets
 
   tags = {
     Environment = "test"
@@ -105,8 +107,7 @@ module "eks" {
     GithubOrg   = "terraform-aws-modules"
   }
 
-  vpc_id             = "${module.vpc.vpc_id}"
-  worker_group_count = 2
+  vpc_id = module.vpc.vpc_id
 
   worker_groups = [
     {
@@ -114,22 +115,20 @@ module "eks" {
       instance_type                 = "t2.small"
       additional_userdata           = "echo foo bar"
       asg_desired_capacity          = 2
-      additional_security_group_ids = "${aws_security_group.worker_group_mgmt_one.id}"
+      additional_security_group_ids = [aws_security_group.worker_group_mgmt_one.id]
     },
     {
       name                          = "worker-group-2"
      instance_type                 = "t2.medium"
      additional_userdata           = "echo foo bar"
-      additional_security_group_ids = "${aws_security_group.worker_group_mgmt_two.id}"
+      additional_security_group_ids = [aws_security_group.worker_group_mgmt_two.id]
       asg_desired_capacity          = 1
     },
   ]
 
-  worker_additional_security_group_ids = ["${aws_security_group.all_worker_mgmt.id}"]
-  map_roles                            = "${var.map_roles}"
-  map_roles_count                      = "${var.map_roles_count}"
-  map_users                            = "${var.map_users}"
-  map_users_count                      = "${var.map_users_count}"
-  map_accounts                         = "${var.map_accounts}"
-  map_accounts_count                   = "${var.map_accounts_count}"
+  worker_additional_security_group_ids = [aws_security_group.all_worker_mgmt.id]
+  map_roles                            = var.map_roles
+  map_users                            = var.map_users
+  map_accounts                         = var.map_accounts
 }
@@ -1,24 +1,25 @@
 output "cluster_endpoint" {
   description = "Endpoint for EKS control plane."
-  value       = "${module.eks.cluster_endpoint}"
+  value       = module.eks.cluster_endpoint
 }
 
 output "cluster_security_group_id" {
   description = "Security group ids attached to the cluster control plane."
-  value       = "${module.eks.cluster_security_group_id}"
+  value       = module.eks.cluster_security_group_id
 }
 
 output "kubectl_config" {
   description = "kubectl config as generated by the module."
-  value       = "${module.eks.kubeconfig}"
+  value       = module.eks.kubeconfig
 }
 
 output "config_map_aws_auth" {
   description = "A kubernetes configuration to authenticate to this EKS cluster."
-  value       = "${module.eks.config_map_aws_auth}"
+  value       = module.eks.config_map_aws_auth
 }
 
 output "region" {
   description = "AWS region."
-  value       = "${var.region}"
+  value       = var.region
 }
@@ -4,7 +4,7 @@ variable "region" {
 
 variable "map_accounts" {
   description = "Additional AWS account numbers to add to the aws-auth configmap."
-  type        = "list"
+  type        = list(string)
 
   default = [
     "777777777777",
@@ -12,15 +12,9 @@ variable "map_accounts" {
   ]
 }
 
-variable "map_accounts_count" {
-  description = "The count of accounts in the map_accounts list."
-  type        = "string"
-  default     = 2
-}
-
 variable "map_roles" {
   description = "Additional IAM roles to add to the aws-auth configmap."
-  type        = "list"
+  type        = list(map(string))
 
   default = [
     {
@@ -31,15 +25,9 @@ variable "map_roles" {
   ]
 }
 
-variable "map_roles_count" {
-  description = "The count of roles in the map_roles list."
-  type        = "string"
-  default     = 1
-}
-
 variable "map_users" {
   description = "Additional IAM users to add to the aws-auth configmap."
-  type        = "list"
+  type        = list(map(string))
 
   default = [
     {
@@ -54,9 +42,3 @@ variable "map_users" {
     },
   ]
 }
-
-variable "map_users_count" {
-  description = "The count of roles in the map_users list."
-  type        = "string"
-  default     = 2
-}
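The `type = "list"` to `type = list(map(string))` changes above are Terraform 0.12's strict type constructors: the element structure is now validated, and the paired count variables disappear entirely. A sketch (the ARN and names are illustrative):

```hcl
# 0.11: type was a quoted keyword and element types were unchecked
# variable "map_roles" {
#   type = "list"
# }

# 0.12: the type constructor validates structure and element types
variable "map_roles" {
  description = "Additional IAM roles to add to the aws-auth configmap."
  type        = list(map(string))

  default = [
    {
      role_arn = "arn:aws:iam::111111111111:role/example"
      username = "example"
      group    = "system:masters"
    },
  ]
}
```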
@@ -1,17 +1,18 @@
 terraform {
-  required_version = ">= 0.11.8"
+  required_version = ">= 0.12.0"
 }
 
 provider "aws" {
-  version = ">= 2.6.0"
-  region  = "${var.region}"
+  version = ">= 2.11"
+  region  = var.region
 }
 
 provider "random" {
-  version = "= 1.3.1"
+  version = "~> 2.1"
 }
 
-data "aws_availability_zones" "available" {}
+data "aws_availability_zones" "available" {
+}
 
 locals {
   cluster_name = "test-eks-lt-${random_string.suffix.result}"
@@ -24,10 +25,11 @@ resource "random_string" "suffix" {
 
 module "vpc" {
   source  = "terraform-aws-modules/vpc/aws"
-  version = "1.60.0"
+  version = "2.6.0"
+
   name           = "test-vpc-lt"
   cidr           = "10.0.0.0/16"
-  azs            = ["${data.aws_availability_zones.available.names}"]
+  azs            = data.aws_availability_zones.available.names
   public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
 
   tags = {
@@ -37,11 +39,9 @@ module "vpc" {
 
 module "eks" {
   source = "../.."
-  cluster_name                       = "${local.cluster_name}"
-  subnets                            = ["${module.vpc.public_subnets}"]
-  vpc_id                             = "${module.vpc.vpc_id}"
-  worker_group_count                 = 0
-  worker_group_launch_template_count = 2
+  cluster_name = local.cluster_name
+  subnets      = module.vpc.public_subnets
+  vpc_id       = module.vpc.vpc_id
 
   worker_groups_launch_template = [
     {
@@ -58,3 +58,4 @@ module "eks" {
     },
   ]
 }
@@ -1,24 +1,25 @@
 output "cluster_endpoint" {
   description = "Endpoint for EKS control plane."
-  value       = "${module.eks.cluster_endpoint}"
+  value       = module.eks.cluster_endpoint
 }
 
 output "cluster_security_group_id" {
   description = "Security group ids attached to the cluster control plane."
-  value       = "${module.eks.cluster_security_group_id}"
+  value       = module.eks.cluster_security_group_id
 }
 
 output "kubectl_config" {
   description = "kubectl config as generated by the module."
-  value       = "${module.eks.kubeconfig}"
+  value       = module.eks.kubeconfig
 }
 
 output "config_map_aws_auth" {
   description = "A kubernetes configuration to authenticate to this EKS cluster."
-  value       = "${module.eks.config_map_aws_auth}"
+  value       = module.eks.config_map_aws_auth
 }
 
 output "region" {
   description = "AWS region."
-  value       = "${var.region}"
+  value       = var.region
 }
@@ -1,3 +1,4 @@
 variable "region" {
   default = "us-west-2"
 }
@@ -1,17 +1,18 @@
 terraform {
-  required_version = ">= 0.11.8"
+  required_version = ">= 0.12.0"
 }
 
 provider "aws" {
-  version = ">= 2.6.0"
-  region  = "${var.region}"
+  version = ">= 2.11"
+  region  = var.region
 }
 
 provider "random" {
-  version = "= 1.3.1"
+  version = "~> 2.1"
 }
 
-data "aws_availability_zones" "available" {}
+data "aws_availability_zones" "available" {
+}
 
 locals {
   cluster_name = "test-eks-spot-${random_string.suffix.result}"
@@ -24,10 +25,11 @@ resource "random_string" "suffix" {
 
 module "vpc" {
   source  = "terraform-aws-modules/vpc/aws"
-  version = "1.60.0"
+  version = "2.6.0"
+
   name           = "test-vpc-spot"
   cidr           = "10.0.0.0/16"
-  azs            = ["${data.aws_availability_zones.available.names}"]
+  azs            = data.aws_availability_zones.available.names
   public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
 
   tags = {
@@ -37,19 +39,14 @@ module "vpc" {
 
 module "eks" {
   source = "../.."
-  cluster_name                             = "${local.cluster_name}"
-  subnets                                  = ["${module.vpc.public_subnets}"]
-  vpc_id                                   = "${module.vpc.vpc_id}"
-  worker_group_count                       = 0
-  worker_group_launch_template_mixed_count = 1
+  cluster_name = local.cluster_name
+  subnets      = module.vpc.public_subnets
+  vpc_id       = module.vpc.vpc_id
 
   worker_groups_launch_template_mixed = [
     {
       name = "spot-1"
-      override_instance_type_1 = "m5.large"
-      override_instance_type_2 = "c5.large"
-      override_instance_type_3 = "t3.large"
-      override_instance_type_4 = "r5.large"
+      override_instance_types = ["m5.large", "c5.large", "t3.large", "r5.large"]
       spot_instance_pools  = 4
       asg_max_size         = 5
       asg_desired_capacity = 5
@@ -58,3 +55,4 @@ module "eks" {
     },
   ]
 }
@@ -1,24 +1,25 @@
 output "cluster_endpoint" {
   description = "Endpoint for EKS control plane."
-  value       = "${module.eks.cluster_endpoint}"
+  value       = module.eks.cluster_endpoint
 }
 
 output "cluster_security_group_id" {
   description = "Security group ids attached to the cluster control plane."
-  value       = "${module.eks.cluster_security_group_id}"
+  value       = module.eks.cluster_security_group_id
 }
 
 output "kubectl_config" {
   description = "kubectl config as generated by the module."
-  value       = "${module.eks.kubeconfig}"
+  value       = module.eks.kubeconfig
 }
 
 output "config_map_aws_auth" {
   description = "A kubernetes configuration to authenticate to this EKS cluster."
-  value       = "${module.eks.config_map_aws_auth}"
+  value       = module.eks.config_map_aws_auth
 }
 
 output "region" {
   description = "AWS region."
-  value       = "${var.region}"
+  value       = var.region
 }
@@ -1,3 +1,4 @@
 variable "region" {
   default = "us-west-2"
 }
@@ -1,5 +1,6 @@
 resource "local_file" "kubeconfig" {
-  count    = "${var.write_kubeconfig ? 1 : 0}"
-  content  = "${data.template_file.kubeconfig.rendered}"
+  count    = var.write_kubeconfig ? 1 : 0
+  content  = data.template_file.kubeconfig.rendered
   filename = "${var.config_output_path}kubeconfig_${var.cluster_name}"
 }
local.tf

@@ -1,74 +1,91 @@
 locals {
-  asg_tags = ["${null_resource.tags_as_list_of_maps.*.triggers}"]
+  asg_tags = null_resource.tags_as_list_of_maps.*.triggers
 
   # Followed recommendation http://67bricks.com/blog/?p=85
   # to workaround terraform not supporting short circut evaluation
-  cluster_security_group_id = "${coalesce(join("", aws_security_group.cluster.*.id), var.cluster_security_group_id)}"
+  cluster_security_group_id = coalesce(
+    join("", aws_security_group.cluster.*.id),
+    var.cluster_security_group_id,
+  )
 
-  cluster_iam_role_name = "${coalesce(join("", aws_iam_role.cluster.*.name), var.cluster_iam_role_name)}"
-  cluster_iam_role_arn  = "${coalesce(join("", aws_iam_role.cluster.*.arn), join("", data.aws_iam_role.custom_cluster_iam_role.*.arn))}"
+  cluster_iam_role_name = coalesce(
+    join("", aws_iam_role.cluster.*.name),
+    var.cluster_iam_role_name,
+    "aws-eks"
+  )
+  cluster_iam_role_arn = coalesce(
+    join("", aws_iam_role.cluster.*.arn),
+    join("", data.aws_iam_role.custom_cluster_iam_role.*.arn),
+    "aws-eks"
+  )
 
-  worker_security_group_id = "${coalesce(join("", aws_security_group.workers.*.id), var.worker_security_group_id)}"
-  default_iam_role_id      = "${element(concat(aws_iam_role.workers.*.id, list("")), 0)}"
-  kubeconfig_name          = "${var.kubeconfig_name == "" ? "eks_${var.cluster_name}" : var.kubeconfig_name}"
+  worker_security_group_id = coalesce(
+    join("", aws_security_group.workers.*.id),
+    var.worker_security_group_id,
+  )
+  default_iam_role_id = concat(aws_iam_role.workers.*.id, [""])[0]
+  kubeconfig_name     = var.kubeconfig_name == "" ? "eks_${var.cluster_name}" : var.kubeconfig_name
+
+  worker_group_count                       = length(var.worker_groups)
+  worker_group_launch_template_count       = length(var.worker_groups_launch_template)
+  worker_group_launch_template_mixed_count = length(var.worker_groups_launch_template_mixed)
 
   workers_group_defaults_defaults = {
-    name                          = "count.index"                      # Name of the worker group. Literal count.index will never be used but if name is not set, the count.index interpolation will be used.
-    ami_id                        = "${data.aws_ami.eks_worker.id}"    # AMI ID for the eks workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI.
-    asg_desired_capacity          = "1"                                # Desired worker capacity in the autoscaling group.
-    asg_max_size                  = "3"                                # Maximum worker capacity in the autoscaling group.
-    asg_min_size                  = "1"                                # Minimum worker capacity in the autoscaling group.
-    asg_force_delete              = false                              # Enable forced deletion for the autoscaling group.
-    instance_type                 = "m4.large"                         # Size of the workers instances.
-    spot_price                    = ""                                 # Cost of spot instance.
-    placement_tenancy             = ""                                 # The tenancy of the instance. Valid values are "default" or "dedicated".
-    root_volume_size              = "100"                              # root volume size of workers instances.
-    root_volume_type              = "gp2"                              # root volume type of workers instances, can be 'standard', 'gp2', or 'io1'
-    root_iops                     = "0"                                # The amount of provisioned IOPS. This must be set with a volume_type of "io1".
-    key_name                      = ""                                 # The key name that should be used for the instances in the autoscaling group
-    pre_userdata                  = ""                                 # userdata to pre-append to the default userdata.
-    bootstrap_extra_args          = ""                                 # Extra arguments passed to the bootstrap.sh script from the EKS AMI.
-    additional_userdata           = ""                                 # userdata to append to the default userdata.
-    ebs_optimized                 = true                               # sets whether to use ebs optimization on supported types.
-    enable_monitoring             = true                               # Enables/disables detailed monitoring.
-    public_ip                     = false                              # Associate a public ip address with a worker
-    kubelet_extra_args            = ""                                 # This string is passed directly to kubelet if set. Useful for adding labels or taints.
-    subnets                       = "${join(",", var.subnets)}"        # A comma delimited string of subnets to place the worker nodes in. i.e. subnet-123,subnet-456,subnet-789
-    autoscaling_enabled           = false                              # Sets whether policy and matching tags will be added to allow autoscaling.
-    additional_security_group_ids = ""                                 # A comma delimited list of additional security group ids to include in worker launch config
-    protect_from_scale_in         = false                              # Prevent AWS from scaling in, so that cluster-autoscaler is solely responsible.
-    iam_instance_profile_name     = ""                                 # A custom IAM instance profile name. Used when manage_worker_iam_resources is set to false. Incompatible with iam_role_id.
-    iam_role_id                   = "${local.default_iam_role_id}"     # A custom IAM role id. Incompatible with iam_instance_profile_name.
-    suspended_processes           = "AZRebalance"                      # A comma delimited string of processes to to suspend. i.e. AZRebalance,HealthCheck,ReplaceUnhealthy
-    target_group_arns             = ""                                 # A comma delimited list of ALB target group ARNs to be associated to the ASG
-    enabled_metrics               = ""                                 # A comma delimited list of metrics to be collected i.e. GroupMinSize,GroupMaxSize,GroupDesiredCapacity
-    placement_group               = ""                                 # The name of the placement group into which to launch the instances, if any.
-    service_linked_role_arn       = ""                                 # Arn of custom service linked role that Auto Scaling group will use. Useful when you have encrypted EBS
-    termination_policies          = ""                                 # A comma delimited list of policies to decide how the instances in the auto scale group should be terminated.
+    name                          = "count.index"               # Name of the worker group. Literal count.index will never be used but if name is not set, the count.index interpolation will be used.
+    tags                          = []                          # A list of map defining extra tags to be applied to the worker group ASG.
+    ami_id                        = data.aws_ami.eks_worker.id  # AMI ID for the eks workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI.
+    asg_desired_capacity          = "1"                         # Desired worker capacity in the autoscaling group.
+    asg_max_size                  = "3"                         # Maximum worker capacity in the autoscaling group.
+    asg_min_size                  = "1"                         # Minimum worker capacity in the autoscaling group.
+    asg_force_delete              = false                       # Enable forced deletion for the autoscaling group.
+    instance_type                 = "m4.large"                  # Size of the workers instances.
+    spot_price                    = ""                          # Cost of spot instance.
+    placement_tenancy             = ""                          # The tenancy of the instance. Valid values are "default" or "dedicated".
+    root_volume_size              = "100"                       # root volume size of workers instances.
+    root_volume_type              = "gp2"                       # root volume type of workers instances, can be 'standard', 'gp2', or 'io1'
+    root_iops                     = "0"                         # The amount of provisioned IOPS. This must be set with a volume_type of "io1".
+    key_name                      = ""                          # The key name that should be used for the instances in the autoscaling group
+    pre_userdata                  = ""                          # userdata to pre-append to the default userdata.
+    bootstrap_extra_args          = ""                          # Extra arguments passed to the bootstrap.sh script from the EKS AMI.
+    additional_userdata           = ""                          # userdata to append to the default userdata.
+    ebs_optimized                 = true                        # sets whether to use ebs optimization on supported types.
+    enable_monitoring             = true                        # Enables/disables detailed monitoring.
+    public_ip                     = false                       # Associate a public ip address with a worker
+    kubelet_extra_args            = ""                          # This string is passed directly to kubelet if set. Useful for adding labels or taints.
+    subnets                       = var.subnets                 # A list of subnets to place the worker nodes in. i.e. ["subnet-123", "subnet-456", "subnet-789"]
+    autoscaling_enabled           = false                       # Sets whether policy and matching tags will be added to allow autoscaling.
+    additional_security_group_ids = []                          # A list of additional security group ids to include in worker launch config
+    protect_from_scale_in         = false                       # Prevent AWS from scaling in, so that cluster-autoscaler is solely responsible.
+    iam_instance_profile_name     = ""                          # A custom IAM instance profile name. Used when manage_worker_iam_resources is set to false. Incompatible with iam_role_id.
+    iam_role_id                   = local.default_iam_role_id   # A custom IAM role id. Incompatible with iam_instance_profile_name.
+    suspended_processes           = ["AZRebalance"]             # A list of processes to to suspend. i.e. ["AZRebalance", "HealthCheck", "ReplaceUnhealthy"]
+    target_group_arns             = []                          # A list of ALB target group ARNs to be associated to the ASG
+    enabled_metrics               = []                          # A list of metrics to be collected i.e. ["GroupMinSize", "GroupMaxSize", "GroupDesiredCapacity"]
+    placement_group               = ""                          # The name of the placement group into which to launch the instances, if any.
+    service_linked_role_arn       = ""                          # Arn of custom service linked role that Auto Scaling group will use. Useful when you have encrypted EBS
+    termination_policies          = []                          # A list of policies to decide how the instances in the auto scale group should be terminated.
     # Settings for launch templates
-    root_block_device_name            = "${data.aws_ami.eks_worker.root_device_name}" # Root device name for workers. If non is provided, will assume default AMI was used.
-    root_kms_key_id                   = ""                                            # The KMS key to use when encrypting the root storage device
-    launch_template_version           = "$Latest"                                     # The lastest version of the launch template to use in the autoscaling group
-    launch_template_placement_tenancy = "default"                                     # The placement tenancy for instances
-    launch_template_placement_group   = ""                                            # The name of the placement group into which to launch the instances, if any.
-    root_encrypted                    = ""                                            # Whether the volume should be encrypted or not
-    eni_delete                        = true                                          # Delete the ENI on termination (if set to false you will have to manually delete before destroying)
+    root_block_device_name            = data.aws_ami.eks_worker.root_device_name  # Root device name for workers. If non is provided, will assume default AMI was used.
+    root_kms_key_id                   = ""                                        # The KMS key to use when encrypting the root storage device
+    launch_template_version           = "$Latest"                                 # The lastest version of the launch template to use in the autoscaling group
+    launch_template_placement_tenancy = "default"                                 # The placement tenancy for instances
+    launch_template_placement_group   = ""                                        # The name of the placement group into which to launch the instances, if any.
+    root_encrypted                    = ""                                        # Whether the volume should be encrypted or not
+    eni_delete                        = true                                      # Delete the ENI on termination (if set to false you will have to manually delete before destroying)
     # Settings for launch templates with mixed instances policy
-    override_instance_type_1                 = "m5.large"     # Override instance type 1 for mixed instances policy
-    override_instance_type_2                 = "c5.large"     # Override instance type 2 for mixed instances policy
-    override_instance_type_3                 = "t3.large"     # Override instance type 3 for mixed instances policy
-    override_instance_type_4                 = "r5.large"     # Override instance type 4 for mixed instances policy
-    on_demand_allocation_strategy            = "prioritized"  # Strategy to use when launching on-demand instances. Valid values: prioritized.
-    on_demand_base_capacity                  = "0"            # Absolute minimum amount of desired capacity that must be fulfilled by on-demand instances
-    on_demand_percentage_above_base_capacity = "0"            # Percentage split between on-demand and Spot instances above the base on-demand capacity
-    spot_allocation_strategy                 = "lowest-price" # The only valid value is lowest-price, which is also the default value. The Auto Scaling group selects the cheapest Spot pools and evenly allocates your Spot capacity across the number of Spot pools that you specify.
-    spot_instance_pools                      = 10             # "Number of Spot pools per availability zone to allocate capacity. EC2 Auto Scaling selects the cheapest Spot pools and evenly allocates Spot capacity across the number of Spot pools that you specify."
-    spot_max_price                           = ""             # Maximum price per unit hour that the user is willing to pay for the Spot instances. Default is the on-demand price
+    override_instance_types                  = ["m5.large", "c5.large", "t3.large", "r5.large"]  # A list of override instance types for mixed instances policy
+    on_demand_allocation_strategy            = "prioritized"                                     # Strategy to use when launching on-demand instances. Valid values: prioritized.
+    on_demand_base_capacity                  = "0"                                               # Absolute minimum amount of desired capacity that must be fulfilled by on-demand instances
+    on_demand_percentage_above_base_capacity = "0"                                               # Percentage split between on-demand and Spot instances above the base on-demand capacity
+    spot_allocation_strategy                 = "lowest-price"                                    # The only valid value is lowest-price, which is also the default value. The Auto Scaling group selects the cheapest Spot pools and evenly allocates your Spot capacity across the number of Spot pools that you specify.
+    spot_instance_pools                      = 10                                                # "Number of Spot pools per availability zone to allocate capacity. EC2 Auto Scaling selects the cheapest Spot pools and evenly allocates Spot capacity across the number of Spot pools that you specify."
+    spot_max_price                           = ""                                                # Maximum price per unit hour that the user is willing to pay for the Spot instances. Default is the on-demand price
|
||||
}
|
||||
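The four numbered override_instance_type_N keys collapse into a single override_instance_types list in this release. A migration sketch for one mixed-instances group (group name and instance types are illustrative):

worker_groups_launch_template_mixed = [
  {
    name = "spot-mixed"

    # before (0.11): override_instance_type_1 = "m5.large"
    #                override_instance_type_2 = "c5.large"
    # after (0.12): a single list
    override_instance_types = ["m5.large", "c5.large"]
  },
]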

workers_group_defaults = "${merge(local.workers_group_defaults_defaults, var.workers_group_defaults)}"
workers_group_defaults = merge(
  local.workers_group_defaults_defaults,
  var.workers_group_defaults,
)
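Because workers_group_defaults is merged over workers_group_defaults_defaults, any key above can be overridden once for every worker group. A sketch of a module call overriding two defaults (values are illustrative):

workers_group_defaults = {
  root_volume_size = "50"
  key_name         = "my-ssh-key"  # illustrative key pair name
}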

ebs_optimized = {
  "c1.medium" = false
@@ -222,3 +239,4 @@ locals {
  "x1e.32xlarge" = true
}
}

70 outputs.tf
@@ -1,114 +1,138 @@

output "cluster_id" {
  description = "The name/id of the EKS cluster."
  value = "${aws_eks_cluster.this.id}"
  value = aws_eks_cluster.this.id
}

output "cluster_arn" {
  description = "The Amazon Resource Name (ARN) of the cluster."
  value = "${aws_eks_cluster.this.arn}"
  value = aws_eks_cluster.this.arn
}

output "cluster_certificate_authority_data" {
  description = "Nested attribute containing certificate-authority-data for your cluster. This is the base64 encoded certificate data required to communicate with your cluster."
  value = "${aws_eks_cluster.this.certificate_authority.0.data}"
  value = aws_eks_cluster.this.certificate_authority[0].data
}

output "cluster_endpoint" {
  description = "The endpoint for your EKS Kubernetes API."
  value = "${aws_eks_cluster.this.endpoint}"
  value = aws_eks_cluster.this.endpoint
}

output "cluster_version" {
  description = "The Kubernetes server version for the EKS cluster."
  value = "${aws_eks_cluster.this.version}"
  value = aws_eks_cluster.this.version
}

output "cluster_security_group_id" {
  description = "Security group ID attached to the EKS cluster."
  value = "${local.cluster_security_group_id}"
  value = local.cluster_security_group_id
}

output "config_map_aws_auth" {
  description = "A kubernetes configuration to authenticate to this EKS cluster."
  value = "${data.template_file.config_map_aws_auth.rendered}"
  value = data.template_file.config_map_aws_auth.rendered
}

output "cluster_iam_role_name" {
  description = "IAM role name of the EKS cluster."
  value = "${local.cluster_iam_role_name}"
  value = local.cluster_iam_role_name
}

output "cluster_iam_role_arn" {
  description = "IAM role ARN of the EKS cluster."
  value = "${local.cluster_iam_role_arn}"
  value = local.cluster_iam_role_arn
}

output "kubeconfig" {
  description = "kubectl config file contents for this EKS cluster."
  value = "${data.template_file.kubeconfig.rendered}"
  value = data.template_file.kubeconfig.rendered
}

output "kubeconfig_filename" {
  description = "The filename of the generated kubectl config."
  value = "${element(concat(local_file.kubeconfig.*.filename, list("")), 0)}"
  value = concat(local_file.kubeconfig.*.filename, [""])[0]
}

output "workers_asg_arns" {
  description = "ARNs of the autoscaling groups containing workers."
  value = "${concat(aws_autoscaling_group.workers.*.arn, aws_autoscaling_group.workers_launch_template.*.arn, aws_autoscaling_group.workers_launch_template_mixed.*.arn)}"
  value = concat(
    aws_autoscaling_group.workers.*.arn,
    aws_autoscaling_group.workers_launch_template.*.arn,
    aws_autoscaling_group.workers_launch_template_mixed.*.arn,
  )
}

output "workers_asg_names" {
  description = "Names of the autoscaling groups containing workers."
  value = "${concat(aws_autoscaling_group.workers.*.id, aws_autoscaling_group.workers_launch_template.*.id, aws_autoscaling_group.workers_launch_template_mixed.*.id)}"
  value = concat(
    aws_autoscaling_group.workers.*.id,
    aws_autoscaling_group.workers_launch_template.*.id,
    aws_autoscaling_group.workers_launch_template_mixed.*.id,
  )
}

output "workers_user_data" {
  description = "User data of worker groups"
  value = "${concat(data.template_file.userdata.*.rendered, data.template_file.launch_template_userdata.*.rendered)}"
  value = concat(
    data.template_file.userdata.*.rendered,
    data.template_file.launch_template_userdata.*.rendered,
  )
}

output "workers_default_ami_id" {
  description = "ID of the default worker group AMI"
  value = "${data.aws_ami.eks_worker.id}"
  value = data.aws_ami.eks_worker.id
}

output "workers_launch_template_ids" {
  description = "IDs of the worker launch templates."
  value = "${aws_launch_template.workers_launch_template.*.id}"
  value = aws_launch_template.workers_launch_template.*.id
}

output "workers_launch_template_arns" {
  description = "ARNs of the worker launch templates."
  value = "${aws_launch_template.workers_launch_template.*.arn}"
  value = aws_launch_template.workers_launch_template.*.arn
}

output "workers_launch_template_latest_versions" {
  description = "Latest versions of the worker launch templates."
  value = "${aws_launch_template.workers_launch_template.*.latest_version}"
  value = aws_launch_template.workers_launch_template.*.latest_version
}

output "worker_security_group_id" {
  description = "Security group ID attached to the EKS workers."
  value = "${local.worker_security_group_id}"
  value = local.worker_security_group_id
}

output "worker_iam_instance_profile_arns" {
  description = "default IAM instance profile ARN for EKS worker groups"
  value = "${aws_iam_instance_profile.workers.*.arn}"
  value = aws_iam_instance_profile.workers.*.arn
}

output "worker_iam_instance_profile_names" {
  description = "default IAM instance profile name for EKS worker groups"
  value = "${aws_iam_instance_profile.workers.*.name}"
  value = aws_iam_instance_profile.workers.*.name
}

output "worker_iam_role_name" {
  description = "default IAM role name for EKS worker groups"
  value = "${element(coalescelist(aws_iam_role.workers.*.name, data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.role_name, data.aws_iam_instance_profile.custom_worker_group_launch_template_iam_instance_profile.*.role_name, data.aws_iam_instance_profile.custom_worker_group_launch_template_mixed_iam_instance_profile.*.role_name), 0)}"
  value = coalescelist(
    aws_iam_role.workers.*.name,
    data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.role_name,
    data.aws_iam_instance_profile.custom_worker_group_launch_template_iam_instance_profile.*.role_name,
    data.aws_iam_instance_profile.custom_worker_group_launch_template_mixed_iam_instance_profile.*.role_name,
    [""]
  )[0]
}

output "worker_iam_role_arn" {
  description = "default IAM role ARN for EKS worker groups"
  value = "${element(coalescelist(aws_iam_role.workers.*.arn, data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.role_arn, data.aws_iam_instance_profile.custom_worker_group_launch_template_iam_instance_profile.*.role_arn, data.aws_iam_instance_profile.custom_worker_group_launch_template_mixed_iam_instance_profile.*.role_arn), 0)}"
  value = coalescelist(
    aws_iam_role.workers.*.arn,
    data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.role_arn,
    data.aws_iam_instance_profile.custom_worker_group_launch_template_iam_instance_profile.*.role_arn,
    data.aws_iam_instance_profile.custom_worker_group_launch_template_mixed_iam_instance_profile.*.role_arn,
    [""]
  )[0]
}
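These outputs are the values downstream configuration typically consumes. A minimal sketch of wiring them into the kubernetes provider, assuming the module is instantiated as module "my-cluster" and a provider version that supports these arguments (illustrative, not part of this module):

provider "kubernetes" {
  host                   = module.my-cluster.cluster_endpoint
  cluster_ca_certificate = base64decode(module.my-cluster.cluster_certificate_authority_data)
  load_config_file       = false  # assumption: token/exec auth is configured elsewhere
}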

131 variables.tf
@@ -1,35 +1,41 @@

variable "cluster_enabled_log_types" {
  default = []
  description = "A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html)"
  type = "list"
  type = list(string)
}

variable "cluster_log_retention_in_days" {
  default = "90"
  default = 90
  description = "Number of days to retain log events. Default retention - 90 days."
  type = number
}

variable "cluster_name" {
  description = "Name of the EKS cluster. Also used as a prefix in names of related resources."
  type = string
}

variable "cluster_security_group_id" {
  description = "If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with the necessary ingress/egress to work with the workers and provide API access to your current IP/32."
  type = string
  default = ""
}

variable "cluster_version" {
  description = "Kubernetes version to use for the EKS cluster."
  type = string
  default = "1.12"
}

variable "config_output_path" {
  description = "Where to save the Kubectl config file (if `write_kubeconfig = true`). Should end in a forward slash `/` ."
  type = string
  default = "./"
}

variable "write_kubeconfig" {
  description = "Whether to write a Kubectl config file containing the cluster configuration. Saved to `config_output_path`."
  type = bool
  default = true
}
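write_kubeconfig and config_output_path together control whether and where the module writes a kubectl config. A sketch of a module call using both (the path is illustrative):

module "eks" {
  source = "terraform-aws-modules/eks/aws"
  # ... cluster_name, subnets, vpc_id, etc.

  write_kubeconfig   = true
  config_output_path = "./generated/"  # must end in a forward slash
}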
@@ -40,242 +46,197 @@ variable "manage_aws_auth" {

variable "write_aws_auth_config" {
  description = "Whether to write the aws-auth configmap file."
  type = bool
  default = true
}

variable "map_accounts" {
  description = "Additional AWS account numbers to add to the aws-auth configmap. See examples/basic/variables.tf for example format."
  type = "list"
  type = list(string)
  default = []
}

variable "map_accounts_count" {
  description = "The count of accounts in the map_accounts list."
  type = "string"
  default = 0
}

variable "map_roles" {
  description = "Additional IAM roles to add to the aws-auth configmap. See examples/basic/variables.tf for example format."
  type = "list"
  type = list(map(string))
  default = []
}
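With the xx_count variables removed, map_roles is a plain list(map(string)) whose length the module derives itself. A sketch of the entry shape; the key names and account ID here are illustrative, and examples/basic/variables.tf remains the authoritative format:

map_roles = [
  {
    role_arn = "arn:aws:iam::111122223333:role/ops"  # illustrative account/role
    username = "ops"
    group    = "system:masters"
  },
]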
variable "map_roles_count" {
  description = "The count of roles in the map_roles list."
  type = "string"
  default = 0
}

variable "map_users" {
  description = "Additional IAM users to add to the aws-auth configmap. See examples/basic/variables.tf for example format."
  type = "list"
  type = list(map(string))
  default = []
}

variable "map_users_count" {
  description = "The count of users in the map_users list."
  type = "string"
  default = 0
}

variable "subnets" {
  description = "A list of subnets to place the EKS cluster and workers within."
  type = "list"
  type = list(string)
}

variable "tags" {
  description = "A map of tags to add to all resources."
  type = "map"
  type = map(string)
  default = {}
}

variable "vpc_id" {
  description = "VPC where the cluster and workers will be deployed."
  type = string
}

variable "worker_groups" {
  description = "A list of maps defining worker group configurations to be defined using AWS Launch Configurations. See workers_group_defaults for valid keys."
  type = "list"

  default = [
    {
      "name" = "default"
    },
  ]
}

variable "worker_group_count" {
  description = "The number of maps contained within the worker_groups list."
  type = "string"
  default = "1"
  type = any
  default = []
}

variable "workers_group_defaults" {
  description = "Override default values for target groups. See workers_group_defaults_defaults in local.tf for valid keys."
  type = "map"
  type = any
  default = {}
}

variable "worker_group_tags" {
  description = "A map defining extra tags to be applied to the worker group ASG."
  type = "map"

  default = {
    default = []
  }
}

variable "worker_groups_launch_template" {
  description = "A list of maps defining worker group configurations to be defined using AWS Launch Templates. See workers_group_defaults for valid keys."
  type = "list"

  default = [
    {
      "name" = "default"
    },
  ]
  type = any
  default = []
}

variable "worker_groups_launch_template_mixed" {
  description = "A list of maps defining worker group configurations to be defined using AWS Launch Templates. See workers_group_defaults for valid keys."
  type = "list"

  default = [
    {
      "name" = "default"
    },
  ]
}

variable "worker_group_launch_template_mixed_count" {
  description = "The number of maps contained within the worker_groups_launch_template_mixed list."
  type = "string"
  default = "0"
}

variable "worker_group_launch_template_count" {
  description = "The number of maps contained within the worker_groups_launch_template list."
  type = "string"
  default = "0"
  type = any
  default = []
}
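Since these worker group variables are now type any with counts derived via length(), a launch-template group is declared directly as list entries. A sketch (name and values are illustrative; valid keys are documented in workers_group_defaults):

worker_groups_launch_template = [
  {
    name                    = "lt-1"
    instance_type           = "m5.large"
    launch_template_version = "$Latest"
    public_ip               = false
  },
]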
variable "worker_security_group_id" {
|
||||
description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingres/egress to work with the EKS cluster."
|
||||
type = string
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "worker_ami_name_filter" {
|
||||
description = "Additional name filter for AWS EKS worker AMI. Default behaviour will get latest for the cluster_version but could be set to a release from amazon-eks-ami, e.g. \"v20190220\""
|
||||
type = string
|
||||
default = "v*"
|
||||
}
|
||||
|
||||
variable "worker_additional_security_group_ids" {
|
||||
description = "A list of additional security group ids to attach to worker instances"
|
||||
type = "list"
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "worker_sg_ingress_from_port" {
|
||||
description = "Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443)."
|
||||
default = "1025"
|
||||
type = number
|
||||
default = 1025
|
||||
}
|
||||
|
||||
variable "workers_additional_policies" {
|
||||
description = "Additional policies to be added to workers"
|
||||
type = "list"
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "workers_additional_policies_count" {
|
||||
default = 0
|
||||
}
|
||||
|
||||
variable "kubeconfig_aws_authenticator_command" {
|
||||
description = "Command to use to fetch AWS EKS credentials."
|
||||
type = string
|
||||
default = "aws-iam-authenticator"
|
||||
}
|
||||
|
||||
variable "kubeconfig_aws_authenticator_command_args" {
|
||||
description = "Default arguments passed to the authenticator command. Defaults to [token -i $cluster_name]."
|
||||
type = "list"
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "kubeconfig_aws_authenticator_additional_args" {
|
||||
description = "Any additional arguments to pass to the authenticator such as the role to assume. e.g. [\"-r\", \"MyEksRole\"]."
|
||||
type = "list"
|
||||
type = list(string)
|
||||
default = []
|
||||
}
|
||||
|
||||
variable "kubeconfig_aws_authenticator_env_variables" {
|
||||
description = "Environment variables that should be used when executing the authenticator. e.g. { AWS_PROFILE = \"eks\"}."
|
||||
type = "map"
|
||||
type = map(string)
|
||||
default = {}
|
||||
}
|
||||
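The kubeconfig_aws_authenticator_* inputs feed the exec section of the generated kubeconfig. A sketch combining an assumed role with a profile (the ARN and profile name are illustrative):

kubeconfig_aws_authenticator_additional_args = ["-r", "arn:aws:iam::111122223333:role/EksAdmin"]

kubeconfig_aws_authenticator_env_variables = {
  AWS_PROFILE = "eks"
}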

variable "kubeconfig_name" {
  description = "Override the default name used for items kubeconfig."
  type = string
  default = ""
}

variable "cluster_create_timeout" {
  description = "Timeout value when creating the EKS cluster."
  type = string
  default = "15m"
}

variable "cluster_delete_timeout" {
  description = "Timeout value when deleting the EKS cluster."
  type = string
  default = "15m"
}

variable "local_exec_interpreter" {
  description = "Command to run for local-exec resources. Must be a shell-style interpreter. If you are on Windows, Git Bash is a good choice."
  type = "list"
  type = list(string)
  default = ["/bin/sh", "-c"]
}

variable "cluster_create_security_group" {
  description = "Whether to create a security group for the cluster or attach the cluster to `cluster_security_group_id`."
  type = bool
  default = true
}

variable "worker_create_security_group" {
  description = "Whether to create a security group for the workers or attach the workers to `worker_security_group_id`."
  type = bool
  default = true
}

variable "permissions_boundary" {
  description = "If provided, all IAM roles will be created with this permissions boundary attached."
  type = string
  default = ""
}

variable "iam_path" {
  description = "If provided, all IAM roles will be created on this path."
  type = string
  default = "/"
}

variable "cluster_endpoint_private_access" {
  description = "Indicates whether or not the Amazon EKS private API server endpoint is enabled."
  type = bool
  default = false
}

variable "cluster_endpoint_public_access" {
  description = "Indicates whether or not the Amazon EKS public API server endpoint is enabled."
  type = bool
  default = true
}

variable "manage_cluster_iam_resources" {
  description = "Whether to let the module manage cluster IAM resources. If set to false, cluster_iam_role_name must be specified."
  type = bool
  default = true
}

variable "cluster_iam_role_name" {
  description = "IAM role name for the cluster. Only applicable if manage_cluster_iam_resources is set to false."
  type = string
  default = ""
}

variable "manage_worker_iam_resources" {
  description = "Whether to let the module manage worker IAM resources. If set to false, iam_instance_profile_name must be specified for workers."
  type = bool
  default = true
}
4 versions.tf Normal file
@@ -0,0 +1,4 @@

terraform {
  required_version = ">= 0.12"
}
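The new versions.tf makes Terraform refuse to run this module under 0.11. A sketch of how a consumer might pin the module when adopting the upgrade (the version constraint is illustrative):

module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 5.0"  # assumption: the first release containing the 0.12 upgrade
  # ...
}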
331 workers.tf
@@ -1,59 +1,202 @@

# Worker Groups using Launch Configurations

resource "aws_autoscaling_group" "workers" {
  count = "${var.worker_group_count}"
  name_prefix = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}"
  desired_capacity = "${lookup(var.worker_groups[count.index], "asg_desired_capacity", local.workers_group_defaults["asg_desired_capacity"])}"
  max_size = "${lookup(var.worker_groups[count.index], "asg_max_size", local.workers_group_defaults["asg_max_size"])}"
  min_size = "${lookup(var.worker_groups[count.index], "asg_min_size", local.workers_group_defaults["asg_min_size"])}"
  force_delete = "${lookup(var.worker_groups[count.index], "asg_force_delete", local.workers_group_defaults["asg_force_delete"])}"
  target_group_arns = ["${compact(split(",", coalesce(lookup(var.worker_groups[count.index], "target_group_arns", ""), local.workers_group_defaults["target_group_arns"])))}"]
  service_linked_role_arn = "${lookup(var.worker_groups[count.index], "service_linked_role_arn", local.workers_group_defaults["service_linked_role_arn"])}"
  launch_configuration = "${element(aws_launch_configuration.workers.*.id, count.index)}"
  vpc_zone_identifier = ["${split(",", coalesce(lookup(var.worker_groups[count.index], "subnets", ""), local.workers_group_defaults["subnets"]))}"]
  protect_from_scale_in = "${lookup(var.worker_groups[count.index], "protect_from_scale_in", local.workers_group_defaults["protect_from_scale_in"])}"
  suspended_processes = ["${compact(split(",", coalesce(lookup(var.worker_groups[count.index], "suspended_processes", ""), local.workers_group_defaults["suspended_processes"])))}"]
  enabled_metrics = ["${compact(split(",", coalesce(lookup(var.worker_groups[count.index], "enabled_metrics", ""), local.workers_group_defaults["enabled_metrics"])))}"]
  placement_group = "${lookup(var.worker_groups[count.index], "placement_group", local.workers_group_defaults["placement_group"])}"
  termination_policies = ["${compact(split(",", coalesce(lookup(var.worker_groups[count.index], "termination_policies", ""), local.workers_group_defaults["termination_policies"])))}"]
  count = local.worker_group_count
  name_prefix = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}"
  desired_capacity = lookup(
    var.worker_groups[count.index],
    "asg_desired_capacity",
    local.workers_group_defaults["asg_desired_capacity"],
  )
  max_size = lookup(
    var.worker_groups[count.index],
    "asg_max_size",
    local.workers_group_defaults["asg_max_size"],
  )
  min_size = lookup(
    var.worker_groups[count.index],
    "asg_min_size",
    local.workers_group_defaults["asg_min_size"],
  )
  force_delete = lookup(
    var.worker_groups[count.index],
    "asg_force_delete",
    local.workers_group_defaults["asg_force_delete"],
  )
  target_group_arns = lookup(
    var.worker_groups[count.index],
    "target_group_arns",
    local.workers_group_defaults["target_group_arns"]
  )
  service_linked_role_arn = lookup(
    var.worker_groups[count.index],
    "service_linked_role_arn",
    local.workers_group_defaults["service_linked_role_arn"],
  )
  launch_configuration = aws_launch_configuration.workers.*.id[count.index]
  vpc_zone_identifier = lookup(
    var.worker_groups[count.index],
    "subnets",
    local.workers_group_defaults["subnets"]
  )
  protect_from_scale_in = lookup(
    var.worker_groups[count.index],
    "protect_from_scale_in",
    local.workers_group_defaults["protect_from_scale_in"],
  )
  suspended_processes = lookup(
    var.worker_groups[count.index],
    "suspended_processes",
    local.workers_group_defaults["suspended_processes"]
  )
  enabled_metrics = lookup(
    var.worker_groups[count.index],
    "enabled_metrics",
    local.workers_group_defaults["enabled_metrics"]
  )
  placement_group = lookup(
    var.worker_groups[count.index],
    "placement_group",
    local.workers_group_defaults["placement_group"],
  )
  termination_policies = lookup(
    var.worker_groups[count.index],
    "termination_policies",
    local.workers_group_defaults["termination_policies"]
  )

  tags = ["${concat(
    list(
      map("key", "Name", "value", "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}-eks_asg", "propagate_at_launch", true),
      map("key", "kubernetes.io/cluster/${aws_eks_cluster.this.name}", "value", "owned", "propagate_at_launch", true),
      map("key", "k8s.io/cluster-autoscaler/${lookup(var.worker_groups[count.index], "autoscaling_enabled", local.workers_group_defaults["autoscaling_enabled"]) == 1 ? "enabled" : "disabled"}", "value", "true", "propagate_at_launch", false),
      map("key", "k8s.io/cluster-autoscaler/${aws_eks_cluster.this.name}", "value", "", "propagate_at_launch", false),
      map("key", "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage", "value", "${lookup(var.worker_groups[count.index], "root_volume_size", local.workers_group_defaults["root_volume_size"])}Gi", "propagate_at_launch", false)
    ),
    local.asg_tags,
    var.worker_group_tags[contains(keys(var.worker_group_tags), "${lookup(var.worker_groups[count.index], "name", count.index)}") ? "${lookup(var.worker_groups[count.index], "name", count.index)}" : "default"])
  }"]
  tags = concat(
    [
      {
        "key" = "Name"
        "value" = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}-eks_asg"
        "propagate_at_launch" = true
      },
      {
        "key" = "kubernetes.io/cluster/${aws_eks_cluster.this.name}"
        "value" = "owned"
        "propagate_at_launch" = true
      },
      {
        "key" = "k8s.io/cluster-autoscaler/${lookup(
          var.worker_groups[count.index],
          "autoscaling_enabled",
          local.workers_group_defaults["autoscaling_enabled"],
        ) ? "enabled" : "disabled"}"
        "value" = "true"
        "propagate_at_launch" = false
      },
      {
        "key" = "k8s.io/cluster-autoscaler/${aws_eks_cluster.this.name}"
        "value" = aws_eks_cluster.this.name
        "propagate_at_launch" = false
      },
      {
        "key" = "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage"
        "value" = "${lookup(
          var.worker_groups[count.index],
          "root_volume_size",
          local.workers_group_defaults["root_volume_size"],
        )}Gi"
        "propagate_at_launch" = false
      },
    ],
    local.asg_tags,
    lookup(
      var.worker_groups[count.index],
      "tags",
      local.workers_group_defaults["tags"]
    )
  )
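Per-group ASG tags now live on the worker group map itself; the old worker_group_tags map keyed by group name is gone. A sketch of the new shape (key and value are illustrative):

worker_groups = [
  {
    name = "frontend"
    tags = [
      {
        key                 = "Team"
        value               = "frontend"
        propagate_at_launch = true
      },
    ]
  },
]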

  lifecycle {
    create_before_destroy = true
    ignore_changes = ["desired_capacity"]
    ignore_changes = [desired_capacity]
  }
}

resource "aws_launch_configuration" "workers" {
  count = "${var.worker_group_count}"
  name_prefix = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}"
  associate_public_ip_address = "${lookup(var.worker_groups[count.index], "public_ip", local.workers_group_defaults["public_ip"])}"
  security_groups = ["${local.worker_security_group_id}", "${var.worker_additional_security_group_ids}", "${compact(split(",", lookup(var.worker_groups[count.index], "additional_security_group_ids", local.workers_group_defaults["additional_security_group_ids"])))}"]
  iam_instance_profile = "${element(coalescelist(aws_iam_instance_profile.workers.*.id, data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.name), count.index)}"
  image_id = "${lookup(var.worker_groups[count.index], "ami_id", local.workers_group_defaults["ami_id"])}"
  instance_type = "${lookup(var.worker_groups[count.index], "instance_type", local.workers_group_defaults["instance_type"])}"
  key_name = "${lookup(var.worker_groups[count.index], "key_name", local.workers_group_defaults["key_name"])}"
  user_data_base64 = "${base64encode(element(data.template_file.userdata.*.rendered, count.index))}"
  ebs_optimized = "${lookup(var.worker_groups[count.index], "ebs_optimized", lookup(local.ebs_optimized, lookup(var.worker_groups[count.index], "instance_type", local.workers_group_defaults["instance_type"]), false))}"
  enable_monitoring = "${lookup(var.worker_groups[count.index], "enable_monitoring", local.workers_group_defaults["enable_monitoring"])}"
  spot_price = "${lookup(var.worker_groups[count.index], "spot_price", local.workers_group_defaults["spot_price"])}"
  placement_tenancy = "${lookup(var.worker_groups[count.index], "placement_tenancy", local.workers_group_defaults["placement_tenancy"])}"
  count = local.worker_group_count
  name_prefix = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}"
  associate_public_ip_address = lookup(
    var.worker_groups[count.index],
    "public_ip",
    local.workers_group_defaults["public_ip"],
  )
  security_groups = flatten([
    local.worker_security_group_id,
    var.worker_additional_security_group_ids,
    lookup(
      var.worker_groups[count.index],
      "additional_security_group_ids",
      local.workers_group_defaults["additional_security_group_ids"]
    )
  ])
  iam_instance_profile = coalescelist(
    aws_iam_instance_profile.workers.*.id,
    data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.name,
  )[count.index]
  image_id = lookup(
    var.worker_groups[count.index],
    "ami_id",
    local.workers_group_defaults["ami_id"],
  )
  instance_type = lookup(
    var.worker_groups[count.index],
    "instance_type",
    local.workers_group_defaults["instance_type"],
  )
  key_name = lookup(
    var.worker_groups[count.index],
    "key_name",
    local.workers_group_defaults["key_name"],
  )
  user_data_base64 = base64encode(data.template_file.userdata.*.rendered[count.index])
  ebs_optimized = lookup(
    var.worker_groups[count.index],
    "ebs_optimized",
    lookup(
      local.ebs_optimized,
      lookup(
        var.worker_groups[count.index],
        "instance_type",
        local.workers_group_defaults["instance_type"],
      ),
      false,
    ),
  )
  enable_monitoring = lookup(
    var.worker_groups[count.index],
    "enable_monitoring",
    local.workers_group_defaults["enable_monitoring"],
  )
  spot_price = lookup(
    var.worker_groups[count.index],
    "spot_price",
    local.workers_group_defaults["spot_price"],
  )
  placement_tenancy = lookup(
    var.worker_groups[count.index],
    "placement_tenancy",
    local.workers_group_defaults["placement_tenancy"],
  )

  root_block_device {
    volume_size = "${lookup(var.worker_groups[count.index], "root_volume_size", local.workers_group_defaults["root_volume_size"])}"
    volume_type = "${lookup(var.worker_groups[count.index], "root_volume_type", local.workers_group_defaults["root_volume_type"])}"
    iops = "${lookup(var.worker_groups[count.index], "root_iops", local.workers_group_defaults["root_iops"])}"
    volume_size = lookup(
      var.worker_groups[count.index],
      "root_volume_size",
      local.workers_group_defaults["root_volume_size"],
    )
    volume_type = lookup(
      var.worker_groups[count.index],
      "root_volume_type",
      local.workers_group_defaults["root_volume_type"],
    )
    iops = lookup(
      var.worker_groups[count.index],
      "root_iops",
      local.workers_group_defaults["root_iops"],
    )
    delete_on_termination = true
  }

@@ -63,19 +206,24 @@ resource "aws_launch_configuration" "workers" {
}

resource "aws_security_group" "workers" {
  count = "${var.worker_create_security_group ? 1 : 0}"
  name_prefix = "${aws_eks_cluster.this.name}"
  count = var.worker_create_security_group ? 1 : 0
  name_prefix = aws_eks_cluster.this.name
  description = "Security group for all nodes in the cluster."
  vpc_id = "${var.vpc_id}"
  tags = "${merge(var.tags, map("Name", "${aws_eks_cluster.this.name}-eks_worker_sg", "kubernetes.io/cluster/${aws_eks_cluster.this.name}", "owned"
  ))}"
  vpc_id = var.vpc_id
  tags = merge(
    var.tags,
    {
      "Name" = "${aws_eks_cluster.this.name}-eks_worker_sg"
      "kubernetes.io/cluster/${aws_eks_cluster.this.name}" = "owned"
    },
  )
}

resource "aws_security_group_rule" "workers_egress_internet" {
  count = "${var.worker_create_security_group ? 1 : 0}"
  count = var.worker_create_security_group ? 1 : 0
  description = "Allow nodes all egress to the Internet."
  protocol = "-1"
  security_group_id = "${aws_security_group.workers.id}"
  security_group_id = aws_security_group.workers[0].id
  cidr_blocks = ["0.0.0.0/0"]
  from_port = 0
  to_port = 0
@@ -83,112 +231,116 @@ resource "aws_security_group_rule" "workers_egress_internet" {
}

resource "aws_security_group_rule" "workers_ingress_self" {
  count = "${var.worker_create_security_group ? 1 : 0}"
  count = var.worker_create_security_group ? 1 : 0
  description = "Allow node to communicate with each other."
  protocol = "-1"
  security_group_id = "${aws_security_group.workers.id}"
  source_security_group_id = "${aws_security_group.workers.id}"
  security_group_id = aws_security_group.workers[0].id
  source_security_group_id = aws_security_group.workers[0].id
  from_port = 0
  to_port = 65535
  type = "ingress"
}

resource "aws_security_group_rule" "workers_ingress_cluster" {
  count = "${var.worker_create_security_group ? 1 : 0}"
  count = var.worker_create_security_group ? 1 : 0
  description = "Allow workers pods to receive communication from the cluster control plane."
  protocol = "tcp"
  security_group_id = "${aws_security_group.workers.id}"
  source_security_group_id = "${local.cluster_security_group_id}"
  from_port = "${var.worker_sg_ingress_from_port}"
  security_group_id = aws_security_group.workers[0].id
  source_security_group_id = local.cluster_security_group_id
  from_port = var.worker_sg_ingress_from_port
  to_port = 65535
  type = "ingress"
}

resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" {
  count = "${var.worker_create_security_group ? (var.worker_sg_ingress_from_port > 10250 ? 1 : 0) : 0}"
  count = var.worker_create_security_group ? var.worker_sg_ingress_from_port > 10250 ? 1 : 0 : 0
  description = "Allow workers Kubelets to receive communication from the cluster control plane."
  protocol = "tcp"
  security_group_id = "${aws_security_group.workers.id}"
  source_security_group_id = "${local.cluster_security_group_id}"
  security_group_id = aws_security_group.workers[0].id
  source_security_group_id = local.cluster_security_group_id
  from_port = 10250
  to_port = 10250
  type = "ingress"
}

resource "aws_security_group_rule" "workers_ingress_cluster_https" {
  count = "${var.worker_create_security_group ? 1 : 0}"
  count = var.worker_create_security_group ? 1 : 0
  description = "Allow pods running extension API servers on port 443 to receive communication from cluster control plane."
  protocol = "tcp"
  security_group_id = "${aws_security_group.workers.id}"
  source_security_group_id = "${local.cluster_security_group_id}"
  security_group_id = aws_security_group.workers[0].id
  source_security_group_id = local.cluster_security_group_id
  from_port = 443
  to_port = 443
  type = "ingress"
}
resource "aws_iam_role" "workers" {
|
||||
count = "${var.manage_worker_iam_resources ? 1 : 0}"
|
||||
name_prefix = "${aws_eks_cluster.this.name}"
|
||||
assume_role_policy = "${data.aws_iam_policy_document.workers_assume_role_policy.json}"
|
||||
permissions_boundary = "${var.permissions_boundary}"
|
||||
path = "${var.iam_path}"
|
||||
count = var.manage_worker_iam_resources ? 1 : 0
|
||||
name_prefix = aws_eks_cluster.this.name
|
||||
assume_role_policy = data.aws_iam_policy_document.workers_assume_role_policy.json
|
||||
permissions_boundary = var.permissions_boundary
|
||||
path = var.iam_path
|
||||
force_detach_policies = true
|
||||
}
|
||||
|
||||
resource "aws_iam_instance_profile" "workers" {
|
||||
count = "${var.manage_worker_iam_resources ? var.worker_group_count : 0}"
|
||||
name_prefix = "${aws_eks_cluster.this.name}"
|
||||
role = "${lookup(var.worker_groups[count.index], "iam_role_id", lookup(local.workers_group_defaults, "iam_role_id"))}"
|
||||
count = var.manage_worker_iam_resources ? local.worker_group_count : 0
|
||||
name_prefix = aws_eks_cluster.this.name
|
||||
role = lookup(
|
||||
var.worker_groups[count.index],
|
||||
"iam_role_id",
|
||||
local.workers_group_defaults["iam_role_id"],
|
||||
)
|
||||
|
||||
path = "${var.iam_path}"
|
||||
path = var.iam_path
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy_attachment" "workers_AmazonEKSWorkerNodePolicy" {
|
||||
count = "${var.manage_worker_iam_resources ? 1 : 0}"
|
||||
count = var.manage_worker_iam_resources ? 1 : 0
|
||||
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
|
||||
role = "${aws_iam_role.workers.name}"
|
||||
role = aws_iam_role.workers[0].name
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy_attachment" "workers_AmazonEKS_CNI_Policy" {
|
||||
count = "${var.manage_worker_iam_resources ? 1 : 0}"
|
||||
count = var.manage_worker_iam_resources ? 1 : 0
|
||||
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
|
||||
role = "${aws_iam_role.workers.name}"
|
||||
role = aws_iam_role.workers[0].name
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy_attachment" "workers_AmazonEC2ContainerRegistryReadOnly" {
|
||||
count = "${var.manage_worker_iam_resources ? 1 : 0}"
|
||||
count = var.manage_worker_iam_resources ? 1 : 0
|
||||
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
|
||||
role = "${aws_iam_role.workers.name}"
|
||||
role = aws_iam_role.workers[0].name
|
||||
}
|
||||
|
||||
resource "aws_iam_role_policy_attachment" "workers_additional_policies" {
|
||||
count = "${var.manage_worker_iam_resources ? var.workers_additional_policies_count : 0}"
|
||||
role = "${aws_iam_role.workers.name}"
|
||||
policy_arn = "${var.workers_additional_policies[count.index]}"
|
||||
count = var.manage_worker_iam_resources ? length(var.workers_additional_policies) : 0
|
||||
role = aws_iam_role.workers[0].name
|
||||
policy_arn = var.workers_additional_policies[count.index]
|
||||
}
|
||||
|
||||
resource "null_resource" "tags_as_list_of_maps" {
|
||||
count = "${length(keys(var.tags))}"
|
||||
count = length(keys(var.tags))
|
||||
|
||||
triggers = {
|
||||
key = "${element(keys(var.tags), count.index)}"
|
||||
value = "${element(values(var.tags), count.index)}"
|
||||
key = keys(var.tags)[count.index]
|
||||
value = values(var.tags)[count.index]
|
||||
propagate_at_launch = "true"
|
||||
}
|
||||
}
|
||||
|
||||
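The null_resource above flattens var.tags into the list of ASG tag maps exposed as local.asg_tags. Under 0.12 a for expression is the direct equivalent; this is a sketch of the same transformation, not code the module ships:

locals {
  # { Environment = "prod" } becomes
  # [{ key = "Environment", value = "prod", propagate_at_launch = "true" }]
  asg_tags_sketch = [
    for k, v in var.tags : {
      key                 = k
      value               = v
      propagate_at_launch = "true"
    }
  ]
}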
resource "aws_iam_role_policy_attachment" "workers_autoscaling" {
|
||||
count = "${var.manage_worker_iam_resources ? 1 : 0}"
|
||||
policy_arn = "${aws_iam_policy.worker_autoscaling.arn}"
|
||||
role = "${aws_iam_role.workers.name}"
|
||||
count = var.manage_worker_iam_resources ? 1 : 0
|
||||
policy_arn = aws_iam_policy.worker_autoscaling[0].arn
|
||||
role = aws_iam_role.workers[0].name
|
||||
}
|
||||
|
||||
resource "aws_iam_policy" "worker_autoscaling" {
|
||||
count = "${var.manage_worker_iam_resources ? 1 : 0}"
|
||||
count = var.manage_worker_iam_resources ? 1 : 0
|
||||
name_prefix = "eks-worker-autoscaling-${aws_eks_cluster.this.name}"
|
||||
description = "EKS worker node autoscaling policy for cluster ${aws_eks_cluster.this.name}"
|
||||
policy = "${data.aws_iam_policy_document.worker_autoscaling.json}"
|
||||
path = "${var.iam_path}"
|
||||
policy = data.aws_iam_policy_document.worker_autoscaling.json
|
||||
path = var.iam_path
|
||||
}
|
||||
|
||||
data "aws_iam_policy_document" "worker_autoscaling" {
|
||||
@@ -232,3 +384,4 @@ data "aws_iam_policy_document" "worker_autoscaling" {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,92 +1,264 @@
|
||||
# Worker Groups using Launch Templates
|
||||
|
||||
resource "aws_autoscaling_group" "workers_launch_template" {
|
||||
count = "${var.worker_group_launch_template_count}"
|
||||
name_prefix = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups_launch_template[count.index], "name", count.index)}"
|
||||
desired_capacity = "${lookup(var.worker_groups_launch_template[count.index], "asg_desired_capacity", local.workers_group_defaults["asg_desired_capacity"])}"
|
||||
max_size = "${lookup(var.worker_groups_launch_template[count.index], "asg_max_size", local.workers_group_defaults["asg_max_size"])}"
|
||||
min_size = "${lookup(var.worker_groups_launch_template[count.index], "asg_min_size", local.workers_group_defaults["asg_min_size"])}"
|
||||
force_delete = "${lookup(var.worker_groups_launch_template[count.index], "asg_force_delete", local.workers_group_defaults["asg_force_delete"])}"
|
||||
target_group_arns = ["${compact(split(",", coalesce(lookup(var.worker_groups_launch_template[count.index], "target_group_arns", ""), local.workers_group_defaults["target_group_arns"])))}"]
|
||||
service_linked_role_arn = "${lookup(var.worker_groups_launch_template[count.index], "service_linked_role_arn", local.workers_group_defaults["service_linked_role_arn"])}"
|
||||
vpc_zone_identifier = ["${split(",", coalesce(lookup(var.worker_groups_launch_template[count.index], "subnets", ""), local.workers_group_defaults["subnets"]))}"]
|
||||
protect_from_scale_in = "${lookup(var.worker_groups_launch_template[count.index], "protect_from_scale_in", local.workers_group_defaults["protect_from_scale_in"])}"
|
||||
suspended_processes = ["${compact(split(",", coalesce(lookup(var.worker_groups_launch_template[count.index], "suspended_processes", ""), local.workers_group_defaults["suspended_processes"])))}"]
|
||||
enabled_metrics = ["${compact(split(",", coalesce(lookup(var.worker_groups_launch_template[count.index], "enabled_metrics", ""), local.workers_group_defaults["enabled_metrics"])))}"]
|
||||
placement_group = "${lookup(var.worker_groups_launch_template[count.index], "placement_group", local.workers_group_defaults["placement_group"])}"
|
||||
termination_policies = ["${compact(split(",", coalesce(lookup(var.worker_groups_launch_template[count.index], "termination_policies", ""), local.workers_group_defaults["termination_policies"])))}"]
|
||||
count = local.worker_group_launch_template_count
|
||||
name_prefix = "${aws_eks_cluster.this.name}-${lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"name",
|
||||
count.index,
|
||||
)}"
|
||||
desired_capacity = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"asg_desired_capacity",
|
||||
local.workers_group_defaults["asg_desired_capacity"],
|
||||
)
|
||||
max_size = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"asg_max_size",
|
||||
local.workers_group_defaults["asg_max_size"],
|
||||
)
|
||||
min_size = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"asg_min_size",
|
||||
local.workers_group_defaults["asg_min_size"],
|
||||
)
|
||||
force_delete = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"asg_force_delete",
|
||||
local.workers_group_defaults["asg_force_delete"],
|
||||
)
|
||||
target_group_arns = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"target_group_arns",
|
||||
local.workers_group_defaults["target_group_arns"]
|
||||
)
|
||||
service_linked_role_arn = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"service_linked_role_arn",
|
||||
local.workers_group_defaults["service_linked_role_arn"],
|
||||
)
|
||||
vpc_zone_identifier = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"subnets",
|
||||
local.workers_group_defaults["subnets"]
|
||||
)
|
||||
protect_from_scale_in = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"protect_from_scale_in",
|
||||
local.workers_group_defaults["protect_from_scale_in"],
|
||||
)
|
||||
suspended_processes = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"suspended_processes",
|
||||
local.workers_group_defaults["suspended_processes"]
|
||||
)
|
||||
enabled_metrics = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"enabled_metrics",
|
||||
local.workers_group_defaults["enabled_metrics"]
|
||||
)
|
||||
placement_group = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"placement_group",
|
||||
local.workers_group_defaults["placement_group"],
|
||||
)
|
||||
termination_policies = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"termination_policies",
|
||||
local.workers_group_defaults["termination_policies"]
|
||||
)
|
||||
|
||||
launch_template {
|
||||
id = "${element(aws_launch_template.workers_launch_template.*.id, count.index)}"
|
||||
version = "${lookup(var.worker_groups_launch_template[count.index], "launch_template_version", local.workers_group_defaults["launch_template_version"])}"
|
||||
id = aws_launch_template.workers_launch_template.*.id[count.index]
|
||||
version = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"launch_template_version",
|
||||
local.workers_group_defaults["launch_template_version"],
|
||||
)
|
||||
}
|
||||
|
||||
tags = ["${concat(
|
||||
list(
|
||||
map("key", "Name", "value", "${aws_eks_cluster.this.name}-${lookup(var.worker_groups_launch_template[count.index], "name", count.index)}-eks_asg", "propagate_at_launch", true),
|
||||
map("key", "kubernetes.io/cluster/${aws_eks_cluster.this.name}", "value", "owned", "propagate_at_launch", true),
|
||||
map("key", "k8s.io/cluster-autoscaler/${lookup(var.worker_groups_launch_template[count.index], "autoscaling_enabled", local.workers_group_defaults["autoscaling_enabled"]) == 1 ? "enabled" : "disabled"}", "value", "true", "propagate_at_launch", false),
|
||||
map("key", "k8s.io/cluster-autoscaler/${aws_eks_cluster.this.name}", "value", "", "propagate_at_launch", false),
|
||||
map("key", "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage", "value", "${lookup(var.worker_groups_launch_template[count.index], "root_volume_size", local.workers_group_defaults["root_volume_size"])}Gi", "propagate_at_launch", false)
|
||||
),
|
||||
tags = concat(
|
||||
[
|
||||
{
|
||||
"key" = "Name"
|
||||
"value" = "${aws_eks_cluster.this.name}-${lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"name",
|
||||
count.index,
|
||||
)}-eks_asg"
|
||||
"propagate_at_launch" = true
|
||||
},
|
||||
{
|
||||
"key" = "kubernetes.io/cluster/${aws_eks_cluster.this.name}"
|
||||
"value" = "owned"
|
||||
"propagate_at_launch" = true
|
||||
},
|
||||
{
|
||||
"key" = "k8s.io/cluster-autoscaler/${lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"autoscaling_enabled",
|
||||
local.workers_group_defaults["autoscaling_enabled"],
|
||||
) ? "enabled" : "disabled"}"
|
||||
"value" = "true"
|
||||
"propagate_at_launch" = false
|
||||
},
|
||||
{
|
||||
"key" = "k8s.io/cluster-autoscaler/${aws_eks_cluster.this.name}"
|
||||
"value" = aws_eks_cluster.this.name
|
||||
"propagate_at_launch" = false
|
||||
},
|
||||
{
|
||||
"key" = "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage"
|
||||
"value" = "${lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"root_volume_size",
|
||||
local.workers_group_defaults["root_volume_size"],
|
||||
)}Gi"
|
||||
"propagate_at_launch" = false
|
||||
},
|
||||
],
|
||||
local.asg_tags,
|
||||
var.worker_group_tags[contains(keys(var.worker_group_tags), "${lookup(var.worker_groups_launch_template[count.index], "name", count.index)}") ? "${lookup(var.worker_groups_launch_template[count.index], "name", count.index)}" : "default"])
|
||||
}"]
|
||||
lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"tags",
|
||||
local.workers_group_defaults["tags"]
|
||||
)
|
||||
)
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
ignore_changes = ["desired_capacity"]
|
||||
ignore_changes = [desired_capacity]
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_launch_template" "workers_launch_template" {
|
||||
count = "${var.worker_group_launch_template_count}"
|
||||
name_prefix = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups_launch_template[count.index], "name", count.index)}"
|
||||
count = local.worker_group_launch_template_count
|
||||
name_prefix = "${aws_eks_cluster.this.name}-${lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"name",
|
||||
count.index,
|
||||
)}"
|
||||
|
||||
network_interfaces {
|
||||
associate_public_ip_address = "${lookup(var.worker_groups_launch_template[count.index], "public_ip", local.workers_group_defaults["public_ip"])}"
|
||||
delete_on_termination = "${lookup(var.worker_groups_launch_template[count.index], "eni_delete", local.workers_group_defaults["eni_delete"])}"
|
||||
|
||||
security_groups = [
|
||||
"${local.worker_security_group_id}",
|
||||
"${var.worker_additional_security_group_ids}",
|
||||
"${compact(split(",", lookup(var.worker_groups_launch_template[count.index], "additional_security_group_ids", local.workers_group_defaults["additional_security_group_ids"])))}",
|
||||
]
|
||||
associate_public_ip_address = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"public_ip",
|
||||
local.workers_group_defaults["public_ip"],
|
||||
)
|
||||
delete_on_termination = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"eni_delete",
|
||||
local.workers_group_defaults["eni_delete"],
|
||||
)
|
||||
security_groups = flatten([
|
||||
local.worker_security_group_id,
|
||||
var.worker_additional_security_group_ids,
|
||||
lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"additional_security_group_ids",
|
||||
local.workers_group_defaults["additional_security_group_ids"],
|
||||
),
|
||||
])
|
||||
}
|
||||
|
||||
iam_instance_profile {
|
||||
name = "${element(coalescelist(aws_iam_instance_profile.workers_launch_template.*.name, data.aws_iam_instance_profile.custom_worker_group_launch_template_iam_instance_profile.*.name), count.index)}"
|
||||
name = coalescelist(
|
||||
aws_iam_instance_profile.workers_launch_template.*.name,
|
||||
data.aws_iam_instance_profile.custom_worker_group_launch_template_iam_instance_profile.*.name,
|
||||
)[count.index]
|
||||
}
|
||||
|
||||
image_id = "${lookup(var.worker_groups_launch_template[count.index], "ami_id", local.workers_group_defaults["ami_id"])}"
|
||||
instance_type = "${lookup(var.worker_groups_launch_template[count.index], "instance_type", local.workers_group_defaults["instance_type"])}"
|
||||
key_name = "${lookup(var.worker_groups_launch_template[count.index], "key_name", local.workers_group_defaults["key_name"])}"
|
||||
user_data = "${base64encode(element(data.template_file.launch_template_userdata.*.rendered, count.index))}"
|
||||
ebs_optimized = "${lookup(var.worker_groups_launch_template[count.index], "ebs_optimized", lookup(local.ebs_optimized, lookup(var.worker_groups_launch_template[count.index], "instance_type", local.workers_group_defaults["instance_type"]), false))}"
|
||||
image_id = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"ami_id",
|
||||
local.workers_group_defaults["ami_id"],
|
||||
)
|
||||
instance_type = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"instance_type",
|
||||
local.workers_group_defaults["instance_type"],
|
||||
)
|
||||
key_name = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"key_name",
|
||||
local.workers_group_defaults["key_name"],
|
||||
)
|
||||
user_data = base64encode(
|
||||
data.template_file.launch_template_userdata.*.rendered[count.index],
|
||||
)
|
||||
ebs_optimized = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"ebs_optimized",
|
||||
lookup(
|
||||
local.ebs_optimized,
|
||||
lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"instance_type",
|
||||
local.workers_group_defaults["instance_type"],
|
||||
),
|
||||
false,
|
||||
),
|
||||
)
|
||||
|
||||
monitoring {
|
||||
enabled = "${lookup(var.worker_groups_launch_template[count.index], "enable_monitoring", local.workers_group_defaults["enable_monitoring"])}"
|
||||
enabled = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"enable_monitoring",
|
||||
local.workers_group_defaults["enable_monitoring"],
|
||||
)
|
||||
}

  placement {
    tenancy = "${lookup(var.worker_groups_launch_template[count.index], "launch_template_placement_tenancy", local.workers_group_defaults["launch_template_placement_tenancy"])}"
    group_name = "${lookup(var.worker_groups_launch_template[count.index], "launch_template_placement_group", local.workers_group_defaults["launch_template_placement_group"])}"
    tenancy = lookup(
      var.worker_groups_launch_template[count.index],
      "launch_template_placement_tenancy",
      local.workers_group_defaults["launch_template_placement_tenancy"],
    )
    group_name = lookup(
      var.worker_groups_launch_template[count.index],
      "launch_template_placement_group",
      local.workers_group_defaults["launch_template_placement_group"],
    )
  }

  block_device_mappings {
    device_name = "${lookup(var.worker_groups_launch_template[count.index], "root_block_device_name", local.workers_group_defaults["root_block_device_name"])}"
    device_name = lookup(
      var.worker_groups_launch_template[count.index],
      "root_block_device_name",
      local.workers_group_defaults["root_block_device_name"],
    )

    ebs {
      volume_size = "${lookup(var.worker_groups_launch_template[count.index], "root_volume_size", local.workers_group_defaults["root_volume_size"])}"
      volume_type = "${lookup(var.worker_groups_launch_template[count.index], "root_volume_type", local.workers_group_defaults["root_volume_type"])}"
      iops = "${lookup(var.worker_groups_launch_template[count.index], "root_iops", local.workers_group_defaults["root_iops"])}"
      encrypted = "${lookup(var.worker_groups_launch_template[count.index], "root_encrypted", local.workers_group_defaults["root_encrypted"])}"
      kms_key_id = "${lookup(var.worker_groups_launch_template[count.index], "root_kms_key_id", local.workers_group_defaults["root_kms_key_id"])}"
      volume_size = lookup(
        var.worker_groups_launch_template[count.index],
        "root_volume_size",
        local.workers_group_defaults["root_volume_size"],
      )
      volume_type = lookup(
        var.worker_groups_launch_template[count.index],
        "root_volume_type",
        local.workers_group_defaults["root_volume_type"],
      )
      iops = lookup(
        var.worker_groups_launch_template[count.index],
        "root_iops",
        local.workers_group_defaults["root_iops"],
      )
      encrypted = lookup(
        var.worker_groups_launch_template[count.index],
        "root_encrypted",
        local.workers_group_defaults["root_encrypted"],
      )
      kms_key_id = lookup(
        var.worker_groups_launch_template[count.index],
        "root_kms_key_id",
        local.workers_group_defaults["root_kms_key_id"],
      )
      delete_on_termination = true
    }
  }
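Every root-volume setting funnels through the same lookup-with-default pattern, so a group overrides only the keys it cares about and inherits the rest from `workers_group_defaults`. A caller-side sketch (values illustrative):

worker_groups_launch_template = [
  {
    name             = "workers"   # hypothetical group
    root_volume_size = 100         # falls back to workers_group_defaults when omitted
    root_encrypted   = true
  },
]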

  tags = "${var.tags}"
  tags = var.tags

  lifecycle {
    create_before_destroy = true
@@ -94,8 +266,13 @@ resource "aws_launch_template" "workers_launch_template" {
  }

resource "aws_iam_instance_profile" "workers_launch_template" {
  count = "${var.manage_worker_iam_resources ? var.worker_group_launch_template_count : 0}"
  name_prefix = "${aws_eks_cluster.this.name}"
  role = "${lookup(var.worker_groups_launch_template[count.index], "iam_role_id", lookup(local.workers_group_defaults, "iam_role_id"))}"
  path = "${var.iam_path}"
  count       = var.manage_worker_iam_resources ? local.worker_group_launch_template_count : 0
  name_prefix = aws_eks_cluster.this.name
  role = lookup(
    var.worker_groups_launch_template[count.index],
    "iam_role_id",
    local.workers_group_defaults["iam_role_id"],
  )
  path = var.iam_path
}
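The `count` expression swaps the removed `var.worker_group_launch_template_count` input for a computed local, which 0.12 can evaluate because the worker group variable is now a properly typed list. The local is presumably defined along these lines (assumed definition; it is not shown in this hunk):

locals {
  # assumed: derived from the list length instead of a separate count variable
  worker_group_launch_template_count = length(var.worker_groups_launch_template)
}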


@@ -1,123 +1,328 @@
# Worker Groups using Launch Templates with mixed instances policy

resource "aws_autoscaling_group" "workers_launch_template_mixed" {
  count = "${var.worker_group_launch_template_mixed_count}"
  name_prefix = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups_launch_template_mixed[count.index], "name", count.index)}"
  desired_capacity = "${lookup(var.worker_groups_launch_template_mixed[count.index], "asg_desired_capacity", local.workers_group_defaults["asg_desired_capacity"])}"
  max_size = "${lookup(var.worker_groups_launch_template_mixed[count.index], "asg_max_size", local.workers_group_defaults["asg_max_size"])}"
  min_size = "${lookup(var.worker_groups_launch_template_mixed[count.index], "asg_min_size", local.workers_group_defaults["asg_min_size"])}"
  force_delete = "${lookup(var.worker_groups_launch_template_mixed[count.index], "asg_force_delete", local.workers_group_defaults["asg_force_delete"])}"
  target_group_arns = ["${compact(split(",", coalesce(lookup(var.worker_groups_launch_template_mixed[count.index], "target_group_arns", ""), local.workers_group_defaults["target_group_arns"])))}"]
  service_linked_role_arn = "${lookup(var.worker_groups_launch_template_mixed[count.index], "service_linked_role_arn", local.workers_group_defaults["service_linked_role_arn"])}"
  vpc_zone_identifier = ["${split(",", coalesce(lookup(var.worker_groups_launch_template_mixed[count.index], "subnets", ""), local.workers_group_defaults["subnets"]))}"]
  protect_from_scale_in = "${lookup(var.worker_groups_launch_template_mixed[count.index], "protect_from_scale_in", local.workers_group_defaults["protect_from_scale_in"])}"
  suspended_processes = ["${compact(split(",", coalesce(lookup(var.worker_groups_launch_template_mixed[count.index], "suspended_processes", ""), local.workers_group_defaults["suspended_processes"])))}"]
  enabled_metrics = ["${compact(split(",", coalesce(lookup(var.worker_groups_launch_template_mixed[count.index], "enabled_metrics", ""), local.workers_group_defaults["enabled_metrics"])))}"]
  placement_group = "${lookup(var.worker_groups_launch_template_mixed[count.index], "placement_group", local.workers_group_defaults["placement_group"])}"
  termination_policies = ["${compact(split(",", coalesce(lookup(var.worker_groups_launch_template_mixed[count.index], "termination_policies", ""), local.workers_group_defaults["termination_policies"])))}"]
  count = local.worker_group_launch_template_mixed_count
  name_prefix = "${aws_eks_cluster.this.name}-${lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "name",
    count.index,
  )}"
  desired_capacity = lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "asg_desired_capacity",
    local.workers_group_defaults["asg_desired_capacity"],
  )
  max_size = lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "asg_max_size",
    local.workers_group_defaults["asg_max_size"],
  )
  min_size = lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "asg_min_size",
    local.workers_group_defaults["asg_min_size"],
  )
  force_delete = lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "asg_force_delete",
    local.workers_group_defaults["asg_force_delete"],
  )
  target_group_arns = lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "target_group_arns",
    local.workers_group_defaults["target_group_arns"]
  )
  service_linked_role_arn = lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "service_linked_role_arn",
    local.workers_group_defaults["service_linked_role_arn"],
  )
  vpc_zone_identifier = lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "subnets",
    local.workers_group_defaults["subnets"]
  )
  protect_from_scale_in = lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "protect_from_scale_in",
    local.workers_group_defaults["protect_from_scale_in"],
  )
  suspended_processes = lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "suspended_processes",
    local.workers_group_defaults["suspended_processes"]
  )
  enabled_metrics = lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "enabled_metrics",
    local.workers_group_defaults["enabled_metrics"]
  )
  placement_group = lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "placement_group",
    local.workers_group_defaults["placement_group"],
  )
  termination_policies = lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "termination_policies",
    local.workers_group_defaults["termination_policies"]
  )
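All the list-typed ASG arguments above (`target_group_arns`, `vpc_zone_identifier`, `suspended_processes`, `enabled_metrics`, `termination_policies`) now consume the group's list directly instead of running `compact(split(",", coalesce(...)))` over comma-joined strings. A group entry under the new convention might look like this (all values illustrative):

worker_groups_launch_template_mixed = [
  {
    name                 = "spot-1"                            # hypothetical group
    subnets              = ["subnet-012345", "subnet-6789ab"]
    suspended_processes  = ["AZRebalance"]
    termination_policies = ["OldestLaunchTemplate"]
  },
]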

  mixed_instances_policy {
    instances_distribution {
      on_demand_allocation_strategy = "${lookup(var.worker_groups_launch_template_mixed[count.index], "on_demand_allocation_strategy", local.workers_group_defaults["on_demand_allocation_strategy"])}"
      on_demand_base_capacity = "${lookup(var.worker_groups_launch_template_mixed[count.index], "on_demand_base_capacity", local.workers_group_defaults["on_demand_base_capacity"])}"
      on_demand_percentage_above_base_capacity = "${lookup(var.worker_groups_launch_template_mixed[count.index], "on_demand_percentage_above_base_capacity", local.workers_group_defaults["on_demand_percentage_above_base_capacity"])}"
      spot_allocation_strategy = "${lookup(var.worker_groups_launch_template_mixed[count.index], "spot_allocation_strategy", local.workers_group_defaults["spot_allocation_strategy"])}"
      spot_instance_pools = "${lookup(var.worker_groups_launch_template_mixed[count.index], "spot_instance_pools", local.workers_group_defaults["spot_instance_pools"])}"
      spot_max_price = "${lookup(var.worker_groups_launch_template_mixed[count.index], "spot_max_price", local.workers_group_defaults["spot_max_price"])}"
      on_demand_allocation_strategy = lookup(
        var.worker_groups_launch_template_mixed[count.index],
        "on_demand_allocation_strategy",
        local.workers_group_defaults["on_demand_allocation_strategy"],
      )
      on_demand_base_capacity = lookup(
        var.worker_groups_launch_template_mixed[count.index],
        "on_demand_base_capacity",
        local.workers_group_defaults["on_demand_base_capacity"],
      )
      on_demand_percentage_above_base_capacity = lookup(
        var.worker_groups_launch_template_mixed[count.index],
        "on_demand_percentage_above_base_capacity",
        local.workers_group_defaults["on_demand_percentage_above_base_capacity"],
      )
      spot_allocation_strategy = lookup(
        var.worker_groups_launch_template_mixed[count.index],
        "spot_allocation_strategy",
        local.workers_group_defaults["spot_allocation_strategy"],
      )
      spot_instance_pools = lookup(
        var.worker_groups_launch_template_mixed[count.index],
        "spot_instance_pools",
        local.workers_group_defaults["spot_instance_pools"],
      )
      spot_max_price = lookup(
        var.worker_groups_launch_template_mixed[count.index],
        "spot_max_price",
        local.workers_group_defaults["spot_max_price"],
      )
    }

    launch_template {
      launch_template_specification {
        launch_template_id = "${element(aws_launch_template.workers_launch_template_mixed.*.id, count.index)}"
        version = "${lookup(var.worker_groups_launch_template_mixed[count.index], "launch_template_version", local.workers_group_defaults["launch_template_version"])}"
        launch_template_id = aws_launch_template.workers_launch_template_mixed.*.id[count.index]
        version = lookup(
          var.worker_groups_launch_template_mixed[count.index],
          "launch_template_version",
          local.workers_group_defaults["launch_template_version"],
        )
      }

      override {
        instance_type = "${lookup(var.worker_groups_launch_template_mixed[count.index], "override_instance_type_1", local.workers_group_defaults["override_instance_type_1"])}"
      dynamic "override" {
        for_each = lookup(
          var.worker_groups_launch_template_mixed[count.index],
          "override_instance_types",
          local.workers_group_defaults["override_instance_types"]
        )

        content {
          instance_type = override.value
        }
      }

      override {
        instance_type = "${lookup(var.worker_groups_launch_template_mixed[count.index], "override_instance_type_2", local.workers_group_defaults["override_instance_type_2"])}"
      }

      override {
        instance_type = "${lookup(var.worker_groups_launch_template_mixed[count.index], "override_instance_type_3", local.workers_group_defaults["override_instance_type_3"])}"
      }

      override {
        instance_type = "${lookup(var.worker_groups_launch_template_mixed[count.index], "override_instance_type_4", local.workers_group_defaults["override_instance_type_4"])}"
      }
    }
  }
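The four hard-coded `override` blocks keyed on `override_instance_type_1` through `override_instance_type_4` collapse into one `dynamic "override"` block iterating over the new `override_instance_types` list, so a group can declare any number of instance types. Caller-side sketch (values illustrative):

worker_groups_launch_template_mixed = [
  {
    name                    = "spot-1"    # hypothetical group
    override_instance_types = ["m5.large", "c5.large", "r5.large"]
  },
]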

  tags = ["${concat(
    list(
      map("key", "Name", "value", "${aws_eks_cluster.this.name}-${lookup(var.worker_groups_launch_template_mixed[count.index], "name", count.index)}-eks_asg", "propagate_at_launch", true),
      map("key", "kubernetes.io/cluster/${aws_eks_cluster.this.name}", "value", "owned", "propagate_at_launch", true),
      map("key", "k8s.io/cluster-autoscaler/${lookup(var.worker_groups_launch_template_mixed[count.index], "autoscaling_enabled", local.workers_group_defaults["autoscaling_enabled"]) == 1 ? "enabled" : "disabled"}", "value", "true", "propagate_at_launch", false),
      map("key", "k8s.io/cluster-autoscaler/${aws_eks_cluster.this.name}", "value", "", "propagate_at_launch", false),
      map("key", "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage", "value", "${lookup(var.worker_groups_launch_template_mixed[count.index], "root_volume_size", local.workers_group_defaults["root_volume_size"])}Gi", "propagate_at_launch", false)
    ),
  tags = concat(
    [
      {
        "key" = "Name"
        "value" = "${aws_eks_cluster.this.name}-${lookup(
          var.worker_groups_launch_template_mixed[count.index],
          "name",
          count.index,
        )}-eks_asg"
        "propagate_at_launch" = true
      },
      {
        "key" = "kubernetes.io/cluster/${aws_eks_cluster.this.name}"
        "value" = "owned"
        "propagate_at_launch" = true
      },
      {
        "key" = "k8s.io/cluster-autoscaler/${lookup(
          var.worker_groups_launch_template_mixed[count.index],
          "autoscaling_enabled",
          local.workers_group_defaults["autoscaling_enabled"],
        ) ? "enabled" : "disabled"}"
        "value" = "true"
        "propagate_at_launch" = false
      },
      {
        "key" = "k8s.io/cluster-autoscaler/${aws_eks_cluster.this.name}"
        "value" = aws_eks_cluster.this.name
        "propagate_at_launch" = false
      },
      {
        "key" = "k8s.io/cluster-autoscaler/node-template/resources/ephemeral-storage"
        "value" = "${lookup(
          var.worker_groups_launch_template_mixed[count.index],
          "root_volume_size",
          local.workers_group_defaults["root_volume_size"],
        )}Gi"
        "propagate_at_launch" = false
      },
    ],
    local.asg_tags,
    var.worker_group_tags[contains(keys(var.worker_group_tags), "${lookup(var.worker_groups_launch_template_mixed[count.index], "name", count.index)}") ? "${lookup(var.worker_groups_launch_template_mixed[count.index], "name", count.index)}" : "default"])
    }"]
    lookup(
      var.worker_groups_launch_template_mixed[count.index],
      "tags",
      local.workers_group_defaults["tags"]
    )
  )
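Two things change in the tag handling. First, `autoscaling_enabled` is consumed as a real boolean (`? "enabled" : "disabled"`) instead of being compared with `== 1`, a consequence of 0.12's first-class types. Second, the final `concat()` argument no longer reads a separate `var.worker_group_tags` map; per-group tags now live in the group map itself under a `tags` key. Caller-side sketch (values illustrative):

worker_groups_launch_template_mixed = [
  {
    name = "spot-1"    # hypothetical group
    tags = [
      {
        key                 = "team"
        value               = "platform"
        propagate_at_launch = true
      },
    ]
  },
]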

  lifecycle {
    create_before_destroy = true
    ignore_changes = ["desired_capacity"]
    ignore_changes = [desired_capacity]
  }
}

resource "aws_launch_template" "workers_launch_template_mixed" {
  count = "${var.worker_group_launch_template_mixed_count}"
  name_prefix = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups_launch_template_mixed[count.index], "name", count.index)}"
  count = local.worker_group_launch_template_mixed_count
  name_prefix = "${aws_eks_cluster.this.name}-${lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "name",
    count.index,
  )}"

  network_interfaces {
    associate_public_ip_address = "${lookup(var.worker_groups_launch_template_mixed[count.index], "public_ip", local.workers_group_defaults["public_ip"])}"
    delete_on_termination = "${lookup(var.worker_groups_launch_template_mixed[count.index], "eni_delete", local.workers_group_defaults["eni_delete"])}"
    security_groups = ["${local.worker_security_group_id}", "${var.worker_additional_security_group_ids}", "${compact(split(",", lookup(var.worker_groups_launch_template_mixed[count.index], "additional_security_group_ids", local.workers_group_defaults["additional_security_group_ids"])))}"]
    associate_public_ip_address = lookup(
      var.worker_groups_launch_template_mixed[count.index],
      "public_ip",
      local.workers_group_defaults["public_ip"],
    )
    delete_on_termination = lookup(
      var.worker_groups_launch_template_mixed[count.index],
      "eni_delete",
      local.workers_group_defaults["eni_delete"],
    )
    security_groups = flatten([
      local.worker_security_group_id,
      var.worker_additional_security_group_ids,
      lookup(
        var.worker_groups_launch_template_mixed[count.index],
        "additional_security_group_ids",
        local.workers_group_defaults["additional_security_group_ids"]
      )
    ])
  }

  iam_instance_profile {
    name = "${element(coalescelist(aws_iam_instance_profile.workers_launch_template_mixed.*.name, data.aws_iam_instance_profile.custom_worker_group_launch_template_mixed_iam_instance_profile.*.name), count.index)}"
    name = coalescelist(
      aws_iam_instance_profile.workers_launch_template_mixed.*.name,
      data.aws_iam_instance_profile.custom_worker_group_launch_template_mixed_iam_instance_profile.*.name,
    )[count.index]
  }

  image_id = "${lookup(var.worker_groups_launch_template_mixed[count.index], "ami_id", local.workers_group_defaults["ami_id"])}"
  instance_type = "${lookup(var.worker_groups_launch_template_mixed[count.index], "instance_type", local.workers_group_defaults["instance_type"])}"
  key_name = "${lookup(var.worker_groups_launch_template_mixed[count.index], "key_name", local.workers_group_defaults["key_name"])}"
  user_data = "${base64encode(element(data.template_file.workers_launch_template_mixed.*.rendered, count.index))}"
  ebs_optimized = "${lookup(var.worker_groups_launch_template_mixed[count.index], "ebs_optimized", lookup(local.ebs_optimized, lookup(var.worker_groups_launch_template_mixed[count.index], "instance_type", local.workers_group_defaults["instance_type"]), false))}"
  image_id = lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "ami_id",
    local.workers_group_defaults["ami_id"],
  )
  instance_type = lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "instance_type",
    local.workers_group_defaults["instance_type"],
  )
  key_name = lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "key_name",
    local.workers_group_defaults["key_name"],
  )
  user_data = base64encode(
    data.template_file.workers_launch_template_mixed.*.rendered[count.index],
  )
  ebs_optimized = lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "ebs_optimized",
    lookup(
      local.ebs_optimized,
      lookup(
        var.worker_groups_launch_template_mixed[count.index],
        "instance_type",
        local.workers_group_defaults["instance_type"],
      ),
      false,
    ),
  )

  monitoring {
    enabled = "${lookup(var.worker_groups_launch_template_mixed[count.index], "enable_monitoring", local.workers_group_defaults["enable_monitoring"])}"
    enabled = lookup(
      var.worker_groups_launch_template_mixed[count.index],
      "enable_monitoring",
      local.workers_group_defaults["enable_monitoring"],
    )
  }

  placement {
    tenancy = "${lookup(var.worker_groups_launch_template_mixed[count.index], "launch_template_placement_tenancy", local.workers_group_defaults["launch_template_placement_tenancy"])}"
    group_name = "${lookup(var.worker_groups_launch_template_mixed[count.index], "launch_template_placement_group", local.workers_group_defaults["launch_template_placement_group"])}"
    tenancy = lookup(
      var.worker_groups_launch_template_mixed[count.index],
      "launch_template_placement_tenancy",
      local.workers_group_defaults["launch_template_placement_tenancy"],
    )
    group_name = lookup(
      var.worker_groups_launch_template_mixed[count.index],
      "launch_template_placement_group",
      local.workers_group_defaults["launch_template_placement_group"],
    )
  }

  block_device_mappings {
    device_name = "${lookup(var.worker_groups_launch_template_mixed[count.index], "root_block_device_name", local.workers_group_defaults["root_block_device_name"])}"
    device_name = lookup(
      var.worker_groups_launch_template_mixed[count.index],
      "root_block_device_name",
      local.workers_group_defaults["root_block_device_name"],
    )

    ebs {
      volume_size = "${lookup(var.worker_groups_launch_template_mixed[count.index], "root_volume_size", local.workers_group_defaults["root_volume_size"])}"
      volume_type = "${lookup(var.worker_groups_launch_template_mixed[count.index], "root_volume_type", local.workers_group_defaults["root_volume_type"])}"
      iops = "${lookup(var.worker_groups_launch_template_mixed[count.index], "root_iops", local.workers_group_defaults["root_iops"])}"
      encrypted = "${lookup(var.worker_groups_launch_template_mixed[count.index], "root_encrypted", local.workers_group_defaults["root_encrypted"])}"
      kms_key_id = "${lookup(var.worker_groups_launch_template_mixed[count.index], "root_kms_key_id", local.workers_group_defaults["root_kms_key_id"])}"
      volume_size = lookup(
        var.worker_groups_launch_template_mixed[count.index],
        "root_volume_size",
        local.workers_group_defaults["root_volume_size"],
      )
      volume_type = lookup(
        var.worker_groups_launch_template_mixed[count.index],
        "root_volume_type",
        local.workers_group_defaults["root_volume_type"],
      )
      iops = lookup(
        var.worker_groups_launch_template_mixed[count.index],
        "root_iops",
        local.workers_group_defaults["root_iops"],
      )
      encrypted = lookup(
        var.worker_groups_launch_template_mixed[count.index],
        "root_encrypted",
        local.workers_group_defaults["root_encrypted"],
      )
      kms_key_id = lookup(
        var.worker_groups_launch_template_mixed[count.index],
        "root_kms_key_id",
        local.workers_group_defaults["root_kms_key_id"],
      )
      delete_on_termination = true
    }
  }

  tags = var.tags

  lifecycle {
    create_before_destroy = true
  }
}

resource "aws_iam_instance_profile" "workers_launch_template_mixed" {
  count = "${var.manage_worker_iam_resources ? var.worker_group_launch_template_mixed_count : 0}"
  name_prefix = "${aws_eks_cluster.this.name}"
  role = "${lookup(var.worker_groups_launch_template_mixed[count.index], "iam_role_id", lookup(local.workers_group_defaults, "iam_role_id"))}"
  path = "${var.iam_path}"
  count       = var.manage_worker_iam_resources ? local.worker_group_launch_template_mixed_count : 0
  name_prefix = aws_eks_cluster.this.name
  role = lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "iam_role_id",
    local.workers_group_defaults["iam_role_id"],
  )
  path = var.iam_path
}
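When `manage_worker_iam_resources` is false this profile is never created (`count` collapses to 0), and the `coalescelist` in the launch template falls back to the `data.aws_iam_instance_profile.custom_worker_group_launch_template_mixed_iam_instance_profile` lookup, so each group must point at an existing profile. That data source is presumably shaped roughly like this (the `iam_instance_profile_name` attribute is assumed; it is not shown in this hunk):

data "aws_iam_instance_profile" "custom_worker_group_launch_template_mixed_iam_instance_profile" {
  # assumed: only consulted when the module does not manage worker IAM resources
  count = var.manage_worker_iam_resources ? 0 : local.worker_group_launch_template_mixed_count
  name = lookup(
    var.worker_groups_launch_template_mixed[count.index],
    "iam_instance_profile_name",
    local.workers_group_defaults["iam_instance_profile_name"],
  )
}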