Adding new mixed type of worker group with instance overrides and mixed instances policy (#371)

* Adding new mixed type of worker group with instance overrides and mixed instances policy

* moving all count and lifecycle rule parameters to top/bottom

* adding custom IAM parts

* updating doc with new options

* fixes for spot instances
This commit is contained in:
Max Williams
2019-05-07 16:50:42 +02:00
committed by GitHub
parent 2439c25771
commit ae2f8e58db
13 changed files with 285 additions and 117 deletions

View File

@@ -1,6 +1,7 @@
# Worker Groups using Launch Configurations
resource "aws_autoscaling_group" "workers" {
count = "${var.worker_group_count}"
name_prefix = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}"
desired_capacity = "${lookup(var.worker_groups[count.index], "asg_desired_capacity", local.workers_group_defaults["asg_desired_capacity"])}"
max_size = "${lookup(var.worker_groups[count.index], "asg_max_size", local.workers_group_defaults["asg_max_size"])}"
@@ -13,7 +14,6 @@ resource "aws_autoscaling_group" "workers" {
protect_from_scale_in = "${lookup(var.worker_groups[count.index], "protect_from_scale_in", local.workers_group_defaults["protect_from_scale_in"])}"
suspended_processes = ["${compact(split(",", coalesce(lookup(var.worker_groups[count.index], "suspended_processes", ""), local.workers_group_defaults["suspended_processes"])))}"]
enabled_metrics = ["${compact(split(",", coalesce(lookup(var.worker_groups[count.index], "enabled_metrics", ""), local.workers_group_defaults["enabled_metrics"])))}"]
count = "${var.worker_group_count}"
placement_group = "${lookup(var.worker_groups[count.index], "placement_group", local.workers_group_defaults["placement_group"])}"
tags = ["${concat(
@@ -30,12 +30,12 @@ resource "aws_autoscaling_group" "workers" {
lifecycle {
create_before_destroy = true
ignore_changes = ["desired_capacity"]
ignore_changes = ["desired_capacity"]
}
}
resource "aws_launch_configuration" "workers" {
count = "${var.worker_group_count}"
name_prefix = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}"
associate_public_ip_address = "${lookup(var.worker_groups[count.index], "public_ip", local.workers_group_defaults["public_ip"])}"
security_groups = ["${local.worker_security_group_id}", "${var.worker_additional_security_group_ids}", "${compact(split(",",lookup(var.worker_groups[count.index],"additional_security_group_ids", local.workers_group_defaults["additional_security_group_ids"])))}"]
@@ -48,11 +48,6 @@ resource "aws_launch_configuration" "workers" {
enable_monitoring = "${lookup(var.worker_groups[count.index], "enable_monitoring", local.workers_group_defaults["enable_monitoring"])}"
spot_price = "${lookup(var.worker_groups[count.index], "spot_price", local.workers_group_defaults["spot_price"])}"
placement_tenancy = "${lookup(var.worker_groups[count.index], "placement_tenancy", local.workers_group_defaults["placement_tenancy"])}"
count = "${var.worker_group_count}"
lifecycle {
create_before_destroy = true
}
root_block_device {
volume_size = "${lookup(var.worker_groups[count.index], "root_volume_size", local.workers_group_defaults["root_volume_size"])}"
@@ -60,18 +55,23 @@ resource "aws_launch_configuration" "workers" {
iops = "${lookup(var.worker_groups[count.index], "root_iops", local.workers_group_defaults["root_iops"])}"
delete_on_termination = true
}
lifecycle {
create_before_destroy = true
}
}
# Shared security group for every worker node in the EKS cluster.
# Only created when this module manages the worker SG; otherwise callers
# supply their own via local.worker_security_group_id.
resource "aws_security_group" "workers" {
  # NOTE: the stale duplicate `count` line left over from the stripped diff
  # was removed — HCL allows each argument to be set only once per block.
  count       = "${var.worker_create_security_group ? 1 : 0}"
  name_prefix = "${aws_eks_cluster.this.name}"
  description = "Security group for all nodes in the cluster."
  vpc_id      = "${var.vpc_id}"

  # The "kubernetes.io/cluster/<name>" = "owned" tag is how the AWS cloud
  # provider and cluster tooling discover resources belonging to this cluster.
  tags = "${merge(var.tags, map("Name", "${aws_eks_cluster.this.name}-eks_worker_sg", "kubernetes.io/cluster/${aws_eks_cluster.this.name}", "owned"
  ))}"
}
resource "aws_security_group_rule" "workers_egress_internet" {
count = "${var.worker_create_security_group ? 1 : 0}"
description = "Allow nodes all egress to the Internet."
protocol = "-1"
security_group_id = "${aws_security_group.workers.id}"
@@ -79,10 +79,10 @@ resource "aws_security_group_rule" "workers_egress_internet" {
from_port = 0
to_port = 0
type = "egress"
count = "${var.worker_create_security_group ? 1 : 0}"
}
resource "aws_security_group_rule" "workers_ingress_self" {
count = "${var.worker_create_security_group ? 1 : 0}"
description = "Allow node to communicate with each other."
protocol = "-1"
security_group_id = "${aws_security_group.workers.id}"
@@ -90,10 +90,10 @@ resource "aws_security_group_rule" "workers_ingress_self" {
from_port = 0
to_port = 65535
type = "ingress"
count = "${var.worker_create_security_group ? 1 : 0}"
}
resource "aws_security_group_rule" "workers_ingress_cluster" {
count = "${var.worker_create_security_group ? 1 : 0}"
description = "Allow workers pods to receive communication from the cluster control plane."
protocol = "tcp"
security_group_id = "${aws_security_group.workers.id}"
@@ -101,10 +101,10 @@ resource "aws_security_group_rule" "workers_ingress_cluster" {
from_port = "${var.worker_sg_ingress_from_port}"
to_port = 65535
type = "ingress"
count = "${var.worker_create_security_group ? 1 : 0}"
}
resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" {
count = "${var.worker_create_security_group ? (var.worker_sg_ingress_from_port > 10250 ? 1 : 0) : 0}"
description = "Allow workers Kubelets to receive communication from the cluster control plane."
protocol = "tcp"
security_group_id = "${aws_security_group.workers.id}"
@@ -112,10 +112,10 @@ resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" {
from_port = 10250
to_port = 10250
type = "ingress"
count = "${var.worker_create_security_group ? (var.worker_sg_ingress_from_port > 10250 ? 1 : 0) : 0}"
}
resource "aws_security_group_rule" "workers_ingress_cluster_https" {
count = "${var.worker_create_security_group ? 1 : 0}"
description = "Allow pods running extension API servers on port 443 to receive communication from cluster control plane."
protocol = "tcp"
security_group_id = "${aws_security_group.workers.id}"
@@ -123,41 +123,41 @@ resource "aws_security_group_rule" "workers_ingress_cluster_https" {
from_port = 443
to_port = 443
type = "ingress"
count = "${var.worker_create_security_group ? 1 : 0}"
}
# IAM role assumed by worker-node EC2 instances (trust policy comes from
# data.aws_iam_policy_document.workers_assume_role_policy). Created only
# when the module manages worker IAM resources.
resource "aws_iam_role" "workers" {
  # Duplicate `count` line (stripped-diff artifact) removed; an HCL
  # argument may be set only once per block.
  count                 = "${var.manage_worker_iam_resources ? 1 : 0}"
  name_prefix           = "${aws_eks_cluster.this.name}"
  assume_role_policy    = "${data.aws_iam_policy_document.workers_assume_role_policy.json}"
  permissions_boundary  = "${var.permissions_boundary}"
  path                  = "${var.iam_path}"
  # Allow Terraform to detach managed policies so destroys don't wedge.
  force_detach_policies = true
}
# One instance profile per worker group, so each group can override its IAM
# role via the `iam_role_id` key (falling back to the module-wide default).
resource "aws_iam_instance_profile" "workers" {
  # Duplicate `count` and `path` lines (stripped-diff artifact) removed;
  # each HCL argument may be set only once per block.
  count       = "${var.manage_worker_iam_resources ? var.worker_group_count : 0}"
  name_prefix = "${aws_eks_cluster.this.name}"
  role        = "${lookup(var.worker_groups[count.index], "iam_role_id", lookup(local.workers_group_defaults, "iam_role_id"))}"
  path        = "${var.iam_path}"
}
# Attach the AWS-managed EKS worker-node policy (node registration,
# describe-cluster, etc.) to the managed worker role.
resource "aws_iam_role_policy_attachment" "workers_AmazonEKSWorkerNodePolicy" {
  # Duplicate `count` line (stripped-diff artifact) removed.
  count      = "${var.manage_worker_iam_resources ? 1 : 0}"
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
  role       = "${aws_iam_role.workers.name}"
}
# Attach the AWS-managed VPC CNI policy so the aws-node DaemonSet can
# manage ENIs/IPs on worker instances.
resource "aws_iam_role_policy_attachment" "workers_AmazonEKS_CNI_Policy" {
  # Duplicate `count` line (stripped-diff artifact) removed.
  count      = "${var.manage_worker_iam_resources ? 1 : 0}"
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
  role       = "${aws_iam_role.workers.name}"
}
# Attach read-only ECR access so worker nodes can pull container images.
resource "aws_iam_role_policy_attachment" "workers_AmazonEC2ContainerRegistryReadOnly" {
  # Duplicate `count` line (stripped-diff artifact) removed.
  count      = "${var.manage_worker_iam_resources ? 1 : 0}"
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
  role       = "${aws_iam_role.workers.name}"
}
resource "aws_iam_role_policy_attachment" "workers_additional_policies" {
@@ -177,17 +177,17 @@ resource "null_resource" "tags_as_list_of_maps" {
}
# Attach the module's custom autoscaling policy (defined in
# aws_iam_policy.worker_autoscaling) to the managed worker role.
resource "aws_iam_role_policy_attachment" "workers_autoscaling" {
  # Duplicate `count` line (stripped-diff artifact) removed.
  count      = "${var.manage_worker_iam_resources ? 1 : 0}"
  policy_arn = "${aws_iam_policy.worker_autoscaling.arn}"
  role       = "${aws_iam_role.workers.name}"
}
# Custom IAM policy granting worker nodes the autoscaling permissions used
# by the cluster autoscaler; the document itself is rendered by
# data.aws_iam_policy_document.worker_autoscaling.
resource "aws_iam_policy" "worker_autoscaling" {
  # Duplicate `count` line (stripped-diff artifact) removed; HCL permits
  # each argument only once per block.
  count       = "${var.manage_worker_iam_resources ? 1 : 0}"
  name_prefix = "eks-worker-autoscaling-${aws_eks_cluster.this.name}"
  description = "EKS worker node autoscaling policy for cluster ${aws_eks_cluster.this.name}"
  policy      = "${data.aws_iam_policy_document.worker_autoscaling.json}"
  path        = "${var.iam_path}"
}
data "aws_iam_policy_document" "worker_autoscaling" {