From 28f7e9dd41579820684896ee624ff59963875f56 Mon Sep 17 00:00:00 2001 From: Max Williams Date: Mon, 13 Aug 2018 10:04:02 +0200 Subject: [PATCH] initial commit --- README.md | 5 +++- data.tf | 2 +- docs/autoscaling.md | 25 +++++++++++++++++++ main.tf | 4 ++++ variables.tf | 7 +----- workers.tf | 58 +++++++++++++++++++++++++++++++++++++++++++-- 6 files changed, 91 insertions(+), 10 deletions(-) create mode 100644 docs/autoscaling.md diff --git a/README.md b/README.md index 9f45a61..781cd6d 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,10 @@ module "eks" { } ``` +## Other documentation + +- [Autoscaling](docs/autoscaling.md): How to enable worker node autoscaling. + ## Release schedule Generally the maintainers will try to release the module once every 2 weeks to @@ -109,7 +113,6 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a | subnets | A list of subnets to place the EKS cluster and workers within. | list | - | yes | | tags | A map of tags to add to all resources. | map | `` | no | | vpc_id | VPC where the cluster and workers will be deployed. | string | - | yes | -| worker_group_count | The number of maps contained within the worker_groups list. | string | `1` | no | | worker_groups | A list of maps defining worker group configurations. See workers_group_defaults for valid keys. | list | `` | no | | worker_security_group_id | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingres/egress to work with the EKS cluster. | string | `` | no | | worker_sg_ingress_from_port | Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443). 
| string | `1025` | no | diff --git a/data.tf b/data.tf index 3671d1a..a04e894 100644 --- a/data.tf +++ b/data.tf @@ -71,7 +71,7 @@ EOF data "template_file" "userdata" { template = "${file("${path.module}/templates/userdata.sh.tpl")}" - count = "${var.worker_group_count}" + count = "${length(var.worker_groups)}" vars { region = "${data.aws_region.current.name}" diff --git a/docs/autoscaling.md b/docs/autoscaling.md new file mode 100644 index 0000000..14e1fd7 --- /dev/null +++ b/docs/autoscaling.md @@ -0,0 +1,25 @@ +# Autoscaling + +Autoscaling of worker nodes can be easily enabled by setting the `autoscaling_enabled` variable to `true` for a worker group in the `worker_groups` map. +This will add the required tags to the autoscaling group for the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler). + +You will also need to install the cluster-autoscaler into your cluster. The easiest way to do this is with [helm](https://helm.sh/). + +The [helm chart](https://github.com/helm/charts/tree/master/stable/cluster-autoscaler) for the cluster-autoscaler requires some specific settings to work in an EKS cluster. These settings are supplied via a YAML values file when installing the helm chart. Here is an example values file: + +```yaml +rbac: + create: true + +sslCertPath: /etc/ssl/certs/ca-bundle.crt + +autoDiscovery: + clusterName: YOUR_CLUSTER_NAME + enabled: true +``` + +To install the chart, simply run helm with the `--values` option: + +``` +helm install stable/cluster-autoscaler --values=path/to/your/values-file.yaml +``` diff --git a/main.tf b/main.tf index efa289f..872eec3 100644 --- a/main.tf +++ b/main.tf @@ -32,6 +32,10 @@ * } * ``` +* ## Other documentation +* +* - [Autoscaling](docs/autoscaling.md): How to enable worker node autoscaling. 
+ * ## Release schedule * Generally the maintainers will try to release the module once every 2 weeks to diff --git a/variables.tf b/variables.tf index b2363da..bc672b6 100644 --- a/variables.tf +++ b/variables.tf @@ -69,12 +69,6 @@ variable "worker_groups" { }] } -variable "worker_group_count" { - description = "The number of maps contained within the worker_groups list." - type = "string" - default = "1" -} - variable "workers_group_defaults" { description = "Default values for target groups as defined by the list of maps." type = "map" @@ -98,6 +92,7 @@ variable "workers_group_defaults" { public_ip = false # Associate a public ip address with a worker kubelet_node_labels = "" # This string is passed directly to kubelet via --node-labels= if set. It should be comma delimited with no spaces. If left empty no --node-labels switch is added. subnets = "" # A comma delimited string of subnets to place the worker nodes in. i.e. subnet-123,subnet-456,subnet-789 + autoscaling_enabled = false # Sets whether policy and matching tags will be added to allow autoscaling. 
} } diff --git a/workers.tf b/workers.tf index 070ee86..61258ea 100644 --- a/workers.tf +++ b/workers.tf @@ -5,12 +5,13 @@ resource "aws_autoscaling_group" "workers" { min_size = "${lookup(var.worker_groups[count.index], "asg_min_size",lookup(var.workers_group_defaults, "asg_min_size"))}" launch_configuration = "${element(aws_launch_configuration.workers.*.id, count.index)}" vpc_zone_identifier = ["${split(",", coalesce(lookup(var.worker_groups[count.index], "subnets", ""), join(",", var.subnets)))}"] - count = "${var.worker_group_count}" + count = "${length(var.worker_groups)}" tags = ["${concat( list( map("key", "Name", "value", "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}-eks_asg", "propagate_at_launch", true), map("key", "kubernetes.io/cluster/${aws_eks_cluster.this.name}", "value", "owned", "propagate_at_launch", true), + map("key", "k8s.io/cluster-autoscaler/${lookup(var.worker_groups[count.index], "autoscaling_enabled", lookup(var.workers_group_defaults, "autoscaling_enabled")) == 1 ? 
"enabled" : "disabled" }", "value", "true", "propagate_at_launch", false), ), local.asg_tags) }"] @@ -32,7 +33,7 @@ resource "aws_launch_configuration" "workers" { ebs_optimized = "${lookup(var.worker_groups[count.index], "ebs_optimized", lookup(local.ebs_optimized, lookup(var.worker_groups[count.index], "instance_type", lookup(var.workers_group_defaults, "instance_type")), false))}" enable_monitoring = "${lookup(var.worker_groups[count.index], "enable_monitoring", lookup(var.workers_group_defaults, "enable_monitoring"))}" spot_price = "${lookup(var.worker_groups[count.index], "spot_price", lookup(var.workers_group_defaults, "spot_price"))}" - count = "${var.worker_group_count}" + count = "${length(var.worker_groups)}" lifecycle { create_before_destroy = true @@ -122,3 +123,56 @@ resource "null_resource" "tags_as_list_of_maps" { "propagate_at_launch", "true" )}" } + +resource "aws_iam_role_policy_attachment" "workers_autoscaling" { + policy_arn = "${aws_iam_policy.worker_autoscaling.arn}" + role = "${aws_iam_role.workers.name}" +} + +resource "aws_iam_policy" "worker_autoscaling" { + name_prefix = "eks-worker-autoscaling-${aws_eks_cluster.this.name}" + description = "EKS worker node autoscaling policy for cluster ${aws_eks_cluster.this.name}" + policy = "${data.aws_iam_policy_document.worker_autoscaling.json}" +} + +data "aws_iam_policy_document" "worker_autoscaling" { + statement { + sid = "eksWorkerAutoscalingAll" + effect = "Allow" + + actions = [ + "autoscaling:DescribeAutoScalingGroups", + "autoscaling:DescribeAutoScalingInstances", + "autoscaling:DescribeLaunchConfigurations", + "autoscaling:DescribeTags", + "autoscaling:GetAsgForInstance", + ] + + resources = ["*"] + } + + statement { + sid = "eksWorkerAutoscalingOwn" + effect = "Allow" + + actions = [ + "autoscaling:SetDesiredCapacity", + "autoscaling:TerminateInstanceInAutoScalingGroup", + "autoscaling:UpdateAutoScalingGroup", + ] + + resources = ["*"] + + condition { + test = "StringEquals" + variable = 
"autoscaling:ResourceTag/kubernetes.io/cluster/${aws_eks_cluster.this.name}" + values = ["owned"] + } + + condition { + test = "StringEquals" + variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled" + values = ["true"] + } + } +}