initial commit

Max Williams
2018-08-13 10:04:02 +02:00
parent 2e2dd0e215
commit 28f7e9dd41
6 changed files with 91 additions and 10 deletions

README.md

@@ -31,6 +31,10 @@ module "eks" {
 }
 ```
+## Other documentation
+
+- [Autoscaling](docs/autoscaling.md): How to enable worker node autoscaling.
+
 ## Release schedule
 Generally the maintainers will try to release the module once every 2 weeks to
@@ -109,7 +113,6 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a
 | subnets | A list of subnets to place the EKS cluster and workers within. | list | - | yes |
 | tags | A map of tags to add to all resources. | map | `<map>` | no |
 | vpc_id | VPC where the cluster and workers will be deployed. | string | - | yes |
-| worker_group_count | The number of maps contained within the worker_groups list. | string | `1` | no |
 | worker_groups | A list of maps defining worker group configurations. See workers_group_defaults for valid keys. | list | `<list>` | no |
 | worker_security_group_id | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster. | string | `` | no |
 | worker_sg_ingress_from_port | Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443). | string | `1025` | no |

data.tf

@@ -71,7 +71,7 @@ EOF
data "template_file" "userdata" { data "template_file" "userdata" {
template = "${file("${path.module}/templates/userdata.sh.tpl")}" template = "${file("${path.module}/templates/userdata.sh.tpl")}"
count = "${var.worker_group_count}" count = "${length(var.worker_groups)}"
vars { vars {
region = "${data.aws_region.current.name}" region = "${data.aws_region.current.name}"

docs/autoscaling.md (new file)

@@ -0,0 +1,25 @@
# Autoscaling
Autoscaling of worker nodes can be enabled by setting the `autoscaling_enabled` variable to `true` for a worker group in the `worker_groups` list.
This will add the tags required by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) to that worker group's autoscaling group.
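For example, a `worker_groups` entry that opts a single group into autoscaling might look like the sketch below (the group name, instance type, and sizes are illustrative; see `workers_group_defaults` in variables.tf for the full set of valid keys):

```hcl
worker_groups = [
  {
    name                = "autoscaled-workers" # illustrative group name
    instance_type       = "m4.large"
    asg_min_size        = 1
    asg_max_size        = 10
    autoscaling_enabled = true # adds the cluster-autoscaler tags to this group's ASG
  },
]
```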
You will also need to install the cluster-autoscaler in your cluster. The easiest way to do this is with [helm](https://helm.sh/).
The [helm chart](https://github.com/helm/charts/tree/master/stable/cluster-autoscaler) for the cluster-autoscaler requires some specific settings to work in an EKS cluster. These settings are supplied via a YAML values file when installing the chart. Here is an example values file:
```yaml
rbac:
  create: true

sslCertPath: /etc/ssl/certs/ca-bundle.crt

autoDiscovery:
  clusterName: YOUR_CLUSTER_NAME
  enabled: true
```
To install the chart, run helm with the `--values` option:
```
helm install stable/cluster-autoscaler --values=path/to/your/values-file.yaml
```
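With Helm 2 (current as of this commit) you can also pin the release name and namespace; here is a variant of the command above, with an illustrative name and namespace:

```
helm install stable/cluster-autoscaler \
  --name cluster-autoscaler \
  --namespace kube-system \
  --values=path/to/your/values-file.yaml
```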

main.tf

@@ -32,6 +32,10 @@
  * }
  * ```
+ * ## Other documentation
+ *
+ * - [Autoscaling](docs/autoscaling.md): How to enable worker node autoscaling.
+ *
  * ## Release schedule
  * Generally the maintainers will try to release the module once every 2 weeks to

variables.tf

@@ -69,12 +69,6 @@ variable "worker_groups" {
   }]
 }

-variable "worker_group_count" {
-  description = "The number of maps contained within the worker_groups list."
-  type        = "string"
-  default     = "1"
-}
-
 variable "workers_group_defaults" {
   description = "Default values for target groups as defined by the list of maps."
   type        = "map"
@@ -98,6 +92,7 @@ variable "workers_group_defaults" {
     public_ip           = false # Associate a public ip address with a worker
     kubelet_node_labels = ""    # This string is passed directly to kubelet via --node-labels= if set. It should be comma delimited with no spaces. If left empty no --node-labels switch is added.
     subnets             = ""    # A comma delimited string of subnets to place the worker nodes in. i.e. subnet-123,subnet-456,subnet-789
+    autoscaling_enabled = false # Sets whether policy and matching tags will be added to allow autoscaling.
   }
 }

workers.tf

@@ -5,12 +5,13 @@ resource "aws_autoscaling_group" "workers" {
   min_size             = "${lookup(var.worker_groups[count.index], "asg_min_size", lookup(var.workers_group_defaults, "asg_min_size"))}"
   launch_configuration = "${element(aws_launch_configuration.workers.*.id, count.index)}"
   vpc_zone_identifier  = ["${split(",", coalesce(lookup(var.worker_groups[count.index], "subnets", ""), join(",", var.subnets)))}"]
-  count                = "${var.worker_group_count}"
+  count                = "${length(var.worker_groups)}"

   tags = ["${concat(
     list(
       map("key", "Name", "value", "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}-eks_asg", "propagate_at_launch", true),
       map("key", "kubernetes.io/cluster/${aws_eks_cluster.this.name}", "value", "owned", "propagate_at_launch", true),
+      map("key", "k8s.io/cluster-autoscaler/${lookup(var.worker_groups[count.index], "autoscaling_enabled", lookup(var.workers_group_defaults, "autoscaling_enabled")) == 1 ? "enabled" : "disabled"}", "value", "true", "propagate_at_launch", false),
     ),
     local.asg_tags)
   }"]
@@ -32,7 +33,7 @@ resource "aws_launch_configuration" "workers" {
   ebs_optimized     = "${lookup(var.worker_groups[count.index], "ebs_optimized", lookup(local.ebs_optimized, lookup(var.worker_groups[count.index], "instance_type", lookup(var.workers_group_defaults, "instance_type")), false))}"
   enable_monitoring = "${lookup(var.worker_groups[count.index], "enable_monitoring", lookup(var.workers_group_defaults, "enable_monitoring"))}"
   spot_price        = "${lookup(var.worker_groups[count.index], "spot_price", lookup(var.workers_group_defaults, "spot_price"))}"
-  count             = "${var.worker_group_count}"
+  count             = "${length(var.worker_groups)}"

   lifecycle {
     create_before_destroy = true
@@ -122,3 +123,56 @@ resource "null_resource" "tags_as_list_of_maps" {
"propagate_at_launch", "true" "propagate_at_launch", "true"
)}" )}"
} }
resource "aws_iam_role_policy_attachment" "workers_autoscaling" {
policy_arn = "${aws_iam_policy.worker_autoscaling.arn}"
role = "${aws_iam_role.workers.name}"
}
resource "aws_iam_policy" "worker_autoscaling" {
name_prefix = "eks-worker-autoscaling-${aws_eks_cluster.this.name}"
description = "EKS worker node autoscaling policy for cluster ${aws_eks_cluster.this.name}"
policy = "${data.aws_iam_policy_document.worker_autoscaling.json}"
}
data "aws_iam_policy_document" "worker_autoscaling" {
statement {
sid = "eksWorkerAutoscalingAll"
effect = "Allow"
actions = [
"autoscaling:DescribeAutoScalingGroups",
"autoscaling:DescribeAutoScalingInstances",
"autoscaling:DescribeLaunchConfigurations",
"autoscaling:DescribeTags",
"autoscaling:GetAsgForInstance",
]
resources = ["*"]
}
statement {
sid = "eksWorkerAutoscalingOwn"
effect = "Allow"
actions = [
"autoscaling:SetDesiredCapacity",
"autoscaling:TerminateInstanceInAutoScalingGroup",
"autoscaling:UpdateAutoScalingGroup",
]
resources = ["*"]
condition {
test = "StringEquals"
variable = "autoscaling:ResourceTag/kubernetes.io/cluster/${aws_eks_cluster.this.name}"
values = ["owned"]
}
condition {
test = "StringEquals"
variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled"
values = ["true"]
}
}
}