mirror of
https://github.com/ysoftdevs/terraform-aws-eks.git
synced 2026-03-13 05:45:12 +01:00
Merge pull request #93 from max-rocket-internet/autoscaling_policies
Adding autoscaling setting, policy and documentation
This commit is contained in:
@@ -12,6 +12,7 @@ project adheres to [Semantic Versioning](http://semver.org/).
|
||||
- add spot_price option to aws_launch_configuration
|
||||
- add enable_monitoring option to aws_launch_configuration
|
||||
- add t3 instance class settings
|
||||
- Added autoscaling policies to the module; they are optionally attached when enabled for a worker group. (by @max-rocket-internet)
|
||||
|
||||
### Changed
|
||||
|
||||
|
||||
@@ -31,6 +31,10 @@ module "eks" {
|
||||
}
|
||||
```
|
||||
|
||||
## Other documentation
|
||||
|
||||
- [Autoscaling](docs/autoscaling.md): How to enable worker node autoscaling.
|
||||
|
||||
## Release schedule
|
||||
|
||||
Generally the maintainers will try to release the module once every 2 weeks to
|
||||
|
||||
25
docs/autoscaling.md
Normal file
25
docs/autoscaling.md
Normal file
@@ -0,0 +1,25 @@
|
||||
# Autoscaling
|
||||
|
||||
Autoscaling of worker nodes can be easily enabled by setting the `autoscaling_enabled` variable to `true` for a worker group in the `worker_groups` map.
|
||||
This will add the required tags to the autoscaling group for the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler).
|
||||
|
||||
You will also need to install the cluster-autoscaler into your cluster. The easiest way to do this is with [helm](https://helm.sh/).
|
||||
|
||||
The [helm chart](https://github.com/helm/charts/tree/master/stable/cluster-autoscaler) for the cluster-autoscaler requires some specific settings to work in an EKS cluster. These settings are supplied via a YAML values file when installing the helm chart. Here is an example values file:
|
||||
|
||||
```yaml
|
||||
rbac:
|
||||
create: true
|
||||
|
||||
sslCertPath: /etc/ssl/certs/ca-bundle.crt
|
||||
|
||||
autoDiscovery:
|
||||
clusterName: YOUR_CLUSTER_NAME
|
||||
enabled: true
|
||||
```
|
||||
|
||||
To install the chart, simply run helm with the `--values` option:
|
||||
|
||||
```
|
||||
helm install stable/cluster-autoscaler --values=path/to/your/values-file.yaml
|
||||
```
|
||||
4
main.tf
4
main.tf
@@ -32,6 +32,10 @@
|
||||
* }
|
||||
* ```
|
||||
|
||||
* ## Other documentation
|
||||
*
|
||||
* - [Autoscaling](docs/autoscaling.md): How to enable worker node autoscaling.
|
||||
|
||||
* ## Release schedule
|
||||
|
||||
* Generally the maintainers will try to release the module once every 2 weeks to
|
||||
|
||||
@@ -98,6 +98,7 @@ variable "workers_group_defaults" {
|
||||
public_ip = false # Associate a public ip address with a worker
|
||||
kubelet_node_labels = "" # This string is passed directly to kubelet via --node-labels= if set. It should be comma delimited with no spaces. If left empty no --node-labels switch is added.
|
||||
subnets = "" # A comma delimited string of subnets to place the worker nodes in. i.e. subnet-123,subnet-456,subnet-789
|
||||
autoscaling_enabled = false # Sets whether policy and matching tags will be added to allow autoscaling.
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
54
workers.tf
54
workers.tf
@@ -11,6 +11,7 @@ resource "aws_autoscaling_group" "workers" {
|
||||
list(
|
||||
map("key", "Name", "value", "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}-eks_asg", "propagate_at_launch", true),
|
||||
map("key", "kubernetes.io/cluster/${aws_eks_cluster.this.name}", "value", "owned", "propagate_at_launch", true),
|
||||
map("key", "k8s.io/cluster-autoscaler/${lookup(var.worker_groups[count.index], "autoscaling_enabled", count.index) == 1 ? "enabled" : "disabled" }", "value", "true", "propagate_at_launch", false),
|
||||
),
|
||||
local.asg_tags)
|
||||
}"]
|
||||
@@ -122,3 +123,56 @@ resource "null_resource" "tags_as_list_of_maps" {
|
||||
"propagate_at_launch", "true"
|
||||
)}"
|
||||
}
|
||||
|
||||
# Attach the autoscaling policy (defined below) to the worker node IAM role,
# so pods running on the workers can call the Auto Scaling API.
resource "aws_iam_role_policy_attachment" "workers_autoscaling" {
  policy_arn = "${aws_iam_policy.worker_autoscaling.arn}"
  role       = "${aws_iam_role.workers.name}"
}
|
||||
|
||||
# Managed IAM policy carrying the Auto Scaling permissions needed by the
# cluster-autoscaler. name_prefix (rather than name) lets several clusters
# in one account each create their own copy without a name collision.
resource "aws_iam_policy" "worker_autoscaling" {
  name_prefix = "eks-worker-autoscaling-${aws_eks_cluster.this.name}"
  description = "EKS worker node autoscaling policy for cluster ${aws_eks_cluster.this.name}"
  policy      = "${data.aws_iam_policy_document.worker_autoscaling.json}"
}
|
||||
|
||||
# Policy document for worker-node autoscaling:
#  - the first statement allows read-only Describe*/Get calls on all ASGs;
#  - the second statement allows mutating calls, but only on ASGs whose
#    resource tags mark them as owned by this cluster and autoscaler-enabled.
data "aws_iam_policy_document" "worker_autoscaling" {
  statement {
    sid    = "eksWorkerAutoscalingAll"
    effect = "Allow"

    actions = [
      "autoscaling:DescribeAutoScalingGroups",
      "autoscaling:DescribeAutoScalingInstances",
      "autoscaling:DescribeLaunchConfigurations",
      "autoscaling:DescribeTags",

      # NOTE(review): "autoscaling:GetAsgForInstance" does not appear in the
      # AWS Auto Scaling API / service authorization reference — confirm this
      # is a valid action name before relying on it.
      "autoscaling:GetAsgForInstance",
    ]

    resources = ["*"]
  }

  statement {
    sid    = "eksWorkerAutoscalingOwn"
    effect = "Allow"

    actions = [
      "autoscaling:SetDesiredCapacity",
      "autoscaling:TerminateInstanceInAutoScalingGroup",
      "autoscaling:UpdateAutoScalingGroup",
    ]

    resources = ["*"]

    # Conditions within a statement are ANDed: the ASG must carry this
    # cluster's ownership tag...
    condition {
      test     = "StringEquals"
      variable = "autoscaling:ResourceTag/kubernetes.io/cluster/${aws_eks_cluster.this.name}"
      values   = ["owned"]
    }

    # ...and the k8s.io/cluster-autoscaler/enabled tag, i.e. the worker
    # group opted in via autoscaling_enabled.
    condition {
      test     = "StringEquals"
      variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled"
      values   = ["true"]
    }
  }
}
|
||||
|
||||
Reference in New Issue
Block a user