mirror of
https://github.com/ysoftdevs/terraform-aws-eks.git
synced 2026-02-25 03:54:52 +01:00
* Disable management of autoscaling IAM policy by default * remove completely * update changelog notes * fix pre-commit stuff * misc updates * fmt * fix changelog * Removal of tags and update docs * Change after updating terraform-docs * Add second tag
92 lines
3.0 KiB
Markdown
# Autoscaling

To enable worker node autoscaling you will need to do a few things:

- Add the [required tags](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#auto-discovery-setup) to the worker group
- Install the cluster-autoscaler
- Give the cluster-autoscaler access via an IAM policy

It's probably easiest to follow the example in [examples/irsa](../examples/irsa), which will install the cluster-autoscaler using [Helm](https://helm.sh/) and use IRSA to attach a policy.

If you don't want to use IRSA then you will need to attach the IAM policy to the worker node IAM role or add AWS credentials to the cluster-autoscaler environment variables. Here is some example Terraform code for the policy:
```hcl
resource "aws_iam_role_policy_attachment" "workers_autoscaling" {
  policy_arn = aws_iam_policy.worker_autoscaling.arn
  role       = module.my_cluster.worker_iam_role_name[0]
}

resource "aws_iam_policy" "worker_autoscaling" {
  name_prefix = "eks-worker-autoscaling-${module.my_cluster.cluster_id}"
  description = "EKS worker node autoscaling policy for cluster ${module.my_cluster.cluster_id}"
  policy      = data.aws_iam_policy_document.worker_autoscaling.json
  path        = var.iam_path
}

data "aws_iam_policy_document" "worker_autoscaling" {
  statement {
    sid    = "eksWorkerAutoscalingAll"
    effect = "Allow"

    actions = [
      "autoscaling:DescribeAutoScalingGroups",
      "autoscaling:DescribeAutoScalingInstances",
      "autoscaling:DescribeLaunchConfigurations",
      "autoscaling:DescribeTags",
      "ec2:DescribeLaunchTemplateVersions",
    ]

    resources = ["*"]
  }

  statement {
    sid    = "eksWorkerAutoscalingOwn"
    effect = "Allow"

    actions = [
      "autoscaling:SetDesiredCapacity",
      "autoscaling:TerminateInstanceInAutoScalingGroup",
      "autoscaling:UpdateAutoScalingGroup",
    ]

    resources = ["*"]

    condition {
      test     = "StringEquals"
      variable = "autoscaling:ResourceTag/kubernetes.io/cluster/${module.my_cluster.cluster_id}"
      values   = ["owned"]
    }

    condition {
      test     = "StringEquals"
      variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled"
      values   = ["true"]
    }
  }
}
```

And example values for the [Helm chart](https://github.com/helm/charts/tree/master/stable/cluster-autoscaler):

```yaml
rbac:
  create: true

cloudProvider: aws
awsRegion: YOUR_AWS_REGION

autoDiscovery:
  clusterName: YOUR_CLUSTER_NAME
  enabled: true
```

To install the chart, simply run `helm` with the `--values` option:

```
helm install stable/cluster-autoscaler --values=path/to/your/values-file.yaml
```

## Notes

There is a variable `asg_desired_capacity` given in the `local.tf` file. It can be used to change the desired worker(s) capacity in the autoscaling group, but it is currently ignored in Terraform to reduce the [complexities](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/510#issuecomment-531700442); scaling the cluster nodes up and down is handled by the cluster autoscaler instead.