Mirror of https://github.com/ysoftdevs/terraform-aws-eks.git (synced 2026-01-16 00:34:31 +01:00)
Support for EKS Managed Node Groups (#602)
* Finished first cut of managed node groups
* Updated formatting and extra fields.
* Updating Changelog and README
* Fixing formatting
* Fixing docs.
* Updating required Version
* Updating changelog
* Adding example for managed node groups
* Managed IAM Roles for Nodegroups now have correct policies. Tags can now be added to node groups.
* Fixing bug where people could set source_security_group_ids without setting ssh key causing a race condition within the aws provider.
* Adding lifecycle create_before_destroy
* Adding random pet names for create_before_destroy
* Updating per comments.
* Updating required versions of terraform
* Updating per comments.
* Updating vars
* Updating minimum version for terraform
* Change worker_groups_managed_node_groups to node_groups
* Using for_each on the random_pet
* Adding changes recommended by @eytanhanig
* Update node_groups.tf
Committed by Max Williams
Parent: 614d815c3c
Commit: cf3dcc5c58
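
The commit message mentions pairing random_pet names with create_before_destroy. A minimal, generic sketch of that pattern follows (resource name and keeper value are illustrative, not the module's exact code, which appears in node_groups.tf further down): changing any keeper value forces a new pet name, which changes the dependent resource's name so the replacement can be created before the old one is destroyed.

resource "random_pet" "example" {
  length    = 2
  separator = "-"

  # Changing this keeper forces a new pet name on the next apply.
  keepers = {
    instance_type = var.instance_type
  }
}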

.github/workflows/lint.yml (vendored, 2 changes)
@@ -56,7 +56,7 @@ jobs:
     name: Minimum version check
     runs-on: ubuntu-latest
     container:
-      image: hashicorp/terraform:0.12.2
+      image: hashicorp/terraform:0.12.6
     steps:
       - uses: actions/checkout@master
       - name: Validate Code

CHANGELOG.md

@@ -10,12 +10,14 @@ project adheres to [Semantic Versioning](http://semver.org/).
 ## [[v7.?.?](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v7.0.0...HEAD)] - 2019-??-??]

 - Test against minimum versions specified in `versions.tf` (by @dpiddockcmp)
+- Support for AWS EKS Managed Node Groups. (by @wmorgan6796)
 - Updated instance_profile_names and instance_profile_arns outputs to also consider launch template as well as asg (by @ankitwal)
 - **Breaking:** Configure the aws-auth configmap using the terraform kubernetes providers. Read the [docs](docs/upgrading-to-aws-auth-kubernetes-provider.md) for more info (by @sdehaes)
 - Updated application of `aws-auth` configmap to create `kube_config.yaml` and `aws_auth_configmap.yaml` in sequence (and not parallel) to `kubectl apply` (by @knittingdev)
 - Exit with error code when `aws-auth` configmap is unable to be updated (by @knittingdev)
 - Fix deprecated interpolation-only expression (by @angelabad)
 - Fix broken terraform plan/apply on a cluster < 1.14 (by @hodduc)
+- Updated required version of AWS Provider to >= v2.38.0 for Managed Node Groups (by @wmorgan6796)

 #### Important notes

README.md

@@ -145,6 +145,7 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a
 | map\_accounts | Additional AWS account numbers to add to the aws-auth configmap. See examples/basic/variables.tf for example format. | list(string) | `[]` | no |
 | map\_roles | Additional IAM roles to add to the aws-auth configmap. See examples/basic/variables.tf for example format. | object | `[]` | no |
 | map\_users | Additional IAM users to add to the aws-auth configmap. See examples/basic/variables.tf for example format. | object | `[]` | no |
+| node\_groups | A list of maps defining node group configurations to be defined using AWS EKS Managed Node Groups. See workers_group_defaults for valid keys. | any | `[]` | no |
 | permissions\_boundary | If provided, all IAM roles will be created with this permissions boundary attached. | string | `"null"` | no |
 | subnets | A list of subnets to place the EKS cluster and workers within. | list(string) | n/a | yes |
 | tags | A map of tags to add to all resources. | map(string) | `{}` | no |
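
For orientation, a minimal sketch of the `node_groups` format described by the new README row (module source, names and IDs are illustrative; the full runnable example is the examples/managed_node_groups/main.tf file added below):

module "eks" {
  source       = "terraform-aws-modules/eks/aws"   # registry address; the in-repo example uses "../.."
  cluster_name = "my-cluster"                      # illustrative
  vpc_id       = "vpc-0123456789abcdef0"           # illustrative
  subnets      = ["subnet-aaaa", "subnet-bbbb"]    # illustrative

  node_groups = [
    {
      name                        = "example"
      instance_type               = "m5.large"
      node_group_desired_capacity = 1
      node_group_min_capacity     = 1
      node_group_max_capacity     = 3
    }
  ]
}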

examples/managed_node_groups/main.tf (new file, 104 lines)

@@ -0,0 +1,104 @@
terraform {
  required_version = ">= 0.12.6"
}

provider "aws" {
  version = ">= 2.28.1"
  region  = var.region
}

provider "random" {
  version = "~> 2.1"
}

provider "local" {
  version = "~> 1.2"
}

provider "null" {
  version = "~> 2.1"
}

provider "template" {
  version = "~> 2.1"
}

data "aws_availability_zones" "available" {
}

locals {
  cluster_name = "test-eks-${random_string.suffix.result}"
}

resource "random_string" "suffix" {
  length  = 8
  special = false
}

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 2.6"

  name                 = "test-vpc"
  cidr                 = "172.16.0.0/16"
  azs                  = data.aws_availability_zones.available.names
  private_subnets      = ["172.16.1.0/24", "172.16.2.0/24", "172.16.3.0/24"]
  public_subnets       = ["172.16.4.0/24", "172.16.5.0/24", "172.16.6.0/24"]
  enable_nat_gateway   = true
  single_nat_gateway   = true
  enable_dns_hostnames = true

  tags = {
    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
  }

  public_subnet_tags = {
    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
    "kubernetes.io/role/elb"                      = "1"
  }

  private_subnet_tags = {
    "kubernetes.io/cluster/${local.cluster_name}" = "shared"
    "kubernetes.io/role/internal-elb"             = "1"
  }
}

module "eks" {
  source       = "../.."
  cluster_name = local.cluster_name
  subnets      = module.vpc.private_subnets

  tags = {
    Environment = "test"
    GithubRepo  = "terraform-aws-eks"
    GithubOrg   = "terraform-aws-modules"
  }

  vpc_id = module.vpc.vpc_id

  node_groups = [
    {
      name = "example"

      node_group_desired_capacity = 1
      node_group_max_capacity     = 10
      node_group_min_capacity     = 1

      instance_type = "m5.large"
      node_group_k8s_labels = {
        Environment = "test"
        GithubRepo  = "terraform-aws-eks"
        GithubOrg   = "terraform-aws-modules"
      }
      node_group_additional_tags = {
        Environment = "test"
        GithubRepo  = "terraform-aws-eks"
        GithubOrg   = "terraform-aws-modules"
      }
    }
  ]

  map_roles    = var.map_roles
  map_users    = var.map_users
  map_accounts = var.map_accounts
}

examples/managed_node_groups/outputs.tf (new file, 25 lines)

@@ -0,0 +1,25 @@
output "cluster_endpoint" {
  description = "Endpoint for EKS control plane."
  value       = module.eks.cluster_endpoint
}

output "cluster_security_group_id" {
  description = "Security group ids attached to the cluster control plane."
  value       = module.eks.cluster_security_group_id
}

output "kubectl_config" {
  description = "kubectl config as generated by the module."
  value       = module.eks.kubeconfig
}

output "config_map_aws_auth" {
  description = "A kubernetes configuration to authenticate to this EKS cluster."
  value       = module.eks.config_map_aws_auth
}

output "region" {
  description = "AWS region."
  value       = var.region
}

examples/managed_node_groups/variables.tf (new file, 52 lines)

@@ -0,0 +1,52 @@
variable "region" {
  default = "us-west-2"
}

variable "map_accounts" {
  description = "Additional AWS account numbers to add to the aws-auth configmap."
  type        = list(string)

  default = [
    "777777777777",
    "888888888888",
  ]
}

variable "map_roles" {
  description = "Additional IAM roles to add to the aws-auth configmap."
  type = list(object({
    rolearn  = string
    username = string
    groups   = list(string)
  }))

  default = [
    {
      rolearn  = "arn:aws:iam::66666666666:role/role1"
      username = "role1"
      groups   = ["system:masters"]
    },
  ]
}

variable "map_users" {
  description = "Additional IAM users to add to the aws-auth configmap."
  type = list(object({
    userarn  = string
    username = string
    groups   = list(string)
  }))

  default = [
    {
      userarn  = "arn:aws:iam::66666666666:user/user1"
      username = "user1"
      groups   = ["system:masters"]
    },
    {
      userarn  = "arn:aws:iam::66666666666:user/user2"
      username = "user2"
      groups   = ["system:masters"]
    },
  ]
}

local.tf (17 changes)
@@ -16,8 +16,9 @@ locals {
   default_iam_role_id = concat(aws_iam_role.workers.*.id, [""])[0]
   kubeconfig_name     = var.kubeconfig_name == "" ? "eks_${var.cluster_name}" : var.kubeconfig_name

-  worker_group_count                 = length(var.worker_groups)
-  worker_group_launch_template_count = length(var.worker_groups_launch_template)
+  worker_group_count                    = length(var.worker_groups)
+  worker_group_launch_template_count    = length(var.worker_groups_launch_template)
+  worker_group_managed_node_group_count = length(var.node_groups)

   default_ami_id_linux   = data.aws_ami.eks_worker.id
   default_ami_id_windows = data.aws_ami.eks_worker_windows.id
@@ -79,6 +80,15 @@ locals {
     spot_allocation_strategy = "lowest-price" # Valid options are 'lowest-price' and 'capacity-optimized'. If 'lowest-price', the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools. If 'capacity-optimized', the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity.
     spot_instance_pools      = 10             # "Number of Spot pools per availability zone to allocate capacity. EC2 Auto Scaling selects the cheapest Spot pools and evenly allocates Spot capacity across the number of Spot pools that you specify."
     spot_max_price           = ""             # Maximum price per unit hour that the user is willing to pay for the Spot instances. Default is the on-demand price
+    ami_type                    = "AL2_x86_64" # AMI Type to use for the Managed Node Groups. Can be either: AL2_x86_64 or AL2_x86_64_GPU
+    ami_release_version         = ""           # AMI Release Version of the Managed Node Groups
+    source_security_group_id    = []           # Source Security Group IDs to allow SSH Access to the Nodes. NOTE: IF LEFT BLANK, AND A KEY IS SPECIFIED, THE SSH PORT WILL BE OPENED TO THE WORLD
+    node_group_k8s_labels       = {}           # Kubernetes Labels to apply to the nodes within the Managed Node Group
+    node_group_desired_capacity = 1            # Desired capacity of the Node Group
+    node_group_min_capacity     = 1            # Min capacity of the Node Group (Minimum value allowed is 1)
+    node_group_max_capacity     = 3            # Max capacity of the Node Group
+    node_group_iam_role_arn     = ""           # IAM role to use for Managed Node Groups instead of default one created by the automation
+    node_group_additional_tags  = {}           # Additional tags to be applied to the Node Groups
   }

   workers_group_defaults = merge(
@@ -123,4 +133,7 @@ locals {
     "t2.small",
     "t2.xlarge"
   ]
+
+  node_groups = { for node_group in var.node_groups : node_group["name"] => node_group }
+
 }
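
As a worked illustration of the for expression added above (input values are illustrative, not part of the diff): given var.node_groups = [{ name = "a", instance_type = "m5.large" }, { name = "b", instance_type = "t3.medium" }], local.node_groups evaluates to a map keyed by each group's name, which is what the for_each arguments in node_groups.tf iterate over:

{
  a = { name = "a", instance_type = "m5.large" }
  b = { name = "b", instance_type = "t3.medium" }
}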

node_groups.tf (new file, 103 lines)

@@ -0,0 +1,103 @@
resource "aws_iam_role" "node_groups" {
  count                 = local.worker_group_managed_node_group_count > 0 ? 1 : 0
  name                  = "${var.workers_role_name != "" ? var.workers_role_name : aws_eks_cluster.this.name}-managed-node-groups"
  assume_role_policy    = data.aws_iam_policy_document.workers_assume_role_policy.json
  permissions_boundary  = var.permissions_boundary
  path                  = var.iam_path
  force_detach_policies = true
  tags                  = var.tags
}

resource "aws_iam_role_policy_attachment" "node_groups_AmazonEKSWorkerNodePolicy" {
  count      = local.worker_group_managed_node_group_count > 0 ? 1 : 0
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
  role       = aws_iam_role.node_groups[0].name
}

resource "aws_iam_role_policy_attachment" "node_groups_AmazonEKS_CNI_Policy" {
  count      = local.worker_group_managed_node_group_count > 0 ? 1 : 0
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
  role       = aws_iam_role.node_groups[0].name
}

resource "aws_iam_role_policy_attachment" "node_groups_AmazonEC2ContainerRegistryReadOnly" {
  count      = local.worker_group_managed_node_group_count > 0 ? 1 : 0
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
  role       = aws_iam_role.node_groups[0].name
}

resource "aws_iam_role_policy_attachment" "node_groups_additional_policies" {
  for_each = toset(var.workers_additional_policies)

  role       = aws_iam_role.node_groups[0].name
  policy_arn = each.key
}

resource "aws_iam_role_policy_attachment" "node_groups_autoscaling" {
  count      = var.manage_worker_autoscaling_policy && var.attach_worker_autoscaling_policy && local.worker_group_managed_node_group_count > 0 ? 1 : 0
  policy_arn = aws_iam_policy.node_groups_autoscaling[0].arn
  role       = aws_iam_role.node_groups[0].name
}

resource "aws_iam_policy" "node_groups_autoscaling" {
  count       = var.manage_worker_autoscaling_policy && local.worker_group_managed_node_group_count > 0 ? 1 : 0
  name_prefix = "eks-worker-autoscaling-${aws_eks_cluster.this.name}"
  description = "EKS worker node autoscaling policy for cluster ${aws_eks_cluster.this.name}"
  policy      = data.aws_iam_policy_document.worker_autoscaling.json
  path        = var.iam_path
}

resource "random_pet" "node_groups" {
  for_each = local.node_groups

  separator = "-"
  length    = 2

  keepers = {
    instance_type = lookup(each.value, "instance_type", local.workers_group_defaults["instance_type"])

    ec2_ssh_key = lookup(each.value, "key_name", local.workers_group_defaults["key_name"])

    source_security_group_ids = join("-", compact(
      lookup(each.value, "source_security_group_ids", local.workers_group_defaults["source_security_group_id"])
    ))

    node_group_name = join("-", [var.cluster_name, each.value["name"]])
  }
}

resource "aws_eks_node_group" "workers" {
  for_each = local.node_groups

  node_group_name = join("-", [var.cluster_name, each.key, random_pet.node_groups[each.key].id])

  cluster_name  = var.cluster_name
  node_role_arn = lookup(each.value, "iam_role_arn", aws_iam_role.node_groups[0].arn)
  subnet_ids    = lookup(each.value, "subnets", local.workers_group_defaults["subnets"])

  scaling_config {
    desired_size = lookup(each.value, "node_group_desired_capacity", local.workers_group_defaults["asg_desired_capacity"])
    max_size     = lookup(each.value, "node_group_max_capacity", local.workers_group_defaults["asg_max_size"])
    min_size     = lookup(each.value, "node_group_min_capacity", local.workers_group_defaults["asg_min_size"])
  }

  ami_type        = lookup(each.value, "ami_type", null)
  disk_size       = lookup(each.value, "root_volume_size", null)
  instance_types  = [lookup(each.value, "instance_type", null)]
  labels          = lookup(each.value, "node_group_k8s_labels", null)
  release_version = lookup(each.value, "ami_release_version", null)

  # This sometimes breaks idempotency as described in https://github.com/terraform-providers/terraform-provider-aws/issues/11063
  remote_access {
    ec2_ssh_key               = lookup(each.value, "key_name", "") != "" ? each.value["key_name"] : null
    source_security_group_ids = lookup(each.value, "key_name", "") != "" ? lookup(each.value, "source_security_group_ids", []) : null
  }

  version = aws_eks_cluster.this.version

  tags = lookup(each.value, "node_group_additional_tags", null)

  lifecycle {
    create_before_destroy = true
  }
}
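
As the commit message notes, remote_access only passes source_security_group_ids when key_name is set, which avoids the AWS provider race condition when no SSH key is supplied. A hedged usage sketch of a node group entry with SSH enabled (key pair name and security group ID are illustrative):

node_groups = [
  {
    name          = "ssh-enabled"
    instance_type = "m5.large"

    key_name                  = "my-keypair"               # illustrative EC2 key pair
    source_security_group_ids = ["sg-0123456789abcdef0"]   # illustrative bastion security group
  }
]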

variables.tf

@@ -293,3 +293,9 @@ variable "attach_worker_cni_policy" {
   type    = bool
   default = true
 }
+
+variable "node_groups" {
+  description = "A list of maps defining node group configurations to be defined using AWS EKS Managed Node Groups. See workers_group_defaults for valid keys."
+  type        = any
+  default     = []
+}
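
Because the variable is typed any, each map in the list may carry a different subset of keys; keys omitted from a group fall back to the lookup() defaults in node_groups.tf, several of which come from workers_group_defaults. An illustrative sketch (group names and values are not from the diff):

node_groups = [
  {
    name = "small"                     # relies entirely on the defaults
  },
  {
    name                    = "large"  # overrides only these keys
    instance_type           = "m5.2xlarge"
    node_group_max_capacity = 6
  },
]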

versions.tf

@@ -1,8 +1,8 @@
 terraform {
-  required_version = ">= 0.12.2"
+  required_version = ">= 0.12.6"

   required_providers {
-    aws      = ">= 2.31.0"
+    aws      = ">= 2.38.0"
     local    = ">= 1.2"
     null     = ">= 2.1"
     template = ">= 2.1"