Files
terraform-aws-eks/examples/self_managed_node_group/main.tf
Robbie Blaine 07be37dc7a chore: Fix typo in nodeadm examples (#2986)
* Fix `nodeadm` example typo

* `apiVersion: node.eks.aws/v1alpha` -> `apiVersion: node.eks.aws/v1alpha1`

* Revert whitespace changes

* `tofu apply` in `examples/user_data`

* chore: Fix file extension type for MIME multipart user data

* chore: Fix line endings based off this https://github.com/hashicorp/terraform/issues/32910

---------

Co-authored-by: Bryant Biggs <bryantbiggs@gmail.com>
2024-03-25 17:01:59 -04:00

432 lines
11 KiB
HCL

# AWS provider configuration; every resource in this example is created in the
# region selected in locals (eu-west-1).
provider "aws" {
  region = local.region
}
# Identity running Terraform — referenced below as KMS key owner/administrator.
data "aws_caller_identity" "current" {}

# Availability zones of the selected region — the first three are used for the VPC.
data "aws_availability_zones" "available" {}
locals {
  # Target region and Kubernetes release for this example
  region          = "eu-west-1"
  cluster_version = "1.29"

  # Example name derived from the working directory,
  # e.g. "self_managed_node_group" -> "ex-self-managed-node-group"
  name = "ex-${replace(basename(path.cwd), "_", "-")}"

  # Network layout: a single /16 spread across the first three availability zones
  vpc_cidr = "10.0.0.0/16"
  azs      = slice(data.aws_availability_zones.available.names, 0, 3)

  # Common tags applied to every resource created by the example
  tags = {
    Example    = local.name
    GithubRepo = "terraform-aws-eks"
    GithubOrg  = "terraform-aws-modules"
  }
}
################################################################################
# EKS Module
################################################################################
# EKS cluster with a variety of self-managed node group configurations.
# Fixes in this revision:
#   - corrected the tautological admin host-container comment in the Bottlerocket
#     user data ("can be disabled explicitly" -> "can be enabled explicitly")
#   - restored valid YAML nesting inside the nodeadm NodeConfig heredoc
module "eks" {
  source = "../.."

  cluster_name                   = local.name
  cluster_version                = local.cluster_version
  cluster_endpoint_public_access = true

  # Grant the identity running Terraform admin access to the cluster
  enable_cluster_creator_admin_permissions = true

  # Enable EFA support by adding necessary security group rules
  # to the shared node security group
  enable_efa_support = true

  cluster_addons = {
    coredns = {
      most_recent = true
    }
    kube-proxy = {
      most_recent = true
    }
    vpc-cni = {
      most_recent = true
    }
  }

  vpc_id                   = module.vpc.vpc_id
  subnet_ids               = module.vpc.private_subnets
  control_plane_subnet_ids = module.vpc.intra_subnets

  # External encryption key — created by the `kms` module below instead of by this module
  create_kms_key = false
  cluster_encryption_config = {
    resources        = ["secrets"]
    provider_key_arn = module.kms.key_arn
  }

  self_managed_node_group_defaults = {
    # enable discovery of autoscaling groups by cluster-autoscaler
    autoscaling_group_tags = {
      "k8s.io/cluster-autoscaler/enabled" : true,
      "k8s.io/cluster-autoscaler/${local.name}" : "owned",
    }
  }

  self_managed_node_groups = {
    # Default node group - as provisioned by the module defaults
    default_node_group = {}

    # AL2023 node group utilizing new user data format which utilizes nodeadm
    # to join nodes to the cluster (instead of /etc/eks/bootstrap.sh)
    al2023_nodeadm = {
      platform = "al2023"

      cloudinit_pre_nodeadm = [
        {
          content_type = "application/node.eks.aws"
          content      = <<-EOT
            ---
            apiVersion: node.eks.aws/v1alpha1
            kind: NodeConfig
            spec:
              kubelet:
                config:
                  shutdownGracePeriod: 30s
                  featureGates:
                    DisableKubeletCloudCredentialProviders: true
          EOT
        }
      ]
    }

    # Bottlerocket node group
    bottlerocket = {
      name = "bottlerocket-self-mng"

      platform      = "bottlerocket"
      ami_id        = data.aws_ami.eks_default_bottlerocket.id
      instance_type = "m5.large"
      desired_size  = 2
      key_name      = module.key_pair.key_pair_name

      # Bottlerocket is configured via TOML user data rather than a bootstrap script
      bootstrap_extra_args = <<-EOT
        # The admin host container provides SSH access and runs with "superpowers".
        # It is disabled by default, but can be enabled explicitly.
        [settings.host-containers.admin]
        enabled = false

        # The control host container provides out-of-band access via SSM.
        # It is enabled by default, and can be disabled if you do not expect to use SSM.
        # This could leave you with no way to access the API and change settings on an existing node!
        [settings.host-containers.control]
        enabled = true

        # extra args added
        [settings.kernel]
        lockdown = "integrity"

        [settings.kubernetes.node-labels]
        label1 = "foo"
        label2 = "bar"

        [settings.kubernetes.node-taints]
        dedicated = "experimental:PreferNoSchedule"
        special = "true:NoSchedule"
      EOT
    }

    # Mixed on-demand/spot node group
    mixed = {
      name = "mixed"

      min_size     = 1
      max_size     = 5
      desired_size = 2

      bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"

      use_mixed_instances_policy = true
      mixed_instances_policy = {
        instances_distribution = {
          on_demand_base_capacity                  = 0
          on_demand_percentage_above_base_capacity = 20
          spot_allocation_strategy                 = "capacity-optimized"
        }

        override = [
          {
            instance_type     = "m5.large"
            weighted_capacity = "1"
          },
          {
            instance_type     = "m6i.large"
            weighted_capacity = "2"
          },
        ]
      }
    }

    # Complete
    complete = {
      name            = "complete-self-mng"
      use_name_prefix = false

      subnet_ids = module.vpc.public_subnets

      min_size     = 1
      max_size     = 7
      desired_size = 1

      ami_id = data.aws_ami.eks_default.id

      pre_bootstrap_user_data = <<-EOT
        export FOO=bar
      EOT

      post_bootstrap_user_data = <<-EOT
        echo "you are free little kubelet!"
      EOT

      instance_type = "m6i.large"

      launch_template_name            = "self-managed-ex"
      launch_template_use_name_prefix = true
      launch_template_description     = "Self managed node group example launch template"

      ebs_optimized     = true
      enable_monitoring = true

      block_device_mappings = {
        xvda = {
          device_name = "/dev/xvda"
          ebs = {
            volume_size           = 75
            volume_type           = "gp3"
            iops                  = 3000
            throughput            = 150
            encrypted             = true
            kms_key_id            = module.ebs_kms_key.key_arn
            delete_on_termination = true
          }
        }
      }

      # NOTE(review): `instance_attributes` is nested inside the `complete` node group,
      # where it is not a recognized node-group argument. If it was intended to be a
      # separate node group (its shape matches one), it should be moved up one level
      # into `self_managed_node_groups` — confirm intent before changing, as moving it
      # would create a new autoscaling group.
      instance_attributes = {
        name = "instance-attributes"

        min_size     = 1
        max_size     = 2
        desired_size = 1

        bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"

        instance_type = null

        # launch template configuration
        instance_requirements = {
          cpu_manufacturers                           = ["intel"]
          instance_generations                        = ["current", "previous"]
          spot_max_price_percentage_over_lowest_price = 100

          vcpu_count = {
            min = 1
          }

          allowed_instance_types = ["t*", "m*"]
        }

        use_mixed_instances_policy = true
        mixed_instances_policy = {
          instances_distribution = {
            on_demand_base_capacity                  = 0
            on_demand_percentage_above_base_capacity = 0
            on_demand_allocation_strategy            = "lowest-price"
            spot_allocation_strategy                 = "price-capacity-optimized"
          }

          # ASG configuration
          override = [
            {
              instance_requirements = {
                cpu_manufacturers                           = ["intel"]
                instance_generations                        = ["current", "previous"]
                spot_max_price_percentage_over_lowest_price = 100

                vcpu_count = {
                  min = 1
                }

                allowed_instance_types = ["t*", "m*"]
              }
            }
          ]
        }
      }

      metadata_options = {
        http_endpoint               = "enabled"
        http_tokens                 = "required"
        http_put_response_hop_limit = 2
        instance_metadata_tags      = "disabled"
      }

      create_iam_role          = true
      iam_role_name            = "self-managed-node-group-complete-example"
      iam_role_use_name_prefix = false
      iam_role_description     = "Self managed node group complete example role"
      iam_role_tags = {
        Purpose = "Protector of the kubelet"
      }
      iam_role_additional_policies = {
        AmazonEC2ContainerRegistryReadOnly = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
        additional                         = aws_iam_policy.additional.arn
      }

      tags = {
        ExtraTag = "Self managed node group complete example"
      }
    }

    # EFA-enabled node group
    efa = {
      # Disabling automatic creation due to instance type/quota availability
      # Can be enabled when appropriate for testing/validation
      create = false

      instance_type      = "trn1n.32xlarge"
      enable_efa_support = true

      pre_bootstrap_user_data = <<-EOT
        # Mount NVME instance store volumes since they are typically
        # available on instances that support EFA
        setup-local-disks raid0
      EOT

      min_size     = 2
      max_size     = 2
      desired_size = 2
    }
  }

  tags = local.tags
}
# Demonstrates that the node group sub-module can be fully disabled via `create = false`;
# no resources are created by this module instance.
module "disabled_self_managed_node_group" {
  source = "../../modules/self-managed-node-group"

  create = false

  # Hard requirement
  cluster_service_cidr = ""
}
################################################################################
# Supporting Resources
################################################################################
# VPC spanning three AZs with private, public, and intra subnet tiers.
# A single NAT gateway serves all private subnets to keep example costs down.
module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 5.0"

  name = local.name
  cidr = local.vpc_cidr
  azs  = local.azs

  # One subnet per tier per AZ, carved from the VPC CIDR
  private_subnets = [for i, az in local.azs : cidrsubnet(local.vpc_cidr, 4, i)]
  public_subnets  = [for i, az in local.azs : cidrsubnet(local.vpc_cidr, 8, i + 48)]
  intra_subnets   = [for i, az in local.azs : cidrsubnet(local.vpc_cidr, 8, i + 52)]

  enable_nat_gateway = true
  single_nat_gateway = true

  # Subnet role tags consumed by Kubernetes when placing load balancers
  public_subnet_tags = {
    "kubernetes.io/role/elb" = 1
  }

  private_subnet_tags = {
    "kubernetes.io/role/internal-elb" = 1
  }

  tags = local.tags
}
# Latest Amazon-owned EKS-optimized AL2 AMI matching the cluster version
data "aws_ami" "eks_default" {
  owners      = ["amazon"]
  most_recent = true

  filter {
    name   = "name"
    values = ["amazon-eks-node-${local.cluster_version}-v*"]
  }
}
# Latest Amazon-owned Bottlerocket AMI (x86_64) matching the cluster version
data "aws_ami" "eks_default_bottlerocket" {
  owners      = ["amazon"]
  most_recent = true

  filter {
    name   = "name"
    values = ["bottlerocket-aws-k8s-${local.cluster_version}-x86_64-*"]
  }
}
# EC2 key pair referenced by the Bottlerocket node group; the private key
# is generated by Terraform.
module "key_pair" {
  source  = "terraform-aws-modules/key-pair/aws"
  version = "~> 2.0"

  create_private_key = true
  key_name_prefix    = local.name

  tags = local.tags
}
# Customer-managed KMS key used to encrypt node group EBS volumes
# (referenced by the "complete" node group's block device mapping).
module "ebs_kms_key" {
  source  = "terraform-aws-modules/kms/aws"
  version = "~> 2.0"

  description = "Customer managed key to encrypt EKS managed node group volumes"
  aliases     = ["eks/${local.name}/ebs"]

  # Key policy: the caller administers the key; the service-linked roles below
  # must be able to use it for autoscaling-managed volumes
  key_administrators = [
    data.aws_caller_identity.current.arn
  ]

  key_service_roles_for_autoscaling = [
    # required for the ASG to manage encrypted volumes for nodes
    "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling",
    # required for the cluster / persistentvolume-controller to create encrypted PVCs
    module.eks.cluster_iam_role_arn,
  ]

  tags = local.tags
}
# Customer-managed KMS key for EKS secrets envelope encryption
# (wired into the cluster via `cluster_encryption_config` above).
module "kms" {
  source  = "terraform-aws-modules/kms/aws"
  version = "~> 2.1"

  description = "${local.name} cluster encryption key"
  aliases     = ["eks/${local.name}"]

  enable_default_policy = true
  key_owners            = [data.aws_caller_identity.current.arn]

  tags = local.tags
}
# Example additional policy attached to the "complete" node group's IAM role
# via `iam_role_additional_policies`.
resource "aws_iam_policy" "additional" {
  name        = "${local.name}-additional"
  description = "Example usage of node additional policy"

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = [
          "ec2:Describe*",
        ]
        Effect   = "Allow"
        Resource = "*"
      },
    ]
  })

  tags = local.tags
}