# terraform-aws-eks/examples/self_managed_node_group/main.tf

provider "aws" {
region = local.region
}
provider "kubernetes" {
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
exec {
api_version = "client.authentication.k8s.io/v1beta1"
command = "aws"
# This requires the awscli to be installed locally where Terraform is executed
args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
}
}
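
# The upstream example pins provider versions in a separate versions.tf. A
# minimal inline sketch if you want the constraints in this file instead;
# the version bounds below are assumptions, not taken from the example.
terraform {
  required_version = ">= 1.3"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 5.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 2.20"
    }
  }
}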
data "aws_caller_identity" "current" {}
data "aws_availability_zones" "available" {}

locals {
  name            = "ex-${replace(basename(path.cwd), "_", "-")}"
  cluster_version = "1.27"
  region          = "eu-west-1"

  vpc_cidr = "10.0.0.0/16"
  azs      = slice(data.aws_availability_zones.available.names, 0, 3)

  tags = {
    Example    = local.name
    GithubRepo = "terraform-aws-eks"
    GithubOrg  = "terraform-aws-modules"
  }
}

################################################################################
# EKS Module
################################################################################

module "eks" {
  source = "../.."

  cluster_name                   = local.name
  cluster_version                = local.cluster_version
  cluster_endpoint_public_access = true

  cluster_addons = {
    coredns = {
      most_recent = true
    }
    kube-proxy = {
      most_recent = true
    }
    vpc-cni = {
      most_recent = true
    }
  }

  vpc_id                   = module.vpc.vpc_id
  subnet_ids               = module.vpc.private_subnets
  control_plane_subnet_ids = module.vpc.intra_subnets

  # Self managed node groups will not automatically create the aws-auth configmap,
  # so we need to create and manage it here
  create_aws_auth_configmap = true
  manage_aws_auth_configmap = true

  self_managed_node_group_defaults = {
    # Enable discovery of autoscaling groups by cluster-autoscaler
    autoscaling_group_tags = {
      "k8s.io/cluster-autoscaler/enabled" : true,
      "k8s.io/cluster-autoscaler/${local.name}" : "owned",
    }
  }
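
  # With the tags above, cluster-autoscaler can discover these ASGs via its
  # auto-discovery flag, e.g.:
  #   --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/<cluster-name>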

  self_managed_node_groups = {
    # Default node group - as provisioned by the module defaults
    default_node_group = {}

    # Bottlerocket node group
    bottlerocket = {
      name     = "bottlerocket-self-mng"
      platform = "bottlerocket"

      ami_id        = data.aws_ami.eks_default_bottlerocket.id
      instance_type = "m5.large"
      desired_size  = 2
      key_name      = module.key_pair.key_pair_name

      bootstrap_extra_args = <<-EOT
        # The admin host container provides SSH access and runs with "superpowers".
        # It is disabled by default, and here we disable it explicitly.
        [settings.host-containers.admin]
        enabled = false

        # The control host container provides out-of-band access via SSM.
        # It is enabled by default, and can be disabled if you do not expect to use SSM.
        # This could leave you with no way to access the API and change settings on an existing node!
        [settings.host-containers.control]
        enabled = true

        # Extra settings added for this example
        [settings.kernel]
        lockdown = "integrity"

        [settings.kubernetes.node-labels]
        label1 = "foo"
        label2 = "bar"

        [settings.kubernetes.node-taints]
        dedicated = "experimental:PreferNoSchedule"
        special   = "true:NoSchedule"
      EOT
    }
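
    # Note: for Bottlerocket nodes, bootstrap_extra_args is merged into the
    # instance's TOML-formatted user data rather than passed to a shell
    # bootstrap script as it is for the Amazon Linux based groups below.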

    mixed = {
      name = "mixed"

      min_size     = 1
      max_size     = 5
      desired_size = 2

      bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"

      use_mixed_instances_policy = true
      mixed_instances_policy = {
        instances_distribution = {
          on_demand_base_capacity                  = 0
          on_demand_percentage_above_base_capacity = 20
          spot_allocation_strategy                 = "capacity-optimized"
        }

        override = [
          {
            instance_type     = "m5.large"
            weighted_capacity = "1"
          },
          {
            instance_type     = "m6i.large"
            weighted_capacity = "2"
          },
        ]
      }
    }

    # EFA-enabled node group
    efa = {
      min_size     = 1
      max_size     = 2
      desired_size = 1

      # List EFA-capable instance types in the region with:
      # aws ec2 describe-instance-types --region eu-west-1 --filters Name=network-info.efa-supported,Values=true --query "InstanceTypes[*].[InstanceType]" --output text | sort
      instance_type = "c5n.9xlarge"

      post_bootstrap_user_data = <<-EOT
        # Install EFA
        curl -O https://efa-installer.amazonaws.com/aws-efa-installer-latest.tar.gz
        tar -xf aws-efa-installer-latest.tar.gz && cd aws-efa-installer
        ./efa_installer.sh -y --minimal
        fi_info -p efa -t FI_EP_RDM

        # Disable ptrace protection (ptrace_scope=0) so EFA/libfabric tooling
        # can use cross-memory attach between local processes
        sysctl -w kernel.yama.ptrace_scope=0
      EOT

      network_interfaces = [
        {
          description                 = "EFA interface example"
          delete_on_termination       = true
          device_index                = 0
          associate_public_ip_address = false
          interface_type              = "efa"
        }
      ]
    }
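
    # EFA traffic does not cross AZs; for real workloads you would typically
    # pin EFA instances to a single subnet/AZ and a cluster placement group,
    # and allow all traffic between the nodes within their security group.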

    # Complete
    complete = {
      name            = "complete-self-mng"
      use_name_prefix = false

      subnet_ids = module.vpc.public_subnets

      min_size     = 1
      max_size     = 7
      desired_size = 1

      ami_id = data.aws_ami.eks_default.id

      pre_bootstrap_user_data = <<-EOT
        export FOO=bar
      EOT

      post_bootstrap_user_data = <<-EOT
        echo "you are free little kubelet!"
      EOT

      instance_type = "m6i.large"

      launch_template_name            = "self-managed-ex"
      launch_template_use_name_prefix = true
      launch_template_description     = "Self managed node group example launch template"

      ebs_optimized     = true
      enable_monitoring = true

      block_device_mappings = {
        xvda = {
          device_name = "/dev/xvda"
          ebs = {
            volume_size           = 75
            volume_type           = "gp3"
            iops                  = 3000
            throughput            = 150
            encrypted             = true
            kms_key_id            = module.ebs_kms_key.key_arn
            delete_on_termination = true
          }
        }
      }

      metadata_options = {
        http_endpoint               = "enabled"
        http_tokens                 = "required"
        http_put_response_hop_limit = 2
        instance_metadata_tags      = "disabled"
      }

      create_iam_role          = true
      iam_role_name            = "self-managed-node-group-complete-example"
      iam_role_use_name_prefix = false
      iam_role_description     = "Self managed node group complete example role"
      iam_role_tags = {
        Purpose = "Protector of the kubelet"
      }
      iam_role_additional_policies = {
        AmazonEC2ContainerRegistryReadOnly = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
        additional                         = aws_iam_policy.additional.arn
      }

      timeouts = {
        create = "80m"
        update = "80m"
        delete = "80m"
      }

      tags = {
        ExtraTag = "Self managed node group complete example"
      }
    }

    # Demonstrates the use of instance requirements in place of a fixed instance type
    instance_attributes = {
      name = "instance-attributes"

      min_size     = 1
      max_size     = 2
      desired_size = 1

      bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"

      # instance_requirements and instance_type are mutually exclusive
      instance_type = null

      # Launch template configuration
      instance_requirements = {
        cpu_manufacturers                           = ["intel"]
        instance_generations                        = ["current", "previous"]
        spot_max_price_percentage_over_lowest_price = 100

        vcpu_count = {
          min = 1
        }

        allowed_instance_types = ["t*", "m*"]
      }

      use_mixed_instances_policy = true
      mixed_instances_policy = {
        instances_distribution = {
          on_demand_base_capacity                  = 0
          on_demand_percentage_above_base_capacity = 0
          on_demand_allocation_strategy            = "lowest-price"
          spot_allocation_strategy                 = "price-capacity-optimized"
        }

        # ASG configuration
        override = [
          {
            instance_requirements = {
              cpu_manufacturers                           = ["intel"]
              instance_generations                        = ["current", "previous"]
              spot_max_price_percentage_over_lowest_price = 100

              vcpu_count = {
                min = 1
              }

              allowed_instance_types = ["t*", "m*"]
            }
          }
        ]
      }
    }
  }

  tags = local.tags
}
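
################################################################################
# Outputs
################################################################################

# The upstream example keeps its outputs in a separate outputs.tf; the two
# below are a minimal sketch using the module's documented outputs.
output "cluster_name" {
  description = "Name of the EKS cluster"
  value       = module.eks.cluster_name
}

output "cluster_endpoint" {
  description = "Endpoint for the EKS cluster API server"
  value       = module.eks.cluster_endpoint
}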

################################################################################
# Supporting Resources
################################################################################

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 4.0"

  name = local.name
  cidr = local.vpc_cidr

  azs             = local.azs
  private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
  public_subnets  = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
  intra_subnets   = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)]

  enable_nat_gateway = true
  single_nat_gateway = true

  public_subnet_tags = {
    "kubernetes.io/role/elb" = 1
  }

  private_subnet_tags = {
    "kubernetes.io/role/internal-elb" = 1
  }

  tags = local.tags
}
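
# Subnet sizing above, given the 10.0.0.0/16 VPC CIDR:
#   cidrsubnet(local.vpc_cidr, 4, k)      => one /20 per AZ (10.0.0.0/20, 10.0.16.0/20, 10.0.32.0/20)
#   cidrsubnet(local.vpc_cidr, 8, k + 48) => one /24 per AZ (10.0.48.0/24, ...)
#   cidrsubnet(local.vpc_cidr, 8, k + 52) => one /24 per AZ (10.0.52.0/24, ...)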
data "aws_ami" "eks_default" {
most_recent = true
owners = ["amazon"]
filter {
name = "name"
values = ["amazon-eks-node-${local.cluster_version}-v*"]
}
}
data "aws_ami" "eks_default_bottlerocket" {
most_recent = true
owners = ["amazon"]
filter {
name = "name"
values = ["bottlerocket-aws-k8s-${local.cluster_version}-x86_64-*"]
}
}
module "key_pair" {
source = "terraform-aws-modules/key-pair/aws"
version = "~> 2.0"
key_name_prefix = local.name
create_private_key = true
tags = local.tags
}
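
# The generated private key is only stored in Terraform state. A sketch of how
# you could surface it for SSH access to the Bottlerocket nodes, using the
# key-pair module's documented private_key_pem output:
output "node_ssh_private_key" {
  description = "Private key material for the node key pair"
  value       = module.key_pair.private_key_pem
  sensitive   = true
}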
module "ebs_kms_key" {
source = "terraform-aws-modules/kms/aws"
version = "~> 1.5"
description = "Customer managed key to encrypt EKS managed node group volumes"
# Policy
key_administrators = [
data.aws_caller_identity.current.arn
]
key_service_roles_for_autoscaling = [
# required for the ASG to manage encrypted volumes for nodes
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling",
# required for the cluster / persistentvolume-controller to create encrypted PVCs
module.eks.cluster_iam_role_arn,
]
# Aliases
aliases = ["eks/${local.name}/ebs"]
tags = local.tags
}
resource "aws_iam_policy" "additional" {
name = "${local.name}-additional"
description = "Example usage of node additional policy"
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = [
"ec2:Describe*",
]
Effect = "Allow"
Resource = "*"
},
]
})
tags = local.tags
}