# terraform-aws-eks/examples/self_managed_node_group/main.tf
provider "aws" {
region = local.region
}
provider "kubernetes" {
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
exec {
api_version = "client.authentication.k8s.io/v1beta1"
command = "aws"
# This requires the awscli to be installed locally where Terraform is executed
args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id]
}
}
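
# The exec block above fetches a short-lived authentication token via
# `aws eks get-token` on every Terraform run instead of caching static
# credentials; this Kubernetes provider connection is what allows the
# module to create and manage the aws-auth configmap enabled further below.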
locals {
  name            = "ex-${replace(basename(path.cwd), "_", "-")}"
  cluster_version = "1.22"
  region          = "eu-west-1"

  tags = {
    Example    = local.name
    GithubRepo = "terraform-aws-eks"
    GithubOrg  = "terraform-aws-modules"
  }
}

data "aws_caller_identity" "current" {}
################################################################################
# EKS Module
################################################################################
module "eks" {
source = "../.."
cluster_name = local.name
cluster_version = local.cluster_version
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
cluster_addons = {
coredns = {
resolve_conflicts = "OVERWRITE"
}
kube-proxy = {}
vpc-cni = {
resolve_conflicts = "OVERWRITE"
}
}
cluster_encryption_config = [{
provider_key_arn = aws_kms_key.eks.arn
resources = ["secrets"]
}]
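
  # The encryption config above envelope-encrypts Kubernetes secrets with the
  # customer managed key defined under "Supporting Resources" below
  # (aws_kms_key.eks).
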
  vpc_id     = module.vpc.vpc_id
  subnet_ids = module.vpc.private_subnets

  # Self managed node groups will not automatically create the aws-auth
  # configmap, so we need to create and manage it here
  create_aws_auth_configmap = true
  manage_aws_auth_configmap = true
  # Extend cluster security group rules
  cluster_security_group_additional_rules = {
    egress_nodes_ephemeral_ports_tcp = {
      description                = "To node 1025-65535"
      protocol                   = "tcp"
      from_port                  = 1025
      to_port                    = 65535
      type                       = "egress"
      source_node_security_group = true
    }
  }

  # Extend node-to-node security group rules
  node_security_group_additional_rules = {
    ingress_self_all = {
      description = "Node to node all ports/protocols"
      protocol    = "-1"
      from_port   = 0
      to_port     = 0
      type        = "ingress"
      self        = true
    }
    egress_all = {
      description      = "Node all egress"
      protocol         = "-1"
      from_port        = 0
      to_port          = 0
      type             = "egress"
      cidr_blocks      = ["0.0.0.0/0"]
      ipv6_cidr_blocks = ["::/0"]
    }
  }

  self_managed_node_group_defaults = {
    create_security_group = false

    # enable discovery of autoscaling groups by cluster-autoscaler
    autoscaling_group_tags = {
      "k8s.io/cluster-autoscaler/enabled" : true,
      "k8s.io/cluster-autoscaler/${local.name}" : "owned",
    }
  }
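
  # With the tags above, a cluster-autoscaler deployment can discover the node
  # group ASGs automatically via its standard auto-discovery flag, e.g.:
  #   --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/<cluster-name>
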
  self_managed_node_groups = {
    # Default node group - as provisioned by the module defaults
    default_node_group = {}

    # Bottlerocket node group
    bottlerocket = {
      name = "bottlerocket-self-mng"

      platform      = "bottlerocket"
      ami_id        = data.aws_ami.eks_default_bottlerocket.id
      instance_type = "m5.large"
      desired_size  = 2
      key_name      = aws_key_pair.this.key_name

      iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]

      bootstrap_extra_args = <<-EOT
      # The admin host container provides SSH access and runs with "superpowers".
      # It is disabled by default; here we disable it explicitly.
      [settings.host-containers.admin]
      enabled = false

      # The control host container provides out-of-band access via SSM.
      # It is enabled by default, and can be disabled if you do not expect to use SSM.
      # This could leave you with no way to access the API and change settings on an existing node!
      [settings.host-containers.control]
      enabled = true

      [settings.kubernetes.node-labels]
      ingress = "allowed"
      EOT
    }
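
    # Note: on Bottlerocket, bootstrap_extra_args is not a set of shell flags;
    # the module merges it into the node's TOML user data, which is why the
    # heredoc above contains [settings.*] sections rather than kubelet arguments.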
    mixed = {
      name = "mixed"

      min_size     = 1
      max_size     = 5
      desired_size = 2

      bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"

      use_mixed_instances_policy = true
      mixed_instances_policy = {
        instances_distribution = {
          on_demand_base_capacity                  = 0
          on_demand_percentage_above_base_capacity = 20
          spot_allocation_strategy                 = "capacity-optimized"
        }

        override = [
          {
            instance_type     = "m5.large"
            weighted_capacity = "1"
          },
          {
            instance_type     = "m6i.large"
            weighted_capacity = "2"
          },
        ]
      }
    }
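
    # With on_demand_base_capacity = 0 and 20% above base, roughly 1 in 5 units
    # of capacity is On-Demand and the rest is Spot, allocated across the
    # override instance types by the capacity-optimized strategy. Note that the
    # lifecycle=spot node label above is applied to every node in the group,
    # including the On-Demand share.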
    efa = {
      min_size     = 1
      max_size     = 2
      desired_size = 1

      # aws ec2 describe-instance-types --region eu-west-1 --filters Name=network-info.efa-supported,Values=true --query "InstanceTypes[*].[InstanceType]" --output text | sort
      instance_type = "c5n.9xlarge"

      post_bootstrap_user_data = <<-EOT
      # Install EFA
      curl -O https://efa-installer.amazonaws.com/aws-efa-installer-latest.tar.gz
      tar -xf aws-efa-installer-latest.tar.gz && cd aws-efa-installer
      ./efa_installer.sh -y --minimal
      fi_info -p efa -t FI_EP_RDM

      # Disable ptrace protection (required by EFA's shared-memory transport)
      sysctl -w kernel.yama.ptrace_scope=0
      EOT

      network_interfaces = [
        {
          description                 = "EFA interface example"
          delete_on_termination       = true
          device_index                = 0
          associate_public_ip_address = false
          interface_type              = "efa"
        }
      ]
    }
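
    # The `fi_info` call above verifies the libfabric EFA provider is usable
    # once the installer completes. EFA-capable instances are typically also
    # launched into a cluster placement group for lowest inter-node latency;
    # that is omitted here to keep the example focused.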
    # Complete
    complete = {
      name            = "complete-self-mng"
      use_name_prefix = false

      subnet_ids = module.vpc.public_subnets

      min_size     = 1
      max_size     = 7
      desired_size = 1

      ami_id               = data.aws_ami.eks_default.id
      bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'"

      pre_bootstrap_user_data = <<-EOT
      export CONTAINER_RUNTIME="containerd"
      export USE_MAX_PODS=false
      EOT

      post_bootstrap_user_data = <<-EOT
      echo "you are free little kubelet!"
      EOT

      instance_type = "m6i.large"

      launch_template_name            = "self-managed-ex"
      launch_template_use_name_prefix = true
      launch_template_description     = "Self managed node group example launch template"

      ebs_optimized          = true
      vpc_security_group_ids = [aws_security_group.additional.id]
      enable_monitoring      = true

      block_device_mappings = {
        xvda = {
          device_name = "/dev/xvda"
          ebs = {
            volume_size           = 75
            volume_type           = "gp3"
            iops                  = 3000
            throughput            = 150
            encrypted             = true
            kms_key_id            = aws_kms_key.ebs.arn
            delete_on_termination = true
          }
        }
      }

      metadata_options = {
        http_endpoint               = "enabled"
        http_tokens                 = "required"
        http_put_response_hop_limit = 2
        instance_metadata_tags      = "disabled"
      }

      capacity_reservation_specification = {
        capacity_reservation_target = {
          capacity_reservation_id = aws_ec2_capacity_reservation.targeted.id
        }
      }

      create_iam_role          = true
      iam_role_name            = "self-managed-node-group-complete-example"
      iam_role_use_name_prefix = false
      iam_role_description     = "Self managed node group complete example role"
      iam_role_tags = {
        Purpose = "Protector of the kubelet"
      }
      iam_role_additional_policies = [
        "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
      ]

      create_security_group          = true
      security_group_name            = "self-managed-node-group-complete-example"
      security_group_use_name_prefix = false
      security_group_description     = "Self managed node group complete example security group"
      security_group_rules = {
        phoneOut = {
          description = "Hello CloudFlare"
          protocol    = "udp"
          from_port   = 53
          to_port     = 53
          type        = "egress"
          cidr_blocks = ["1.1.1.1/32"]
        }
        phoneHome = {
          description                   = "Hello cluster"
          protocol                      = "udp"
          from_port                     = 53
          to_port                       = 53
          type                          = "egress"
          source_cluster_security_group = true # bit of reflection lookup
        }
      }
      security_group_tags = {
        Purpose = "Protector of the kubelet"
      }

      timeouts = {
        create = "80m"
        update = "80m"
        delete = "80m"
      }

      tags = {
        ExtraTag = "Self managed node group complete example"
      }
    }
  }

  tags = local.tags
}
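
# The upstream example keeps its outputs in a separate outputs.tf; as a minimal
# sketch, the module outputs already referenced in this file can be re-exported
# like so:
output "cluster_endpoint" {
  description = "Endpoint for the EKS control plane"
  value       = module.eks.cluster_endpoint
}

output "cluster_id" {
  description = "The name/id of the EKS cluster"
  value       = module.eks.cluster_id
}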
################################################################################
# Supporting Resources
################################################################################
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 3.0"
name = local.name
cidr = "10.0.0.0/16"
azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
enable_flow_log = true
create_flow_log_cloudwatch_iam_role = true
create_flow_log_cloudwatch_log_group = true
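
  # The kubernetes.io/* tags below let Kubernetes discover which subnets to use
  # when provisioning load balancers: role/elb marks subnets for
  # internet-facing load balancers, role/internal-elb for internal ones.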
  public_subnet_tags = {
    "kubernetes.io/cluster/${local.name}" = "shared"
    "kubernetes.io/role/elb"              = 1
  }

  private_subnet_tags = {
    "kubernetes.io/cluster/${local.name}" = "shared"
    "kubernetes.io/role/internal-elb"     = 1
  }

  tags = local.tags
}
resource "aws_security_group" "additional" {
name_prefix = "${local.name}-additional"
vpc_id = module.vpc.vpc_id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [
"10.0.0.0/8",
"172.16.0.0/12",
"192.168.0.0/16",
]
}
tags = local.tags
}
resource "aws_kms_key" "eks" {
description = "EKS Secret Encryption Key"
deletion_window_in_days = 7
enable_key_rotation = true
tags = local.tags
}
data "aws_ami" "eks_default" {
most_recent = true
owners = ["amazon"]
filter {
name = "name"
values = ["amazon-eks-node-${local.cluster_version}-v*"]
}
}
data "aws_ami" "eks_default_bottlerocket" {
most_recent = true
owners = ["amazon"]
filter {
name = "name"
values = ["bottlerocket-aws-k8s-${local.cluster_version}-x86_64-*"]
}
}
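
# As a cross-check on what these data sources resolve, the recommended image
# IDs are also published as public SSM parameters, e.g.:
#   aws ssm get-parameter --name /aws/service/eks/optimized-ami/1.22/amazon-linux-2/recommended/image_id --query 'Parameter.Value' --output text
#   aws ssm get-parameter --name /aws/service/bottlerocket/aws-k8s-1.22/x86_64/latest/image_id --query 'Parameter.Value' --output text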
resource "tls_private_key" "this" {
algorithm = "RSA"
}
resource "aws_key_pair" "this" {
key_name = local.name
public_key = tls_private_key.this.public_key_openssh
}
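
# Note: tls_private_key stores the unencrypted private key in Terraform state.
# That is acceptable for a throwaway example, but for real clusters the key
# material should be generated out-of-band.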
resource "aws_kms_key" "ebs" {
description = "Customer managed key to encrypt self managed node group volumes"
deletion_window_in_days = 7
policy = data.aws_iam_policy_document.ebs.json
}
resource "aws_ec2_capacity_reservation" "targeted" {
instance_type = "m6i.large"
instance_platform = "Linux/UNIX"
availability_zone = "${local.region}a"
instance_count = 1
instance_match_criteria = "targeted"
}
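
# With instance_match_criteria = "targeted", the reservation is only consumed
# by launches that reference it explicitly (as the "complete" node group does
# above), and the launch must match the reservation's instance type and AZ.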
# This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes
data "aws_iam_policy_document" "ebs" {
  # Copy of default KMS policy that lets you manage it
  statement {
    sid       = "Enable IAM User Permissions"
    actions   = ["kms:*"]
    resources = ["*"]

    principals {
      type        = "AWS"
      identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
    }
  }

  # Required for EKS
  statement {
    sid = "Allow service-linked role use of the CMK"
    actions = [
      "kms:Encrypt",
      "kms:Decrypt",
      "kms:ReEncrypt*",
      "kms:GenerateDataKey*",
      "kms:DescribeKey"
    ]
    resources = ["*"]

    principals {
      type = "AWS"
      identifiers = [
        "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
        module.eks.cluster_iam_role_arn,                                                                                                            # required for the cluster / persistentvolume-controller to create encrypted PVCs
      ]
    }
  }

  statement {
    sid       = "Allow attachment of persistent resources"
    actions   = ["kms:CreateGrant"]
    resources = ["*"]

    principals {
      type = "AWS"
      identifiers = [
        "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
        module.eks.cluster_iam_role_arn,                                                                                                            # required for the cluster / persistentvolume-controller to create encrypted PVCs
      ]
    }

    condition {
      test     = "Bool"
      variable = "kms:GrantIsForAWSResource"
      values   = ["true"]
    }
  }
}