provider "aws" {
|
|
region = local.region
|
|
}

locals {
  name            = "ex-${replace(basename(path.cwd), "_", "-")}"
  cluster_version = "1.21"
  region          = "eu-west-1"

  tags = {
    Example    = local.name
    GithubRepo = "terraform-aws-eks"
    GithubOrg  = "terraform-aws-modules"
  }
}

data "aws_caller_identity" "current" {}

################################################################################
# EKS Module
################################################################################

module "eks" {
  source = "../.."

  cluster_name                    = local.name
  cluster_version                 = local.cluster_version
  cluster_endpoint_private_access = true
  cluster_endpoint_public_access  = true

  # IPv6
  cluster_ip_family          = "ipv6"
  create_cni_ipv6_iam_policy = true

  cluster_addons = {
    coredns = {
      resolve_conflicts = "OVERWRITE"
    }
    kube-proxy = {}
    vpc-cni = {
      resolve_conflicts = "OVERWRITE"
    }
  }

  cluster_encryption_config = [{
    provider_key_arn = aws_kms_key.eks.arn
    resources        = ["secrets"]
  }]

  cluster_security_group_additional_rules = {
    admin_access = {
      description = "Admin ingress to Kubernetes API"
      cidr_blocks = ["10.97.0.0/30"]
      protocol    = "tcp"
      from_port   = 443
      to_port     = 443
      type        = "ingress"
    }
  }

  vpc_id     = module.vpc.vpc_id
  subnet_ids = module.vpc.private_subnets

  enable_irsa = true

  eks_managed_node_group_defaults = {
    ami_type       = "AL2_x86_64"
    disk_size      = 50
    instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
  }

  eks_managed_node_groups = {
    # Default node group - as provided by AWS EKS
    default_node_group = {
      # By default, the module creates a launch template to ensure tags are propagated to instances, etc.,
      # so we need to disable it to use the default template provided by the AWS EKS managed node group service
      create_launch_template = false
      launch_template_name   = ""

      # Remote access cannot be specified with a launch template
      remote_access = {
        ec2_ssh_key               = aws_key_pair.this.key_name
        source_security_group_ids = [aws_security_group.remote_access.id]
      }
    }

    # Default node group - as provided by AWS EKS using Bottlerocket
    bottlerocket_default = {
      # By default, the module creates a launch template to ensure tags are propagated to instances, etc.,
      # so we need to disable it to use the default template provided by the AWS EKS managed node group service
      create_launch_template = false
      launch_template_name   = ""

      ami_type = "BOTTLEROCKET_x86_64"
      platform = "bottlerocket"
    }

    # Adds to the AWS provided user data
    bottlerocket_add = {
      ami_type = "BOTTLEROCKET_x86_64"
      platform = "bottlerocket"

      # This will get added to what AWS provides
      bootstrap_extra_args = <<-EOT
      # extra args added
      [settings.kernel]
      lockdown = "integrity"
      EOT
    }

    # Custom AMI, using module provided bootstrap data
    bottlerocket_custom = {
      # Current Bottlerocket AMI
      ami_id   = "ami-0ff61e0bcfc81dc94"
      platform = "bottlerocket"

      # Use the module's user data template to bootstrap
      enable_bootstrap_user_data = true
      # This will get added to the template
      bootstrap_extra_args = <<-EOT
      # extra args added
      [settings.kernel]
      lockdown = "integrity"

      [settings.kubernetes.node-labels]
      "label1" = "foo"
      "label2" = "bar"

      [settings.kubernetes.node-taints]
      "dedicated" = "experimental:PreferNoSchedule"
      "special" = "true:NoSchedule"
      EOT
    }

    # Use existing/external launch template
    external_lt = {
      create_launch_template  = false
      launch_template_name    = aws_launch_template.external.name
      launch_template_version = aws_launch_template.external.default_version
    }

    # Use a custom AMI
    custom_ami = {
      ami_type = "AL2_ARM_64"
      # Current default AMI used by managed node groups - pseudo "custom"
      ami_id = "ami-01dc0aa438e3214c2" # ARM

      # This will ensure the bootstrap user data is used to join the node.
      # By default, EKS managed node groups will not append the bootstrap script;
      # this adds it back in using the default template provided by the module.
      # Note: this assumes the AMI provided is an EKS optimized AMI derivative.
      enable_bootstrap_user_data = true

      instance_types = ["t4g.medium"]
    }

    # Complete
    complete = {
      name            = "complete-eks-mng"
      use_name_prefix = true

      subnet_ids = module.vpc.private_subnets

      min_size     = 1
      max_size     = 7
      desired_size = 1

      ami_id                     = "ami-0caf35bc73450c396"
      enable_bootstrap_user_data = true
      bootstrap_extra_args       = "--container-runtime containerd --kubelet-extra-args '--max-pods=20'"

      pre_bootstrap_user_data = <<-EOT
      export CONTAINER_RUNTIME="containerd"
      export USE_MAX_PODS=false
      EOT

      post_bootstrap_user_data = <<-EOT
      echo "you are free little kubelet!"
      EOT

      capacity_type        = "SPOT"
      disk_size            = 256
      force_update_version = true
      instance_types       = ["m6i.large", "m5.large", "m5n.large", "m5zn.large", "m3.large", "m4.large"]
      labels = {
        GithubRepo = "terraform-aws-eks"
        GithubOrg  = "terraform-aws-modules"
      }

      taints = [
        {
          key    = "dedicated"
          value  = "gpuGroup"
          effect = "NO_SCHEDULE"
        }
      ]

      update_config = {
        max_unavailable_percentage = 50 # or set `max_unavailable`
      }

      description = "EKS managed node group example launch template"

      ebs_optimized           = true
      vpc_security_group_ids  = [aws_security_group.additional.id]
      disable_api_termination = false
      enable_monitoring       = true

      block_device_mappings = {
        xvda = {
          device_name = "/dev/xvda"
          ebs = {
            volume_size           = 75
            volume_type           = "gp3"
            iops                  = 3000
            throughput            = 150
            encrypted             = true
            kms_key_id            = aws_kms_key.ebs.arn
            delete_on_termination = true
          }
        }
      }

      metadata_options = {
        http_endpoint               = "enabled"
        http_tokens                 = "required"
        http_put_response_hop_limit = 2
      }

      create_iam_role          = true
      iam_role_name            = "eks-managed-node-group-complete-example"
      iam_role_use_name_prefix = false
      iam_role_description     = "EKS managed node group complete example role"
      iam_role_tags = {
        Purpose = "Protector of the kubelet"
      }
      iam_role_additional_policies = [
        "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
      ]

      create_security_group          = true
      security_group_name            = "eks-managed-node-group-complete-example"
      security_group_use_name_prefix = false
      security_group_description     = "EKS managed node group complete example security group"
      security_group_rules = {
        phoneOut = {
          description = "Hello CloudFlare"
          protocol    = "udp"
          from_port   = 53
          to_port     = 53
          type        = "egress"
          cidr_blocks = ["1.1.1.1/32"]
        }
        phoneHome = {
          description                   = "Hello cluster"
          protocol                      = "udp"
          from_port                     = 53
          to_port                       = 53
          type                          = "egress"
          source_cluster_security_group = true # bit of reflection lookup
        }
      }
      security_group_tags = {
        Purpose = "Protector of the kubelet"
      }

      tags = {
        ExtraTag = "EKS managed node group complete example"
      }
    }
  }

  tags = local.tags
}
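
################################################################################
# Outputs (a minimal sketch)
################################################################################

# A minimal convenience sketch: surface the module outputs that the rest of
# this file already references (`cluster_id`, `cluster_endpoint`) so they can
# be inspected after `terraform apply`.
output "cluster_id" {
  description = "EKS cluster ID"
  value       = module.eks.cluster_id
}

output "cluster_endpoint" {
  description = "Endpoint for the EKS Kubernetes API"
  value       = module.eks.cluster_endpoint
}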

# References to resources that do not exist yet when creating a cluster will cause a plan failure due to https://github.com/hashicorp/terraform/issues/4149
# There are two options users can take
# 1. Create the dependent resources before the cluster => `terraform apply -target <your policy or your security group>` and then `terraform apply` (see the example commands below)
#    Note: this is the route users will have to take for adding additional security groups to nodes since there isn't a separate "security group attachment" resource
# 2. For additional IAM policies, users can attach the policies outside of the cluster definition as demonstrated below
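#
# A hypothetical example of option 1, using the resources defined in this file:
#   terraform apply -target=aws_security_group.additional -target=aws_iam_policy.node_additional
#   terraform apply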

resource "aws_iam_role_policy_attachment" "additional" {
  for_each = module.eks.eks_managed_node_groups

  policy_arn = aws_iam_policy.node_additional.arn
  role       = each.value.iam_role_name
}

################################################################################
# aws-auth configmap
# Only EKS managed node groups automatically add roles to aws-auth configmap
# so we need to ensure fargate profiles and self-managed node roles are added
################################################################################

data "aws_eks_cluster_auth" "this" {
  name = module.eks.cluster_id
}

locals {
  kubeconfig = yamlencode({
    apiVersion      = "v1"
    kind            = "Config"
    current-context = "terraform"
    clusters = [{
      name = module.eks.cluster_id
      cluster = {
        certificate-authority-data = module.eks.cluster_certificate_authority_data
        server                     = module.eks.cluster_endpoint
      }
    }]
    contexts = [{
      name = "terraform"
      context = {
        cluster = module.eks.cluster_id
        user    = "terraform"
      }
    }]
    users = [{
      name = "terraform"
      user = {
        token = data.aws_eks_cluster_auth.this.token
      }
    }]
  })
}
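
# The token from `aws_eks_cluster_auth` is short-lived, so this kubeconfig is
# only suitable for the transient `local-exec` patch below. A minimal sketch
# for writing it to disk for debugging, assuming the hashicorp/local provider
# is available (note: the file and the Terraform state would then hold the
# token in plain text):
#
#   resource "local_file" "kubeconfig" {
#     content  = local.kubeconfig
#     filename = "${path.module}/kubeconfig"
#   }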

resource "null_resource" "patch" {
  triggers = {
    kubeconfig = base64encode(local.kubeconfig)
    cmd_patch  = "kubectl patch configmap/aws-auth --patch \"${module.eks.aws_auth_configmap_yaml}\" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)"
  }

  provisioner "local-exec" {
    interpreter = ["/bin/bash", "-c"]
    environment = {
      KUBECONFIG = self.triggers.kubeconfig
    }
    command = self.triggers.cmd_patch
  }
}
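
# Note: the patch command above relies on bash process substitution (`<(...)`)
# and requires `kubectl` to be available on the machine running Terraform.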

################################################################################
# Supporting Resources
################################################################################

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 3.0"

  name = local.name
  cidr = "10.0.0.0/16"

  azs             = ["${local.region}a", "${local.region}b", "${local.region}c"]
  private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
  public_subnets  = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]

  enable_ipv6                     = true
  assign_ipv6_address_on_creation = true
  create_egress_only_igw          = true

  public_subnet_ipv6_prefixes  = [0, 1, 2]
  private_subnet_ipv6_prefixes = [3, 4, 5]

  enable_nat_gateway   = true
  single_nat_gateway   = true
  enable_dns_hostnames = true

  enable_flow_log                      = true
  create_flow_log_cloudwatch_iam_role  = true
  create_flow_log_cloudwatch_log_group = true

  public_subnet_tags = {
    "kubernetes.io/cluster/${local.name}" = "shared"
    "kubernetes.io/role/elb"              = 1
  }

  private_subnet_tags = {
    "kubernetes.io/cluster/${local.name}" = "shared"
    "kubernetes.io/role/internal-elb"     = 1
  }

  tags = local.tags
}

resource "aws_security_group" "additional" {
  name_prefix = "${local.name}-additional"
  vpc_id      = module.vpc.vpc_id

  ingress {
    from_port = 22
    to_port   = 22
    protocol  = "tcp"
    cidr_blocks = [
      "10.0.0.0/8",
      "172.16.0.0/12",
      "192.168.0.0/16",
    ]
  }

  tags = local.tags
}

resource "aws_kms_key" "eks" {
  description             = "EKS Secret Encryption Key"
  deletion_window_in_days = 7
  enable_key_rotation     = true

  tags = local.tags
}

resource "aws_kms_key" "ebs" {
  description             = "Customer managed key to encrypt EKS managed node group volumes"
  deletion_window_in_days = 7
  policy                  = data.aws_iam_policy_document.ebs.json
}

# This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to encrypt, decrypt, and attach encrypted EBS volumes
data "aws_iam_policy_document" "ebs" {
  # Copy of default KMS policy that lets you manage it
  statement {
    sid       = "Enable IAM User Permissions"
    actions   = ["kms:*"]
    resources = ["*"]

    principals {
      type        = "AWS"
      identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
    }
  }

  # Required for EKS
  statement {
    sid = "Allow service-linked role use of the CMK"
    actions = [
      "kms:Encrypt",
      "kms:Decrypt",
      "kms:ReEncrypt*",
      "kms:GenerateDataKey*",
      "kms:DescribeKey"
    ]
    resources = ["*"]

    principals {
      type = "AWS"
      identifiers = [
        "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
        module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
      ]
    }
  }

  statement {
    sid       = "Allow attachment of persistent resources"
    actions   = ["kms:CreateGrant"]
    resources = ["*"]

    principals {
      type = "AWS"
      identifiers = [
        "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
        module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
      ]
    }

    condition {
      test     = "Bool"
      variable = "kms:GrantIsForAWSResource"
      values   = ["true"]
    }
  }
}

# This is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx)
# There are several more options one could set, but you probably don't need to modify them;
# you can take the default and add your custom AMI and/or custom tags
#
# Trivia: AWS transparently creates a copy of your LaunchTemplate and actually uses that copy for the node group. If you DON'T use a custom AMI,
# then the default user data for bootstrapping a cluster is merged into the copy.

resource "aws_launch_template" "external" {
  name_prefix            = "external-eks-ex-"
  description            = "EKS managed node group external launch template"
  update_default_version = true

  block_device_mappings {
    device_name = "/dev/xvda"

    ebs {
      volume_size           = 100
      volume_type           = "gp2"
      delete_on_termination = true
    }
  }

  monitoring {
    enabled = true
  }

  network_interfaces {
    associate_public_ip_address = false
    delete_on_termination       = true
  }

  # If you want to use a custom AMI
  # image_id = var.ami_id

  # If you use a custom AMI, you need to supply the bootstrap script via user data, since EKS DOESN'T merge its managed user data in that case.
  # You can add more than the minimum code you see in the template, e.g. install the SSM agent, see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
  # (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151)
  # user_data = base64encode(data.template_file.launch_template_userdata.rendered)
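
  # A minimal sketch of rendering such user data with the cloudinit provider
  # linked above; `data.cloudinit_config.bootstrap` is hypothetical and would
  # be declared at the top level of the file, not inside this resource:
  #
  #   data "cloudinit_config" "bootstrap" {
  #     gzip          = false
  #     base64_encode = true
  #
  #     part {
  #       content_type = "text/x-shellscript"
  #       content      = <<-EOT
  #         #!/bin/bash
  #         /etc/eks/bootstrap.sh ${local.name}
  #       EOT
  #     }
  #   }
  #
  # and then here: user_data = data.cloudinit_config.bootstrap.rendered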

  tag_specifications {
    resource_type = "instance"

    tags = {
      Name      = "external_lt"
      CustomTag = "Instance custom tag"
    }
  }

  tag_specifications {
    resource_type = "volume"

    tags = {
      CustomTag = "Volume custom tag"
    }
  }

  tag_specifications {
    resource_type = "network-interface"

    tags = {
      CustomTag = "EKS example"
    }
  }

  tags = {
    CustomTag = "Launch template custom tag"
  }

  lifecycle {
    create_before_destroy = true
  }
}

resource "tls_private_key" "this" {
  algorithm = "RSA"
}
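
# Note: the private key generated above is stored unencrypted in the Terraform
# state; for anything beyond an example, create and manage the key pair out of
# band.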

resource "aws_key_pair" "this" {
  key_name_prefix = local.name
  public_key      = tls_private_key.this.public_key_openssh

  tags = local.tags
}

resource "aws_security_group" "remote_access" {
  name_prefix = "${local.name}-remote-access"
  description = "Allow remote SSH access"
  vpc_id      = module.vpc.vpc_id

  ingress {
    description = "SSH access"
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
    cidr_blocks = ["10.0.0.0/8"]
  }

  egress {
    from_port        = 0
    to_port          = 0
    protocol         = "-1"
    cidr_blocks      = ["0.0.0.0/0"]
    ipv6_cidr_blocks = ["::/0"]
  }

  tags = local.tags
}

resource "aws_iam_policy" "node_additional" {
  name        = "${local.name}-additional"
  description = "Example usage of node additional policy"

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = [
          "ec2:Describe*",
        ]
        Effect   = "Allow"
        Resource = "*"
      },
    ]
  })

  tags = local.tags
}