provider "aws" {
|
|
region = local.region
|
|
}
|
|
|
|
locals {
|
|
name = "ex-${replace(basename(path.cwd), "_", "-")}"
|
|
cluster_version = "1.21"
|
|
region = "eu-west-1"
|
|
|
|
tags = {
|
|
Example = local.name
|
|
GithubRepo = "terraform-aws-eks"
|
|
GithubOrg = "terraform-aws-modules"
|
|
}
|
|
}
|
|
|
|
data "aws_caller_identity" "current" {}
|
|
|
|
################################################################################
|
|
# EKS Module
|
|
################################################################################
|
|
|
|
module "eks" {
|
|
source = "../.."
|
|
|
|
cluster_name = local.name
|
|
cluster_version = local.cluster_version
|
|
cluster_endpoint_private_access = true
|
|
cluster_endpoint_public_access = true
|
|
|
|
cluster_addons = {
|
|
coredns = {
|
|
resolve_conflicts = "OVERWRITE"
|
|
}
|
|
kube-proxy = {}
|
|
vpc-cni = {
|
|
resolve_conflicts = "OVERWRITE"
|
|
}
|
|
}
|
|
|
|
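  # Envelope-encrypt Kubernetes secrets with the customer-managed KMS key
  # (aws_kms_key.eks) defined under "Supporting Resources" below.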
  cluster_encryption_config = [{
    provider_key_arn = aws_kms_key.eks.arn
    resources        = ["secrets"]
  }]

  vpc_id     = module.vpc.vpc_id
  subnet_ids = module.vpc.private_subnets

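  # Create an IAM OIDC provider for the cluster so workloads can assume IAM
  # roles via Kubernetes service accounts (IRSA).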
  enable_irsa = true

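  # Defaults here are merged into every group defined in self_managed_node_groups
  # below; individual groups can override them.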
  self_managed_node_group_defaults = {
    disk_size = 50
  }

  self_managed_node_groups = {
    # Default node group - as provisioned by the module defaults
    default_node_group = {}

    # Bottlerocket node group
    bottlerocket = {
      name = "bottlerocket-self-mng"

      platform      = "bottlerocket"
      ami_id        = data.aws_ami.bottlerocket_ami.id
      instance_type = "m5.large"
      desired_size  = 2
      key_name      = aws_key_pair.this.key_name

      iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]

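      # Bottlerocket nodes are configured via TOML settings rather than a bootstrap
      # script; the module appends this snippet to the TOML user data it generates.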
      bootstrap_extra_args = <<-EOT
      # The admin host container provides SSH access and runs with "superpowers".
      # It is disabled by default, and this example keeps it disabled explicitly.
      [settings.host-containers.admin]
      enabled = false

      # The control host container provides out-of-band access via SSM.
      # It is enabled by default, and can be disabled if you do not expect to use SSM.
      # This could leave you with no way to access the API and change settings on an existing node!
      [settings.host-containers.control]
      enabled = true

      [settings.kubernetes.node-labels]
      ingress = "allowed"
      EOT
    }

    # Complete
    complete = {
      name            = "complete-self-mng"
      use_name_prefix = false

      subnet_ids = module.vpc.public_subnets

      min_size     = 1
      max_size     = 7
      desired_size = 1

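      # Pinned AMI ID; AMI IDs are region-specific, so this value is only valid
      # in the region this example targets (eu-west-1).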
ami_id = "ami-0caf35bc73450c396"
|
|
bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'"
|
|
|
|
pre_bootstrap_user_data = <<-EOT
|
|
export CONTAINER_RUNTIME="containerd"
|
|
export USE_MAX_PODS=false
|
|
EOT
|
|
|
|
post_bootstrap_user_data = <<-EOT
|
|
echo "you are free little kubelet!"
|
|
EOT
|
|
|
|
disk_size = 256
|
|
instance_type = "m6i.large"
|
|
|
|
launch_template_name = "self-managed-ex"
|
|
launch_template_use_name_prefix = true
|
|
launch_template_description = "Self managed node group example launch template"
|
|
|
|
ebs_optimized = true
|
|
vpc_security_group_ids = [aws_security_group.additional.id]
|
|
enable_monitoring = true
|
|
|
|
block_device_mappings = {
|
|
xvda = {
|
|
device_name = "/dev/xvda"
|
|
ebs = {
|
|
volume_size = 75
|
|
volume_type = "gp3"
|
|
iops = 3000
|
|
throughput = 150
|
|
encrypted = true
|
|
kms_key_id = aws_kms_key.ebs.arn
|
|
delete_on_termination = true
|
|
}
|
|
}
|
|
}
|
|
|
|
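      # Require IMDSv2 session tokens; the hop limit of 2 lets containers on the
      # node reach the instance metadata service through the extra network hop.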
      metadata_options = {
        http_endpoint               = "enabled"
        http_tokens                 = "required"
        http_put_response_hop_limit = 2
      }

      create_iam_role          = true
      iam_role_name            = "self-managed-node-group-complete-example"
      iam_role_use_name_prefix = false
      iam_role_description     = "Self managed node group complete example role"
      iam_role_tags = {
        Purpose = "Protector of the kubelet"
      }
      iam_role_additional_policies = [
        "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
      ]

      create_security_group          = true
      security_group_name            = "self-managed-node-group-complete-example"
      security_group_use_name_prefix = false
      security_group_description     = "Self managed node group complete example security group"
      security_group_rules = {
        phoneOut = {
          description = "Hello CloudFlare"
          protocol    = "udp"
          from_port   = 53
          to_port     = 53
          type        = "egress"
          cidr_blocks = ["1.1.1.1/32"]
        }
        phoneHome = {
          description                   = "Hello cluster"
          protocol                      = "udp"
          from_port                     = 53
          to_port                       = 53
          type                          = "egress"
          source_cluster_security_group = true # use the cluster primary security group as the source
        }
      }
      security_group_tags = {
        Purpose = "Protector of the kubelet"
      }

      timeouts = {
        create = "80m"
        update = "80m"
        delete = "80m"
      }

      tags = {
        ExtraTag = "Self managed node group complete example"
      }
    }
  }

  tags = local.tags
}

################################################################################
# aws-auth configmap
# Only EKS managed node groups automatically add their roles to the aws-auth
# configmap, so we need to ensure Fargate profile and self-managed node group
# roles are added as well
################################################################################

data "aws_eks_cluster_auth" "this" {
  name = module.eks.cluster_id
}

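# Build a minimal kubeconfig in memory from the cluster endpoint, CA data, and a
# short-lived token from aws_eks_cluster_auth; it is only consumed by the
# local-exec provisioner below.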
locals {
  kubeconfig = yamlencode({
    apiVersion      = "v1"
    kind            = "Config"
    current-context = "terraform"
    clusters = [{
      name = module.eks.cluster_id
      cluster = {
        certificate-authority-data = module.eks.cluster_certificate_authority_data
        server                     = module.eks.cluster_endpoint
      }
    }]
    contexts = [{
      name = "terraform"
      context = {
        cluster = module.eks.cluster_id
        user    = "terraform"
      }
    }]
    users = [{
      name = "terraform"
      user = {
        token = data.aws_eks_cluster_auth.this.token
      }
    }]
  })
}

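# The kubeconfig is stored base64-encoded in the triggers, exported via the
# KUBECONFIG environment variable, and decoded with bash process substitution
# (hence the explicit /bin/bash interpreter); kubectl must be available on the
# machine running Terraform.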
resource "null_resource" "apply" {
|
|
triggers = {
|
|
kubeconfig = base64encode(local.kubeconfig)
|
|
cmd_patch = <<-EOT
|
|
kubectl create configmap aws-auth -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
|
|
kubectl patch configmap/aws-auth --patch "${module.eks.aws_auth_configmap_yaml}" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
|
|
EOT
|
|
}
|
|
|
|
provisioner "local-exec" {
|
|
interpreter = ["/bin/bash", "-c"]
|
|
environment = {
|
|
KUBECONFIG = self.triggers.kubeconfig
|
|
}
|
|
command = self.triggers.cmd_patch
|
|
}
|
|
}
|
|
|
|
################################################################################
|
|
# Supporting Resources
|
|
################################################################################
|
|
|
|
module "vpc" {
|
|
source = "terraform-aws-modules/vpc/aws"
|
|
version = "~> 3.0"
|
|
|
|
name = local.name
|
|
cidr = "10.0.0.0/16"
|
|
|
|
azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
|
|
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
|
|
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
|
|
|
|
enable_nat_gateway = true
|
|
single_nat_gateway = true
|
|
enable_dns_hostnames = true
|
|
|
|
enable_flow_log = true
|
|
create_flow_log_cloudwatch_iam_role = true
|
|
create_flow_log_cloudwatch_log_group = true
|
|
|
|
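  # The kubernetes.io subnet tags let the cluster (and the AWS load balancer
  # integrations) discover which subnets to use for public vs. internal load
  # balancers.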
  public_subnet_tags = {
    "kubernetes.io/cluster/${local.name}" = "shared"
    "kubernetes.io/role/elb"              = 1
  }

  private_subnet_tags = {
    "kubernetes.io/cluster/${local.name}" = "shared"
    "kubernetes.io/role/internal-elb"     = 1
  }

  tags = local.tags
}

resource "aws_security_group" "additional" {
  name_prefix = "${local.name}-additional"
  vpc_id      = module.vpc.vpc_id

  ingress {
    from_port = 22
    to_port   = 22
    protocol  = "tcp"
    cidr_blocks = [
      "10.0.0.0/8",
      "172.16.0.0/12",
      "192.168.0.0/16",
    ]
  }

  tags = local.tags
}

resource "aws_kms_key" "eks" {
  description             = "EKS Secret Encryption Key"
  deletion_window_in_days = 7
  enable_key_rotation     = true

  tags = local.tags
}

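# Latest Amazon-published Bottlerocket AMI matching the cluster's Kubernetes
# version (x86_64).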
data "aws_ami" "bottlerocket_ami" {
|
|
most_recent = true
|
|
owners = ["amazon"]
|
|
|
|
filter {
|
|
name = "name"
|
|
values = ["bottlerocket-aws-k8s-${local.cluster_version}-x86_64-*"]
|
|
}
|
|
}
|
|
|
|
resource "tls_private_key" "this" {
|
|
algorithm = "RSA"
|
|
}
|
|
|
|
resource "aws_key_pair" "this" {
|
|
key_name = local.name
|
|
public_key = tls_private_key.this.public_key_openssh
|
|
}
|
|
|
|
resource "aws_kms_key" "ebs" {
|
|
description = "Customer managed key to encrypt self managed node group volumes"
|
|
deletion_window_in_days = 7
|
|
policy = data.aws_iam_policy_document.ebs.json
|
|
}
|
|
|
|
# This policy is required for the KMS key used for EKS root volumes, so the
# cluster is allowed to encrypt, decrypt, and attach encrypted EBS volumes
data "aws_iam_policy_document" "ebs" {
  # Copy of default KMS policy that lets you manage it
  statement {
    sid       = "Enable IAM User Permissions"
    actions   = ["kms:*"]
    resources = ["*"]

    principals {
      type        = "AWS"
      identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
    }
  }

  # Required for EKS
  statement {
    sid = "Allow service-linked role use of the CMK"
    actions = [
      "kms:Encrypt",
      "kms:Decrypt",
      "kms:ReEncrypt*",
      "kms:GenerateDataKey*",
      "kms:DescribeKey"
    ]
    resources = ["*"]

    principals {
      type = "AWS"
      identifiers = [
        "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
        module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
      ]
    }
  }

  statement {
    sid       = "Allow attachment of persistent resources"
    actions   = ["kms:CreateGrant"]
    resources = ["*"]

    principals {
      type = "AWS"
      identifiers = [
        "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
        module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
      ]
    }

    condition {
      test     = "Bool"
      variable = "kms:GrantIsForAWSResource"
      values   = ["true"]
    }
  }
}