provider "aws" { region = local.region } locals { name = "ex-${replace(basename(path.cwd), "_", "-")}" cluster_version = "1.21" region = "eu-west-1" tags = { Example = local.name GithubRepo = "terraform-aws-eks" GithubOrg = "terraform-aws-modules" } } data "aws_caller_identity" "current" {} data "aws_eks_cluster_auth" "cluster" { name = module.eks.cluster_id } ################################################################################ # EKS Module ################################################################################ module "eks" { source = "../.." cluster_name = local.name cluster_version = local.cluster_version cluster_endpoint_private_access = true cluster_endpoint_public_access = true vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets enable_irsa = true # Self Managed Node Group(s) self_managed_node_groups = { refresh = { max_size = 5 desired_size = 1 instance_type = "m5.large" instance_refresh = { strategy = "Rolling" preferences = { checkpoint_delay = 600 checkpoint_percentages = [35, 70, 100] instance_warmup = 300 min_healthy_percentage = 50 } triggers = ["tag"] } propogate_tags = [{ key = "aws-node-termination-handler/managed" value = true propagate_at_launch = true }] } mixed_instance = { use_mixed_instances_policy = true mixed_instances_policy = { instances_distribution = { on_demand_base_capacity = 0 on_demand_percentage_above_base_capacity = 10 spot_allocation_strategy = "capacity-optimized" } override = [ { instance_type = "m5.large" weighted_capacity = "1" }, { instance_type = "m6i.large" weighted_capacity = "2" }, ] } propogate_tags = [{ key = "aws-node-termination-handler/managed" value = true propagate_at_launch = true }] } spot = { instance_type = "m5.large" instance_market_options = { market_type = "spot" } bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" propogate_tags = [{ key = "aws-node-termination-handler/managed" value = true propagate_at_launch = true }] } } tags = merge(local.tags, { Foo = "bar" }) } ################################################################################ # aws-auth configmap # Only EKS managed node groups automatically add roles to aws-auth configmap # so we need to ensure fargate profiles and self-managed node roles are added ################################################################################ data "aws_eks_cluster_auth" "this" { name = module.eks.cluster_id } locals { kubeconfig = yamlencode({ apiVersion = "v1" kind = "Config" current-context = "terraform" clusters = [{ name = module.eks.cluster_id cluster = { certificate-authority-data = module.eks.cluster_certificate_authority_data server = module.eks.cluster_endpoint } }] contexts = [{ name = "terraform" context = { cluster = module.eks.cluster_id user = "terraform" } }] users = [{ name = "terraform" user = { token = data.aws_eks_cluster_auth.this.token } }] }) } resource "null_resource" "apply" { triggers = { kubeconfig = base64encode(local.kubeconfig) cmd_patch = <<-EOT kubectl create configmap aws-auth -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode) kubectl patch configmap/aws-auth --patch "${module.eks.aws_auth_configmap_yaml}" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode) EOT } provisioner "local-exec" { interpreter = ["/bin/bash", "-c"] environment = { KUBECONFIG = self.triggers.kubeconfig } command = self.triggers.cmd_patch } } ################################################################################ # Supporting Resources 
################################################################################

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 3.0"

  name = local.name
  cidr = "10.0.0.0/16"

  azs             = ["${local.region}a", "${local.region}b", "${local.region}c"]
  private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
  public_subnets  = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]

  enable_nat_gateway   = true
  single_nat_gateway   = true
  enable_dns_hostnames = true

  enable_flow_log                      = true
  create_flow_log_cloudwatch_iam_role  = true
  create_flow_log_cloudwatch_log_group = true

  public_subnet_tags = {
    "kubernetes.io/cluster/${local.name}" = "shared"
    "kubernetes.io/role/elb"              = 1
  }

  private_subnet_tags = {
    "kubernetes.io/cluster/${local.name}" = "shared"
    "kubernetes.io/role/internal-elb"     = 1
  }

  tags = local.tags
}
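
################################################################################
# Provider Requirements (illustrative)
################################################################################

# A minimal sketch of the provider pins this configuration relies on,
# normally kept in a separate versions.tf next to the example. The exact
# version constraints below are assumptions, not taken from the source;
# the config itself only needs the aws provider plus the null provider
# used by the aws-auth local-exec patch resource above.
terraform {
  required_version = ">= 0.13.1"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 3.64"
    }
    null = {
      source  = "hashicorp/null"
      version = ">= 3.0"
    }
  }
}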