provider "aws" { region = local.region } locals { name = "ex-${replace(basename(path.cwd), "_", "-")}" cluster_version = "1.21" region = "eu-west-1" tags = { Example = local.name GithubRepo = "terraform-aws-eks" GithubOrg = "terraform-aws-modules" } } ################################################################################ # EKS Module ################################################################################ module "eks" { source = "../.." cluster_name = local.name cluster_version = local.cluster_version cluster_endpoint_private_access = true cluster_endpoint_public_access = true cluster_addons = { coredns = { resolve_conflicts = "OVERWRITE" } kube-proxy = {} vpc-cni = { resolve_conflicts = "OVERWRITE" } } cluster_encryption_config = [{ provider_key_arn = aws_kms_key.eks.arn resources = ["secrets"] }] vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets enable_irsa = true # Self Managed Node Group(s) self_managed_node_group_defaults = { vpc_security_group_ids = [aws_security_group.additional.id] iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"] } self_managed_node_groups = { spot = { instance_type = "m5.large" instance_market_options = { market_type = "spot" } pre_bootstrap_user_data = <<-EOT echo "foo" export FOO=bar EOT bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'" post_bootstrap_user_data = <<-EOT cd /tmp sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm sudo systemctl enable amazon-ssm-agent sudo systemctl start amazon-ssm-agent EOT } } # EKS Managed Node Group(s) eks_managed_node_group_defaults = { ami_type = "AL2_x86_64" disk_size = 50 instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"] vpc_security_group_ids = [aws_security_group.additional.id] } eks_managed_node_groups = { blue = {} green = { min_size = 1 max_size = 10 desired_size = 1 instance_types = ["t3.large"] capacity_type = "SPOT" labels = { Environment = "test" GithubRepo = "terraform-aws-eks" GithubOrg = "terraform-aws-modules" } taints = { dedicated = { key = "dedicated" value = "gpuGroup" effect = "NO_SCHEDULE" } } update_config = { max_unavailable_percentage = 50 # or set `max_unavailable` } tags = { ExtraTag = "example" } } } # Fargate Profile(s) fargate_profiles = { default = { name = "default" selectors = [ { namespace = "kube-system" labels = { k8s-app = "kube-dns" } }, { namespace = "default" } ] tags = { Owner = "test" } timeouts = { create = "20m" delete = "20m" } } } tags = local.tags } ################################################################################ # Sub-Module Usage on Existing/Separate Cluster ################################################################################ module "eks_managed_node_group" { source = "../../modules/eks-managed-node-group" name = "separate-eks-mng" cluster_name = module.eks.cluster_id cluster_version = local.cluster_version vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets tags = merge(local.tags, { Separate = "eks-managed-node-group" }) } module "self_managed_node_group" { source = "../../modules/self-managed-node-group" name = "separate-self-mng" cluster_name = module.eks.cluster_id cluster_version = local.cluster_version cluster_endpoint = module.eks.cluster_endpoint cluster_auth_base64 = module.eks.cluster_certificate_authority_data instance_type = "m5.large" vpc_id = module.vpc.vpc_id subnet_ids = module.vpc.private_subnets 
  vpc_security_group_ids = [
    module.eks.cluster_primary_security_group_id,
    module.eks.cluster_security_group_id,
  ]

  tags = merge(local.tags, { Separate = "self-managed-node-group" })
}

module "fargate_profile" {
  source = "../../modules/fargate-profile"

  name         = "separate-fargate-profile"
  cluster_name = module.eks.cluster_id

  subnet_ids = module.vpc.private_subnets
  selectors = [{
    namespace = "kube-system"
  }]

  tags = merge(local.tags, { Separate = "fargate-profile" })
}

################################################################################
# Disabled creation
################################################################################

module "disabled_eks" {
  source = "../.."

  create = false
}

module "disabled_fargate_profile" {
  source = "../../modules/fargate-profile"

  create = false
}

module "disabled_eks_managed_node_group" {
  source = "../../modules/eks-managed-node-group"

  create = false
}

module "disabled_self_managed_node_group" {
  source = "../../modules/self-managed-node-group"

  create = false
}

################################################################################
# aws-auth configmap
# Only EKS managed node groups automatically add their roles to the aws-auth
# configmap, so we need to ensure the Fargate profile and self-managed node
# group roles are added as well
################################################################################

data "aws_eks_cluster_auth" "this" {
  name = module.eks.cluster_id
}

locals {
  kubeconfig = yamlencode({
    apiVersion      = "v1"
    kind            = "Config"
    current-context = "terraform"
    clusters = [{
      name = module.eks.cluster_id
      cluster = {
        certificate-authority-data = module.eks.cluster_certificate_authority_data
        server                     = module.eks.cluster_endpoint
      }
    }]
    contexts = [{
      name = "terraform"
      context = {
        cluster = module.eks.cluster_id
        user    = "terraform"
      }
    }]
    users = [{
      name = "terraform"
      user = {
        token = data.aws_eks_cluster_auth.this.token
      }
    }]
  })

  # We have to combine the configmap created by the EKS module with the externally
  # created node group/profile sub-modules. Each rolearn must be an IAM role ARN,
  # so the Fargate profile entry maps its pod execution role.
  aws_auth_configmap_yaml = <<-EOT
  ${chomp(module.eks.aws_auth_configmap_yaml)}
      - rolearn: ${module.eks_managed_node_group.iam_role_arn}
        username: system:node:{{EC2PrivateDNSName}}
        groups:
          - system:bootstrappers
          - system:nodes
      - rolearn: ${module.self_managed_node_group.iam_role_arn}
        username: system:node:{{EC2PrivateDNSName}}
        groups:
          - system:bootstrappers
          - system:nodes
      - rolearn: ${module.fargate_profile.fargate_profile_pod_execution_role_arn}
        username: system:node:{{SessionName}}
        groups:
          - system:bootstrappers
          - system:nodes
          - system:node-proxier
  EOT
}

# Patch the aws-auth configmap with the combined role mappings; kubectl reads its
# kubeconfig from a process substitution of the base64-encoded value passed via the
# KUBECONFIG environment variable
resource "null_resource" "patch" {
  triggers = {
    kubeconfig = base64encode(local.kubeconfig)
    cmd_patch  = "kubectl patch configmap/aws-auth --patch \"${local.aws_auth_configmap_yaml}\" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)"
  }

  provisioner "local-exec" {
    interpreter = ["/bin/bash", "-c"]
    environment = {
      KUBECONFIG = self.triggers.kubeconfig
    }
    command = self.triggers.cmd_patch
  }
}

################################################################################
# Supporting resources
################################################################################

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 3.0"

  name = local.name
  cidr = "10.0.0.0/16"

  azs             = ["${local.region}a", "${local.region}b", "${local.region}c"]
  private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
  public_subnets  = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]

  enable_nat_gateway   = true
  single_nat_gateway   = true
  enable_dns_hostnames = true

  enable_flow_log                      = true
  create_flow_log_cloudwatch_iam_role  = true
  create_flow_log_cloudwatch_log_group = true

  public_subnet_tags = {
    "kubernetes.io/cluster/${local.name}" = "shared"
    "kubernetes.io/role/elb"              = 1
  }

  private_subnet_tags = {
    "kubernetes.io/cluster/${local.name}" = "shared"
    "kubernetes.io/role/internal-elb"     = 1
  }

  tags = local.tags
}

resource "aws_security_group" "additional" {
  name_prefix = "${local.name}-additional"
  vpc_id      = module.vpc.vpc_id

  ingress {
    from_port = 22
    to_port   = 22
    protocol  = "tcp"
    cidr_blocks = [
      "10.0.0.0/8",
      "172.16.0.0/12",
      "192.168.0.0/16",
    ]
  }

  tags = local.tags
}

resource "aws_kms_key" "eks" {
  description             = "EKS Secret Encryption Key"
  deletion_window_in_days = 7
  enable_key_rotation     = true

  tags = local.tags
}
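
################################################################################
# Example output (illustrative sketch)
################################################################################

# Hypothetical addition, not part of the original example: exposes the kubeconfig
# rendered in `local.kubeconfig` above so it can be written to disk for ad-hoc
# kubectl access, e.g. `terraform output -raw kubeconfig > kubeconfig.yaml`.
# Marked sensitive because it embeds the short-lived authentication token.
output "kubeconfig" {
  description = "Rendered kubeconfig for the example cluster (illustrative only)"
  value       = local.kubeconfig
  sensitive   = true
}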