# terraform-aws-eks/examples/karpenter/main.tf
provider "aws" {
region = local.region
}
provider "aws" {
region = "us-east-1"
alias = "virginia"
}
provider "kubernetes" {
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
exec {
api_version = "client.authentication.k8s.io/v1beta1"
command = "aws"
# This requires the awscli to be installed locally where Terraform is executed
args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
}
}
provider "helm" {
kubernetes {
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
exec {
api_version = "client.authentication.k8s.io/v1beta1"
command = "aws"
# This requires the awscli to be installed locally where Terraform is executed
args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
}
}
}
provider "kubectl" {
apply_retry_count = 5
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
load_config_file = false
exec {
api_version = "client.authentication.k8s.io/v1beta1"
command = "aws"
# This requires the awscli to be installed locally where Terraform is executed
args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
}
}
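# Provider requirements for this example live alongside this file (typically in a
# versions.tf). A minimal sketch, assuming the commonly used provider sources -
# in particular, the kubectl provider is the community gavinbunney/kubectl provider:
#
# terraform {
#   required_version = ">= 1.0"
#
#   required_providers {
#     aws        = { source = "hashicorp/aws" }
#     helm       = { source = "hashicorp/helm" }
#     kubernetes = { source = "hashicorp/kubernetes" }
#     kubectl    = { source = "gavinbunney/kubectl" }
#     null       = { source = "hashicorp/null" }
#   }
# }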
data "aws_availability_zones" "available" {}
data "aws_ecrpublic_authorization_token" "token" {
provider = aws.virginia
}
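# ECR Public only issues authorization tokens through the us-east-1 API, hence the
# aliased "virginia" provider above; the token is used below to pull the Karpenter
# Helm chart from the public ECR registry.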
locals {
  name            = "ex-${replace(basename(path.cwd), "_", "-")}"
  cluster_version = "1.24"
  region          = "eu-west-1"

  vpc_cidr = "10.0.0.0/16"
  azs      = slice(data.aws_availability_zones.available.names, 0, 3)

  tags = {
    Example    = local.name
    GithubRepo = "terraform-aws-eks"
    GithubOrg  = "terraform-aws-modules"
  }
}
################################################################################
# EKS Module
################################################################################
module "eks" {
source = "../.."
cluster_name = local.name
cluster_version = local.cluster_version
cluster_endpoint_public_access = true
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
control_plane_subnet_ids = module.vpc.intra_subnets
manage_aws_auth_configmap = true
aws_auth_roles = [
# We need to add in the Karpenter node IAM role for nodes launched by Karpenter
{
rolearn = module.karpenter.role_arn
username = "system:node:{{EC2PrivateDNSName}}"
groups = [
"system:bootstrappers",
"system:nodes",
]
},
]
fargate_profiles = {
kube_system = {
name = "kube-system"
selectors = [
{ namespace = "kube-system" }
]
}
karpenter = {
name = "karpenter"
selectors = [
{ namespace = "karpenter" }
]
}
}
tags = merge(local.tags, {
# NOTE - if creating multiple security groups with this module, only tag the
# security group that Karpenter should utilize with the following tag
# (i.e. - at most, only one security group should have this tag in your account)
"karpenter.sh/discovery" = local.name
})
}
################################################################################
# Karpenter
################################################################################
module "karpenter" {
source = "../../modules/karpenter"
cluster_name = module.eks.cluster_name
irsa_oidc_provider_arn = module.eks.oidc_provider_arn
tags = local.tags
}
resource "helm_release" "karpenter" {
namespace = "karpenter"
create_namespace = true
name = "karpenter"
repository = "oci://public.ecr.aws/karpenter"
repository_username = data.aws_ecrpublic_authorization_token.token.user_name
repository_password = data.aws_ecrpublic_authorization_token.token.password
chart = "karpenter"
version = "v0.19.3"
set {
name = "settings.aws.clusterName"
value = module.eks.cluster_name
}
set {
name = "settings.aws.clusterEndpoint"
value = module.eks.cluster_endpoint
}
set {
name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
value = module.karpenter.irsa_arn
}
set {
name = "settings.aws.defaultInstanceProfile"
value = module.karpenter.instance_profile_name
}
set {
name = "settings.aws.interruptionQueueName"
value = module.karpenter.queue_name
}
}
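# The default Provisioner below restricts Karpenter to spot capacity, caps total
# provisioned CPU at 1000 vCPU, and terminates nodes 30 seconds after they become
# empty (ttlSecondsAfterEmpty).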
resource "kubectl_manifest" "karpenter_provisioner" {
yaml_body = <<-YAML
apiVersion: karpenter.sh/v1alpha5
kind: Provisioner
metadata:
name: default
spec:
requirements:
- key: karpenter.sh/capacity-type
operator: In
values: ["spot"]
limits:
resources:
cpu: 1000
providerRef:
name: default
ttlSecondsAfterEmpty: 30
YAML
depends_on = [
helm_release.karpenter
]
}
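# The AWSNodeTemplate discovers which subnets and security group to use via the
# karpenter.sh/discovery tag applied to the VPC private subnets and the cluster
# security group above, and propagates the same tag to the instances it launches.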
resource "kubectl_manifest" "karpenter_node_template" {
yaml_body = <<-YAML
apiVersion: karpenter.k8s.aws/v1alpha1
kind: AWSNodeTemplate
metadata:
name: default
spec:
subnetSelector:
karpenter.sh/discovery: ${module.eks.cluster_name}
securityGroupSelector:
karpenter.sh/discovery: ${module.eks.cluster_name}
tags:
karpenter.sh/discovery: ${module.eks.cluster_name}
YAML
depends_on = [
helm_release.karpenter
]
}
# Example deployment using the [pause image](https://www.ianlewis.org/en/almighty-pause-container)
# that starts with zero replicas
resource "kubectl_manifest" "karpenter_example_deployment" {
  yaml_body = <<-YAML
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: inflate
    spec:
      replicas: 0
      selector:
        matchLabels:
          app: inflate
      template:
        metadata:
          labels:
            app: inflate
        spec:
          terminationGracePeriodSeconds: 0
          containers:
            - name: inflate
              image: public.ecr.aws/eks-distro/kubernetes/pause:3.7
              resources:
                requests:
                  cpu: 1
  YAML

  depends_on = [
    helm_release.karpenter
  ]
}
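# To see Karpenter provision capacity, scale this deployment up after the cluster is
# created, e.g. `kubectl scale deployment inflate --replicas 5`; the pending pods'
# CPU requests cause Karpenter to launch spot instances to schedule them.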
################################################################################
# Modify EKS CoreDNS Deployment
################################################################################
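# This example has no EC2 node groups (kube-system runs on Fargate), so the CoreDNS
# deployment provisioned by the EKS service is removed here and re-installed below via
# the self-managed CoreDNS Helm chart, annotated to run on Fargate.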
data "aws_eks_cluster_auth" "this" {
name = module.eks.cluster_name
}
locals {
  kubeconfig = yamlencode({
    apiVersion      = "v1"
    kind            = "Config"
    current-context = "terraform"
    clusters = [{
      name = module.eks.cluster_name
      cluster = {
        certificate-authority-data = module.eks.cluster_certificate_authority_data
        server                     = module.eks.cluster_endpoint
      }
    }]
    contexts = [{
      name = "terraform"
      context = {
        cluster = module.eks.cluster_name
        user    = "terraform"
      }
    }]
    users = [{
      name = "terraform"
      user = {
        token = data.aws_eks_cluster_auth.this.token
      }
    }]
  })
}
# Separate resource so that this is only ever executed once
resource "null_resource" "remove_default_coredns_deployment" {
  triggers = {}

  provisioner "local-exec" {
    interpreter = ["/bin/bash", "-c"]
    environment = {
      KUBECONFIG = base64encode(local.kubeconfig)
    }

    # We are removing the deployment provided by the EKS service and replacing it through the self-managed CoreDNS Helm addon
    # However, we are maintaining the existing kube-dns service and annotating it for Helm to assume control
    command = <<-EOT
      kubectl --namespace kube-system delete deployment coredns --kubeconfig <(echo $KUBECONFIG | base64 --decode)
    EOT
  }
}
resource "null_resource" "modify_kube_dns" {
triggers = {}
provisioner "local-exec" {
interpreter = ["/bin/bash", "-c"]
environment = {
KUBECONFIG = base64encode(local.kubeconfig)
}
# We are maintaing the existing kube-dns service and annotating it for Helm to assume control
command = <<-EOT
echo "Setting implicit dependency on ${module.eks.fargate_profiles["kube_system"].fargate_profile_pod_execution_role_arn}"
kubectl --namespace kube-system annotate --overwrite service kube-dns meta.helm.sh/release-name=coredns --kubeconfig <(echo $KUBECONFIG | base64 --decode)
kubectl --namespace kube-system annotate --overwrite service kube-dns meta.helm.sh/release-namespace=kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
kubectl --namespace kube-system label --overwrite service kube-dns app.kubernetes.io/managed-by=Helm --kubeconfig <(echo $KUBECONFIG | base64 --decode)
EOT
}
depends_on = [
null_resource.remove_default_coredns_deployment
]
}
################################################################################
# CoreDNS Helm Chart (self-managed)
################################################################################
data "aws_eks_addon_version" "this" {
for_each = toset(["coredns"])
addon_name = each.value
kubernetes_version = module.eks.cluster_version
most_recent = true
}
resource "helm_release" "coredns" {
name = "coredns"
namespace = "kube-system"
create_namespace = false
description = "CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services"
chart = "coredns"
version = "1.19.4"
repository = "https://coredns.github.io/helm"
# For EKS image repositories https://docs.aws.amazon.com/eks/latest/userguide/add-ons-images.html
values = [
<<-EOT
image:
repository: 602401143452.dkr.ecr.eu-west-1.amazonaws.com/eks/coredns
tag: ${data.aws_eks_addon_version.this["coredns"].version}
deployment:
name: coredns
annotations:
eks.amazonaws.com/compute-type: fargate
service:
name: kube-dns
annotations:
eks.amazonaws.com/compute-type: fargate
podAnnotations:
eks.amazonaws.com/compute-type: fargate
EOT
]
depends_on = [
# Need to ensure the CoreDNS updates are peformed before provisioning
null_resource.modify_kube_dns
]
}
################################################################################
# Supporting Resources
################################################################################
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 3.0"
name = local.name
cidr = local.vpc_cidr
azs = local.azs
private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)]
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
enable_flow_log = true
create_flow_log_cloudwatch_iam_role = true
create_flow_log_cloudwatch_log_group = true
public_subnet_tags = {
"kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
"kubernetes.io/role/internal-elb" = 1
# Tags subnets for Karpenter auto-discovery
"karpenter.sh/discovery" = local.name
}
tags = local.tags
}
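# After `terraform apply`, kubectl access can be configured with the standard
# update-kubeconfig command, using the region and cluster name from the locals above:
#
#   aws eks --region eu-west-1 update-kubeconfig --name <cluster_name>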