feat: New Karpenter sub-module for easily enabling Karpenter on EKS (#2303)
@@ -2,13 +2,50 @@ provider "aws" {
|
||||
region = local.region
|
||||
}
|
||||
|
||||
data "aws_partition" "current" {}
|
||||
provider "kubernetes" {
|
||||
host = module.eks.cluster_endpoint
|
||||
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
|
||||
|
||||
exec {
|
||||
api_version = "client.authentication.k8s.io/v1beta1"
|
||||
command = "aws"
|
||||
# This requires the awscli to be installed locally where Terraform is executed
|
||||
args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
|
||||
}
|
||||
}
|
||||
|
||||
provider "helm" {
|
||||
kubernetes {
|
||||
host = module.eks.cluster_endpoint
|
||||
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
|
||||
|
||||
exec {
|
||||
api_version = "client.authentication.k8s.io/v1beta1"
|
||||
command = "aws"
|
||||
# This requires the awscli to be installed locally where Terraform is executed
|
||||
args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
provider "kubectl" {
|
||||
apply_retry_count = 5
|
||||
host = module.eks.cluster_endpoint
|
||||
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
|
||||
load_config_file = false
|
||||
|
||||
exec {
|
||||
api_version = "client.authentication.k8s.io/v1beta1"
|
||||
command = "aws"
|
||||
# This requires the awscli to be installed locally where Terraform is executed
|
||||
args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name]
|
||||
}
|
||||
}
|
||||
|
||||
locals {
|
||||
name = "ex-${replace(basename(path.cwd), "_", "-")}"
|
||||
cluster_version = "1.22"
|
||||
cluster_version = "1.24"
|
||||
region = "eu-west-1"
|
||||
partition = data.aws_partition.current.partition
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
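All three providers authenticate through an exec plugin that shells out to `aws eks get-token` at apply time, so no credentials are persisted in state and tokens never go stale mid-apply. A token-based alternative is possible as well; a minimal sketch reusing the `aws_eks_cluster_auth` data source that this diff declares further down for the kubeconfig, with the caveat that the token is fetched once and expires after roughly 15 minutes:

provider "kubernetes" {
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
  # Token is fetched once per plan/apply and is short-lived
  token                  = data.aws_eks_cluster_auth.this.token
}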
@@ -32,29 +69,35 @@ module "eks" {
|
||||
vpc_id = module.vpc.vpc_id
|
||||
subnet_ids = module.vpc.private_subnets
|
||||
|
||||
node_security_group_additional_rules = {
|
||||
# Control plane invoke Karpenter webhook
|
||||
ingress_karpenter_webhook_tcp = {
|
||||
description = "Control plane invoke Karpenter webhook"
|
||||
protocol = "tcp"
|
||||
from_port = 8443
|
||||
to_port = 8443
|
||||
type = "ingress"
|
||||
source_cluster_security_group = true
|
||||
# Fargate profiles use the cluster primary security group so these are not utilized
|
||||
create_cluster_security_group = false
|
||||
create_node_security_group = false
|
||||
|
||||
manage_aws_auth_configmap = true
|
||||
aws_auth_roles = [
|
||||
# We need to add in the Karpenter node IAM role for nodes launched by Karpenter
|
||||
{
|
||||
rolearn = module.karpenter.role_arn
|
||||
username = "system:node:{{EC2PrivateDNSName}}"
|
||||
groups = [
|
||||
"system:bootstrappers",
|
||||
"system:nodes",
|
||||
]
|
||||
},
|
||||
]
|
||||
|
||||
fargate_profiles = {
|
||||
kube_system = {
|
||||
name = "kube-system"
|
||||
selectors = [
|
||||
{ namespace = "kube-system" }
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
eks_managed_node_groups = {
|
||||
karpenter = {
|
||||
instance_types = ["t3.medium"]
|
||||
|
||||
min_size = 1
|
||||
max_size = 2
|
||||
desired_size = 1
|
||||
|
||||
iam_role_additional_policies = [
|
||||
# Required by Karpenter
|
||||
"arn:${local.partition}:iam::aws:policy/AmazonSSMManagedInstanceCore"
|
||||
name = "karpenter"
|
||||
selectors = [
|
||||
{ namespace = "karpenter" }
|
||||
]
|
||||
}
|
||||
}
|
||||
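The aws_auth_roles entry is what allows nodes launched by Karpenter to join the cluster: they run under the sub-module's node IAM role rather than a managed node group role, so the mapping must be added explicitly. Rendered into the aws-auth ConfigMap it looks roughly like this (a sketch; the real ARN is resolved from `module.karpenter.role_arn`):

# mapRoles: |
#   - rolearn: arn:aws:iam::<account-id>:role/<karpenter-node-role>
#     username: system:node:{{EC2PrivateDNSName}}
#     groups:
#       - system:bootstrappers
#       - system:nodes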
@@ -71,60 +114,14 @@ module "eks" {
|
||||
# Karpenter
|
||||
################################################################################
|
||||
|
||||
provider "helm" {
|
||||
kubernetes {
|
||||
host = module.eks.cluster_endpoint
|
||||
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
|
||||
|
||||
exec {
|
||||
api_version = "client.authentication.k8s.io/v1beta1"
|
||||
command = "aws"
|
||||
# This requires the awscli to be installed locally where Terraform is executed
|
||||
args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id]
|
||||
}
|
||||
}
|
||||
}
|
||||
module "karpenter" {
|
||||
source = "../../modules/karpenter"
|
||||
|
||||
provider "kubectl" {
|
||||
apply_retry_count = 5
|
||||
host = module.eks.cluster_endpoint
|
||||
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
|
||||
load_config_file = false
|
||||
cluster_name = module.eks.cluster_name
|
||||
irsa_oidc_provider_arn = module.eks.oidc_provider_arn
|
||||
|
||||
exec {
|
||||
api_version = "client.authentication.k8s.io/v1beta1"
|
||||
command = "aws"
|
||||
# This requires the awscli to be installed locally where Terraform is executed
|
||||
args = ["eks", "get-token", "--cluster-name", module.eks.cluster_id]
|
||||
}
|
||||
}
|
||||
|
||||
module "karpenter_irsa" {
|
||||
source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
|
||||
version = "~> 4.21.1"
|
||||
|
||||
role_name = "karpenter-controller-${local.name}"
|
||||
attach_karpenter_controller_policy = true
|
||||
|
||||
karpenter_controller_cluster_id = module.eks.cluster_id
|
||||
karpenter_controller_ssm_parameter_arns = [
|
||||
"arn:${local.partition}:ssm:*:*:parameter/aws/service/*"
|
||||
]
|
||||
karpenter_controller_node_iam_role_arns = [
|
||||
module.eks.eks_managed_node_groups["karpenter"].iam_role_arn
|
||||
]
|
||||
|
||||
oidc_providers = {
|
||||
ex = {
|
||||
provider_arn = module.eks.oidc_provider_arn
|
||||
namespace_service_accounts = ["karpenter:karpenter"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_iam_instance_profile" "karpenter" {
|
||||
name = "KarpenterNodeInstanceProfile-${local.name}"
|
||||
role = module.eks.eks_managed_node_groups["karpenter"].iam_role_name
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
resource "helm_release" "karpenter" {
|
||||
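The hunk header tells the story: roughly sixty lines of hand-rolled wiring (duplicate provider blocks, the `karpenter_irsa` IAM module, a standalone instance profile) collapse into one call to the new sub-module. Its outputs feed the Helm release and the aws-auth mapping; a small sketch for surfacing them during review, using only output names this diff itself references:

output "karpenter_node_role_arn" {
  value = module.karpenter.role_arn
}

output "karpenter_irsa_arn" {
  value = module.karpenter.irsa_arn
}

output "karpenter_instance_profile_name" {
  value = module.karpenter.instance_profile_name
}

output "karpenter_queue_name" {
  value = module.karpenter.queue_name
}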
@@ -132,54 +129,73 @@ resource "helm_release" "karpenter" {
|
||||
create_namespace = true
|
||||
|
||||
name = "karpenter"
|
||||
repository = "https://charts.karpenter.sh"
|
||||
repository = "oci://public.ecr.aws/karpenter"
|
||||
chart = "karpenter"
|
||||
version = "0.8.2"
|
||||
version = "v0.19.1"
|
||||
|
||||
set {
|
||||
name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
|
||||
value = module.karpenter_irsa.iam_role_arn
|
||||
name = "settings.aws.clusterName"
|
||||
value = module.eks.cluster_name
|
||||
}
|
||||
|
||||
set {
|
||||
name = "clusterName"
|
||||
value = module.eks.cluster_id
|
||||
}
|
||||
|
||||
set {
|
||||
name = "clusterEndpoint"
|
||||
name = "settings.aws.clusterEndpoint"
|
||||
value = module.eks.cluster_endpoint
|
||||
}
|
||||
|
||||
set {
|
||||
name = "aws.defaultInstanceProfile"
|
||||
value = aws_iam_instance_profile.karpenter.name
|
||||
name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
|
||||
value = module.karpenter.irsa_arn
|
||||
}
|
||||
|
||||
set {
|
||||
name = "settings.aws.defaultInstanceProfile"
|
||||
value = module.karpenter.instance_profile_name
|
||||
}
|
||||
|
||||
set {
|
||||
name = "settings.aws.interruptionQueueName"
|
||||
value = module.karpenter.queue_name
|
||||
}
|
||||
}
|
||||
|
||||
# Workaround - https://github.com/hashicorp/terraform-provider-kubernetes/issues/1380#issuecomment-967022975
|
||||
resource "kubectl_manifest" "karpenter_provisioner" {
|
||||
yaml_body = <<-YAML
|
||||
apiVersion: karpenter.sh/v1alpha5
|
||||
kind: Provisioner
|
||||
metadata:
|
||||
name: default
|
||||
spec:
|
||||
requirements:
|
||||
- key: karpenter.sh/capacity-type
|
||||
operator: In
|
||||
values: ["spot"]
|
||||
limits:
|
||||
resources:
|
||||
cpu: 1000
|
||||
provider:
|
||||
apiVersion: karpenter.sh/v1alpha5
|
||||
kind: Provisioner
|
||||
metadata:
|
||||
name: default
|
||||
spec:
|
||||
requirements:
|
||||
- key: karpenter.sh/capacity-type
|
||||
operator: In
|
||||
values: ["spot"]
|
||||
limits:
|
||||
resources:
|
||||
cpu: 1000
|
||||
providerRef:
|
||||
name: default
|
||||
ttlSecondsAfterEmpty: 30
|
||||
YAML
|
||||
|
||||
depends_on = [
|
||||
helm_release.karpenter
|
||||
]
|
||||
}
|
||||
|
||||
resource "kubectl_manifest" "karpenter_node_template" {
|
||||
yaml_body = <<-YAML
|
||||
apiVersion: karpenter.k8s.aws/v1alpha1
|
||||
kind: AWSNodeTemplate
|
||||
metadata:
|
||||
name: default
|
||||
spec:
|
||||
subnetSelector:
|
||||
karpenter.sh/discovery: ${local.name}
|
||||
karpenter.sh/discovery: ${module.eks.cluster_name}
|
||||
securityGroupSelector:
|
||||
karpenter.sh/discovery: ${local.name}
|
||||
karpenter.sh/discovery: ${module.eks.cluster_name}
|
||||
tags:
|
||||
karpenter.sh/discovery: ${local.name}
|
||||
ttlSecondsAfterEmpty: 30
|
||||
karpenter.sh/discovery: ${module.eks.cluster_name}
|
||||
YAML
|
||||
|
||||
depends_on = [
|
||||
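The inline `provider:` spec was the early Karpenter API; by v0.19 node configuration moves into a separate AWSNodeTemplate that the Provisioner points at through `providerRef`. Note that the discovery selectors only resolve if the subnets and the node security group actually carry that tag; this example is assumed to apply it elsewhere (outside this hunk), along the lines of:

tags = {
  "karpenter.sh/discovery" = module.eks.cluster_name
}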
@@ -191,27 +207,27 @@ resource "kubectl_manifest" "karpenter_provisioner" {
|
||||
# and starts with zero replicas
|
||||
resource "kubectl_manifest" "karpenter_example_deployment" {
|
||||
yaml_body = <<-YAML
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: inflate
|
||||
spec:
|
||||
replicas: 0
|
||||
selector:
|
||||
matchLabels:
|
||||
app: inflate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: inflate
|
||||
spec:
|
||||
replicas: 0
|
||||
selector:
|
||||
matchLabels:
|
||||
app: inflate
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 0
|
||||
containers:
|
||||
- name: inflate
|
||||
image: public.ecr.aws/eks-distro/kubernetes/pause:3.2
|
||||
resources:
|
||||
requests:
|
||||
cpu: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: inflate
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 0
|
||||
containers:
|
||||
- name: inflate
|
||||
image: public.ecr.aws/eks-distro/kubernetes/pause:3.7
|
||||
resources:
|
||||
requests:
|
||||
cpu: 1
|
||||
YAML
|
||||
|
||||
depends_on = [
|
||||
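With zero replicas this deployment reserves nothing; scaling it up (for example `kubectl scale deployment inflate --replicas 5`) creates pending pods that Karpenter responds to by launching spot capacity under the default Provisioner.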
@@ -219,6 +235,129 @@ resource "kubectl_manifest" "karpenter_example_deployment" {
|
||||
]
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Modify EKS CoreDNS Deployment
|
||||
################################################################################
|
||||
|
||||
data "aws_eks_cluster_auth" "this" {
|
||||
name = module.eks.cluster_name
|
||||
}
|
||||
|
||||
locals {
|
||||
kubeconfig = yamlencode({
|
||||
apiVersion = "v1"
|
||||
kind = "Config"
|
||||
current-context = "terraform"
|
||||
clusters = [{
|
||||
name = module.eks.cluster_name
|
||||
cluster = {
|
||||
certificate-authority-data = module.eks.cluster_certificate_authority_data
|
||||
server = module.eks.cluster_endpoint
|
||||
}
|
||||
}]
|
||||
contexts = [{
|
||||
name = "terraform"
|
||||
context = {
|
||||
cluster = module.eks.cluster_name
|
||||
user = "terraform"
|
||||
}
|
||||
}]
|
||||
users = [{
|
||||
name = "terraform"
|
||||
user = {
|
||||
token = data.aws_eks_cluster_auth.this.token
|
||||
}
|
||||
}]
|
||||
})
|
||||
}
|
||||
|
||||
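The default CoreDNS deployment is created by the EKS service itself, so there is no Terraform resource to manage or delete; the example therefore drives kubectl through local-exec provisioners. The kubeconfig above is base64-encoded so it can be passed safely as a single-line environment variable, then decoded inline via process substitution in the commands below; its token comes from `aws_eks_cluster_auth` and is short-lived, which is acceptable for one-shot provisioners.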
+# Separate resource so that this is only ever executed once
+resource "null_resource" "remove_default_coredns_deployment" {
+  triggers = {}
+
+  provisioner "local-exec" {
+    interpreter = ["/bin/bash", "-c"]
+    environment = {
+      KUBECONFIG = base64encode(local.kubeconfig)
+    }
+
+    # We are removing the deployment provided by the EKS service and replacing it through the self-managed CoreDNS Helm addon
+    # However, we are maintaining the existing kube-dns service and annotating it for Helm to assume control
+    command = <<-EOT
+      kubectl --namespace kube-system delete deployment coredns --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+    EOT
+  }
+}
+
+resource "null_resource" "modify_kube_dns" {
+  triggers = {}
+
+  provisioner "local-exec" {
+    interpreter = ["/bin/bash", "-c"]
+    environment = {
+      KUBECONFIG = base64encode(local.kubeconfig)
+    }
+
+    # We are maintaining the existing kube-dns service and annotating it for Helm to assume control
+    command = <<-EOT
+      echo "Setting implicit dependency on ${module.eks.fargate_profiles["kube_system"].fargate_profile_pod_execution_role_arn}"
+      kubectl --namespace kube-system annotate --overwrite service kube-dns meta.helm.sh/release-name=coredns --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+      kubectl --namespace kube-system annotate --overwrite service kube-dns meta.helm.sh/release-namespace=kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+      kubectl --namespace kube-system label --overwrite service kube-dns app.kubernetes.io/managed-by=Helm --kubeconfig <(echo $KUBECONFIG | base64 --decode)
+    EOT
+  }
+
+  depends_on = [
+    null_resource.remove_default_coredns_deployment
+  ]
+}
+
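Helm only adopts an object it did not create when that object carries the `meta.helm.sh/release-name` and `meta.helm.sh/release-namespace` annotations plus the `app.kubernetes.io/managed-by=Helm` label applied above; without them, installing the coredns chart into kube-system would fail with an ownership conflict on the existing kube-dns service.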
+################################################################################
+# CoreDNS Helm Chart (self-managed)
+################################################################################
+
+data "aws_eks_addon_version" "this" {
+  for_each = toset(["coredns"])
+
+  addon_name         = each.value
+  kubernetes_version = module.eks.cluster_version
+  most_recent        = true
+}
+
+resource "helm_release" "coredns" {
+  name             = "coredns"
+  namespace        = "kube-system"
+  create_namespace = false
+  description      = "CoreDNS is a DNS server that chains plugins and provides Kubernetes DNS Services"
+  chart            = "coredns"
+  version          = "1.19.4"
+  repository       = "https://coredns.github.io/helm"
+
+  # For EKS image repositories https://docs.aws.amazon.com/eks/latest/userguide/add-ons-images.html
+  values = [
+    <<-EOT
+      image:
+        repository: 602401143452.dkr.ecr.eu-west-1.amazonaws.com/eks/coredns
+        tag: ${data.aws_eks_addon_version.this["coredns"].version}
+      deployment:
+        name: coredns
+        annotations:
+          eks.amazonaws.com/compute-type: fargate
+      service:
+        name: kube-dns
+        annotations:
+          eks.amazonaws.com/compute-type: fargate
+      podAnnotations:
+        eks.amazonaws.com/compute-type: fargate
+    EOT
+  ]
+
+  depends_on = [
+    # Need to ensure the CoreDNS updates are performed before provisioning
+    null_resource.modify_kube_dns
+  ]
+}
 
 ################################################################################
 # Supporting Resources
 ################################################################################