feat: Add Launch Template support for Managed Node Groups (#997)

NOTES: Managed Node Groups now support Launch Templates. The Launch Template itself is not managed by this module, so you have to create it yourself and pass its id to this module. See docs and [`examples/launch_templates_with_managed_node_groups/`](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/launch_templates_with_managed_node_group) for more details.
This commit is contained in:
philicious
2020-11-02 08:19:10 +01:00
committed by GitHub
parent 62a8f46ba7
commit 127a3a8831
12 changed files with 314 additions and 11 deletions

View File

@@ -0,0 +1,77 @@
// if you have used ASGs before, that role got auto-created already and you need to import to TF state
// Service-linked role used by EC2 Auto Scaling; its ARN is referenced as a principal
// in the KMS key policy below so the ASG can work with encrypted node root volumes.
resource "aws_iam_service_linked_role" "autoscaling" {
aws_service_name = "autoscaling.amazonaws.com"
description = "Default Service-Linked Role enables access to AWS Services and Resources used or managed by Auto Scaling"
}
// Current account ID, used below to build the account-root and service-linked-role ARNs.
data "aws_caller_identity" "current" {}
// This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes
data "aws_iam_policy_document" "ebs_decryption" {
// copy of default KMS policy that lets you manage it
// (without this statement the account would lose administrative access to the key)
statement {
sid = "Enable IAM User Permissions"
effect = "Allow"
principals {
type = "AWS"
identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
}
actions = [
"kms:*"
]
resources = ["*"]
}
// required for EKS
// Grants the Auto Scaling service-linked role and the cluster IAM role the
// crypto operations needed to launch nodes with encrypted root volumes.
statement {
sid = "Allow service-linked role use of the CMK"
effect = "Allow"
principals {
type = "AWS"
identifiers = [
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", // required for the ASG to manage encrypted volumes for nodes
module.eks.cluster_iam_role_arn, // required for the cluster / persistentvolume-controller to create encrypted PVCs
]
}
actions = [
"kms:Encrypt",
"kms:Decrypt",
"kms:ReEncrypt*",
"kms:GenerateDataKey*",
"kms:DescribeKey"
]
resources = ["*"]
}
// kms:CreateGrant lets the principals attach encrypted volumes; the condition
// restricts grant creation to AWS services acting on the principals' behalf.
statement {
sid = "Allow attachment of persistent resources"
effect = "Allow"
principals {
type = "AWS"
identifiers = [
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", // required for the ASG to manage encrypted volumes for nodes
module.eks.cluster_iam_role_arn, // required for the cluster / persistentvolume-controller to create encrypted PVCs
]
}
actions = [
"kms:CreateGrant"
]
resources = ["*"]
condition {
test = "Bool"
variable = "kms:GrantIsForAWSResource"
values = ["true"]
}
}
}

View File

@@ -0,0 +1,89 @@
// Renders templates/userdata.sh.tpl with the cluster connection details so a
// custom AMI can bootstrap itself into the cluster (see the commented-out
// user_data in aws_launch_template.default below).
// NOTE(review): the template provider is deprecated in newer Terraform versions;
// the built-in templatefile() function is the usual replacement — confirm before upgrading.
data "template_file" "launch_template_userdata" {
template = file("${path.module}/templates/userdata.sh.tpl")
vars = {
cluster_name = local.cluster_name
endpoint = module.eks.cluster_endpoint
cluster_auth_base64 = module.eks.cluster_certificate_authority_data
bootstrap_extra_args = "" // extra flags for /etc/eks/bootstrap.sh, empty here
kubelet_extra_args = "" // extra kubelet flags, empty here
}
}
// this is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx)
// there are several more options one could set but you probably dont need to modify them
// you can take the default and add your custom AMI and/or custom tags
//
// Trivia: AWS transparently creates a copy of your LaunchTemplate and actually uses that copy then for the node group. If you DONT use a custom AMI,
// then the default user-data for bootstrapping a cluster is merged in the copy.
resource "aws_launch_template" "default" {
name_prefix = "eks-example-"
description = "Default Launch-Template"
// make every new version the default so the node group picks it up via default_version
update_default_version = true
// root volume for the nodes; /dev/xvda is the root device of the EKS-optimized AMI
block_device_mappings {
device_name = "/dev/xvda"
ebs {
volume_size = 100
volume_type = "gp2"
delete_on_termination = true
//encrypted = true
// enable this if you want to encrypt your node root volumes with a KMS/CMK. encryption of PVCs is handled via k8s StorageClass tho
// you also need to attach data.aws_iam_policy_document.ebs_decryption.json from the disk_encryption_policy.tf to the KMS/CMK key then !!
//kms_key_id = var.kms_key_arn
}
}
instance_type = var.instance_type
// detailed (1-minute) CloudWatch monitoring for the instances
monitoring {
enabled = true
}
// private-only nodes attached to the module-managed worker security group
network_interfaces {
associate_public_ip_address = false
delete_on_termination = true
security_groups = [module.eks.worker_security_group_id]
}
//image_id = var.ami_id // if you want to use a custom AMI
// if you use a custom AMI, you need to supply via user-data, the bootstrap script as EKS DOESNT merge its managed user-data then
// you can add more than the minimum code you see in the template, e.g. install SSM agent, see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
//
// (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151)
// user_data = base64encode(
// data.template_file.launch_template_userdata.rendered,
// )
// supplying custom tags to EKS instances is another use-case for LaunchTemplates
tag_specifications {
resource_type = "instance"
tags = {
CustomTag = "EKS example"
}
}
// supplying custom tags to EKS instances root volumes is another use-case for LaunchTemplates. (doesnt add tags to dynamically provisioned volumes via PVC tho)
tag_specifications {
resource_type = "volume"
tags = {
CustomTag = "EKS example"
}
}
// tag the LT itself
tags = {
CustomTag = "EKS example"
}
// replace-then-destroy so the node group never references a deleted template
lifecycle {
create_before_destroy = true
}
}

View File

@@ -0,0 +1,93 @@
// Minimum Terraform version for this example.
terraform {
required_version = ">= 0.12.9"
}
// AWS provider; region is configurable via var.region (defaults to eu-central-1).
provider "aws" {
version = ">= 3.3.0"
region = var.region
}
// Helper providers used by this example and/or the eks module.
provider "random" {
version = "~> 2.1"
}
provider "local" {
version = "~>1.4"
}
provider "null" {
version = "~> 2.1"
}
// Needed for data.template_file.launch_template_userdata.
provider "template" {
version = "~> 2.1"
}
// Read back the cluster endpoint/CA and an auth token so the kubernetes
// provider can talk to the cluster the eks module creates.
data "aws_eks_cluster" "cluster" {
name = module.eks.cluster_id
}
data "aws_eks_cluster_auth" "cluster" {
name = module.eks.cluster_id
}
provider "kubernetes" {
host = data.aws_eks_cluster.cluster.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
token = data.aws_eks_cluster_auth.cluster.token
// authenticate with the token above instead of a local kubeconfig
load_config_file = false
version = "~> 1.11"
}
// AZs of the selected region, used to spread the VPC subnets.
data "aws_availability_zones" "available" {
}
// Cluster name is randomized so repeated test runs don't collide.
locals {
cluster_name = "test-eks-lt-${random_string.suffix.result}"
}
resource "random_string" "suffix" {
length = 8
special = false
}
// Example VPC: 3 private + 3 public subnets with a single shared NAT gateway.
// Nodes run in the private subnets (see module.eks below).
// NOTE(review): no public_subnet_tags with "kubernetes.io/role/elb" are set here,
// so public load balancer subnet discovery may not work — confirm if LBs are needed.
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "2.47.0"
name = "test-vpc"
cidr = "172.16.0.0/16"
azs = data.aws_availability_zones.available.names
private_subnets = ["172.16.1.0/24", "172.16.2.0/24", "172.16.3.0/24"]
public_subnets = ["172.16.4.0/24", "172.16.5.0/24", "172.16.6.0/24"]
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
private_subnet_tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared" // EKS adds this and TF would want to remove then later
}
}
// EKS cluster with one managed node group that uses the custom launch template
// defined in aws_launch_template.default (this is the feature this example demonstrates).
module "eks" {
source = "../.."
cluster_name = local.cluster_name
cluster_version = "1.17"
subnets = module.vpc.private_subnets
vpc_id = module.vpc.vpc_id
node_groups = {
example = {
desired_capacity = 1
max_capacity = 15
min_capacity = 1
// pin the node group to the custom LT and its current default version
launch_template_id = aws_launch_template.default.id
launch_template_version = aws_launch_template.default.default_version
additional_tags = {
CustomTag = "EKS example"
}
}
}
}

View File

@@ -0,0 +1,12 @@
MIME-Version: 1.0
Content-Type: multipart/mixed; boundary="//"
--//
Content-Type: text/x-shellscript; charset="us-ascii"
#!/bin/bash
set -xe
# Rendered by data.template_file.launch_template_userdata; the quoted/brace
# placeholders are filled in with the cluster connection details at plan time.
# Bootstrap and join the cluster
/etc/eks/bootstrap.sh --b64-cluster-ca '${cluster_auth_base64}' --apiserver-endpoint '${endpoint}' ${bootstrap_extra_args} --kubelet-extra-args "${kubelet_extra_args}" '${cluster_name}'
--//--

View File

@@ -0,0 +1,14 @@
// AWS region the example is deployed into.
// Fix: var.region previously had no type or description; all variables now
// declare both, matching the style of var.kms_key_arn. Defaults are unchanged,
// so this is backward-compatible for existing callers.
variable "region" {
  default     = "eu-central-1"
  description = "AWS region to deploy the example into"
  type        = string
}

// Instance type for the managed node group's launch template.
variable "instance_type" {
  default     = "t3.small" // smallest recommended, where ~1.1Gb of 2Gb memory is available for the Kubernetes pods after warming up Docker, Kubelet, and OS
  description = "EC2 instance type used by the node group launch template"
  type        = string
}

// Optional CMK for encrypting node root volumes (see the commented-out
// kms_key_id in aws_launch_template.default); empty string disables it.
variable "kms_key_arn" {
  default     = ""
  description = "KMS key ARN to use if you want to encrypt EKS node root volumes"
  type        = string
}