feat!: Remove support for launch configuration and replace count with for_each (#1680)

Bryant Biggs
2022-01-05 06:01:31 -06:00
committed by GitHub
parent d569aa3554
commit ee9f0c646a
138 changed files with 9118 additions and 6991 deletions
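The "!" in the commit title marks this as a breaking change: node groups move from count, which tracks instances by list index, to for_each, which tracks them by map key. A minimal sketch of the difference in state addressing (illustrative resources using the hashicorp/random provider, not taken from this commit's diff):

variable "worker_groups" {
  type    = list(string)
  default = ["worker-group-1", "worker-group-2"]
}

# count: tracked by position. Removing the first entry renumbers the rest,
# so Terraform plans to destroy and recreate every later group.
# State addresses: random_pet.by_index[0], random_pet.by_index[1]
resource "random_pet" "by_index" {
  count  = length(var.worker_groups)
  prefix = var.worker_groups[count.index]
}

variable "node_groups" {
  type    = set(string)
  default = ["blue", "green"]
}

# for_each: tracked by key. Adding or removing one entry leaves the others alone.
# State addresses: random_pet.by_key["blue"], random_pet.by_key["green"]
resource "random_pet" "by_key" {
  for_each = var.node_groups
  prefix   = each.value
}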


@@ -3,9 +3,15 @@ provider "aws" {
 }
 
 locals {
-  name            = "complete-${random_string.suffix.result}"
-  cluster_version = "1.20"
+  name            = "ex-${replace(basename(path.cwd), "_", "-")}"
+  cluster_version = "1.21"
   region          = "eu-west-1"
+
+  tags = {
+    Example    = local.name
+    GithubRepo = "terraform-aws-eks"
+    GithubOrg  = "terraform-aws-modules"
+  }
 }
 
 ################################################################################
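The new name expression derives the example name from the working directory instead of a random suffix, so the value is stable across plan/apply cycles. An illustrative evaluation, assuming the configuration lives in a directory named "complete":

locals {
  # path.cwd = ".../examples/complete"
  # basename(path.cwd)            => "complete"
  # replace("complete", "_", "-") => "complete" (underscores would become hyphens)
  name = "ex-${replace(basename(path.cwd), "_", "-")}" # => "ex-complete"
}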
@@ -15,87 +21,102 @@ locals {
 module "eks" {
   source = "../.."
 
-  cluster_name    = local.name
-  cluster_version = local.cluster_version
-
-  vpc_id          = module.vpc.vpc_id
-  subnets         = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]]
-  fargate_subnets = [module.vpc.private_subnets[2]]
-
-  worker_additional_security_group_ids = [aws_security_group.all_worker_mgmt.id]
-
-  # Worker groups (using Launch Configurations)
-  worker_groups = [
-    {
-      name                          = "worker-group-1"
-      instance_type                 = "t3.small"
-      additional_userdata           = "echo foo bar"
-      asg_desired_capacity          = 2
-      additional_security_group_ids = [aws_security_group.worker_group_mgmt_one.id]
-    },
-    {
-      name                          = "worker-group-2"
-      instance_type                 = "t3.medium"
-      additional_userdata           = "echo foo bar"
-      additional_security_group_ids = [aws_security_group.worker_group_mgmt_two.id]
-      asg_desired_capacity          = 1
-    },
-  ]
-
-  # Worker groups (using Launch Templates)
-  worker_groups_launch_template = [
-    {
-      name                    = "spot-1"
-      override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
-      spot_instance_pools     = 4
-      asg_max_size            = 5
-      asg_desired_capacity    = 5
-      kubelet_extra_args      = "--node-labels=node.kubernetes.io/lifecycle=spot"
-      public_ip               = true
-    },
-  ]
-
-  # Managed Node Groups
-  node_groups_defaults = {
-    ami_type  = "AL2_x86_64"
-    disk_size = 50
-  }
-
-  node_groups = {
-    example = {
-      desired_capacity = 1
-      max_capacity     = 10
-      min_capacity     = 1
+  cluster_name                    = local.name
+  cluster_version                 = local.cluster_version
+  cluster_endpoint_private_access = true
+  cluster_endpoint_public_access  = true
+
+  cluster_addons = {
+    coredns = {
+      resolve_conflicts = "OVERWRITE"
+    }
+    kube-proxy = {}
+    vpc-cni = {
+      resolve_conflicts = "OVERWRITE"
+    }
+  }
+
+  cluster_encryption_config = [{
+    provider_key_arn = aws_kms_key.eks.arn
+    resources        = ["secrets"]
+  }]
+
+  vpc_id     = module.vpc.vpc_id
+  subnet_ids = module.vpc.private_subnets
+
+  enable_irsa = true
+
+  # Self Managed Node Group(s)
+  self_managed_node_group_defaults = {
+    vpc_security_group_ids       = [aws_security_group.additional.id]
+    iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
+  }
+
+  self_managed_node_groups = {
+    spot = {
+      instance_type = "m5.large"
+      instance_market_options = {
+        market_type = "spot"
+      }
+
+      pre_bootstrap_user_data = <<-EOT
+      echo "foo"
+      export FOO=bar
+      EOT
+
+      bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
+
+      post_bootstrap_user_data = <<-EOT
+      cd /tmp
+      sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
+      sudo systemctl enable amazon-ssm-agent
+      sudo systemctl start amazon-ssm-agent
+      EOT
+    }
+  }
+
+  # EKS Managed Node Group(s)
+  eks_managed_node_group_defaults = {
+    ami_type               = "AL2_x86_64"
+    disk_size              = 50
+    instance_types         = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
+    vpc_security_group_ids = [aws_security_group.additional.id]
+  }
+
+  eks_managed_node_groups = {
+    blue = {}
+    green = {
+      min_size     = 1
+      max_size     = 10
+      desired_size = 1
+
+      instance_types = ["t3.large"]
+      capacity_type  = "SPOT"
 
-      k8s_labels = {
+      labels = {
         Environment = "test"
         GithubRepo  = "terraform-aws-eks"
         GithubOrg   = "terraform-aws-modules"
       }
-      additional_tags = {
-        ExtraTag = "example"
-      }
-      taints = [
-        {
+
+      taints = {
+        dedicated = {
           key    = "dedicated"
           value  = "gpuGroup"
           effect = "NO_SCHEDULE"
         }
-      ]
+      }
+
+      update_config = {
+        max_unavailable_percentage = 50 # or set `max_unavailable`
+      }
+
+      tags = {
+        ExtraTag = "example"
+      }
     }
   }
 
-  # Fargate
+  # Fargate Profile(s)
   fargate_profiles = {
     default = {
       name = "default"
@@ -122,38 +143,59 @@ module "eks" {
     }
   }
 
-  # AWS Auth (kubernetes_config_map)
-  map_roles = [
-    {
-      rolearn  = "arn:aws:iam::66666666666:role/role1"
-      username = "role1"
-      groups   = ["system:masters"]
-    },
-  ]
-
-  map_users = [
-    {
-      userarn  = "arn:aws:iam::66666666666:user/user1"
-      username = "user1"
-      groups   = ["system:masters"]
-    },
-    {
-      userarn  = "arn:aws:iam::66666666666:user/user2"
-      username = "user2"
-      groups   = ["system:masters"]
-    },
-  ]
-
-  map_accounts = [
-    "777777777777",
-    "888888888888",
-  ]
-
-  tags = {
-    Example    = local.name
-    GithubRepo = "terraform-aws-eks"
-    GithubOrg  = "terraform-aws-modules"
-  }
+  tags = local.tags
+}
+
+################################################################################
+# Sub-Module Usage on Existing/Separate Cluster
+################################################################################
+
+module "eks_managed_node_group" {
+  source = "../../modules/eks-managed-node-group"
+
+  name            = "separate-eks-mng"
+  cluster_name    = module.eks.cluster_id
+  cluster_version = local.cluster_version
+
+  vpc_id     = module.vpc.vpc_id
+  subnet_ids = module.vpc.private_subnets
+
+  tags = merge(local.tags, { Separate = "eks-managed-node-group" })
+}
+
+module "self_managed_node_group" {
+  source = "../../modules/self-managed-node-group"
+
+  name                = "separate-self-mng"
+  cluster_name        = module.eks.cluster_id
+  cluster_version     = local.cluster_version
+  cluster_endpoint    = module.eks.cluster_endpoint
+  cluster_auth_base64 = module.eks.cluster_certificate_authority_data
+
+  instance_type = "m5.large"
+
+  vpc_id     = module.vpc.vpc_id
+  subnet_ids = module.vpc.private_subnets
+  vpc_security_group_ids = [
+    module.eks.cluster_primary_security_group_id,
+    module.eks.cluster_security_group_id,
+  ]
+
+  tags = merge(local.tags, { Separate = "self-managed-node-group" })
+}
+
+module "fargate_profile" {
+  source = "../../modules/fargate-profile"
+
+  name         = "separate-fargate-profile"
+  cluster_name = module.eks.cluster_id
+
+  subnet_ids = module.vpc.private_subnets
+  selectors = [{
+    namespace = "kube-system"
+  }]
+
+  tags = merge(local.tags, { Separate = "fargate-profile" })
 }
 
 ################################################################################
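The three sub-module blocks above attach to the cluster created in this same configuration via module.eks outputs. Against a pre-existing cluster, the same modules could be fed from data sources instead; a hedged sketch (the cluster name, VPC ID, and subnet IDs are placeholder assumptions):

data "aws_eks_cluster" "existing" {
  name = "my-existing-cluster" # assumption: created outside this configuration
}

module "existing_cluster_mng" {
  source = "../../modules/eks-managed-node-group"

  name            = "existing-cluster-mng"
  cluster_name    = data.aws_eks_cluster.existing.name
  cluster_version = data.aws_eks_cluster.existing.version

  vpc_id     = "vpc-0123456789abcdef0"      # assumption
  subnet_ids = ["subnet-0123456789abcdef0"] # assumption

  tags = { Separate = "existing-cluster-mng" }
}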
@@ -163,87 +205,98 @@ module "eks" {
 module "disabled_eks" {
   source = "../.."
 
-  create_eks = false
+  create = false
 }
 
-module "disabled_fargate" {
-  source = "../../modules/fargate"
+module "disabled_fargate_profile" {
+  source = "../../modules/fargate-profile"
 
-  create_fargate_pod_execution_role = false
+  create = false
 }
 
-module "disabled_node_groups" {
-  source = "../../modules/node_groups"
+module "disabled_eks_managed_node_group" {
+  source = "../../modules/eks-managed-node-group"
 
-  create_eks = false
+  create = false
 }
 
+module "disabled_self_managed_node_group" {
+  source = "../../modules/self-managed-node-group"
+
+  create = false
+}
+
 ################################################################################
-# Kubernetes provider configuration
+# aws-auth configmap
+# Only EKS managed node groups automatically add roles to aws-auth configmap
+# so we need to ensure fargate profiles and self-managed node roles are added
 ################################################################################
 
-data "aws_eks_cluster" "cluster" {
+data "aws_eks_cluster_auth" "this" {
   name = module.eks.cluster_id
 }
 
-data "aws_eks_cluster_auth" "cluster" {
-  name = module.eks.cluster_id
-}
-
-provider "kubernetes" {
-  host                   = data.aws_eks_cluster.cluster.endpoint
-  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
-  token                  = data.aws_eks_cluster_auth.cluster.token
-}
-
-################################################################################
-# Additional security groups for workers
-################################################################################
-
-resource "aws_security_group" "worker_group_mgmt_one" {
-  name_prefix = "worker_group_mgmt_one"
-  vpc_id      = module.vpc.vpc_id
-
-  ingress {
-    from_port = 22
-    to_port   = 22
-    protocol  = "tcp"
-    cidr_blocks = [
-      "10.0.0.0/8",
-    ]
-  }
-}
-
-resource "aws_security_group" "worker_group_mgmt_two" {
-  name_prefix = "worker_group_mgmt_two"
-  vpc_id      = module.vpc.vpc_id
-
-  ingress {
-    from_port = 22
-    to_port   = 22
-    protocol  = "tcp"
-    cidr_blocks = [
-      "192.168.0.0/16",
-    ]
-  }
-}
-
-resource "aws_security_group" "all_worker_mgmt" {
-  name_prefix = "all_worker_management"
-  vpc_id      = module.vpc.vpc_id
-
-  ingress {
-    from_port = 22
-    to_port   = 22
-    protocol  = "tcp"
-    cidr_blocks = [
-      "10.0.0.0/8",
-      "172.16.0.0/12",
-      "192.168.0.0/16",
-    ]
-  }
-}
+locals {
+  kubeconfig = yamlencode({
+    apiVersion      = "v1"
+    kind            = "Config"
+    current-context = "terraform"
+    clusters = [{
+      name = module.eks.cluster_id
+      cluster = {
+        certificate-authority-data = module.eks.cluster_certificate_authority_data
+        server                     = module.eks.cluster_endpoint
+      }
+    }]
+    contexts = [{
+      name = "terraform"
+      context = {
+        cluster = module.eks.cluster_id
+        user    = "terraform"
+      }
+    }]
+    users = [{
+      name = "terraform"
+      user = {
+        token = data.aws_eks_cluster_auth.this.token
+      }
+    }]
+  })
+
+  # we have to combine the configmap created by the eks module with the externally created node group/profile sub-modules
+  aws_auth_configmap_yaml = <<-EOT
+  ${chomp(module.eks.aws_auth_configmap_yaml)}
+    - rolearn: ${module.eks_managed_node_group.iam_role_arn}
+      username: system:node:{{EC2PrivateDNSName}}
+      groups:
+        - system:bootstrappers
+        - system:nodes
+    - rolearn: ${module.self_managed_node_group.iam_role_arn}
+      username: system:node:{{EC2PrivateDNSName}}
+      groups:
+        - system:bootstrappers
+        - system:nodes
+    - rolearn: ${module.fargate_profile.fargate_profile_arn}
+      username: system:node:{{SessionName}}
+      groups:
+        - system:bootstrappers
+        - system:nodes
+        - system:node-proxier
+  EOT
+}
+
+resource "null_resource" "patch" {
+  triggers = {
+    kubeconfig = base64encode(local.kubeconfig)
+    cmd_patch  = "kubectl patch configmap/aws-auth --patch \"${local.aws_auth_configmap_yaml}\" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)"
+  }
+
+  provisioner "local-exec" {
+    interpreter = ["/bin/bash", "-c"]
+    environment = {
+      KUBECONFIG = self.triggers.kubeconfig
+    }
+    command = self.triggers.cmd_patch
+  }
+}
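Note the removed provider "kubernetes" block gets no v18 replacement in this example; the aws-auth configmap is patched with kubectl via the null_resource above instead. If a kubernetes provider were still wanted, the equivalent wiring from the new outputs would look roughly like this (a sketch; the output and data source names are taken from the diff above):

provider "kubernetes" {
  host                   = module.eks.cluster_endpoint
  cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
  token                  = data.aws_eks_cluster_auth.this.token
}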
@@ -251,40 +304,60 @@ resource "aws_security_group" "all_worker_mgmt" {
 # Supporting resources
 ################################################################################
 
-data "aws_availability_zones" "available" {
-}
-
-resource "random_string" "suffix" {
-  length  = 8
-  special = false
-}
-
 module "vpc" {
   source  = "terraform-aws-modules/vpc/aws"
   version = "~> 3.0"
 
-  name                 = local.name
-  cidr                 = "10.0.0.0/16"
-  azs                  = data.aws_availability_zones.available.names
-  private_subnets      = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
-  public_subnets       = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
+  name            = local.name
+  cidr            = "10.0.0.0/16"
+  azs             = ["${local.region}a", "${local.region}b", "${local.region}c"]
+  private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
+  public_subnets  = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
+
   enable_nat_gateway   = true
   single_nat_gateway   = true
   enable_dns_hostnames = true
 
+  enable_flow_log                      = true
+  create_flow_log_cloudwatch_iam_role  = true
+  create_flow_log_cloudwatch_log_group = true
+
   public_subnet_tags = {
     "kubernetes.io/cluster/${local.name}" = "shared"
-    "kubernetes.io/role/elb"              = "1"
+    "kubernetes.io/role/elb"              = 1
   }
 
   private_subnet_tags = {
     "kubernetes.io/cluster/${local.name}" = "shared"
-    "kubernetes.io/role/internal-elb"     = "1"
+    "kubernetes.io/role/internal-elb"     = 1
   }
 
-  tags = {
-    Example    = local.name
-    GithubRepo = "terraform-aws-eks"
-    GithubOrg  = "terraform-aws-modules"
-  }
+  tags = local.tags
 }
 
+resource "aws_security_group" "additional" {
+  name_prefix = "${local.name}-additional"
+  vpc_id      = module.vpc.vpc_id
+
+  ingress {
+    from_port = 22
+    to_port   = 22
+    protocol  = "tcp"
+    cidr_blocks = [
+      "10.0.0.0/8",
+      "172.16.0.0/12",
+      "192.168.0.0/16",
+    ]
+  }
+
+  tags = local.tags
+}
+
+resource "aws_kms_key" "eks" {
+  description             = "EKS Secret Encryption Key"
+  deletion_window_in_days = 7
+  enable_key_rotation     = true
+
+  tags = local.tags
+}