feat!: Add support for Outposts, remove node security group, add support for addon preserve and most_recent configurations (#2250)

Co-authored-by: Anton Babenko <anton@antonbabenko.com>
Bryant Biggs
2022-12-05 16:26:23 -05:00
committed by GitHub
parent efbe952632
commit b2e97ca3dc
66 changed files with 2749 additions and 1776 deletions
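The headline changes surface in the updated examples below: cluster addons gain `preserve` and `most_recent` options, `cluster_encryption_config` becomes an object (previously a list of objects), and `iam_role_additional_policies` becomes a map (previously a list of ARNs). A minimal sketch assembled from those examples (names and values are illustrative only):

```hcl
module "eks" {
  source = "../.."

  cluster_name = "example"

  cluster_addons = {
    coredns = {
      preserve    = true # keep the addon's resources when the addon is deleted
      most_recent = true # use the most recent addon version available
    }
    kube-proxy = {}
    vpc-cni    = { most_recent = true }
  }

  # Encryption config is now an object
  create_kms_key = true
  cluster_encryption_config = {
    resources = ["secrets"]
  }

  # Additional IAM policies are now a map of policy ARNs
  iam_role_additional_policies = {
    additional = aws_iam_policy.additional.arn
  }
}
```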


@@ -33,15 +33,15 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.72 |
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.0 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 4.45 |
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 2.10 |
## Providers
| Name | Version |
|------|---------|
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.72 |
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 4.45 |
## Modules
@@ -61,7 +61,9 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
| [aws_iam_policy.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
## Inputs
@@ -81,9 +83,9 @@ No inputs.
| <a name="output_cluster_iam_role_arn"></a> [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
| <a name="output_cluster_iam_role_name"></a> [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
| <a name="output_cluster_iam_role_unique_id"></a> [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts |
| <a name="output_cluster_identity_providers"></a> [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
| <a name="output_cluster_name"></a> [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster. Will block on cluster creation until the cluster is really ready |
| <a name="output_cluster_name"></a> [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster |
| <a name="output_cluster_oidc_issuer_url"></a> [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
| <a name="output_cluster_platform_version"></a> [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
| <a name="output_cluster_security_group_arn"></a> [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |


@@ -14,10 +14,15 @@ provider "kubernetes" {
}
}
data "aws_availability_zones" "available" {}
locals {
name = "ex-${replace(basename(path.cwd), "_", "-")}"
region = "eu-west-1"
vpc_cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
tags = {
Example = local.name
GithubRepo = "terraform-aws-eks"
@@ -32,46 +37,56 @@ locals {
module "eks" {
source = "../.."
cluster_name = local.name
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
cluster_name = local.name
cluster_endpoint_public_access = true
cluster_addons = {
coredns = {
resolve_conflicts = "OVERWRITE"
preserve = true
most_recent = true
timeouts = {
create = "25m"
delete = "10m"
}
}
kube-proxy = {
most_recent = true
}
kube-proxy = {}
vpc-cni = {
resolve_conflicts = "OVERWRITE"
most_recent = true
}
}
# Encryption key
create_kms_key = true
cluster_encryption_config = [{
cluster_encryption_config = {
resources = ["secrets"]
}]
}
kms_key_deletion_window_in_days = 7
enable_kms_key_rotation = true
iam_role_additional_policies = {
additional = aws_iam_policy.additional.arn
}
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
control_plane_subnet_ids = module.vpc.intra_subnets
# Extend cluster security group rules
cluster_security_group_additional_rules = {
egress_nodes_ephemeral_ports_tcp = {
description = "To node 1025-65535"
ingress_nodes_ephemeral_ports_tcp = {
description = "Nodes on ephemeral ports"
protocol = "tcp"
from_port = 1025
to_port = 65535
type = "egress"
type = "ingress"
source_node_security_group = true
}
}
# Extend node-to-node security group rules
node_security_group_ntp_ipv4_cidr_block = ["169.254.169.123/32"]
node_security_group_additional_rules = {
ingress_self_all = {
description = "Node to node all ports/protocols"
@@ -81,21 +96,21 @@ module "eks" {
type = "ingress"
self = true
}
egress_all = {
description = "Node all egress"
protocol = "-1"
from_port = 0
to_port = 0
type = "egress"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
}
# Self Managed Node Group(s)
self_managed_node_group_defaults = {
vpc_security_group_ids = [aws_security_group.additional.id]
iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
vpc_security_group_ids = [aws_security_group.additional.id]
iam_role_additional_policies = {
additional = aws_iam_policy.additional.arn
}
instance_refresh = {
strategy = "Rolling"
preferences = {
min_healthy_percentage = 66
}
}
}
self_managed_node_groups = {
@@ -106,17 +121,17 @@ module "eks" {
}
pre_bootstrap_user_data = <<-EOT
echo "foo"
export FOO=bar
echo "foo"
export FOO=bar
EOT
bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
post_bootstrap_user_data = <<-EOT
cd /tmp
sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
sudo systemctl enable amazon-ssm-agent
sudo systemctl start amazon-ssm-agent
cd /tmp
sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
sudo systemctl enable amazon-ssm-agent
sudo systemctl start amazon-ssm-agent
EOT
}
}
@@ -128,6 +143,9 @@ module "eks" {
attach_cluster_primary_security_group = true
vpc_security_group_ids = [aws_security_group.additional.id]
iam_role_additional_policies = {
additional = aws_iam_policy.additional.arn
}
}
eks_managed_node_groups = {
@@ -154,7 +172,7 @@ module "eks" {
}
update_config = {
max_unavailable_percentage = 50 # or set `max_unavailable`
max_unavailable_percentage = 33 # or set `max_unavailable`
}
tags = {
@@ -270,7 +288,6 @@ module "eks_managed_node_group" {
cluster_name = module.eks.cluster_name
cluster_version = module.eks.cluster_version
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
cluster_primary_security_group_id = module.eks.cluster_primary_security_group_id
vpc_security_group_ids = [
@@ -305,7 +322,6 @@ module "self_managed_node_group" {
instance_type = "m5.large"
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
vpc_security_group_ids = [
module.eks.cluster_primary_security_group_id,
@@ -366,12 +382,12 @@ module "vpc" {
version = "~> 3.0"
name = local.name
cidr = "10.0.0.0/16"
cidr = local.vpc_cidr
azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
intra_subnets = ["10.0.7.0/28", "10.0.7.16/28", "10.0.7.32/28"]
azs = local.azs
private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)]
enable_nat_gateway = true
single_nat_gateway = true
@@ -382,13 +398,11 @@ module "vpc" {
create_flow_log_cloudwatch_log_group = true
public_subnet_tags = {
"kubernetes.io/cluster/${local.name}" = "shared"
"kubernetes.io/role/elb" = 1
"kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
"kubernetes.io/cluster/${local.name}" = "shared"
"kubernetes.io/role/internal-elb" = 1
"kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
@@ -409,5 +423,22 @@ resource "aws_security_group" "additional" {
]
}
tags = local.tags
tags = merge(local.tags, { Name = "${local.name}-additional" })
}
resource "aws_iam_policy" "additional" {
name = "${local.name}-additional"
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = [
"ec2:Describe*",
]
Effect = "Allow"
Resource = "*"
},
]
})
}
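For reference, the `cidrsubnet` expressions introduced in the VPC module above replace the hard-coded subnet lists; with the example's `10.0.0.0/16` CIDR and three AZs they work out to the following layout (a sketch):

```hcl
locals {
  vpc_cidr = "10.0.0.0/16"
  azs      = ["eu-west-1a", "eu-west-1b", "eu-west-1c"]

  # /20 per AZ: 10.0.0.0/20, 10.0.16.0/20, 10.0.32.0/20
  private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]

  # /24 per AZ: 10.0.48.0/24, 10.0.49.0/24, 10.0.50.0/24
  public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]

  # /24 per AZ: 10.0.52.0/24, 10.0.53.0/24, 10.0.54.0/24
  intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)]
}
```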


@@ -17,14 +17,14 @@ output "cluster_endpoint" {
value = module.eks.cluster_endpoint
}
output "cluster_name" {
description = "The name of the EKS cluster. Will block on cluster creation until the cluster is really ready"
value = module.eks.cluster_name
output "cluster_id" {
description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts"
value = module.eks.cluster_id
}
output "cluster_id" {
description = "The id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
value = module.eks.cluster_id
output "cluster_name" {
description = "The name of the EKS cluster"
value = module.eks.cluster_name
}
output "cluster_oidc_issuer_url" {


@@ -1,10 +1,10 @@
terraform {
required_version = ">= 0.13.1"
required_version = ">= 1.0"
required_providers {
aws = {
source = "hashicorp/aws"
version = ">= 3.72"
version = ">= 4.45"
}
kubernetes = {
source = "hashicorp/kubernetes"


@@ -57,45 +57,37 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.72 |
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.0 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 4.45 |
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 2.10 |
| <a name="requirement_tls"></a> [tls](#requirement\_tls) | >= 3.0 |
## Providers
| Name | Version |
|------|---------|
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.72 |
| <a name="provider_tls"></a> [tls](#provider\_tls) | >= 3.0 |
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 4.45 |
## Modules
| Name | Source | Version |
|------|--------|---------|
| <a name="module_ebs_kms_key"></a> [ebs\_kms\_key](#module\_ebs\_kms\_key) | terraform-aws-modules/kms/aws | ~> 1.1 |
| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
| <a name="module_key_pair"></a> [key\_pair](#module\_key\_pair) | terraform-aws-modules/key-pair/aws | ~> 2.0 |
| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
| <a name="module_vpc_cni_irsa"></a> [vpc\_cni\_irsa](#module\_vpc\_cni\_irsa) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | ~> 4.12 |
| <a name="module_vpc_cni_irsa"></a> [vpc\_cni\_irsa](#module\_vpc\_cni\_irsa) | terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks | ~> 5.0 |
## Resources
| Name | Type |
|------|------|
| [aws_autoscaling_group_tag.cluster_autoscaler_label_tags](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_group_tag) | resource |
| [aws_iam_policy.node_additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [aws_iam_role_policy_attachment.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
| [aws_key_pair.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource |
| [aws_kms_key.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
| [aws_launch_template.external](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [aws_security_group.remote_access](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [tls_private_key.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource |
| [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_ami.eks_default_arm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_ami.eks_default_bottlerocket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
| [aws_iam_policy_document.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
## Inputs
@@ -115,9 +107,9 @@ No inputs.
| <a name="output_cluster_iam_role_arn"></a> [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
| <a name="output_cluster_iam_role_name"></a> [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
| <a name="output_cluster_iam_role_unique_id"></a> [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts |
| <a name="output_cluster_identity_providers"></a> [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
| <a name="output_cluster_name"></a> [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster. Will block on cluster creation until the cluster is really ready |
| <a name="output_cluster_name"></a> [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster |
| <a name="output_cluster_oidc_issuer_url"></a> [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
| <a name="output_cluster_platform_version"></a> [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
| <a name="output_cluster_primary_security_group_id"></a> [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |


@@ -14,11 +14,17 @@ provider "kubernetes" {
}
}
data "aws_caller_identity" "current" {}
data "aws_availability_zones" "available" {}
locals {
name = "ex-${replace(basename(path.cwd), "_", "-")}"
cluster_version = "1.24"
region = "eu-west-1"
vpc_cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
tags = {
Example = local.name
GithubRepo = "terraform-aws-eks"
@@ -26,8 +32,6 @@ locals {
}
}
data "aws_caller_identity" "current" {}
################################################################################
# EKS Module
################################################################################
@@ -35,10 +39,9 @@ data "aws_caller_identity" "current" {}
module "eks" {
source = "../.."
cluster_name = local.name
cluster_version = local.cluster_version
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
cluster_name = local.name
cluster_version = local.cluster_version
cluster_endpoint_public_access = true
# IPV6
cluster_ip_family = "ipv6"
@@ -53,66 +56,23 @@ module "eks" {
cluster_addons = {
coredns = {
resolve_conflicts = "OVERWRITE"
most_recent = true
}
kube-proxy = {
most_recent = true
}
kube-proxy = {}
vpc-cni = {
resolve_conflicts = "OVERWRITE"
most_recent = true
service_account_role_arn = module.vpc_cni_irsa.iam_role_arn
}
}
cluster_encryption_config = [{
provider_key_arn = aws_kms_key.eks.arn
resources = ["secrets"]
}]
cluster_tags = {
# This should not affect the name of the cluster primary security group
# Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2006
# Ref: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2008
Name = local.name
}
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
control_plane_subnet_ids = module.vpc.intra_subnets
manage_aws_auth_configmap = true
# Extend cluster security group rules
cluster_security_group_additional_rules = {
egress_nodes_ephemeral_ports_tcp = {
description = "To node 1025-65535"
protocol = "tcp"
from_port = 1025
to_port = 65535
type = "egress"
source_node_security_group = true
}
}
# Extend node-to-node security group rules
node_security_group_ntp_ipv6_cidr_block = ["fd00:ec2::123/128"]
node_security_group_additional_rules = {
ingress_self_all = {
description = "Node to node all ports/protocols"
protocol = "-1"
from_port = 0
to_port = 0
type = "ingress"
self = true
}
egress_all = {
description = "Node all egress"
protocol = "-1"
from_port = 0
to_port = 0
type = "egress"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
}
eks_managed_node_group_defaults = {
ami_type = "AL2_x86_64"
instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
@@ -130,14 +90,13 @@ module "eks" {
default_node_group = {
# By default, the module creates a launch template to ensure tags are propagated to instances, etc.,
# so we need to disable it to use the default template provided by the AWS EKS managed node group service
create_launch_template = false
launch_template_name = ""
use_custom_launch_template = false
disk_size = 50
# Remote access cannot be specified with a launch template
remote_access = {
ec2_ssh_key = aws_key_pair.this.key_name
ec2_ssh_key = module.key_pair.key_pair_name
source_security_group_ids = [aws_security_group.remote_access.id]
}
}
@@ -146,8 +105,7 @@ module "eks" {
bottlerocket_default = {
# By default, the module creates a launch template to ensure tags are propagated to instances, etc.,
# so we need to disable it to use the default template provided by the AWS EKS managed node group service
create_launch_template = false
launch_template_name = ""
use_custom_launch_template = false
ami_type = "BOTTLEROCKET_x86_64"
platform = "bottlerocket"
@@ -158,11 +116,11 @@ module "eks" {
ami_type = "BOTTLEROCKET_x86_64"
platform = "bottlerocket"
# this will get added to what AWS provides
# This will get added to what AWS provides
bootstrap_extra_args = <<-EOT
# extra args added
[settings.kernel]
lockdown = "integrity"
# extra args added
[settings.kernel]
lockdown = "integrity"
EOT
}
@@ -172,31 +130,35 @@ module "eks" {
ami_id = data.aws_ami.eks_default_bottlerocket.image_id
platform = "bottlerocket"
# use module user data template to bootstrap
# Use module user data template to bootstrap
enable_bootstrap_user_data = true
# this will get added to the template
# This will get added to the template
bootstrap_extra_args = <<-EOT
# extra args added
[settings.kernel]
lockdown = "integrity"
# The admin host container provides SSH access and runs with "superpowers".
# It is disabled by default, but can be enabled explicitly.
[settings.host-containers.admin]
enabled = false
[settings.kubernetes.node-labels]
"label1" = "foo"
"label2" = "bar"
# The control host container provides out-of-band access via SSM.
# It is enabled by default, and can be disabled if you do not expect to use SSM.
# This could leave you with no way to access the API and change settings on an existing node!
[settings.host-containers.control]
enabled = true
[settings.kubernetes.node-taints]
"dedicated" = "experimental:PreferNoSchedule"
"special" = "true:NoSchedule"
# extra args added
[settings.kernel]
lockdown = "integrity"
[settings.kubernetes.node-labels]
label1 = "foo"
label2 = "bar"
[settings.kubernetes.node-taints]
dedicated = "experimental:PreferNoSchedule"
special = "true:NoSchedule"
EOT
}
# Use existing/external launch template
external_lt = {
create_launch_template = false
launch_template_name = aws_launch_template.external.name
launch_template_version = aws_launch_template.external.default_version
}
# Use a custom AMI
custom_ami = {
ami_type = "AL2_ARM_64"
@@ -219,15 +181,15 @@ module "eks" {
# See issue https://github.com/awslabs/amazon-eks-ami/issues/844
pre_bootstrap_user_data = <<-EOT
#!/bin/bash
set -ex
cat <<-EOF > /etc/profile.d/bootstrap.sh
export CONTAINER_RUNTIME="containerd"
export USE_MAX_PODS=false
export KUBELET_EXTRA_ARGS="--max-pods=110"
EOF
# Source extra environment variables in bootstrap script
sed -i '/^set -o errexit/a\\nsource /etc/profile.d/bootstrap.sh' /etc/eks/bootstrap.sh
#!/bin/bash
set -ex
cat <<-EOF > /etc/profile.d/bootstrap.sh
export CONTAINER_RUNTIME="containerd"
export USE_MAX_PODS=false
export KUBELET_EXTRA_ARGS="--max-pods=110"
EOF
# Source extra environment variables in bootstrap script
sed -i '/^set -o errexit/a\\nsource /etc/profile.d/bootstrap.sh' /etc/eks/bootstrap.sh
EOT
}
@@ -247,12 +209,12 @@ module "eks" {
bootstrap_extra_args = "--container-runtime containerd --kubelet-extra-args '--max-pods=20'"
pre_bootstrap_user_data = <<-EOT
export CONTAINER_RUNTIME="containerd"
export USE_MAX_PODS=false
export CONTAINER_RUNTIME="containerd"
export USE_MAX_PODS=false
EOT
post_bootstrap_user_data = <<-EOT
echo "you are free little kubelet!"
echo "you are free little kubelet!"
EOT
capacity_type = "SPOT"
@@ -272,13 +234,12 @@ module "eks" {
]
update_config = {
max_unavailable_percentage = 50 # or set `max_unavailable`
max_unavailable_percentage = 33 # or set `max_unavailable`
}
description = "EKS managed node group example launch template"
ebs_optimized = true
vpc_security_group_ids = [aws_security_group.additional.id]
disable_api_termination = false
enable_monitoring = true
@@ -291,7 +252,7 @@ module "eks" {
iops = 3000
throughput = 150
encrypted = true
kms_key_id = aws_kms_key.ebs.arn
kms_key_id = module.ebs_kms_key.key_id
delete_on_termination = true
}
}
@@ -311,34 +272,9 @@ module "eks" {
iam_role_tags = {
Purpose = "Protector of the kubelet"
}
iam_role_additional_policies = [
"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
]
create_security_group = true
security_group_name = "eks-managed-node-group-complete-example"
security_group_use_name_prefix = false
security_group_description = "EKS managed node group complete example security group"
security_group_rules = {
phoneOut = {
description = "Hello CloudFlare"
protocol = "udp"
from_port = 53
to_port = 53
type = "egress"
cidr_blocks = ["1.1.1.1/32"]
}
phoneHome = {
description = "Hello cluster"
protocol = "udp"
from_port = 53
to_port = 53
type = "egress"
source_cluster_security_group = true # bit of reflection lookup
}
}
security_group_tags = {
Purpose = "Protector of the kubelet"
iam_role_additional_policies = {
AmazonEC2ContainerRegistryReadOnly = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
additional = aws_iam_policy.node_additional.arn
}
tags = {
@@ -350,18 +286,6 @@ module "eks" {
tags = local.tags
}
# References to resources that do not exist yet when creating a cluster will cause a plan failure due to https://github.com/hashicorp/terraform/issues/4149
# There are two options users can take
# 1. Create the dependent resources before the cluster => `terraform apply -target <your policy or your security group>` and then `terraform apply`
# Note: this is the route users will have to take for adding additional security groups to nodes since there isn't a separate "security group attachment" resource
# 2. For additional IAM policies, users can attach the policies outside of the cluster definition as demonstrated below
resource "aws_iam_role_policy_attachment" "additional" {
for_each = module.eks.eks_managed_node_groups
policy_arn = aws_iam_policy.node_additional.arn
role = each.value.iam_role_name
}
################################################################################
# Supporting Resources
################################################################################
@@ -371,11 +295,12 @@ module "vpc" {
version = "~> 3.0"
name = local.name
cidr = "10.0.0.0/16"
cidr = local.vpc_cidr
azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
azs = local.azs
private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)]
enable_ipv6 = true
assign_ipv6_address_on_creation = true
@@ -393,13 +318,11 @@ module "vpc" {
create_flow_log_cloudwatch_log_group = true
public_subnet_tags = {
"kubernetes.io/cluster/${local.name}" = "shared"
"kubernetes.io/role/elb" = 1
"kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
"kubernetes.io/cluster/${local.name}" = "shared"
"kubernetes.io/role/internal-elb" = 1
"kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
@@ -407,7 +330,7 @@ module "vpc" {
module "vpc_cni_irsa" {
source = "terraform-aws-modules/iam/aws//modules/iam-role-for-service-accounts-eks"
version = "~> 4.12"
version = "~> 5.0"
role_name_prefix = "VPC-CNI-IRSA"
attach_vpc_cni_policy = true
@@ -423,175 +346,35 @@ module "vpc_cni_irsa" {
tags = local.tags
}
resource "aws_security_group" "additional" {
name_prefix = "${local.name}-additional"
vpc_id = module.vpc.vpc_id
module "ebs_kms_key" {
source = "terraform-aws-modules/kms/aws"
version = "~> 1.1"
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [
"10.0.0.0/8",
"172.16.0.0/12",
"192.168.0.0/16",
]
}
description = "Customer managed key to encrypt EKS managed node group volumes"
# Policy
key_administrators = [
data.aws_caller_identity.current.arn
]
key_service_users = [
# required for the ASG to manage encrypted volumes for nodes
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling",
# required for the cluster / persistentvolume-controller to create encrypted PVCs
module.eks.cluster_iam_role_arn,
]
# Aliases
aliases = ["eks/${local.name}/ebs"]
tags = local.tags
}
resource "aws_kms_key" "eks" {
description = "EKS Secret Encryption Key"
deletion_window_in_days = 7
enable_key_rotation = true
module "key_pair" {
source = "terraform-aws-modules/key-pair/aws"
version = "~> 2.0"
tags = local.tags
}
resource "aws_kms_key" "ebs" {
description = "Customer managed key to encrypt EKS managed node group volumes"
deletion_window_in_days = 7
policy = data.aws_iam_policy_document.ebs.json
}
# This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes
data "aws_iam_policy_document" "ebs" {
# Copy of default KMS policy that lets you manage it
statement {
sid = "Enable IAM User Permissions"
actions = ["kms:*"]
resources = ["*"]
principals {
type = "AWS"
identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
}
}
# Required for EKS
statement {
sid = "Allow service-linked role use of the CMK"
actions = [
"kms:Encrypt",
"kms:Decrypt",
"kms:ReEncrypt*",
"kms:GenerateDataKey*",
"kms:DescribeKey"
]
resources = ["*"]
principals {
type = "AWS"
identifiers = [
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
]
}
}
statement {
sid = "Allow attachment of persistent resources"
actions = ["kms:CreateGrant"]
resources = ["*"]
principals {
type = "AWS"
identifiers = [
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
]
}
condition {
test = "Bool"
variable = "kms:GrantIsForAWSResource"
values = ["true"]
}
}
}
# This is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx)
# there are several more options one could set but you probably don't need to modify them
# you can take the default and add your custom AMI and/or custom tags
#
# Trivia: AWS transparently creates a copy of your LaunchTemplate and then actually uses that copy for the node group. If you DON'T use a custom AMI,
# then the default user-data for bootstrapping a cluster is merged in the copy.
resource "aws_launch_template" "external" {
name_prefix = "external-eks-ex-"
description = "EKS managed node group external launch template"
update_default_version = true
block_device_mappings {
device_name = "/dev/xvda"
ebs {
volume_size = 100
volume_type = "gp2"
delete_on_termination = true
}
}
monitoring {
enabled = true
}
# Disabling due to https://github.com/hashicorp/terraform-provider-aws/issues/23766
# network_interfaces {
# associate_public_ip_address = false
# delete_on_termination = true
# }
# if you want to use a custom AMI
# image_id = var.ami_id
# If you use a custom AMI, you need to supply the bootstrap script via user-data, as EKS DOESN'T merge its managed user-data in that case
# you can add more than the minimum code you see in the template, e.g. install SSM agent, see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
# (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151)
# user_data = base64encode(data.template_file.launch_template_userdata.rendered)
tag_specifications {
resource_type = "instance"
tags = {
Name = "external_lt"
CustomTag = "Instance custom tag"
}
}
tag_specifications {
resource_type = "volume"
tags = {
CustomTag = "Volume custom tag"
}
}
tag_specifications {
resource_type = "network-interface"
tags = {
CustomTag = "EKS example"
}
}
tags = {
CustomTag = "Launch template custom tag"
}
lifecycle {
create_before_destroy = true
}
}
resource "tls_private_key" "this" {
algorithm = "RSA"
}
resource "aws_key_pair" "this" {
key_name_prefix = local.name
public_key = tls_private_key.this.public_key_openssh
key_name_prefix = local.name
create_private_key = true
tags = local.tags
}
@@ -617,7 +400,7 @@ resource "aws_security_group" "remote_access" {
ipv6_cidr_blocks = ["::/0"]
}
tags = local.tags
tags = merge(local.tags, { Name = "${local.name}-remote" })
}
resource "aws_iam_policy" "node_additional" {
@@ -669,52 +452,3 @@ data "aws_ami" "eks_default_bottlerocket" {
values = ["bottlerocket-aws-k8s-${local.cluster_version}-x86_64-*"]
}
}
################################################################################
# Tags for the ASG to support cluster-autoscaler scale up from 0
################################################################################
locals {
# We need to lookup K8s taint effect from the AWS API value
taint_effects = {
NO_SCHEDULE = "NoSchedule"
NO_EXECUTE = "NoExecute"
PREFER_NO_SCHEDULE = "PreferNoSchedule"
}
cluster_autoscaler_label_tags = merge([
for name, group in module.eks.eks_managed_node_groups : {
for label_name, label_value in coalesce(group.node_group_labels, {}) : "${name}|label|${label_name}" => {
autoscaling_group = group.node_group_autoscaling_group_names[0],
key = "k8s.io/cluster-autoscaler/node-template/label/${label_name}",
value = label_value,
}
}
]...)
cluster_autoscaler_taint_tags = merge([
for name, group in module.eks.eks_managed_node_groups : {
for taint in coalesce(group.node_group_taints, []) : "${name}|taint|${taint.key}" => {
autoscaling_group = group.node_group_autoscaling_group_names[0],
key = "k8s.io/cluster-autoscaler/node-template/taint/${taint.key}"
value = "${taint.value}:${local.taint_effects[taint.effect]}"
}
}
]...)
cluster_autoscaler_asg_tags = merge(local.cluster_autoscaler_label_tags, local.cluster_autoscaler_taint_tags)
}
resource "aws_autoscaling_group_tag" "cluster_autoscaler_label_tags" {
for_each = local.cluster_autoscaler_asg_tags
autoscaling_group_name = each.value.autoscaling_group
tag {
key = each.value.key
value = each.value.value
propagate_at_launch = false
}
}


@@ -17,14 +17,14 @@ output "cluster_endpoint" {
value = module.eks.cluster_endpoint
}
output "cluster_name" {
description = "The name of the EKS cluster. Will block on cluster creation until the cluster is really ready"
value = module.eks.cluster_name
output "cluster_id" {
description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts"
value = module.eks.cluster_id
}
output "cluster_id" {
description = "The id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
value = module.eks.cluster_id
output "cluster_name" {
description = "The name of the EKS cluster"
value = module.eks.cluster_name
}
output "cluster_oidc_issuer_url" {


@@ -1,14 +1,10 @@
terraform {
required_version = ">= 0.13.1"
required_version = ">= 1.0"
required_providers {
aws = {
source = "hashicorp/aws"
version = ">= 3.72"
}
tls = {
source = "hashicorp/tls"
version = ">= 3.0"
version = ">= 4.45"
}
kubernetes = {
source = "hashicorp/kubernetes"


@@ -19,8 +19,8 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.72 |
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.0 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 4.45 |
| <a name="requirement_helm"></a> [helm](#requirement\_helm) | >= 2.7 |
| <a name="requirement_null"></a> [null](#requirement\_null) | >= 3.0 |
@@ -28,7 +28,7 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.72 |
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 4.45 |
| <a name="provider_helm"></a> [helm](#provider\_helm) | >= 2.7 |
| <a name="provider_null"></a> [null](#provider\_null) | >= 3.0 |
@@ -43,10 +43,11 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
| [aws_iam_policy.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [helm_release.coredns](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
| [null_resource.modify_kube_dns](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [null_resource.remove_default_coredns_deployment](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
| [aws_eks_addon_version.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_addon_version) | data source |
| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
@@ -68,9 +69,9 @@ No inputs.
| <a name="output_cluster_iam_role_arn"></a> [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
| <a name="output_cluster_iam_role_name"></a> [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
| <a name="output_cluster_iam_role_unique_id"></a> [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts |
| <a name="output_cluster_identity_providers"></a> [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
| <a name="output_cluster_name"></a> [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster. Will block on cluster creation until the cluster is really ready |
| <a name="output_cluster_name"></a> [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster |
| <a name="output_cluster_oidc_issuer_url"></a> [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
| <a name="output_cluster_platform_version"></a> [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
| <a name="output_cluster_primary_security_group_id"></a> [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |


@@ -16,11 +16,16 @@ provider "helm" {
}
}
data "aws_availability_zones" "available" {}
locals {
name = "ex-${replace(basename(path.cwd), "_", "-")}"
cluster_version = "1.24"
region = "eu-west-1"
vpc_cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
tags = {
Example = local.name
GithubRepo = "terraform-aws-eks"
@@ -35,28 +40,29 @@ locals {
module "eks" {
source = "../.."
cluster_name = local.name
cluster_version = local.cluster_version
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
cluster_name = local.name
cluster_version = local.cluster_version
cluster_endpoint_public_access = true
cluster_addons = {
kube-proxy = {}
vpc-cni = {}
}
cluster_encryption_config = [{
provider_key_arn = aws_kms_key.eks.arn
resources = ["secrets"]
}]
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
control_plane_subnet_ids = module.vpc.intra_subnets
# Fargate profiles use the cluster primary security group so these are not utilized
create_cluster_security_group = false
create_node_security_group = false
fargate_profile_defaults = {
iam_role_additional_policies = {
additional = aws_iam_policy.additional.arn
}
}
fargate_profiles = {
example = {
name = "example"
@@ -231,11 +237,12 @@ module "vpc" {
version = "~> 3.0"
name = local.name
cidr = "10.0.0.0/16"
cidr = local.vpc_cidr
azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
azs = local.azs
private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)]
enable_nat_gateway = true
single_nat_gateway = true
@@ -246,22 +253,29 @@ module "vpc" {
create_flow_log_cloudwatch_log_group = true
public_subnet_tags = {
"kubernetes.io/cluster/${local.name}" = "shared"
"kubernetes.io/role/elb" = 1
"kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
"kubernetes.io/cluster/${local.name}" = "shared"
"kubernetes.io/role/internal-elb" = 1
"kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
}
resource "aws_kms_key" "eks" {
description = "EKS Secret Encryption Key"
deletion_window_in_days = 7
enable_key_rotation = true
resource "aws_iam_policy" "additional" {
name = "${local.name}-additional"
tags = local.tags
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = [
"ec2:Describe*",
]
Effect = "Allow"
Resource = "*"
},
]
})
}


@@ -17,14 +17,14 @@ output "cluster_endpoint" {
value = module.eks.cluster_endpoint
}
output "cluster_name" {
description = "The name of the EKS cluster. Will block on cluster creation until the cluster is really ready"
value = module.eks.cluster_name
output "cluster_id" {
description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts"
value = module.eks.cluster_id
}
output "cluster_id" {
description = "The id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
value = module.eks.cluster_id
output "cluster_name" {
description = "The name of the EKS cluster"
value = module.eks.cluster_name
}
output "cluster_oidc_issuer_url" {


@@ -1,10 +1,10 @@
terraform {
required_version = ">= 0.13.1"
required_version = ">= 1.0"
required_providers {
aws = {
source = "hashicorp/aws"
version = ">= 3.72"
version = ">= 4.45"
}
helm = {
source = "hashicorp/helm"


@@ -51,9 +51,9 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.72 |
| <a name="requirement_helm"></a> [helm](#requirement\_helm) | >= 2.4 |
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.0 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 4.45 |
| <a name="requirement_helm"></a> [helm](#requirement\_helm) | >= 2.7 |
| <a name="requirement_kubectl"></a> [kubectl](#requirement\_kubectl) | >= 1.14 |
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 2.10 |
| <a name="requirement_null"></a> [null](#requirement\_null) | >= 3.0 |
@@ -62,8 +62,9 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.72 |
| <a name="provider_helm"></a> [helm](#provider\_helm) | >= 2.4 |
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 4.45 |
| <a name="provider_aws.virginia"></a> [aws.virginia](#provider\_aws.virginia) | >= 4.45 |
| <a name="provider_helm"></a> [helm](#provider\_helm) | >= 2.7 |
| <a name="provider_kubectl"></a> [kubectl](#provider\_kubectl) | >= 1.14 |
| <a name="provider_null"></a> [null](#provider\_null) | >= 3.0 |
@@ -86,6 +87,8 @@ Note that this example may create resources which cost money. Run `terraform des
| [kubectl_manifest.karpenter_provisioner](https://registry.terraform.io/providers/gavinbunney/kubectl/latest/docs/resources/manifest) | resource |
| [null_resource.modify_kube_dns](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [null_resource.remove_default_coredns_deployment](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
| [aws_ecrpublic_authorization_token.token](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ecrpublic_authorization_token) | data source |
| [aws_eks_addon_version.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_addon_version) | data source |
| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
@@ -107,9 +110,9 @@ No inputs.
| <a name="output_cluster_iam_role_arn"></a> [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
| <a name="output_cluster_iam_role_name"></a> [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
| <a name="output_cluster_iam_role_unique_id"></a> [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts |
| <a name="output_cluster_identity_providers"></a> [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
| <a name="output_cluster_name"></a> [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster. Will block on cluster creation until the cluster is really ready |
| <a name="output_cluster_name"></a> [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster |
| <a name="output_cluster_oidc_issuer_url"></a> [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
| <a name="output_cluster_platform_version"></a> [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
| <a name="output_cluster_primary_security_group_id"></a> [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |


@@ -2,6 +2,11 @@ provider "aws" {
region = local.region
}
provider "aws" {
region = "us-east-1"
alias = "virginia"
}
provider "kubernetes" {
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
@@ -42,11 +47,19 @@ provider "kubectl" {
}
}
data "aws_availability_zones" "available" {}
data "aws_ecrpublic_authorization_token" "token" {
provider = aws.virginia
}
locals {
name = "ex-${replace(basename(path.cwd), "_", "-")}"
cluster_version = "1.24"
region = "eu-west-1"
vpc_cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
tags = {
Example = local.name
GithubRepo = "terraform-aws-eks"
@@ -61,17 +74,13 @@ locals {
module "eks" {
source = "../.."
cluster_name = local.name
cluster_version = local.cluster_version
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
cluster_name = local.name
cluster_version = local.cluster_version
cluster_endpoint_public_access = true
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
# Fargate profiles use the cluster primary security group so these are not utilized
create_cluster_security_group = false
create_node_security_group = false
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
control_plane_subnet_ids = module.vpc.intra_subnets
manage_aws_auth_configmap = true
aws_auth_roles = [
@@ -114,7 +123,6 @@ module "eks" {
# Karpenter
################################################################################
module "karpenter" {
source = "../../modules/karpenter"
@@ -128,10 +136,12 @@ resource "helm_release" "karpenter" {
namespace = "karpenter"
create_namespace = true
name = "karpenter"
repository = "oci://public.ecr.aws/karpenter"
chart = "karpenter"
version = "v0.19.1"
name = "karpenter"
repository = "oci://public.ecr.aws/karpenter"
repository_username = data.aws_ecrpublic_authorization_token.token.user_name
repository_password = data.aws_ecrpublic_authorization_token.token.password
chart = "karpenter"
version = "v0.19.3"
set {
name = "settings.aws.clusterName"
@@ -367,24 +377,27 @@ module "vpc" {
version = "~> 3.0"
name = local.name
cidr = "10.0.0.0/16"
cidr = local.vpc_cidr
azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
azs = local.azs
private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)]
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
enable_flow_log = true
create_flow_log_cloudwatch_iam_role = true
create_flow_log_cloudwatch_log_group = true
public_subnet_tags = {
"kubernetes.io/cluster/${local.name}" = "shared"
"kubernetes.io/role/elb" = 1
"kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
"kubernetes.io/cluster/${local.name}" = "shared"
"kubernetes.io/role/internal-elb" = 1
"kubernetes.io/role/internal-elb" = 1
# Tags subnets for Karpenter auto-discovery
"karpenter.sh/discovery" = local.name
}


@@ -17,14 +17,14 @@ output "cluster_endpoint" {
value = module.eks.cluster_endpoint
}
output "cluster_name" {
description = "The name of the EKS cluster. Will block on cluster creation until the cluster is really ready"
value = module.eks.cluster_name
output "cluster_id" {
description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts"
value = module.eks.cluster_id
}
output "cluster_id" {
description = "The id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
value = module.eks.cluster_id
output "cluster_name" {
description = "The name of the EKS cluster"
value = module.eks.cluster_name
}
output "cluster_oidc_issuer_url" {


@@ -1,10 +1,10 @@
terraform {
required_version = ">= 0.13.1"
required_version = ">= 1.0"
required_providers {
aws = {
source = "hashicorp/aws"
version = ">= 3.72"
version = ">= 4.45"
}
kubernetes = {
source = "hashicorp/kubernetes"
@@ -12,7 +12,7 @@ terraform {
}
helm = {
source = "hashicorp/helm"
version = ">= 2.4"
version = ">= 2.7"
}
kubectl = {
source = "gavinbunney/kubectl"

examples/outposts/README.md (new file, 115 lines)

@@ -0,0 +1,115 @@
# EKS on Outposts
Configuration in this directory creates an AWS EKS local cluster on AWS Outposts.
See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/eks-outposts.html) for further details.
Note: This example requires an AWS Outpost to provision.
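For orientation, a minimal sketch of how a cluster is pointed at an Outpost; the `outpost_config` input shape shown here is assumed from the `aws_eks_cluster` resource's `outpost_config` block, and the data sources mirror those listed under Resources below — see this example's `main.tf` for the authoritative configuration:

```hcl
module "eks" {
  source = "../.."

  cluster_name = "ex-outposts"

  # Local clusters on Outposts only support private access
  cluster_endpoint_public_access = false

  # Assumed input shape, mirroring the `outpost_config` block of `aws_eks_cluster`
  outpost_config = {
    control_plane_instance_type = element(tolist(data.aws_outposts_outpost_instance_types.this.instance_types), 0)
    outpost_arns                = tolist(data.aws_outposts_outposts.this.arns)
  }

  vpc_id     = data.aws_vpc.this.id
  subnet_ids = data.aws_subnets.this.ids
}
```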
## Usage
To run this example you need to:
1. Deploy the remote host where the cluster will be provisioned from. The remote host is required since only private access is permitted to clusters created on Outposts. If you have access to the network where Outposts are provisioned (VPN, etc.), you can skip this step:
```bash
$ cd prerequisites
$ terraform init
$ terraform plan
$ terraform apply
```
2. If provisioning using the remote host deployed in step 1, connect to the remote host using SSM. Note: you will need to have the [SSM plugin for the AWS CLI](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html) installed. You can use the output generated by step 1 to connect:
```bash
$ aws ssm start-session --region <REGION> --target <INSTANCE_ID>
```
3. Once connected to the remote host, navigate to the cloned project example directory and deploy the example:
```bash
$ cd $HOME/terraform-aws-eks/examples/outposts
$ terraform init
$ terraform plan
$ terraform apply
```
Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
## Requirements
| Name | Version |
|------|---------|
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.0 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 4.45 |
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 2.10 |
## Providers
| Name | Version |
|------|---------|
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 4.45 |
| <a name="provider_kubernetes"></a> [kubernetes](#provider\_kubernetes) | >= 2.10 |
## Modules
| Name | Source | Version |
|------|--------|---------|
| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
## Resources
| Name | Type |
|------|------|
| [kubernetes_storage_class_v1.this](https://registry.terraform.io/providers/hashicorp/kubernetes/latest/docs/resources/storage_class_v1) | resource |
| [aws_outposts_outpost_instance_types.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/outposts_outpost_instance_types) | data source |
| [aws_outposts_outposts.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/outposts_outposts) | data source |
| [aws_subnet.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnet) | data source |
| [aws_subnets.lookup](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnets) | data source |
| [aws_subnets.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnets) | data source |
| [aws_vpc.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc) | data source |
## Inputs
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| <a name="input_region"></a> [region](#input\_region) | The AWS region to deploy into (e.g. us-east-1) | `string` | `"us-west-2"` | no |
## Outputs
| Name | Description |
|------|-------------|
| <a name="output_aws_auth_configmap_yaml"></a> [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
| <a name="output_cloudwatch_log_group_arn"></a> [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
| <a name="output_cloudwatch_log_group_name"></a> [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
| <a name="output_cluster_addons"></a> [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
| <a name="output_cluster_arn"></a> [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
| <a name="output_cluster_certificate_authority_data"></a> [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
| <a name="output_cluster_iam_role_arn"></a> [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
| <a name="output_cluster_iam_role_name"></a> [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
| <a name="output_cluster_iam_role_unique_id"></a> [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts |
| <a name="output_cluster_identity_providers"></a> [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
| <a name="output_cluster_name"></a> [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster |
| <a name="output_cluster_oidc_issuer_url"></a> [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
| <a name="output_cluster_platform_version"></a> [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
| <a name="output_cluster_primary_security_group_id"></a> [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
| <a name="output_cluster_security_group_arn"></a> [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
| <a name="output_cluster_status"></a> [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
| <a name="output_cluster_tls_certificate_sha1_fingerprint"></a> [cluster\_tls\_certificate\_sha1\_fingerprint](#output\_cluster\_tls\_certificate\_sha1\_fingerprint) | The SHA1 fingerprint of the public key of the cluster's certificate |
| <a name="output_eks_managed_node_groups"></a> [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
| <a name="output_eks_managed_node_groups_autoscaling_group_names"></a> [eks\_managed\_node\_groups\_autoscaling\_group\_names](#output\_eks\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by EKS managed node groups |
| <a name="output_fargate_profiles"></a> [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
| <a name="output_kms_key_arn"></a> [kms\_key\_arn](#output\_kms\_key\_arn) | The Amazon Resource Name (ARN) of the key |
| <a name="output_kms_key_id"></a> [kms\_key\_id](#output\_kms\_key\_id) | The globally unique identifier for the key |
| <a name="output_kms_key_policy"></a> [kms\_key\_policy](#output\_kms\_key\_policy) | The IAM resource policy set on the key |
| <a name="output_node_security_group_arn"></a> [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
| <a name="output_node_security_group_id"></a> [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
| <a name="output_oidc_provider"></a> [oidc\_provider](#output\_oidc\_provider) | The OpenID Connect identity provider (issuer URL without leading `https://`) |
| <a name="output_oidc_provider_arn"></a> [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
| <a name="output_self_managed_node_groups"></a> [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
| <a name="output_self_managed_node_groups_autoscaling_group_names"></a> [self\_managed\_node\_groups\_autoscaling\_group\_names](#output\_self\_managed\_node\_groups\_autoscaling\_group\_names) | List of the autoscaling group names created by self-managed node groups |
<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->

152
examples/outposts/main.tf Normal file
View File

@@ -0,0 +1,152 @@
provider "aws" {
region = var.region
}
provider "kubernetes" {
host = module.eks.cluster_endpoint
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
exec {
api_version = "client.authentication.k8s.io/v1beta1"
command = "aws"
# Note: `cluster_id` is used with Outposts for auth (a standard-cluster variant is sketched after this provider block)
args = ["eks", "get-token", "--cluster-id", module.eks.cluster_id, "--region", var.region]
}
}
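# For a standard (non-Outposts) cluster, the same exec plugin would authenticate by
# cluster name rather than cluster ID. Illustrative variant only - not used by this example:
#
#   args = ["eks", "get-token", "--cluster-name", module.eks.cluster_name, "--region", var.region]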
locals {
name = "ex-${basename(path.cwd)}"
cluster_version = "1.21" # Required by EKS on Outposts
outpost_arn = element(tolist(data.aws_outposts_outposts.this.arns), 0)
instance_type = element(tolist(data.aws_outposts_outpost_instance_types.this.instance_types), 0)
tags = {
Example = local.name
GithubRepo = "terraform-aws-eks"
GithubOrg = "terraform-aws-modules"
}
}
################################################################################
# EKS Module
################################################################################
module "eks" {
source = "../.."
cluster_name = local.name
cluster_version = local.cluster_version
cluster_endpoint_public_access = false # Not available on Outpost
cluster_endpoint_private_access = true
vpc_id = data.aws_vpc.this.id
subnet_ids = data.aws_subnets.this.ids
outpost_config = {
control_plane_instance_type = local.instance_type
outpost_arns = [local.outpost_arn]
}
# Local clusters will automatically add the node group IAM role to the aws-auth configmap
manage_aws_auth_configmap = true
# Extend cluster security group rules
cluster_security_group_additional_rules = {
ingress_vpc_https = {
description = "Remote host to control plane"
protocol = "tcp"
from_port = 443
to_port = 443
type = "ingress"
cidr_blocks = [data.aws_vpc.this.cidr_block]
}
}
self_managed_node_group_defaults = {
attach_cluster_primary_security_group = true
iam_role_additional_policies = {
AmazonSSMManagedInstanceCore = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
}
}
self_managed_node_groups = {
outpost = {
name = local.name
min_size = 2
max_size = 5
desired_size = 3
instance_type = local.instance_type
# Additional information is required to join local clusters to EKS (an illustrative bootstrap invocation is sketched after this module)
bootstrap_extra_args = <<-EOT
--enable-local-outpost true --cluster-id ${module.eks.cluster_id} --container-runtime containerd
EOT
}
}
tags = local.tags
}
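# Illustrative only - nothing below is executed by this example. On the worker nodes,
# the `bootstrap_extra_args` above are appended to the EKS-optimized AMI bootstrap call,
# which (assuming the standard Amazon Linux EKS AMI) looks roughly like:
#
#   /etc/eks/bootstrap.sh <cluster-name> \
#     --enable-local-outpost true \
#     --cluster-id <cluster-id> \
#     --container-runtime containerd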
resource "kubernetes_storage_class_v1" "this" {
metadata {
name = "ebs-sc"
annotations = {
"storageclass.kubernetes.io/is-default-class" = "true"
}
}
storage_provisioner = "ebs.csi.aws.com"
volume_binding_mode = "WaitForFirstConsumer"
allow_volume_expansion = true
parameters = {
type = "gp2"
encrypted = "true"
}
}
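# Illustrative only - a PersistentVolumeClaim that would bind to the default "ebs-sc"
# class above once a consuming pod is scheduled (WaitForFirstConsumer). Not created by
# this example:
#
# resource "kubernetes_persistent_volume_claim_v1" "example" {
#   metadata {
#     name = "ebs-claim"
#   }
#
#   spec {
#     access_modes       = ["ReadWriteOnce"]
#     storage_class_name = kubernetes_storage_class_v1.this.metadata[0].name
#
#     resources {
#       requests = {
#         storage = "4Gi"
#       }
#     }
#   }
# }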
################################################################################
# Supporting Resources
################################################################################
data "aws_outposts_outposts" "this" {}
data "aws_outposts_outpost_instance_types" "this" {
arn = local.outpost_arn
}
# This grabs the subnets of the first Outpost selected above (an alternative lookup is sketched below)
data "aws_subnets" "lookup" {
filter {
name = "outpost-arn"
values = [local.outpost_arn]
}
}
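# Alternative lookup (illustrative, not used here): pin a specific Outpost by ID
# instead of taking the first ARN returned by `data.aws_outposts_outposts.this`
#
# data "aws_outposts_outpost" "selected" {
#   id = "op-0123456789abcdef0" # hypothetical Outpost ID
# }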
# This grabs a single subnet to reverse-lookup the subnets that belong to the same VPC
# This is what's used for the cluster
data "aws_subnet" "this" {
id = element(tolist(data.aws_subnets.lookup.ids), 0)
}
# These are the subnets for the Outpost, restricted to the same VPC
# This is what's used for the cluster
data "aws_subnets" "this" {
filter {
name = "outpost-arn"
values = [local.outpost_arn]
}
filter {
name = "vpc-id"
values = [data.aws_subnet.this.vpc_id]
}
}
data "aws_vpc" "this" {
id = data.aws_subnet.this.vpc_id
}

View File

@@ -0,0 +1,211 @@
################################################################################
# Cluster
################################################################################
output "cluster_arn" {
description = "The Amazon Resource Name (ARN) of the cluster"
value = module.eks.cluster_arn
}
output "cluster_certificate_authority_data" {
description = "Base64 encoded certificate data required to communicate with the cluster"
value = module.eks.cluster_certificate_authority_data
}
output "cluster_endpoint" {
description = "Endpoint for your Kubernetes API server"
value = module.eks.cluster_endpoint
}
output "cluster_id" {
description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts"
value = module.eks.cluster_id
}
output "cluster_name" {
description = "The name of the EKS cluster"
value = module.eks.cluster_name
}
output "cluster_oidc_issuer_url" {
description = "The URL on the EKS cluster for the OpenID Connect identity provider"
value = module.eks.cluster_oidc_issuer_url
}
output "cluster_platform_version" {
description = "Platform version for the cluster"
value = module.eks.cluster_platform_version
}
output "cluster_status" {
description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
value = module.eks.cluster_status
}
output "cluster_primary_security_group_id" {
description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
value = module.eks.cluster_primary_security_group_id
}
################################################################################
# KMS Key
################################################################################
output "kms_key_arn" {
description = "The Amazon Resource Name (ARN) of the key"
value = module.eks.kms_key_arn
}
output "kms_key_id" {
description = "The globally unique identifier for the key"
value = module.eks.kms_key_id
}
output "kms_key_policy" {
description = "The IAM resource policy set on the key"
value = module.eks.kms_key_policy
}
################################################################################
# Security Group
################################################################################
output "cluster_security_group_arn" {
description = "Amazon Resource Name (ARN) of the cluster security group"
value = module.eks.cluster_security_group_arn
}
output "cluster_security_group_id" {
description = "ID of the cluster security group"
value = module.eks.cluster_security_group_id
}
################################################################################
# Node Security Group
################################################################################
output "node_security_group_arn" {
description = "Amazon Resource Name (ARN) of the node shared security group"
value = module.eks.node_security_group_arn
}
output "node_security_group_id" {
description = "ID of the node shared security group"
value = module.eks.node_security_group_id
}
################################################################################
# IRSA
################################################################################
output "oidc_provider" {
description = "The OpenID Connect identity provider (issuer URL without leading `https://`)"
value = module.eks.oidc_provider
}
output "oidc_provider_arn" {
description = "The ARN of the OIDC Provider if `enable_irsa = true`"
value = module.eks.oidc_provider_arn
}
output "cluster_tls_certificate_sha1_fingerprint" {
description = "The SHA1 fingerprint of the public key of the cluster's certificate"
value = module.eks.cluster_tls_certificate_sha1_fingerprint
}
################################################################################
# IAM Role
################################################################################
output "cluster_iam_role_name" {
description = "IAM role name of the EKS cluster"
value = module.eks.cluster_iam_role_name
}
output "cluster_iam_role_arn" {
description = "IAM role ARN of the EKS cluster"
value = module.eks.cluster_iam_role_arn
}
output "cluster_iam_role_unique_id" {
description = "Stable and unique string identifying the IAM role"
value = module.eks.cluster_iam_role_unique_id
}
################################################################################
# EKS Addons
################################################################################
output "cluster_addons" {
description = "Map of attribute maps for all EKS cluster addons enabled"
value = module.eks.cluster_addons
}
################################################################################
# EKS Identity Provider
################################################################################
output "cluster_identity_providers" {
description = "Map of attribute maps for all EKS identity providers enabled"
value = module.eks.cluster_identity_providers
}
################################################################################
# CloudWatch Log Group
################################################################################
output "cloudwatch_log_group_name" {
description = "Name of cloudwatch log group created"
value = module.eks.cloudwatch_log_group_name
}
output "cloudwatch_log_group_arn" {
description = "Arn of cloudwatch log group created"
value = module.eks.cloudwatch_log_group_arn
}
################################################################################
# Fargate Profile
################################################################################
output "fargate_profiles" {
description = "Map of attribute maps for all EKS Fargate Profiles created"
value = module.eks.fargate_profiles
}
################################################################################
# EKS Managed Node Group
################################################################################
output "eks_managed_node_groups" {
description = "Map of attribute maps for all EKS managed node groups created"
value = module.eks.eks_managed_node_groups
}
output "eks_managed_node_groups_autoscaling_group_names" {
description = "List of the autoscaling group names created by EKS managed node groups"
value = module.eks.eks_managed_node_groups_autoscaling_group_names
}
################################################################################
# Self Managed Node Group
################################################################################
output "self_managed_node_groups" {
description = "Map of attribute maps for all self managed node groups created"
value = module.eks.self_managed_node_groups
}
output "self_managed_node_groups_autoscaling_group_names" {
description = "List of the autoscaling group names created by self-managed node groups"
value = module.eks.self_managed_node_groups_autoscaling_group_names
}
################################################################################
# Additional
################################################################################
output "aws_auth_configmap_yaml" {
description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
value = module.eks.aws_auth_configmap_yaml
}

View File

@@ -0,0 +1,150 @@
provider "aws" {
region = var.region
}
locals {
name = "ex-${basename(path.cwd)}"
terraform_version = "1.3.6"
outpost_arn = element(tolist(data.aws_outposts_outposts.this.arns), 0)
instance_type = element(tolist(data.aws_outposts_outpost_instance_types.this.instance_types), 0)
tags = {
Example = local.name
GithubRepo = "terraform-aws-eks"
GithubOrg = "terraform-aws-modules"
}
}
################################################################################
# Pre-Requisites
################################################################################
module "ssm_bastion_ec2" {
source = "terraform-aws-modules/ec2-instance/aws"
version = "~> 4.2"
name = "${local.name}-bastion"
create_iam_instance_profile = true
iam_role_policies = {
AdministratorAccess = "arn:aws:iam::aws:policy/AdministratorAccess"
}
instance_type = local.instance_type
user_data = <<-EOT
#!/bin/bash
# Add ssm-user since it won't exist until first login
adduser -m ssm-user
tee /etc/sudoers.d/ssm-agent-users <<'EOF'
# User rules for ssm-user
ssm-user ALL=(ALL) NOPASSWD:ALL
EOF
chmod 440 /etc/sudoers.d/ssm-agent-users
cd /home/ssm-user
# Install git to clone repo
yum install git -y
# Install Terraform
curl -sSO https://releases.hashicorp.com/terraform/${local.terraform_version}/terraform_${local.terraform_version}_linux_amd64.zip
sudo unzip -qq terraform_${local.terraform_version}_linux_amd64.zip terraform -d /usr/bin/
rm terraform_${local.terraform_version}_linux_amd64.zip 2> /dev/null
# Install kubectl
curl -LO https://dl.k8s.io/release/v1.21.0/bin/linux/amd64/kubectl
install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
# Remove the default awscli, which is v1 - we want the latest v2
yum remove awscli -y
curl "https://awscli.amazonaws.com/awscli-exe-linux-x86_64.zip" -o "awscliv2.zip"
unzip -qq awscliv2.zip
./aws/install
# Clone repo
git clone https://github.com/bryantbiggs/terraform-aws-eks.git \
&& cd /home/ssm-user/terraform-aws-eks \
&& git checkout refactor/v19
chown -R ssm-user:ssm-user /home/ssm-user/
EOT
vpc_security_group_ids = [module.bastion_security_group.security_group_id]
subnet_id = element(data.aws_subnets.this.ids, 0)
tags = local.tags
}
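# Illustrative session only. The user data above leaves a clone of the repository under
# /home/ssm-user/terraform-aws-eks (branch refactor/v19), so once connected over SSM the
# Outposts example can be provisioned from the bastion roughly as follows:
#
#   cd /home/ssm-user/terraform-aws-eks/examples/outposts
#   terraform init
#   terraform apply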
module "bastion_security_group" {
source = "terraform-aws-modules/security-group/aws"
version = "~> 4.0"
name = "${local.name}-bastion"
description = "Security group to allow provisioning ${local.name} EKS local cluster on Outposts"
vpc_id = data.aws_vpc.this.id
ingress_with_cidr_blocks = [
{
from_port = 443
to_port = 443
protocol = "tcp"
cidr_blocks = data.aws_vpc.this.cidr_block
},
]
egress_with_cidr_blocks = [
{
from_port = 0
to_port = 0
protocol = "-1"
cidr_blocks = "0.0.0.0/0"
},
]
tags = local.tags
}
################################################################################
# Supporting Resources
################################################################################
data "aws_outposts_outposts" "this" {}
data "aws_outposts_outpost_instance_types" "this" {
arn = local.outpost_arn
}
# This grabs the subnets of the first Outpost selected above
data "aws_subnets" "lookup" {
filter {
name = "outpost-arn"
values = [local.outpost_arn]
}
}
# This grabs a single subnet to reverse-lookup the subnets that belong to the same VPC
# This is what's used for the cluster
data "aws_subnet" "this" {
id = element(tolist(data.aws_subnets.lookup.ids), 0)
}
# These are the subnets for the Outpost, restricted to the same VPC
# This is what's used for the cluster
data "aws_subnets" "this" {
filter {
name = "outpost-arn"
values = [local.outpost_arn]
}
filter {
name = "vpc-id"
values = [data.aws_subnet.this.vpc_id]
}
}
data "aws_vpc" "this" {
id = data.aws_subnet.this.vpc_id
}

View File

@@ -0,0 +1,4 @@
output "ssm_start_session" {
description = "SSM start session command to connect to remote host created"
value = "aws ssm start-session --region ${var.region} --target ${module.ssm_bastion_ec2.id}"
}
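# Illustrative usage after `terraform apply` (the instance ID shown is hypothetical):
#
#   $ terraform output -raw ssm_start_session
#   aws ssm start-session --region us-west-2 --target i-0123456789abcdef0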

View File

@@ -0,0 +1,5 @@
variable "region" {
description = "The AWS region to deploy into (e.g. us-east-1)"
type = string
default = "us-west-2"
}

View File

@@ -0,0 +1,10 @@
terraform {
required_version = ">= 1.0"
required_providers {
aws = {
source = "hashicorp/aws"
version = ">= 4.34"
}
}
}

View File

@@ -0,0 +1,5 @@
variable "region" {
description = "The AWS region to deploy into (e.g. us-east-1)"
type = string
default = "us-west-2"
}

View File

@@ -0,0 +1,14 @@
terraform {
required_version = ">= 1.0"
required_providers {
aws = {
source = "hashicorp/aws"
version = ">= 4.45"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = ">= 2.10"
}
}
}

View File

@@ -25,23 +25,23 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Version |
|------|---------|
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.72 |
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.0 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 4.45 |
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 2.10 |
| <a name="requirement_tls"></a> [tls](#requirement\_tls) | >= 3.0 |
## Providers
| Name | Version |
|------|---------|
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.72 |
| <a name="provider_tls"></a> [tls](#provider\_tls) | >= 3.0 |
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 4.45 |
## Modules
| Name | Source | Version |
|------|--------|---------|
| <a name="module_ebs_kms_key"></a> [ebs\_kms\_key](#module\_ebs\_kms\_key) | terraform-aws-modules/kms/aws | ~> 1.1 |
| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
| <a name="module_key_pair"></a> [key\_pair](#module\_key\_pair) | terraform-aws-modules/key-pair/aws | ~> 2.0 |
| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
## Resources
@@ -49,15 +49,11 @@ Note that this example may create resources which cost money. Run `terraform des
| Name | Type |
|------|------|
| [aws_ec2_capacity_reservation.targeted](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/ec2_capacity_reservation) | resource |
| [aws_key_pair.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource |
| [aws_kms_key.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
| [tls_private_key.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource |
| [aws_iam_policy.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [aws_ami.eks_default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_ami.eks_default_bottlerocket](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
| [aws_iam_policy_document.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
## Inputs
@@ -77,9 +73,9 @@ No inputs.
| <a name="output_cluster_iam_role_arn"></a> [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
| <a name="output_cluster_iam_role_name"></a> [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
| <a name="output_cluster_iam_role_unique_id"></a> [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts |
| <a name="output_cluster_identity_providers"></a> [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
| <a name="output_cluster_name"></a> [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster. Will block on cluster creation until the cluster is really ready |
| <a name="output_cluster_name"></a> [cluster\_name](#output\_cluster\_name) | The name of the EKS cluster |
| <a name="output_cluster_oidc_issuer_url"></a> [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
| <a name="output_cluster_platform_version"></a> [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
| <a name="output_cluster_primary_security_group_id"></a> [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |

View File

@@ -14,11 +14,17 @@ provider "kubernetes" {
}
}
data "aws_caller_identity" "current" {}
data "aws_availability_zones" "available" {}
locals {
name = "ex-${replace(basename(path.cwd), "_", "-")}"
cluster_version = "1.24"
region = "eu-west-1"
vpc_cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
tags = {
Example = local.name
GithubRepo = "terraform-aws-eks"
@@ -26,8 +32,6 @@ locals {
}
}
data "aws_caller_identity" "current" {}
################################################################################
# EKS Module
################################################################################
@@ -35,69 +39,31 @@ data "aws_caller_identity" "current" {}
module "eks" {
source = "../.."
cluster_name = local.name
cluster_version = local.cluster_version
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
cluster_name = local.name
cluster_version = local.cluster_version
cluster_endpoint_public_access = true
cluster_addons = {
coredns = {
resolve_conflicts = "OVERWRITE"
most_recent = true
}
kube-proxy = {
most_recent = true
}
kube-proxy = {}
vpc-cni = {
resolve_conflicts = "OVERWRITE"
most_recent = true
}
}
cluster_encryption_config = [{
provider_key_arn = aws_kms_key.eks.arn
resources = ["secrets"]
}]
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
vpc_id = module.vpc.vpc_id
subnet_ids = module.vpc.private_subnets
control_plane_subnet_ids = module.vpc.intra_subnets
# Self managed node groups will not automatically create the aws-auth configmap so we need to
create_aws_auth_configmap = true
manage_aws_auth_configmap = true
# Extend cluster security group rules
cluster_security_group_additional_rules = {
egress_nodes_ephemeral_ports_tcp = {
description = "To node 1025-65535"
protocol = "tcp"
from_port = 1025
to_port = 65535
type = "egress"
source_node_security_group = true
}
}
# Extend node-to-node security group rules
node_security_group_additional_rules = {
ingress_self_all = {
description = "Node to node all ports/protocols"
protocol = "-1"
from_port = 0
to_port = 0
type = "ingress"
self = true
}
egress_all = {
description = "Node all egress"
protocol = "-1"
from_port = 0
to_port = 0
type = "egress"
cidr_blocks = ["0.0.0.0/0"]
ipv6_cidr_blocks = ["::/0"]
}
}
self_managed_node_group_defaults = {
create_security_group = false
# enable discovery of autoscaling groups by cluster-autoscaler
autoscaling_group_tags = {
"k8s.io/cluster-autoscaler/enabled" : true,
@@ -117,24 +83,31 @@ module "eks" {
ami_id = data.aws_ami.eks_default_bottlerocket.id
instance_type = "m5.large"
desired_size = 2
key_name = aws_key_pair.this.key_name
iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
key_name = module.key_pair.key_pair_name
bootstrap_extra_args = <<-EOT
# The admin host container provides SSH access and runs with "superpowers".
# It is disabled by default, but can be enabled explicitly.
[settings.host-containers.admin]
enabled = false
# The admin host container provides SSH access and runs with "superpowers".
# It is disabled by default, but can be enabled explicitly.
[settings.host-containers.admin]
enabled = false
# The control host container provides out-of-band access via SSM.
# It is enabled by default, and can be disabled if you do not expect to use SSM.
# This could leave you with no way to access the API and change settings on an existing node!
[settings.host-containers.control]
enabled = true
# The control host container provides out-of-band access via SSM.
# It is enabled by default, and can be disabled if you do not expect to use SSM.
# This could leave you with no way to access the API and change settings on an existing node!
[settings.host-containers.control]
enabled = true
[settings.kubernetes.node-labels]
ingress = "allowed"
# extra args added
[settings.kernel]
lockdown = "integrity"
[settings.kubernetes.node-labels]
label1 = "foo"
label2 = "bar"
[settings.kubernetes.node-taints]
dedicated = "experimental:PreferNoSchedule"
special = "true:NoSchedule"
EOT
}
@@ -177,15 +150,14 @@ module "eks" {
instance_type = "c5n.9xlarge"
post_bootstrap_user_data = <<-EOT
# Install EFA
curl -O https://efa-installer.amazonaws.com/aws-efa-installer-latest.tar.gz
tar -xf aws-efa-installer-latest.tar.gz && cd aws-efa-installer
./efa_installer.sh -y --minimal
fi_info -p efa -t FI_EP_RDM
# Install EFA
curl -O https://efa-installer.amazonaws.com/aws-efa-installer-latest.tar.gz
tar -xf aws-efa-installer-latest.tar.gz && cd aws-efa-installer
./efa_installer.sh -y --minimal
fi_info -p efa -t FI_EP_RDM
# Disable ptrace
sysctl -w kernel.yama.ptrace_scope=0
# Disable ptrace
sysctl -w kernel.yama.ptrace_scope=0
EOT
network_interfaces = [
@@ -214,12 +186,12 @@ module "eks" {
bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'"
pre_bootstrap_user_data = <<-EOT
export CONTAINER_RUNTIME="containerd"
export USE_MAX_PODS=false
export CONTAINER_RUNTIME="containerd"
export USE_MAX_PODS=false
EOT
post_bootstrap_user_data = <<-EOT
echo "you are free little kubelet!"
echo "you are free little kubelet!"
EOT
instance_type = "m6i.large"
@@ -228,9 +200,8 @@ module "eks" {
launch_template_use_name_prefix = true
launch_template_description = "Self managed node group example launch template"
ebs_optimized = true
vpc_security_group_ids = [aws_security_group.additional.id]
enable_monitoring = true
ebs_optimized = true
enable_monitoring = true
block_device_mappings = {
xvda = {
@@ -241,7 +212,7 @@ module "eks" {
iops = 3000
throughput = 150
encrypted = true
kms_key_id = aws_kms_key.ebs.arn
kms_key_id = module.ebs_kms_key.key_id
delete_on_termination = true
}
}
@@ -267,34 +238,9 @@ module "eks" {
iam_role_tags = {
Purpose = "Protector of the kubelet"
}
iam_role_additional_policies = [
"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
]
create_security_group = true
security_group_name = "self-managed-node-group-complete-example"
security_group_use_name_prefix = false
security_group_description = "Self managed node group complete example security group"
security_group_rules = {
phoneOut = {
description = "Hello CloudFlare"
protocol = "udp"
from_port = 53
to_port = 53
type = "egress"
cidr_blocks = ["1.1.1.1/32"]
}
phoneHome = {
description = "Hello cluster"
protocol = "udp"
from_port = 53
to_port = 53
type = "egress"
source_cluster_security_group = true # bit of reflection lookup
}
}
security_group_tags = {
Purpose = "Protector of the kubelet"
iam_role_additional_policies = {
AmazonEC2ContainerRegistryReadOnly = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
additional = aws_iam_policy.additional.arn
}
timeouts = {
@@ -321,11 +267,12 @@ module "vpc" {
version = "~> 3.0"
name = local.name
cidr = "10.0.0.0/16"
cidr = local.vpc_cidr
azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
azs = local.azs
private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)]
enable_nat_gateway = true
single_nat_gateway = true
@@ -336,44 +283,16 @@ module "vpc" {
create_flow_log_cloudwatch_log_group = true
public_subnet_tags = {
"kubernetes.io/cluster/${local.name}" = "shared"
"kubernetes.io/role/elb" = 1
"kubernetes.io/role/elb" = 1
}
private_subnet_tags = {
"kubernetes.io/cluster/${local.name}" = "shared"
"kubernetes.io/role/internal-elb" = 1
"kubernetes.io/role/internal-elb" = 1
}
tags = local.tags
}
resource "aws_security_group" "additional" {
name_prefix = "${local.name}-additional"
vpc_id = module.vpc.vpc_id
ingress {
from_port = 22
to_port = 22
protocol = "tcp"
cidr_blocks = [
"10.0.0.0/8",
"172.16.0.0/12",
"192.168.0.0/16",
]
}
tags = local.tags
}
resource "aws_kms_key" "eks" {
description = "EKS Secret Encryption Key"
deletion_window_in_days = 7
enable_key_rotation = true
tags = local.tags
}
data "aws_ami" "eks_default" {
most_recent = true
owners = ["amazon"]
@@ -394,19 +313,37 @@ data "aws_ami" "eks_default_bottlerocket" {
}
}
resource "tls_private_key" "this" {
algorithm = "RSA"
module "key_pair" {
source = "terraform-aws-modules/key-pair/aws"
version = "~> 2.0"
key_name_prefix = local.name
create_private_key = true
tags = local.tags
}
resource "aws_key_pair" "this" {
key_name = local.name
public_key = tls_private_key.this.public_key_openssh
}
module "ebs_kms_key" {
source = "terraform-aws-modules/kms/aws"
version = "~> 1.1"
resource "aws_kms_key" "ebs" {
description = "Customer managed key to encrypt self managed node group volumes"
deletion_window_in_days = 7
policy = data.aws_iam_policy_document.ebs.json
description = "Customer managed key to encrypt EKS managed node group volumes"
# Policy
key_administrators = [
data.aws_caller_identity.current.arn
]
key_service_users = [
# required for the ASG to manage encrypted volumes for nodes
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling",
# required for the cluster / persistentvolume-controller to create encrypted PVCs
module.eks.cluster_iam_role_arn,
]
# Aliases
aliases = ["eks/${local.name}/ebs"]
tags = local.tags
}
resource "aws_ec2_capacity_reservation" "targeted" {
@@ -417,58 +354,22 @@ resource "aws_ec2_capacity_reservation" "targeted" {
instance_match_criteria = "targeted"
}
# This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes
data "aws_iam_policy_document" "ebs" {
# Copy of default KMS policy that lets you manage it
statement {
sid = "Enable IAM User Permissions"
actions = ["kms:*"]
resources = ["*"]
resource "aws_iam_policy" "additional" {
name = "${local.name}-additional"
description = "Example usage of node additional policy"
principals {
type = "AWS"
identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
}
}
# Required for EKS
statement {
sid = "Allow service-linked role use of the CMK"
actions = [
"kms:Encrypt",
"kms:Decrypt",
"kms:ReEncrypt*",
"kms:GenerateDataKey*",
"kms:DescribeKey"
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Action = [
"ec2:Describe*",
]
Effect = "Allow"
Resource = "*"
},
]
resources = ["*"]
})
principals {
type = "AWS"
identifiers = [
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
]
}
}
statement {
sid = "Allow attachment of persistent resources"
actions = ["kms:CreateGrant"]
resources = ["*"]
principals {
type = "AWS"
identifiers = [
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
]
}
condition {
test = "Bool"
variable = "kms:GrantIsForAWSResource"
values = ["true"]
}
}
tags = local.tags
}

View File

@@ -17,14 +17,14 @@ output "cluster_endpoint" {
value = module.eks.cluster_endpoint
}
output "cluster_name" {
description = "The name of the EKS cluster. Will block on cluster creation until the cluster is really ready"
value = module.eks.cluster_name
output "cluster_id" {
description = "The ID of the EKS cluster. Note: currently a value is returned only for local EKS clusters created on Outposts"
value = module.eks.cluster_id
}
output "cluster_id" {
description = "The id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
value = module.eks.cluster_id
output "cluster_name" {
description = "The name of the EKS cluster"
value = module.eks.cluster_name
}
output "cluster_oidc_issuer_url" {

View File

@@ -1,14 +1,10 @@
terraform {
required_version = ">= 0.13.1"
required_version = ">= 1.0"
required_providers {
aws = {
source = "hashicorp/aws"
version = ">= 3.72"
}
tls = {
source = "hashicorp/tls"
version = ">= 3.0"
version = ">= 4.45"
}
kubernetes = {
source = "hashicorp/kubernetes"

View File

@@ -17,8 +17,7 @@ $ terraform apply
| Name | Version |
|------|---------|
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.72 |
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 1.0 |
## Providers

View File

@@ -19,7 +19,7 @@ module "eks_mng_linux_additional" {
source = "../../modules/_user_data"
pre_bootstrap_user_data = <<-EOT
export CONTAINER_RUNTIME="containerd"
export CONTAINER_RUNTIME="containerd"
EOT
}
@@ -34,14 +34,14 @@ module "eks_mng_linux_custom_ami" {
enable_bootstrap_user_data = true
pre_bootstrap_user_data = <<-EOT
export CONTAINER_RUNTIME="containerd"
export USE_MAX_PODS=false
export CONTAINER_RUNTIME="containerd"
export USE_MAX_PODS=false
EOT
bootstrap_extra_args = "--container-runtime containerd --kubelet-extra-args '--max-pods=20 --instance-type t3a.large'"
post_bootstrap_user_data = <<-EOT
echo "All done"
echo "All done"
EOT
}
@@ -56,14 +56,14 @@ module "eks_mng_linux_custom_template" {
user_data_template_path = "${path.module}/templates/linux_custom.tpl"
pre_bootstrap_user_data = <<-EOT
echo "foo"
export FOO=bar
echo "foo"
export FOO=bar
EOT
bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
post_bootstrap_user_data = <<-EOT
echo "All done"
echo "All done"
EOT
}
@@ -80,9 +80,9 @@ module "eks_mng_bottlerocket_additional" {
platform = "bottlerocket"
bootstrap_extra_args = <<-EOT
# extra args added
[settings.kernel]
lockdown = "integrity"
# extra args added
[settings.kernel]
lockdown = "integrity"
EOT
}
@@ -98,9 +98,9 @@ module "eks_mng_bottlerocket_custom_ami" {
enable_bootstrap_user_data = true
bootstrap_extra_args = <<-EOT
# extra args added
[settings.kernel]
lockdown = "integrity"
# extra args added
[settings.kernel]
lockdown = "integrity"
EOT
}
@@ -116,9 +116,9 @@ module "eks_mng_bottlerocket_custom_template" {
user_data_template_path = "${path.module}/templates/bottlerocket_custom.tpl"
bootstrap_extra_args = <<-EOT
# extra args added
[settings.kernel]
lockdown = "integrity"
# extra args added
[settings.kernel]
lockdown = "integrity"
EOT
}
@@ -140,14 +140,14 @@ module "self_mng_linux_bootstrap" {
cluster_auth_base64 = local.cluster_auth_base64
pre_bootstrap_user_data = <<-EOT
echo "foo"
export FOO=bar
echo "foo"
export FOO=bar
EOT
bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
post_bootstrap_user_data = <<-EOT
echo "All done"
echo "All done"
EOT
}
@@ -164,14 +164,14 @@ module "self_mng_linux_custom_template" {
user_data_template_path = "${path.module}/templates/linux_custom.tpl"
pre_bootstrap_user_data = <<-EOT
echo "foo"
export FOO=bar
echo "foo"
export FOO=bar
EOT
bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
post_bootstrap_user_data = <<-EOT
echo "All done"
echo "All done"
EOT
}
@@ -197,9 +197,9 @@ module "self_mng_bottlerocket_bootstrap" {
cluster_auth_base64 = local.cluster_auth_base64
bootstrap_extra_args = <<-EOT
# extra args added
[settings.kernel]
lockdown = "integrity"
# extra args added
[settings.kernel]
lockdown = "integrity"
EOT
}
@@ -218,9 +218,9 @@ module "self_mng_bottlerocket_custom_template" {
user_data_template_path = "${path.module}/templates/bottlerocket_custom.tpl"
bootstrap_extra_args = <<-EOT
# extra args added
[settings.kernel]
lockdown = "integrity"
# extra args added
[settings.kernel]
lockdown = "integrity"
EOT
}
@@ -246,13 +246,13 @@ module "self_mng_windows_bootstrap" {
cluster_auth_base64 = local.cluster_auth_base64
pre_bootstrap_user_data = <<-EOT
[string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
[string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
EOT
# I don't know if this is the right way on Windows, but it's just a string check here anyway
bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot"
post_bootstrap_user_data = <<-EOT
[string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
[string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
EOT
}
@@ -271,12 +271,12 @@ module "self_mng_windows_custom_template" {
user_data_template_path = "${path.module}/templates/windows_custom.tpl"
pre_bootstrap_user_data = <<-EOT
[string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
[string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
EOT
# I don't know if this is the right way on Windows, but it's just a string check here anyway
bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot"
post_bootstrap_user_data = <<-EOT
[string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
[string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
EOT
}

View File

@@ -1,10 +1,3 @@
terraform {
required_version = ">= 0.13.1"
required_providers {
aws = {
source = "hashicorp/aws"
version = ">= 3.72"
}
}
required_version = ">= 1.0"
}