mirror of
https://github.com/ysoftdevs/terraform-aws-eks.git
synced 2026-04-30 12:24:34 +02:00
feat!: Removed support for launch configuration and replace count with for_each (#1680)
This commit is contained in:
95
examples/eks_managed_node_group/README.md
Normal file
95
examples/eks_managed_node_group/README.md
Normal file
@@ -0,0 +1,95 @@
|
||||
# EKS Managed Node Group Example
|
||||
|
||||
Configuration in this directory creates an AWS EKS cluster with various EKS Managed Node Groups demonstrating the various methods of configuring/customizing:
|
||||
|
||||
- A default, "out of the box" EKS managed node group as supplied by AWS EKS
|
||||
- A default, "out of the box" Bottlerocket EKS managed node group as supplied by AWS EKS
|
||||
- A Bottlerocket EKS managed node group that supplies additional bootstrap settings
|
||||
- A Bottlerocket EKS managed node group that demonstrates many of the configuration/customizations offered by the `eks-managed-node-group` sub-module for the Bottlerocket OS
|
||||
- An EKS managed node group created from a launch template created outside of the module
|
||||
- An EKS managed node group that utilizes a custom AMI that is an EKS optimized AMI derivative
|
||||
- An EKS managed node group that demonstrates nearly all of the configurations/customizations offered by the `eks-managed-node-group` sub-module
|
||||
|
||||
See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for further details.
|
||||
|
||||
## Usage
|
||||
|
||||
To run this example you need to execute:
|
||||
|
||||
```bash
|
||||
$ terraform init
|
||||
$ terraform plan
|
||||
$ terraform apply
|
||||
```
|
||||
|
||||
Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
|
||||
|
||||
<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
## Requirements
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
|
||||
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.64 |
|
||||
| <a name="requirement_null"></a> [null](#requirement\_null) | >= 3.0 |
|
||||
|
||||
## Providers
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.64 |
|
||||
| <a name="provider_null"></a> [null](#provider\_null) | >= 3.0 |
|
||||
|
||||
## Modules
|
||||
|
||||
| Name | Source | Version |
|
||||
|------|--------|---------|
|
||||
| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
|
||||
| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
|
||||
|
||||
## Resources
|
||||
|
||||
| Name | Type |
|
||||
|------|------|
|
||||
| [aws_kms_key.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
|
||||
| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
|
||||
| [aws_launch_template.external](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
|
||||
| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
|
||||
| [null_resource.patch](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
|
||||
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
|
||||
| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
|
||||
| [aws_iam_policy_document.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
|
||||
|
||||
## Inputs
|
||||
|
||||
No inputs.
|
||||
|
||||
## Outputs
|
||||
|
||||
| Name | Description |
|
||||
|------|-------------|
|
||||
| <a name="output_aws_auth_configmap_yaml"></a> [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
|
||||
| <a name="output_cloudwatch_log_group_arn"></a> [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
|
||||
| <a name="output_cloudwatch_log_group_name"></a> [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
|
||||
| <a name="output_cluster_addons"></a> [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
|
||||
| <a name="output_cluster_arn"></a> [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
|
||||
| <a name="output_cluster_certificate_authority_data"></a> [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
|
||||
| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
|
||||
| <a name="output_cluster_iam_role_arn"></a> [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
|
||||
| <a name="output_cluster_iam_role_name"></a> [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
|
||||
| <a name="output_cluster_iam_role_unique_id"></a> [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
|
||||
| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
|
||||
| <a name="output_cluster_identity_providers"></a> [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
|
||||
| <a name="output_cluster_oidc_issuer_url"></a> [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
|
||||
| <a name="output_cluster_platform_version"></a> [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
|
||||
| <a name="output_cluster_primary_security_group_id"></a> [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
|
||||
| <a name="output_cluster_security_group_arn"></a> [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
|
||||
| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
|
||||
| <a name="output_cluster_status"></a> [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
|
||||
| <a name="output_eks_managed_node_groups"></a> [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
|
||||
| <a name="output_fargate_profiles"></a> [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
|
||||
| <a name="output_node_security_group_arn"></a> [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
|
||||
| <a name="output_node_security_group_id"></a> [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
|
||||
| <a name="output_oidc_provider_arn"></a> [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
|
||||
| <a name="output_self_managed_node_groups"></a> [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
|
||||
<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
490
examples/eks_managed_node_group/main.tf
Normal file
490
examples/eks_managed_node_group/main.tf
Normal file
@@ -0,0 +1,490 @@
|
||||
# AWS provider for the example; region comes from the locals below.
provider "aws" {
  region = local.region
}

locals {
  # Example name derived from the directory name (underscores -> dashes),
  # e.g. "ex-eks-managed-node-group".
  name            = "ex-${replace(basename(path.cwd), "_", "-")}"
  cluster_version = "1.21"
  region          = "eu-west-1"

  # Common tags applied to every taggable resource in this example.
  tags = {
    Example    = local.name
    GithubRepo = "terraform-aws-eks"
    GithubOrg  = "terraform-aws-modules"
  }
}

# Used below to build account-scoped IAM principal ARNs for the EBS KMS key policy.
data "aws_caller_identity" "current" {}
|
||||
|
||||
################################################################################
# EKS Module
################################################################################

module "eks" {
  source = "../.."

  cluster_name              = local.name
  cluster_version           = local.cluster_version
  cluster_service_ipv4_cidr = "172.16.0.0/16"

  # API server reachable from both inside the VPC and the public internet.
  cluster_endpoint_private_access = true
  cluster_endpoint_public_access  = true

  cluster_addons = {
    coredns = {
      resolve_conflicts = "OVERWRITE"
    }
    kube-proxy = {}
    vpc-cni = {
      resolve_conflicts = "OVERWRITE"
    }
  }

  # Envelope-encrypt Kubernetes secrets with the customer managed key below.
  cluster_encryption_config = [{
    provider_key_arn = aws_kms_key.eks.arn
    resources        = ["secrets"]
  }]

  vpc_id     = module.vpc.vpc_id
  subnet_ids = module.vpc.private_subnets

  enable_irsa = true

  # Defaults merged into every node group defined below.
  eks_managed_node_group_defaults = {
    ami_type       = "AL2_x86_64"
    disk_size      = 50
    instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
  }

  eks_managed_node_groups = {
    # Default node group - as provided by AWS EKS
    default_node_group = {}

    # Default node group - as provided by AWS EKS using Bottlerocket
    bottlerocket_default = {
      ami_type = "BOTTLEROCKET_x86_64"
      platform = "bottlerocket"
    }

    # Adds to the AWS provided user data
    bottlerocket_add = {
      ami_type = "BOTTLEROCKET_x86_64"
      platform = "bottlerocket"

      # this will get added to what AWS provides
      bootstrap_extra_args = <<-EOT
      # extra args added
      [settings.kernel]
      lockdown = "integrity"
      EOT
    }

    # Custom AMI, using module provided bootstrap data
    bottlerocket_custom = {
      # Current bottlerocket AMI
      ami_id   = "ami-0ff61e0bcfc81dc94"
      platform = "bottlerocket"

      # use module user data template to bootstrap
      enable_bootstrap_user_data = true
      # this will get added to the template
      bootstrap_extra_args = <<-EOT
      # extra args added
      [settings.kernel]
      lockdown = "integrity"

      [settings.kubernetes.node-labels]
      "label1" = "foo"
      "label2" = "bar"

      [settings.kubernetes.node-taints]
      "dedicated" = "experimental:PreferNoSchedule"
      "special" = "true:NoSchedule"
      EOT
    }

    # Use existing/external launch template
    external_lt = {
      create_launch_template  = false
      launch_template_name    = aws_launch_template.external.name
      launch_template_version = aws_launch_template.external.default_version
    }

    # Use a custom AMI
    custom_ami = {
      # Current default AMI used by managed node groups - pseudo "custom"
      ami_id = "ami-0caf35bc73450c396"

      # This will ensure the bootstrap user data is used to join the node
      # By default, EKS managed node groups will not append bootstrap script;
      # this adds it back in using the default template provided by the module
      # Note: this assumes the AMI provided is an EKS optimized AMI derivative
      enable_bootstrap_user_data = true
    }

    # Complete: exercises nearly every option the sub-module supports.
    complete = {
      name            = "complete-eks-mng"
      use_name_prefix = false

      subnet_ids = module.vpc.private_subnets

      min_size     = 1
      max_size     = 7
      desired_size = 1

      ami_id                     = "ami-0caf35bc73450c396"
      enable_bootstrap_user_data = true
      bootstrap_extra_args       = "--container-runtime containerd --kubelet-extra-args '--max-pods=20'"

      # Runs before the EKS bootstrap script in the rendered user data.
      pre_bootstrap_user_data = <<-EOT
      export CONTAINER_RUNTIME="containerd"
      export USE_MAX_PODS=false
      EOT

      # Runs after the EKS bootstrap script in the rendered user data.
      post_bootstrap_user_data = <<-EOT
      echo "you are free little kubelet!"
      EOT

      capacity_type        = "SPOT"
      disk_size            = 256
      force_update_version = true
      instance_types       = ["m6i.large", "m5.large", "m5n.large", "m5zn.large", "m3.large", "m4.large"]
      labels = {
        GithubRepo = "terraform-aws-eks"
        GithubOrg  = "terraform-aws-modules"
      }

      taints = [
        {
          key    = "dedicated"
          value  = "gpuGroup"
          effect = "NO_SCHEDULE"
        }
      ]

      update_config = {
        max_unavailable_percentage = 50 # or set `max_unavailable`
      }

      description = "EKS managed node group example launch template"

      ebs_optimized           = true
      vpc_security_group_ids  = [aws_security_group.additional.id]
      disable_api_termination = false
      enable_monitoring       = true

      # Root volume encrypted with the customer managed EBS key defined below.
      block_device_mappings = {
        xvda = {
          device_name = "/dev/xvda"
          ebs = {
            volume_size           = 75
            volume_type           = "gp3"
            iops                  = 3000
            throughput            = 150
            encrypted             = true
            kms_key_id            = aws_kms_key.ebs.arn
            delete_on_termination = true
          }
        }
      }

      # IMDSv2 required; hop limit 2 so containers can still reach the metadata service.
      metadata_options = {
        http_endpoint               = "enabled"
        http_tokens                 = "required"
        http_put_response_hop_limit = 2
      }

      create_iam_role          = true
      iam_role_name            = "eks-managed-node-group-complete-example"
      iam_role_use_name_prefix = false
      iam_role_description     = "EKS managed node group complete example role"
      iam_role_tags = {
        Purpose = "Protector of the kubelet"
      }
      iam_role_additional_policies = [
        "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
      ]

      create_security_group          = true
      security_group_name            = "eks-managed-node-group-complete-example"
      security_group_use_name_prefix = false
      security_group_description     = "EKS managed node group complete example security group"
      security_group_rules = {
        phoneOut = {
          description = "Hello CloudFlare"
          protocol    = "udp"
          from_port   = 53
          to_port     = 53
          type        = "egress"
          cidr_blocks = ["1.1.1.1/32"]
        }
        phoneHome = {
          description                   = "Hello cluster"
          protocol                      = "udp"
          from_port                     = 53
          to_port                       = 53
          type                          = "egress"
          source_cluster_security_group = true # bit of reflection lookup
        }
      }
      security_group_tags = {
        Purpose = "Protector of the kubelet"
      }

      tags = {
        ExtraTag = "EKS managed node group complete example"
      }
    }
  }

  tags = local.tags
}
|
||||
|
||||
################################################################################
# aws-auth configmap
# Only EKS managed node groups automatically add roles to aws-auth configmap
# so we need to ensure fargate profiles and self-managed node roles are added
################################################################################

# Short-lived auth token used by the kubectl patch command to reach the cluster.
data "aws_eks_cluster_auth" "this" {
  name = module.eks.cluster_id
}
|
||||
|
||||
locals {
  # Minimal single-cluster/single-user kubeconfig rendered as YAML; it is
  # base64-encoded and handed to kubectl via the KUBECONFIG environment
  # variable in null_resource.patch below.
  kubeconfig = yamlencode({
    apiVersion      = "v1"
    kind            = "Config"
    current-context = "terraform"
    clusters = [{
      name = module.eks.cluster_id
      cluster = {
        certificate-authority-data = module.eks.cluster_certificate_authority_data
        server                     = module.eks.cluster_endpoint
      }
    }]
    contexts = [{
      name = "terraform"
      context = {
        cluster = module.eks.cluster_id
        user    = "terraform"
      }
    }]
    users = [{
      name = "terraform"
      user = {
        # Token from the aws_eks_cluster_auth data source above.
        token = data.aws_eks_cluster_auth.this.token
      }
    }]
  })
}
|
||||
|
||||
# Patches the aws-auth configmap in kube-system with the module-rendered role
# mappings, using a throwaway kubeconfig passed through the environment.
resource "null_resource" "patch" {
  # Re-runs whenever the kubeconfig or the rendered patch content changes.
  triggers = {
    kubeconfig = base64encode(local.kubeconfig)
    cmd_patch  = "kubectl patch configmap/aws-auth --patch \"${module.eks.aws_auth_configmap_yaml}\" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)"
  }

  provisioner "local-exec" {
    # bash is required for the <(...) process substitution in the command.
    interpreter = ["/bin/bash", "-c"]
    environment = {
      KUBECONFIG = self.triggers.kubeconfig
    }
    command = self.triggers.cmd_patch
  }
}
|
||||
|
||||
################################################################################
# Supporting Resources
################################################################################

# VPC with three private + three public subnets across three AZs; nodes run in
# the private subnets behind a single NAT gateway.
module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 3.0"

  name = local.name
  cidr = "10.0.0.0/16"

  azs             = ["${local.region}a", "${local.region}b", "${local.region}c"]
  private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
  public_subnets  = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]

  enable_nat_gateway   = true
  single_nat_gateway   = true
  enable_dns_hostnames = true

  enable_flow_log                      = true
  create_flow_log_cloudwatch_iam_role  = true
  create_flow_log_cloudwatch_log_group = true

  # Subnet tags used by Kubernetes/EKS for load balancer placement.
  public_subnet_tags = {
    "kubernetes.io/cluster/${local.name}" = "shared"
    "kubernetes.io/role/elb"              = 1
  }

  private_subnet_tags = {
    "kubernetes.io/cluster/${local.name}" = "shared"
    "kubernetes.io/role/internal-elb"     = 1
  }

  tags = local.tags
}
|
||||
|
||||
# Extra security group attached to the "complete" node group via
# vpc_security_group_ids; allows SSH from RFC1918 private ranges only.
resource "aws_security_group" "additional" {
  name_prefix = "${local.name}-additional"
  vpc_id      = module.vpc.vpc_id

  ingress {
    from_port = 22
    to_port   = 22
    protocol  = "tcp"
    cidr_blocks = [
      "10.0.0.0/8",
      "172.16.0.0/12",
      "192.168.0.0/16",
    ]
  }

  tags = local.tags
}
|
||||
|
||||
# Customer managed key for EKS secrets envelope encryption
# (referenced by cluster_encryption_config in the eks module above).
resource "aws_kms_key" "eks" {
  description             = "EKS Secret Encryption Key"
  deletion_window_in_days = 7
  enable_key_rotation     = true

  tags = local.tags
}
|
||||
|
||||
# Customer managed key for node group EBS root volumes; its key policy is
# supplied by the aws_iam_policy_document.ebs data source below so the
# service-linked roles can use the key.
resource "aws_kms_key" "ebs" {
  description             = "Customer managed key to encrypt EKS managed node group volumes"
  deletion_window_in_days = 7
  policy                  = data.aws_iam_policy_document.ebs.json
}
|
||||
|
||||
# This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes
data "aws_iam_policy_document" "ebs" {
  # Copy of default KMS policy that lets you manage it
  statement {
    sid       = "Enable IAM User Permissions"
    actions   = ["kms:*"]
    resources = ["*"]

    principals {
      type        = "AWS"
      identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
    }
  }

  # Required for EKS
  statement {
    sid = "Allow service-linked role use of the CMK"
    actions = [
      "kms:Encrypt",
      "kms:Decrypt",
      "kms:ReEncrypt*",
      "kms:GenerateDataKey*",
      "kms:DescribeKey"
    ]
    resources = ["*"]

    principals {
      type = "AWS"
      identifiers = [
        "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
        module.eks.cluster_iam_role_arn,                                                                                                            # required for the cluster / persistentvolume-controller to create encrypted PVCs
      ]
    }
  }

  statement {
    sid       = "Allow attachment of persistent resources"
    actions   = ["kms:CreateGrant"]
    resources = ["*"]

    principals {
      type = "AWS"
      identifiers = [
        "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
        module.eks.cluster_iam_role_arn,                                                                                                            # required for the cluster / persistentvolume-controller to create encrypted PVCs
      ]
    }

    # Grants may only be created on behalf of AWS services, not arbitrary principals.
    condition {
      test     = "Bool"
      variable = "kms:GrantIsForAWSResource"
      values   = ["true"]
    }
  }
}
|
||||
|
||||
# This is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx)
# there are several more options one could set but you probably don't need to modify them
# you can take the default and add your custom AMI and/or custom tags
#
# Trivia: AWS transparently creates a copy of your LaunchTemplate and actually uses that copy then for the node group. If you DON'T use a custom AMI,
# then the default user-data for bootstrapping a cluster is merged in the copy.

resource "aws_launch_template" "external" {
  name_prefix            = "external-eks-ex-"
  description            = "EKS managed node group external launch template"
  update_default_version = true

  block_device_mappings {
    device_name = "/dev/xvda"

    ebs {
      volume_size           = 100
      volume_type           = "gp2"
      delete_on_termination = true
    }
  }

  monitoring {
    enabled = true
  }

  network_interfaces {
    associate_public_ip_address = false
    delete_on_termination       = true
  }

  # if you want to use a custom AMI
  # image_id = var.ami_id

  # If you use a custom AMI, you need to supply via user-data, the bootstrap script as EKS DOESN'T merge its managed user-data then
  # you can add more than the minimum code you see in the template, e.g. install SSM agent, see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
  # (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151)
  # user_data = base64encode(data.template_file.launch_template_userdata.rendered)

  tag_specifications {
    resource_type = "instance"

    tags = {
      CustomTag = "Instance custom tag"
    }
  }

  tag_specifications {
    resource_type = "volume"

    tags = {
      CustomTag = "Volume custom tag"
    }
  }

  tag_specifications {
    resource_type = "network-interface"

    tags = {
      CustomTag = "EKS example"
    }
  }

  tags = {
    CustomTag = "Launch template custom tag"
  }

  # Create the replacement template before destroying the old one so the node
  # group never references a missing template.
  lifecycle {
    create_before_destroy = true
  }
}
|
||||
167
examples/eks_managed_node_group/outputs.tf
Normal file
167
examples/eks_managed_node_group/outputs.tf
Normal file
@@ -0,0 +1,167 @@
|
||||
# Outputs re-exporting the example EKS module's attributes for inspection
# after `terraform apply`.

################################################################################
# Cluster
################################################################################

output "cluster_arn" {
  description = "The Amazon Resource Name (ARN) of the cluster"
  value       = module.eks.cluster_arn
}

output "cluster_certificate_authority_data" {
  description = "Base64 encoded certificate data required to communicate with the cluster"
  value       = module.eks.cluster_certificate_authority_data
}

output "cluster_endpoint" {
  description = "Endpoint for your Kubernetes API server"
  value       = module.eks.cluster_endpoint
}

output "cluster_id" {
  description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
  value       = module.eks.cluster_id
}

output "cluster_oidc_issuer_url" {
  description = "The URL on the EKS cluster for the OpenID Connect identity provider"
  value       = module.eks.cluster_oidc_issuer_url
}

output "cluster_platform_version" {
  description = "Platform version for the cluster"
  value       = module.eks.cluster_platform_version
}

output "cluster_status" {
  description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
  value       = module.eks.cluster_status
}

output "cluster_primary_security_group_id" {
  description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
  value       = module.eks.cluster_primary_security_group_id
}

################################################################################
# Security Group
################################################################################

output "cluster_security_group_arn" {
  description = "Amazon Resource Name (ARN) of the cluster security group"
  value       = module.eks.cluster_security_group_arn
}

output "cluster_security_group_id" {
  description = "ID of the cluster security group"
  value       = module.eks.cluster_security_group_id
}

################################################################################
# Node Security Group
################################################################################

output "node_security_group_arn" {
  description = "Amazon Resource Name (ARN) of the node shared security group"
  value       = module.eks.node_security_group_arn
}

output "node_security_group_id" {
  description = "ID of the node shared security group"
  value       = module.eks.node_security_group_id
}

################################################################################
# IRSA
################################################################################

output "oidc_provider_arn" {
  description = "The ARN of the OIDC Provider if `enable_irsa = true`"
  value       = module.eks.oidc_provider_arn
}

################################################################################
# IAM Role
################################################################################

output "cluster_iam_role_name" {
  description = "IAM role name of the EKS cluster"
  value       = module.eks.cluster_iam_role_name
}

output "cluster_iam_role_arn" {
  description = "IAM role ARN of the EKS cluster"
  value       = module.eks.cluster_iam_role_arn
}

output "cluster_iam_role_unique_id" {
  description = "Stable and unique string identifying the IAM role"
  value       = module.eks.cluster_iam_role_unique_id
}

################################################################################
# EKS Addons
################################################################################

output "cluster_addons" {
  description = "Map of attribute maps for all EKS cluster addons enabled"
  value       = module.eks.cluster_addons
}

################################################################################
# EKS Identity Provider
################################################################################

output "cluster_identity_providers" {
  description = "Map of attribute maps for all EKS identity providers enabled"
  value       = module.eks.cluster_identity_providers
}

################################################################################
# CloudWatch Log Group
################################################################################

output "cloudwatch_log_group_name" {
  description = "Name of cloudwatch log group created"
  value       = module.eks.cloudwatch_log_group_name
}

output "cloudwatch_log_group_arn" {
  description = "Arn of cloudwatch log group created"
  value       = module.eks.cloudwatch_log_group_arn
}

################################################################################
# Fargate Profile
################################################################################

output "fargate_profiles" {
  description = "Map of attribute maps for all EKS Fargate Profiles created"
  value       = module.eks.fargate_profiles
}

################################################################################
# EKS Managed Node Group
################################################################################

output "eks_managed_node_groups" {
  description = "Map of attribute maps for all EKS managed node groups created"
  value       = module.eks.eks_managed_node_groups
}

################################################################################
# Self Managed Node Group
################################################################################

output "self_managed_node_groups" {
  description = "Map of attribute maps for all self managed node groups created"
  value       = module.eks.self_managed_node_groups
}

################################################################################
# Additional
################################################################################

output "aws_auth_configmap_yaml" {
  description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
  value       = module.eks.aws_auth_configmap_yaml
}
|
||||
0
examples/eks_managed_node_group/variables.tf
Normal file
0
examples/eks_managed_node_group/variables.tf
Normal file
14
examples/eks_managed_node_group/versions.tf
Normal file
14
examples/eks_managed_node_group/versions.tf
Normal file
@@ -0,0 +1,14 @@
|
||||
# Minimum Terraform and provider versions required by this example.
terraform {
  required_version = ">= 0.13.1"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 3.64"
    }
    # `null` is used by the aws-auth configmap patch resource in main.tf.
    null = {
      source  = "hashicorp/null"
      version = ">= 3.0"
    }
  }
}
|
||||
Reference in New Issue
Block a user