diff --git a/README.md b/README.md
index 124c57a..901dff3 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,6 @@
# AWS EKS Terraform module
-Terraform module which creates AWS EKS (Kubernetes) resources
+Terraform module which creates Amazon EKS (Kubernetes) resources
@@ -23,13 +23,6 @@ Please note that we strive to provide a comprehensive suite of documentation for
- [AWS EKS Documentation](https://docs.aws.amazon.com/eks/latest/userguide/getting-started.html)
- [Kubernetes Documentation](https://kubernetes.io/docs/home/)
-#### Reference Architecture
-
-The examples provided under `examples/` provide a comprehensive suite of configurations that demonstrate nearly all of the possible different configurations and settings that can be used with this module. However, these examples are not representative of clusters that you would normally find in use for production workloads. For reference architectures that utilize this module, please see the following:
-
-- [EKS Reference Architecture](https://github.com/clowdhaus/eks-reference-architecture)
-- [EKS Blueprints](https://github.com/aws-ia/terraform-aws-eks-blueprints)
-
## Usage
```hcl
@@ -38,20 +31,15 @@ module "eks" {
version = "~> 20.0"
cluster_name = "my-cluster"
- cluster_version = "1.29"
+ cluster_version = "1.30"
cluster_endpoint_public_access = true
cluster_addons = {
- coredns = {
- most_recent = true
- }
- kube-proxy = {
- most_recent = true
- }
- vpc-cni = {
- most_recent = true
- }
+ coredns = {}
+ eks-pod-identity-agent = {}
+ kube-proxy = {}
+ vpc-cni = {}
}
vpc_id = "vpc-1234556abcdef"
@@ -65,12 +53,13 @@ module "eks" {
eks_managed_node_groups = {
example = {
- min_size = 1
- max_size = 10
- desired_size = 1
+ # Starting with 1.30, AL2023 is the default AMI type for EKS managed node groups
+ ami_type = "AL2023_x86_64_STANDARD"
+ instance_types = ["m5.xlarge"]
- instance_types = ["t3.large"]
- capacity_type = "SPOT"
+ min_size = 2
+ max_size = 10
+ desired_size = 2
}
}
@@ -105,7 +94,7 @@ module "eks" {
### Cluster Access Entry
-When enabling `authentication_mode = "API_AND_CONFIG_MAP"`, EKS will automatically create an access entry for the IAM role(s) used by managed nodegroup(s) and Fargate profile(s). There are no additional actions required by users. For self-managed nodegroups and the Karpenter sub-module, this project automatically adds the access entry on behalf of users so there are no additional actions required by users.
+When enabling `authentication_mode = "API_AND_CONFIG_MAP"`, EKS will automatically create an access entry for the IAM role(s) used by managed node group(s) and Fargate profile(s); no additional action is required by users. For self-managed node groups and the Karpenter sub-module, this project automatically adds the access entry on behalf of users, so again no additional action is required.
On clusters that were created prior to cluster access management (CAM) support, there will be an existing access entry for the cluster creator. This was previously not visible when using the `aws-auth` ConfigMap, but will become visible when cluster access entries are enabled.
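+
+As an illustrative sketch, additional IAM principals can be granted access via the `access_entries` input (the role ARN, policy, and namespace below are placeholders):
+
+```hcl
+module "eks" {
+  # Truncated for brevity ...
+
+  access_entries = {
+    # One access entry with a policy association
+    example = {
+      principal_arn = "arn:aws:iam::123456789012:role/something"
+
+      policy_associations = {
+        example = {
+          policy_arn = "arn:aws:eks::aws:cluster-access-policy/AmazonEKSViewPolicy"
+          access_scope = {
+            namespaces = ["default"]
+            type       = "namespace"
+          }
+        }
+      }
+    }
+  }
+}
+```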
@@ -115,10 +104,10 @@ Setting the `bootstrap_cluster_creator_admin_permissions` is a one time operatio
### Enabling EFA Support
-When enabling EFA support via `enable_efa_support = true`, there are two locations this can be specified - one at the cluster level, and one at the nodegroup level. Enabling at the cluster level will add the EFA required ingress/egress rules to the shared security group created for the nodegroup(s). Enabling at the nodegroup level will do the following (per nodegroup where enabled):
+When enabling EFA support via `enable_efa_support = true`, there are two locations this can be specified - one at the cluster level, and one at the node group level. Enabling at the cluster level will add the EFA required ingress/egress rules to the shared security group created for the node group(s). Enabling at the node group level will do the following (per node group where enabled):
-1. All EFA interfaces supported by the instance will be exposed on the launch template used by the nodegroup
-2. A placement group with `strategy = "clustered"` per EFA requirements is created and passed to the launch template used by the nodegroup
+1. All EFA interfaces supported by the instance will be exposed on the launch template used by the node group
+2. A placement group with `strategy = "clustered"` per EFA requirements is created and passed to the launch template used by the node group
3. Data sources will reverse lookup the availability zones that support the instance type selected based on the subnets provided, ensuring that only the associated subnets are passed to the launch template and therefore used by the placement group. This avoids the placement group being created in an availability zone that does not support the instance type selected.
> [!TIP]
@@ -126,11 +115,11 @@ When enabling EFA support via `enable_efa_support = true`, there are two locatio
>
> The EKS AL2 GPU AMI comes with the necessary EFA components pre-installed - you just need to expose the EFA devices on the nodes via their launch templates, ensure the required EFA security group rules are in place, and deploy the `aws-efa-k8s-device-plugin` in order to start utilizing EFA within your cluster. Your application container will need to have the necessary libraries and runtime in order to utilize communication over the EFA interfaces (NCCL, aws-ofi-nccl, hwloc, libfabric, aws-neuronx-collectives, CUDA, etc.).
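+
+As a sketch of deploying the device plugin mentioned above (chart name/location assumed from [aws/eks-charts](https://github.com/aws/eks-charts)):
+
+```bash
+helm repo add eks https://aws.github.io/eks-charts
+helm install aws-efa-k8s-device-plugin eks/aws-efa-k8s-device-plugin --namespace kube-system
+```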
-If you disable the creation and use of the managed nodegroup custom launch template (`create_launch_template = false` and/or `use_custom_launch_template = false`), this will interfere with the EFA functionality provided. In addition, if you do not supply an `instance_type` for self-managed nodegroup(s), or `instance_types` for the managed nodegroup(s), this will also interfere with the functionality. In order to support the EFA functionality provided by `enable_efa_support = true`, you must utilize the custom launch template created/provided by this module, and supply an `instance_type`/`instance_types` for the respective nodegroup.
+Disabling the creation and use of the managed node group custom launch template (`create_launch_template = false` and/or `use_custom_launch_template = false`) will interfere with the EFA functionality provided, as will omitting an `instance_type` for self-managed node group(s) or `instance_types` for managed node group(s). To use the EFA functionality provided by `enable_efa_support = true`, you must utilize the custom launch template created/provided by this module and supply an `instance_type`/`instance_types` for the respective node group.
-The logic behind supporting EFA uses a data source to lookup the instance type to retrieve the number of interfaces that the instance supports in order to enumerate and expose those interfaces on the launch template created. For managed nodegroups where a list of instance types are supported, the first instance type in the list is used to calculate the number of EFA interfaces supported. Mixing instance types with varying number of interfaces is not recommended for EFA (or in some cases, mixing instance types is not supported - i.e. - p5.48xlarge and p4d.24xlarge). In addition to exposing the EFA interfaces and updating the security group rules, a placement group is created per the EFA requirements and only the availability zones that support the instance type selected are used in the subnets provided to the nodegroup.
+The logic behind supporting EFA uses a data source to look up the instance type and retrieve the number of interfaces that the instance supports in order to enumerate and expose those interfaces on the launch template created. For managed node groups, where a list of instance types is supported, the first instance type in the list is used to calculate the number of EFA interfaces supported. Mixing instance types with varying numbers of interfaces is not recommended for EFA (and in some cases, mixing instance types is not supported at all - e.g. p5.48xlarge and p4d.24xlarge). In addition to exposing the EFA interfaces and updating the security group rules, a placement group is created per the EFA requirements, and only the availability zones that support the selected instance type are used in the subnets provided to the node group.
-In order to enable EFA support, you will have to specify `enable_efa_support = true` on both the cluster and each nodegroup that you wish to enable EFA support for:
+In order to enable EFA support, you will have to specify `enable_efa_support = true` on both the cluster and each node group that you wish to enable EFA support for:
```hcl
module "eks" {
@@ -140,14 +129,14 @@ module "eks" {
# Truncated for brevity ...
# Adds the EFA required security group rules to the shared
- # security group created for the nodegroup(s)
+ # security group created for the node group(s)
enable_efa_support = true
eks_managed_node_groups = {
example = {
instance_types = ["p5.48xlarge"]
- # Exposes all EFA interfaces on the launch template created by the nodegroup(s)
+ # Exposes all EFA interfaces on the launch template created by the node group(s)
# This would expose all 32 EFA interfaces for the p5.48xlarge instance type
enable_efa_support = true
@@ -169,12 +158,10 @@ module "eks" {
## Examples
-- [EKS Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_managed_node_group): EKS Cluster using EKS managed node groups
-- [Fargate Profile](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate_profile): EKS cluster using [Fargate Profiles](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html)
+- [EKS Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks-managed-node-group): EKS Cluster using EKS managed node groups
- [Karpenter](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/karpenter): EKS Cluster with [Karpenter](https://karpenter.sh/) provisioned for intelligent data plane management
- [Outposts](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/outposts): EKS local cluster provisioned on [AWS Outposts](https://docs.aws.amazon.com/eks/latest/userguide/eks-outposts.html)
-- [Self Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/self_managed_node_group): EKS Cluster using self-managed node groups
-- [User Data](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/user_data): Various supported methods of providing necessary bootstrap scripts and configuration settings via user data
+- [Self Managed Node Group](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/self-managed-node-group): EKS Cluster using self-managed node groups
## Contributing
@@ -290,7 +277,7 @@ We are grateful to the community for contributing bugfixes and improvements! Ple
| [create\_kms\_key](#input\_create\_kms\_key) | Controls if a KMS key for cluster encryption should be created | `bool` | `true` | no |
| [create\_node\_security\_group](#input\_create\_node\_security\_group) | Determines whether to create a security group for the node groups or use the existing `node_security_group_id` | `bool` | `true` | no |
| [custom\_oidc\_thumbprints](#input\_custom\_oidc\_thumbprints) | Additional list of server certificate thumbprints for the OpenID Connect (OIDC) identity provider's server certificate(s) | `list(string)` | `[]` | no |
-| [dataplane\_wait\_duration](#input\_dataplane\_wait\_duration) | Duration to wait after the EKS cluster has become active before creating the dataplane components (EKS managed nodegroup(s), self-managed nodegroup(s), Fargate profile(s)) | `string` | `"30s"` | no |
+| [dataplane\_wait\_duration](#input\_dataplane\_wait\_duration) | Duration to wait after the EKS cluster has become active before creating the dataplane components (EKS managed node group(s), self-managed node group(s), Fargate profile(s)) | `string` | `"30s"` | no |
| [eks\_managed\_node\_group\_defaults](#input\_eks\_managed\_node\_group\_defaults) | Map of EKS managed node group default configurations | `any` | `{}` | no |
| [eks\_managed\_node\_groups](#input\_eks\_managed\_node\_groups) | Map of EKS managed node group definitions to create | `any` | `{}` | no |
| [enable\_cluster\_creator\_admin\_permissions](#input\_enable\_cluster\_creator\_admin\_permissions) | Indicates whether or not to add the cluster creator (the identity used by Terraform) as an administrator via access entry | `bool` | `false` | no |
diff --git a/docs/UPGRADE-20.0.md b/docs/UPGRADE-20.0.md
index 1f16712..6f53e32 100644
--- a/docs/UPGRADE-20.0.md
+++ b/docs/UPGRADE-20.0.md
@@ -19,8 +19,8 @@ To give users advanced notice and provide some future direction for this module,
1. The `aws-auth` sub-module will be removed entirely from the project. Since this sub-module is captured in the v20.x releases, users can continue using it even after the module moves forward with the next major version. The long term strategy and direction is cluster access entry and to rely only on the AWS Terraform provider.
2. The default value for `authentication_mode` will change to `API`. Aligning with point 1 above, this is a one way change, but users are free to specify the value of their choosing in place of this default (when the change is made). This module will proceed with an EKS API first strategy.
-3. The launch template and autoscaling group usage contained within the EKS managed nodegroup and self-managed nodegroup sub-modules *might be replaced with the [`terraform-aws-autoscaling`](https://github.com/terraform-aws-modules/terraform-aws-autoscaling) module. At minimum, it makes sense to replace most of functionality in the self-managed nodegroup module with this external module, but its not yet clear if there is any benefit of using it in the EKS managed nodegroup sub-module. The interface that users interact with will stay the same, the changes will be internal to the implementation and we will do everything we can to keep the disruption to a minimum.
-4. The `platform` variable will be replaced and instead `ami_type` will become the standard across both self-managed nodegroup(s) and EKS managed nodegroup(s). As EKS expands its portfolio of supported operating systems, the `ami_type` is better suited to associate the correct user data format to the respective OS. The `platform` variable is a legacy artifact of self-managed nodegroups but not as descriptive as the `ami_type`, and therefore it will be removed in favor of `ami_type`.
+3. The launch template and autoscaling group usage contained within the EKS managed node group and self-managed node group sub-modules might be replaced with the [`terraform-aws-autoscaling`](https://github.com/terraform-aws-modules/terraform-aws-autoscaling) module. At minimum, it makes sense to replace most of the functionality in the self-managed node group module with this external module, but it's not yet clear if there is any benefit of using it in the EKS managed node group sub-module. The interface that users interact with will stay the same; the changes will be internal to the implementation, and we will do everything we can to keep the disruption to a minimum.
+4. The `platform` variable will be replaced and instead `ami_type` will become the standard across both self-managed node group(s) and EKS managed node group(s). As EKS expands its portfolio of supported operating systems, the `ami_type` is better suited to associate the correct user data format to the respective OS. The `platform` variable is a legacy artifact of self-managed node groups but not as descriptive as the `ami_type`, and therefore it will be removed in favor of `ami_type`.
## Additional changes
@@ -29,8 +29,8 @@ To give users advanced notice and provide some future direction for this module,
- A module tag has been added to the cluster control plane
- Support for cluster access entries. The `bootstrap_cluster_creator_admin_permissions` setting on the control plane has been hardcoded to `false` since, per the EKS API, it is a one-time operation available only at cluster creation. Instead, users can enable/disable `enable_cluster_creator_admin_permissions` at any time to achieve the same functionality (see the sketch after this list). This takes the identity that Terraform is using to make API calls and maps it into a cluster admin via an access entry. For users on existing clusters, you will need to remove the default cluster administrator that was created by EKS prior to the cluster access entry APIs - see the section [`Removing the default cluster administrator`](https://aws.amazon.com/blogs/containers/a-deep-dive-into-simplified-amazon-eks-access-management-controls/) for more details.
- Support for specifying the CloudWatch log group class (standard or infrequent access)
- - Native support for Windows based managed nodegroups similar to AL2 and Bottlerocket
- - Self-managed nodegroups now support `instance_maintenance_policy` and have added `max_healthy_percentage`, `scale_in_protected_instances`, and `standby_instances` arguments to the `instance_refresh.preferences` block
+ - Native support for Windows based managed node groups similar to AL2 and Bottlerocket
+ - Self-managed node groups now support `instance_maintenance_policy` and have added `max_healthy_percentage`, `scale_in_protected_instances`, and `standby_instances` arguments to the `instance_refresh.preferences` block
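+
+As a minimal sketch of the access entry support described in the list above (module definition truncated):
+
+```hcl
+module "eks" {
+  # Truncated for brevity ...
+
+  # Maps the identity Terraform uses into a cluster admin via an access entry;
+  # unlike `bootstrap_cluster_creator_admin_permissions`, this can be
+  # enabled/disabled at any time
+  enable_cluster_creator_admin_permissions = true
+}
+```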
### Modified
@@ -109,7 +109,7 @@ To give users advanced notice and provide some future direction for this module,
- `create_access_entry`
- `access_entry_type`
- - Self-managed nodegroup
+ - Self-managed node group
- `instance_maintenance_policy`
- `create_access_entry`
- `iam_role_arn`
@@ -135,7 +135,7 @@ To give users advanced notice and provide some future direction for this module,
- Karpenter
- `node_access_entry_arn`
- - Self-managed nodegroup
+ - Self-managed node group
- `access_entry_arn`
## Upgrade Migrations
@@ -239,13 +239,13 @@ terraform state rm 'module.eks.kubernetes_config_map.aws_auth[0]' # include if T
Once the configmap has been removed from the statefile, you can add the new `aws-auth` sub-module and copy the relevant definitions from the EKS module over to the new `aws-auth` sub-module definition (see the before/after diff above).
> [!CAUTION]
-> You will need to add entries to the `aws-auth` sub-module for any IAM roles used by nodegroups and/or Fargate profiles - the module no longer handles this in the background on behalf of users.
+> You will need to add entries to the `aws-auth` sub-module for any IAM roles used by node groups and/or Fargate profiles - the module no longer handles this in the background on behalf of users.
>
> When you apply the changes with the new sub-module, the configmap in the cluster will get updated with the contents provided in the sub-module definition, so please be sure all of the necessary entries are added before applying the changes.
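+
+A sketch of the new sub-module definition (the role mapping shown is an illustrative placeholder for your own node group entries):
+
+```hcl
+module "aws_auth" {
+  source  = "terraform-aws-modules/eks/aws//modules/aws-auth"
+  version = "~> 20.0"
+
+  manage_aws_auth_configmap = true
+
+  aws_auth_roles = [
+    # Node group and/or Fargate profile IAM role entries must now be supplied
+    # by users - they are no longer handled by the module in the background
+    {
+      rolearn  = "arn:aws:iam::123456789012:role/my-node-group-role"
+      username = "system:node:{{EC2PrivateDNSName}}"
+      groups = [
+        "system:bootstrappers",
+        "system:nodes",
+      ]
+    },
+  ]
+}
+```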
### authentication_mode = "API_AND_CONFIG_MAP"
-When using `authentication_mode = "API_AND_CONFIG_MAP"` and there are entries that will remain in the configmap (entries that cannot be replaced by cluster access entry), you will first need to update the `authentication_mode` on the cluster to `"API_AND_CONFIG_MAP"`. To help make this upgrade process easier, a copy of the changes defined in the [`v20.0.0`](https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2858) PR have been captured [here](https://github.com/clowdhaus/terraform-aws-eks-v20-migrate) but with the `aws-auth` components still provided in the module. This means you get the equivalent of the `v20.0.0` module, but it still includes support for the `aws-auth` configmap. You can follow the provided README on that interim migration module for the order of execution and return here once the `authentication_mode` has been updated to `"API_AND_CONFIG_MAP"`. Note - EKS automatically adds access entries for the roles used by EKS managed nodegroups and Fargate profiles; users do not need to do anything additional for these roles.
+When using `authentication_mode = "API_AND_CONFIG_MAP"` and there are entries that will remain in the configmap (entries that cannot be replaced by cluster access entry), you will first need to update the `authentication_mode` on the cluster to `"API_AND_CONFIG_MAP"`. To help make this upgrade process easier, a copy of the changes defined in the [`v20.0.0`](https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2858) PR have been captured [here](https://github.com/clowdhaus/terraform-aws-eks-v20-migrate) but with the `aws-auth` components still provided in the module. This means you get the equivalent of the `v20.0.0` module, but it still includes support for the `aws-auth` configmap. You can follow the provided README on that interim migration module for the order of execution and return here once the `authentication_mode` has been updated to `"API_AND_CONFIG_MAP"`. Note - EKS automatically adds access entries for the roles used by EKS managed node groups and Fargate profiles; users do not need to do anything additional for these roles.
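+
+For reference, the cluster-side change at this step is simply (a sketch, truncated):
+
+```hcl
+module "eks" {
+  # Truncated for brevity ...
+
+  authentication_mode = "API_AND_CONFIG_MAP"
+}
+```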
Once the `authentication_mode` has been updated, next you will need to remove the configmap from the statefile to avoid any disruptions:
@@ -261,10 +261,10 @@ terraform state rm 'module.eks.kubernetes_config_map.aws_auth[0]' # include if T
If you are using Terraform `v1.7+`, you can utilize [`removed`](https://developer.hashicorp.com/terraform/language/resources/syntax#removing-resources) blocks to facilitate the removal of the configmap through code. You can create a fork/clone of the provided [migration module](https://github.com/clowdhaus/terraform-aws-eks-migrate-v19-to-v20), add the `removed` blocks, and apply those changes before proceeding. We do not want to force users onto the bleeding edge with this module, so we have not included `removed` block support at this time.
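+
+A sketch of what such a `removed` block could look like (assuming Terraform `v1.7+` and the module structure used above):
+
+```hcl
+removed {
+  from = module.eks.kubernetes_config_map_v1_data.aws_auth
+
+  lifecycle {
+    # Remove from state only - do not delete the configmap itself
+    destroy = false
+  }
+}
+```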
-Once the configmap has been removed from the statefile, you can add the new `aws-auth` sub-module and copy the relevant definitions from the EKS module over to the new `aws-auth` sub-module definition (see before after diff above). When you apply the changes with the new sub-module, the configmap in the cluster will get updated with the contents provided in the sub-module definition, so please be sure all of the necessary entries are added before applying the changes. In the before/example above - the configmap would remove any entries for roles used by nodegroups and/or Fargate Profiles, but maintain the custom entries for users and roles passed into the module definition.
+Once the configmap has been removed from the statefile, you can add the new `aws-auth` sub-module and copy the relevant definitions from the EKS module over to the new `aws-auth` sub-module definition (see the before/after diff above). When you apply the changes with the new sub-module, the configmap in the cluster will get updated with the contents provided in the sub-module definition, so please be sure all of the necessary entries are added before applying the changes. In the before/after example above, the configmap would remove any entries for roles used by node groups and/or Fargate profiles, but maintain the custom entries for users and roles passed into the module definition.
### authentication_mode = "API"
-In order to switch to `API` only using cluster access entry, you first need to update the `authentication_mode` on the cluster to `API_AND_CONFIG_MAP` without modifying the `aws-auth` configmap. To help make this upgrade process easier, a copy of the changes defined in the [`v20.0.0`](https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2858) PR have been captured [here](https://github.com/clowdhaus/terraform-aws-eks-v20-migrate) but with the `aws-auth` components still provided in the module. This means you get the equivalent of the `v20.0.0` module, but it still includes support for the `aws-auth` configmap. You can follow the provided README on that interim migration module for the order of execution and return here once the `authentication_mode` has been updated to `"API_AND_CONFIG_MAP"`. Note - EKS automatically adds access entries for the roles used by EKS managed nodegroups and Fargate profiles; users do not need to do anything additional for these roles.
+In order to switch to `API` only using cluster access entry, you first need to update the `authentication_mode` on the cluster to `API_AND_CONFIG_MAP` without modifying the `aws-auth` configmap. To help make this upgrade process easier, a copy of the changes defined in the [`v20.0.0`](https://github.com/terraform-aws-modules/terraform-aws-eks/pull/2858) PR have been captured [here](https://github.com/clowdhaus/terraform-aws-eks-v20-migrate) but with the `aws-auth` components still provided in the module. This means you get the equivalent of the `v20.0.0` module, but it still includes support for the `aws-auth` configmap. You can follow the provided README on that interim migration module for the order of execution and return here once the `authentication_mode` has been updated to `"API_AND_CONFIG_MAP"`. Note - EKS automatically adds access entries for the roles used by EKS managed node groups and Fargate profiles; users do not need to do anything additional for these roles.
Once the `authentication_mode` has been updated to `API_AND_CONFIG_MAP`, you can then update it to `API` and remove the `aws-auth` configmap components.
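+
+Again as a sketch, the final cluster-side change (with the `aws-auth` sub-module definition removed entirely):
+
+```hcl
+module "eks" {
+  # Truncated for brevity ...
+
+  authentication_mode = "API"
+}
+```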
diff --git a/examples/README.md b/examples/README.md
index f417c0a..8d54b3d 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -1,8 +1,5 @@
# Examples
-Please note - the examples provided serve two primary means:
-
-1. Show users working examples of the various ways in which the module can be configured and features supported
-2. A means of testing/validating module changes
+The examples demonstrate the different cluster configurations that users can create with the modules provided.
Please do not mistake the examples provided as "best practices". It is up to users to consult the AWS service documentation for best practices, usage recommendations, etc.
diff --git a/examples/eks-managed-node-group/README.md b/examples/eks-managed-node-group/README.md
new file mode 100644
index 0000000..a756364
--- /dev/null
+++ b/examples/eks-managed-node-group/README.md
@@ -0,0 +1,23 @@
+# EKS Managed Node Group Examples
+
+Configuration in this directory creates Amazon EKS clusters with EKS Managed Node Groups demonstrating different configurations:
+
+- `eks-al2.tf` demonstrates an EKS cluster using an EKS managed node group that utilizes the EKS Amazon Linux 2 optimized AMI
+- `eks-al2023.tf` demonstrates an EKS cluster using an EKS managed node group that utilizes the EKS Amazon Linux 2023 optimized AMI
+- `eks-bottlerocket.tf` demonstrates an EKS cluster using an EKS managed node group that utilizes the Bottlerocket EKS optimized AMI
+
+See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for additional details on Amazon EKS managed node groups.
+
+Each cluster configuration example is contained in its own file and is independent of the other cluster configurations.
+
+## Usage
+
+To provision the provided configurations you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply --auto-approve
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
diff --git a/examples/eks-managed-node-group/eks-al2.tf b/examples/eks-managed-node-group/eks-al2.tf
new file mode 100644
index 0000000..2dfb5b0
--- /dev/null
+++ b/examples/eks-managed-node-group/eks-al2.tf
@@ -0,0 +1,34 @@
+module "eks_al2" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 20.0"
+
+ cluster_name = "${local.name}-al2"
+ cluster_version = "1.30"
+
+ # EKS Addons
+ cluster_addons = {
+ coredns = {}
+ eks-pod-identity-agent = {}
+ kube-proxy = {}
+ vpc-cni = {}
+ }
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ eks_managed_node_groups = {
+ example = {
+ # Starting with 1.30, AL2023 is the default AMI type for EKS managed node groups; AL2 is specified explicitly here
+ ami_type = "AL2_x86_64"
+ instance_types = ["m6i.large"]
+
+ min_size = 2
+ max_size = 5
+ # This value is ignored after the initial creation
+ # https://github.com/bryantbiggs/eks-desired-size-hack
+ desired_size = 2
+ }
+ }
+
+ tags = local.tags
+}
diff --git a/examples/eks-managed-node-group/eks-al2023.tf b/examples/eks-managed-node-group/eks-al2023.tf
new file mode 100644
index 0000000..1b112d2
--- /dev/null
+++ b/examples/eks-managed-node-group/eks-al2023.tf
@@ -0,0 +1,52 @@
+module "eks_al2023" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 20.0"
+
+ cluster_name = "${local.name}-al2023"
+ cluster_version = "1.30"
+
+ # EKS Addons
+ cluster_addons = {
+ coredns = {}
+ eks-pod-identity-agent = {}
+ kube-proxy = {}
+ vpc-cni = {}
+ }
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ eks_managed_node_groups = {
+ example = {
+ # Starting with 1.30, AL2023 is the default AMI type for EKS managed node groups (so no `ami_type` is specified here)
+ instance_types = ["m6i.large"]
+
+ min_size = 2
+ max_size = 5
+ # This value is ignored after the initial creation
+ # https://github.com/bryantbiggs/eks-desired-size-hack
+ desired_size = 2
+
+ # This is not required - demonstrates how to pass additional configuration to nodeadm
+ # Ref https://awslabs.github.io/amazon-eks-ami/nodeadm/doc/api/
+ cloudinit_pre_nodeadm = [
+ {
+ content_type = "application/node.eks.aws"
+ content = <<-EOT
+ ---
+ apiVersion: node.eks.aws/v1alpha1
+ kind: NodeConfig
+ spec:
+ kubelet:
+ config:
+ shutdownGracePeriod: 30s
+ featureGates:
+ DisableKubeletCloudCredentialProviders: true
+ EOT
+ }
+ ]
+ }
+ }
+
+ tags = local.tags
+}
diff --git a/examples/eks-managed-node-group/eks-bottlerocket.tf b/examples/eks-managed-node-group/eks-bottlerocket.tf
new file mode 100644
index 0000000..44efa59
--- /dev/null
+++ b/examples/eks-managed-node-group/eks-bottlerocket.tf
@@ -0,0 +1,52 @@
+module "eks_bottlerocket" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 20.0"
+
+ cluster_name = "${local.name}-bottlerocket"
+ cluster_version = "1.30"
+
+ # EKS Addons
+ cluster_addons = {
+ coredns = {}
+ eks-pod-identity-agent = {}
+ kube-proxy = {}
+ vpc-cni = {}
+ }
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ eks_managed_node_groups = {
+ example = {
+ ami_type = "BOTTLEROCKET_x86_64"
+ instance_types = ["m6i.large"]
+
+ min_size = 2
+ max_size = 5
+ # This value is ignored after the initial creation
+ # https://github.com/bryantbiggs/eks-desired-size-hack
+ desired_size = 2
+
+ # This is not required - demonstrates how to pass additional configuration
+ # Ref https://bottlerocket.dev/en/os/1.19.x/api/settings/
+ bootstrap_extra_args = <<-EOT
+ # The admin host container provides SSH access and runs with "superpowers".
+ # It is disabled by default, and is explicitly disabled here.
+ [settings.host-containers.admin]
+ enabled = false
+
+ # The control host container provides out-of-band access via SSM.
+ # It is enabled by default, and can be disabled if you do not expect to use SSM.
+ # This could leave you with no way to access the API and change settings on an existing node!
+ [settings.host-containers.control]
+ enabled = true
+
+ # Additional kernel settings added via extra args
+ [settings.kernel]
+ lockdown = "integrity"
+ EOT
+ }
+ }
+
+ tags = local.tags
+}
diff --git a/examples/eks-managed-node-group/main.tf b/examples/eks-managed-node-group/main.tf
new file mode 100644
index 0000000..4409a2e
--- /dev/null
+++ b/examples/eks-managed-node-group/main.tf
@@ -0,0 +1,49 @@
+provider "aws" {
+ region = local.region
+}
+
+data "aws_availability_zones" "available" {}
+
+locals {
+ name = "ex-eks-mng"
+ region = "eu-west-1"
+
+ vpc_cidr = "10.0.0.0/16"
+ azs = slice(data.aws_availability_zones.available.names, 0, 3)
+
+ tags = {
+ Example = local.name
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+}
+
+################################################################################
+# VPC
+################################################################################
+
+module "vpc" {
+ source = "terraform-aws-modules/vpc/aws"
+ version = "~> 5.0"
+
+ name = local.name
+ cidr = local.vpc_cidr
+
+ azs = local.azs
+ private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
+ public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
+ intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)]
+
+ enable_nat_gateway = true
+ single_nat_gateway = true
+
+ public_subnet_tags = {
+ "kubernetes.io/role/elb" = 1
+ }
+
+ private_subnet_tags = {
+ "kubernetes.io/role/internal-elb" = 1
+ }
+
+ tags = local.tags
+}
diff --git a/examples/eks_managed_node_group/variables.tf b/examples/eks-managed-node-group/outputs.tf
similarity index 100%
rename from examples/eks_managed_node_group/variables.tf
rename to examples/eks-managed-node-group/outputs.tf
diff --git a/examples/fargate_profile/variables.tf b/examples/eks-managed-node-group/variables.tf
similarity index 100%
rename from examples/fargate_profile/variables.tf
rename to examples/eks-managed-node-group/variables.tf
diff --git a/examples/eks_managed_node_group/versions.tf b/examples/eks-managed-node-group/versions.tf
similarity index 100%
rename from examples/eks_managed_node_group/versions.tf
rename to examples/eks-managed-node-group/versions.tf
diff --git a/examples/karpenter/README.md b/examples/karpenter/README.md
index def4eb9..bc09df1 100644
--- a/examples/karpenter/README.md
+++ b/examples/karpenter/README.md
@@ -4,12 +4,12 @@ Configuration in this directory creates an AWS EKS cluster with [Karpenter](http
## Usage
-To run this example you need to execute:
+To provision the provided configurations you need to execute:
```bash
$ terraform init
$ terraform plan
-$ terraform apply
+$ terraform apply --auto-approve
```
Once the cluster is up and running, you can check that Karpenter is functioning as intended with the following command:
@@ -78,7 +78,7 @@ kubectl delete node -l karpenter.sh/provisioner-name=default
2. Remove the resources created by Terraform
```bash
-terraform destroy
+terraform destroy --auto-approve
```
Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
diff --git a/examples/karpenter/main.tf b/examples/karpenter/main.tf
index b3db544..9dd3617 100644
--- a/examples/karpenter/main.tf
+++ b/examples/karpenter/main.tf
@@ -62,7 +62,7 @@ module "eks" {
source = "../.."
cluster_name = local.name
- cluster_version = "1.29"
+ cluster_version = "1.30"
# Gives Terraform identity admin access to cluster which will
# allow deploying resources (Karpenter) into the cluster
@@ -82,6 +82,7 @@ module "eks" {
eks_managed_node_groups = {
karpenter = {
+ ami_type = "AL2023_x86_64_STANDARD"
instance_types = ["m5.large"]
min_size = 2
@@ -146,7 +147,7 @@ resource "helm_release" "karpenter" {
repository_username = data.aws_ecrpublic_authorization_token.token.user_name
repository_password = data.aws_ecrpublic_authorization_token.token.password
chart = "karpenter"
- version = "0.36.1"
+ version = "0.37.0"
wait = false
values = [
@@ -168,7 +169,7 @@ resource "kubectl_manifest" "karpenter_node_class" {
metadata:
name: default
spec:
- amiFamily: AL2
+ amiFamily: AL2023
role: ${module.karpenter.node_iam_role_name}
subnetSelectorTerms:
- tags:
diff --git a/examples/outposts/README.md b/examples/outposts/README.md
index a454fd2..3779487 100644
--- a/examples/outposts/README.md
+++ b/examples/outposts/README.md
@@ -1,4 +1,4 @@
-# EKS on Outposts
+# EKS on Outposts Example
Configuration in this directory creates an AWS EKS local cluster on AWS Outposts
@@ -16,7 +16,7 @@ To run this example you need to:
$ cd prerequisites
$ terraform init
$ terraform plan
-$ terraform apply
+$ terraform apply --auto-approve
```
2. If provisioning using the remote host deployed in step 1, connect to the remote host using SSM. Note, you will need to have the [SSM plugin for the AWS CLI installed](https://docs.aws.amazon.com/systems-manager/latest/userguide/session-manager-working-with-install-plugin.html). You can use the output generated by step 1 to connect:
@@ -31,13 +31,13 @@ $ aws ssm start-session --region --target
$ cd $HOME/terraform-aws-eks/examples/outposts
$ terraform init
$ terraform plan
-$ terraform apply
+$ terraform apply --auto-approve
```
Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
```bash
-terraform destroy
+terraform destroy --auto-approve
```
diff --git a/examples/outposts/main.tf b/examples/outposts/main.tf
index 4b13f52..c783597 100644
--- a/examples/outposts/main.tf
+++ b/examples/outposts/main.tf
@@ -4,7 +4,7 @@ provider "aws" {
locals {
name = "ex-${basename(path.cwd)}"
- cluster_version = "1.29"
+ cluster_version = "1.30"
outpost_arn = element(tolist(data.aws_outposts_outposts.this.arns), 0)
instance_type = element(tolist(data.aws_outposts_outpost_instance_types.this.instance_types), 0)
diff --git a/examples/outposts/prerequisites/main.tf b/examples/outposts/prerequisites/main.tf
index 66ab2a4..1a1dd18 100644
--- a/examples/outposts/prerequisites/main.tf
+++ b/examples/outposts/prerequisites/main.tf
@@ -56,7 +56,7 @@ module "ssm_bastion_ec2" {
rm terraform_${local.terraform_version}_linux_amd64.zip 2> /dev/null
# Install kubectl
- curl -LO https://dl.k8s.io/release/v1.29.0/bin/linux/amd64/kubectl
+ curl -LO https://dl.k8s.io/release/v1.30.0/bin/linux/amd64/kubectl
install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
# Remove default awscli which is v1 - we want latest v2
diff --git a/examples/self-managed-node-group/README.md b/examples/self-managed-node-group/README.md
new file mode 100644
index 0000000..da81c84
--- /dev/null
+++ b/examples/self-managed-node-group/README.md
@@ -0,0 +1,21 @@
+# Self-managed Node Group Examples
+
+Configuration in this directory creates Amazon EKS clusters with self-managed node groups demonstrating different configurations:
+
+- `eks-al2.tf` demonstrates an EKS cluster using a self-managed node group that utilizes the EKS Amazon Linux 2 optimized AMI
+- `eks-al2023.tf` demonstrates an EKS cluster using a self-managed node group that utilizes the EKS Amazon Linux 2023 optimized AMI
+- `eks-bottlerocket.tf` demonstrates an EKS cluster using a self-managed node group that utilizes the Bottlerocket EKS optimized AMI
+
+Each cluster configuration example is contained in its own file and is independent of the other cluster configurations.
+
+## Usage
+
+To provision the provided configurations you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply --auto-approve
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
diff --git a/examples/self-managed-node-group/eks-al2.tf b/examples/self-managed-node-group/eks-al2.tf
new file mode 100644
index 0000000..be5c65a
--- /dev/null
+++ b/examples/self-managed-node-group/eks-al2.tf
@@ -0,0 +1,33 @@
+module "eks_al2" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 20.0"
+
+ cluster_name = "${local.name}-al2"
+ cluster_version = "1.30"
+
+ # EKS Addons
+ cluster_addons = {
+ coredns = {}
+ eks-pod-identity-agent = {}
+ kube-proxy = {}
+ vpc-cni = {}
+ }
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ self_managed_node_groups = {
+ example = {
+ ami_type = "AL2_x86_64"
+ instance_type = "m6i.large"
+
+ min_size = 2
+ max_size = 5
+ # This value is ignored after the initial creation
+ # https://github.com/bryantbiggs/eks-desired-size-hack
+ desired_size = 2
+ }
+ }
+
+ tags = local.tags
+}
diff --git a/examples/self-managed-node-group/eks-al2023.tf b/examples/self-managed-node-group/eks-al2023.tf
new file mode 100644
index 0000000..7015605
--- /dev/null
+++ b/examples/self-managed-node-group/eks-al2023.tf
@@ -0,0 +1,52 @@
+module "eks_al2023" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 20.0"
+
+ cluster_name = "${local.name}-al2023"
+ cluster_version = "1.30"
+
+ # EKS Addons
+ cluster_addons = {
+ coredns = {}
+ eks-pod-identity-agent = {}
+ kube-proxy = {}
+ vpc-cni = {}
+ }
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ self_managed_node_groups = {
+ example = {
+ ami_type = "AL2023_x86_64_STANDARD"
+ instance_type = "m6i.large"
+
+ min_size = 2
+ max_size = 5
+ # This value is ignored after the initial creation
+ # https://github.com/bryantbiggs/eks-desired-size-hack
+ desired_size = 2
+
+ # This is not required - demonstrates how to pass additional configuration to nodeadm
+ # Ref https://awslabs.github.io/amazon-eks-ami/nodeadm/doc/api/
+ cloudinit_pre_nodeadm = [
+ {
+ content_type = "application/node.eks.aws"
+ content = <<-EOT
+ ---
+ apiVersion: node.eks.aws/v1alpha1
+ kind: NodeConfig
+ spec:
+ kubelet:
+ config:
+ shutdownGracePeriod: 30s
+ featureGates:
+ DisableKubeletCloudCredentialProviders: true
+ EOT
+ }
+ ]
+ }
+ }
+
+ tags = local.tags
+}
diff --git a/examples/self-managed-node-group/eks-bottlerocket.tf b/examples/self-managed-node-group/eks-bottlerocket.tf
new file mode 100644
index 0000000..2afb079
--- /dev/null
+++ b/examples/self-managed-node-group/eks-bottlerocket.tf
@@ -0,0 +1,52 @@
+module "eks_bottlerocket" {
+ source = "terraform-aws-modules/eks/aws"
+ version = "~> 20.0"
+
+ cluster_name = "${local.name}-bottlerocket"
+ cluster_version = "1.30"
+
+ # EKS Addons
+ cluster_addons = {
+ coredns = {}
+ eks-pod-identity-agent = {}
+ kube-proxy = {}
+ vpc-cni = {}
+ }
+
+ vpc_id = module.vpc.vpc_id
+ subnet_ids = module.vpc.private_subnets
+
+ self_managed_node_groups = {
+ example = {
+ ami_type = "BOTTLEROCKET_x86_64"
+ instance_type = "m6i.large"
+
+ min_size = 2
+ max_size = 5
+ # This value is ignored after the initial creation
+ # https://github.com/bryantbiggs/eks-desired-size-hack
+ desired_size = 2
+
+ # This is not required - demonstrates how to pass additional configuration
+ # Ref https://bottlerocket.dev/en/os/1.19.x/api/settings/
+ bootstrap_extra_args = <<-EOT
+ # The admin host container provides SSH access and runs with "superpowers".
+ # It is disabled by default, and is explicitly disabled here.
+ [settings.host-containers.admin]
+ enabled = false
+
+ # The control host container provides out-of-band access via SSM.
+ # It is enabled by default, and can be disabled if you do not expect to use SSM.
+ # This could leave you with no way to access the API and change settings on an existing node!
+ [settings.host-containers.control]
+ enabled = true
+
+ # Additional kernel settings added via extra args
+ [settings.kernel]
+ lockdown = "integrity"
+ EOT
+ }
+ }
+
+ tags = local.tags
+}
diff --git a/examples/self-managed-node-group/main.tf b/examples/self-managed-node-group/main.tf
new file mode 100644
index 0000000..ed6982a
--- /dev/null
+++ b/examples/self-managed-node-group/main.tf
@@ -0,0 +1,49 @@
+provider "aws" {
+ region = local.region
+}
+
+data "aws_availability_zones" "available" {}
+
+locals {
+ name = "ex-self-mng"
+ region = "eu-west-1"
+
+ vpc_cidr = "10.0.0.0/16"
+ azs = slice(data.aws_availability_zones.available.names, 0, 3)
+
+ tags = {
+ Example = local.name
+ GithubRepo = "terraform-aws-eks"
+ GithubOrg = "terraform-aws-modules"
+ }
+}
+
+################################################################################
+# VPC
+################################################################################
+
+module "vpc" {
+ source = "terraform-aws-modules/vpc/aws"
+ version = "~> 5.0"
+
+ name = local.name
+ cidr = local.vpc_cidr
+
+ azs = local.azs
+ private_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 4, k)]
+ public_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 48)]
+ intra_subnets = [for k, v in local.azs : cidrsubnet(local.vpc_cidr, 8, k + 52)]
+
+ enable_nat_gateway = true
+ single_nat_gateway = true
+
+ public_subnet_tags = {
+ "kubernetes.io/role/elb" = 1
+ }
+
+ private_subnet_tags = {
+ "kubernetes.io/role/internal-elb" = 1
+ }
+
+ tags = local.tags
+}
diff --git a/examples/self_managed_node_group/variables.tf b/examples/self-managed-node-group/outputs.tf
similarity index 100%
rename from examples/self_managed_node_group/variables.tf
rename to examples/self-managed-node-group/outputs.tf
diff --git a/examples/user_data/variables.tf b/examples/self-managed-node-group/variables.tf
similarity index 100%
rename from examples/user_data/variables.tf
rename to examples/self-managed-node-group/variables.tf
diff --git a/examples/fargate_profile/versions.tf b/examples/self-managed-node-group/versions.tf
similarity index 100%
rename from examples/fargate_profile/versions.tf
rename to examples/self-managed-node-group/versions.tf
diff --git a/modules/_user_data/main.tf b/modules/_user_data/main.tf
index b695ba6..79b8cba 100644
--- a/modules/_user_data/main.tf
+++ b/modules/_user_data/main.tf
@@ -86,7 +86,7 @@ locals {
}
# https://github.com/aws/containers-roadmap/issues/596#issuecomment-675097667
-# Managed nodegroup data must in MIME multi-part archive format,
+# Managed node group data must be in MIME multi-part archive format,
# as by default, EKS will merge the bootstrapping command required for nodes to join the
# cluster with your user data. If you use a custom AMI in your launch template,
# this merging will NOT happen and you are responsible for nodes joining the cluster.
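+# For reference, MIME multi-part user data looks roughly like the following
+# (a sketch based on the AWS EKS launch template documentation):
+#
+#   MIME-Version: 1.0
+#   Content-Type: multipart/mixed; boundary="==MYBOUNDARY=="
+#
+#   --==MYBOUNDARY==
+#   Content-Type: text/x-shellscript; charset="us-ascii"
+#
+#   #!/bin/bash
+#   echo "Running custom user data script"
+#
+#   --==MYBOUNDARY==--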
diff --git a/modules/eks-managed-node-group/README.md b/modules/eks-managed-node-group/README.md
index 9cc5680..494b187 100644
--- a/modules/eks-managed-node-group/README.md
+++ b/modules/eks-managed-node-group/README.md
@@ -122,7 +122,7 @@ module "eks_managed_node_group" {
| [create](#input\_create) | Determines whether to create EKS managed node group or not | `bool` | `true` | no |
| [create\_iam\_role](#input\_create\_iam\_role) | Determines whether an IAM role is created or to use an existing IAM role | `bool` | `true` | no |
| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create a launch template or not. If set to `false`, EKS will use its own default launch template | `bool` | `true` | no |
-| [create\_placement\_group](#input\_create\_placement\_group) | Determines whether a placement group is created & used by the nodegroup | `bool` | `false` | no |
+| [create\_placement\_group](#input\_create\_placement\_group) | Determines whether a placement group is created & used by the node group | `bool` | `false` | no |
| [create\_schedule](#input\_create\_schedule) | Determines whether to create autoscaling group schedule or not | `bool` | `true` | no |
| [credit\_specification](#input\_credit\_specification) | Customize the credit specification of the instance | `map(string)` | `{}` | no |
| [desired\_size](#input\_desired\_size) | Desired number of instances/nodes | `number` | `1` | no |
diff --git a/modules/eks-managed-node-group/variables.tf b/modules/eks-managed-node-group/variables.tf
index 344b346..d626296 100644
--- a/modules/eks-managed-node-group/variables.tf
+++ b/modules/eks-managed-node-group/variables.tf
@@ -298,7 +298,7 @@ variable "placement" {
}
variable "create_placement_group" {
- description = "Determines whether a placement group is created & used by the nodegroup"
+ description = "Determines whether a placement group is created & used by the node group"
type = bool
default = false
}
diff --git a/modules/karpenter/README.md b/modules/karpenter/README.md
index ec819ed..ee8cce3 100644
--- a/modules/karpenter/README.md
+++ b/modules/karpenter/README.md
@@ -43,7 +43,7 @@ In the following example, the Karpenter module will create:
- An IAM role for use with Pod Identity and a scoped IAM policy for the Karpenter controller
- SQS queue and EventBridge event rules for Karpenter to utilize for spot termination handling, capacity re-balancing, etc.
-In this scenario, Karpenter will re-use an existing Node IAM role from the EKS managed nodegroup which already has the necessary access entry permissions:
+In this scenario, Karpenter will re-use an existing Node IAM role from the EKS managed node group which already has the necessary access entry permissions:
```hcl
module "eks" {
@@ -70,7 +70,7 @@ module "karpenter" {
create_node_iam_role = false
node_iam_role_arn = module.eks.eks_managed_node_groups["initial"].iam_role_arn
- # Since the nodegroup role will already have an access entry
+ # Since the node group role will already have an access entry
create_access_entry = false
tags = {
diff --git a/modules/self-managed-node-group/README.md b/modules/self-managed-node-group/README.md
index 5fb9cd3..681a322 100644
--- a/modules/self-managed-node-group/README.md
+++ b/modules/self-managed-node-group/README.md
@@ -103,7 +103,7 @@ module "self_managed_node_group" {
| [context](#input\_context) | Reserved | `string` | `null` | no |
| [cpu\_options](#input\_cpu\_options) | The CPU options for the instance | `map(string)` | `{}` | no |
| [create](#input\_create) | Determines whether to create self managed node group or not | `bool` | `true` | no |
-| [create\_access\_entry](#input\_create\_access\_entry) | Determines whether an access entry is created for the IAM role used by the nodegroup | `bool` | `true` | no |
+| [create\_access\_entry](#input\_create\_access\_entry) | Determines whether an access entry is created for the IAM role used by the node group | `bool` | `true` | no |
| [create\_autoscaling\_group](#input\_create\_autoscaling\_group) | Determines whether to create autoscaling group or not | `bool` | `true` | no |
| [create\_iam\_instance\_profile](#input\_create\_iam\_instance\_profile) | Determines whether an IAM instance profile is created or to use an existing IAM instance profile | `bool` | `true` | no |
| [create\_launch\_template](#input\_create\_launch\_template) | Determines whether to create launch template or not | `bool` | `true` | no |
diff --git a/modules/self-managed-node-group/variables.tf b/modules/self-managed-node-group/variables.tf
index aea1269..c476747 100644
--- a/modules/self-managed-node-group/variables.tf
+++ b/modules/self-managed-node-group/variables.tf
@@ -650,7 +650,7 @@ variable "iam_role_tags" {
################################################################################
variable "create_access_entry" {
- description = "Determines whether an access entry is created for the IAM role used by the nodegroup"
+ description = "Determines whether an access entry is created for the IAM role used by the node group"
type = bool
default = true
}
diff --git a/examples/eks_managed_node_group/README.md b/tests/eks-managed-node-group/README.md
similarity index 88%
rename from examples/eks_managed_node_group/README.md
rename to tests/eks-managed-node-group/README.md
index a6faed8..169b4c4 100644
--- a/examples/eks_managed_node_group/README.md
+++ b/tests/eks-managed-node-group/README.md
@@ -1,25 +1,13 @@
-# EKS Managed Node Group Example
-
-Configuration in this directory creates an AWS EKS cluster with various EKS Managed Node Groups demonstrating the various methods of configuring/customizing:
-
-- A default, "out of the box" EKS managed node group as supplied by AWS EKS
-- A default, "out of the box" Bottlerocket EKS managed node group as supplied by AWS EKS
-- A Bottlerocket EKS managed node group that supplies additional bootstrap settings
-- A Bottlerocket EKS managed node group that demonstrates many of the configuration/customizations offered by the `eks-managed-node-group` sub-module for the Bottlerocket OS
-- An EKS managed node group created from a launch template created outside of the module
-- An EKS managed node group that utilizes a custom AMI that is an EKS optimized AMI derivative
-- An EKS managed node group that demonstrates nearly all of the configurations/customizations offered by the `eks-managed-node-group` sub-module
-
-See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for further details.
+# EKS Managed Node Group
## Usage
-To run this example you need to execute:
+To provision the provided configurations you need to execute:
```bash
$ terraform init
$ terraform plan
-$ terraform apply
+$ terraform apply --auto-approve
```
Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
diff --git a/examples/eks_managed_node_group/main.tf b/tests/eks-managed-node-group/main.tf
similarity index 99%
rename from examples/eks_managed_node_group/main.tf
rename to tests/eks-managed-node-group/main.tf
index 2f1202f..c5bb833 100644
--- a/examples/eks_managed_node_group/main.tf
+++ b/tests/eks-managed-node-group/main.tf
@@ -14,7 +14,7 @@ locals {
azs = slice(data.aws_availability_zones.available.names, 0, 3)
tags = {
- Example = local.name
+ Test = local.name
GithubRepo = "terraform-aws-eks"
GithubOrg = "terraform-aws-modules"
}
diff --git a/examples/eks_managed_node_group/outputs.tf b/tests/eks-managed-node-group/outputs.tf
similarity index 100%
rename from examples/eks_managed_node_group/outputs.tf
rename to tests/eks-managed-node-group/outputs.tf
diff --git a/examples/user_data/rendered/al2/eks-mng-no-op.sh b/tests/eks-managed-node-group/variables.tf
old mode 100755
new mode 100644
similarity index 100%
rename from examples/user_data/rendered/al2/eks-mng-no-op.sh
rename to tests/eks-managed-node-group/variables.tf
diff --git a/examples/self_managed_node_group/versions.tf b/tests/eks-managed-node-group/versions.tf
similarity index 100%
rename from examples/self_managed_node_group/versions.tf
rename to tests/eks-managed-node-group/versions.tf
diff --git a/examples/fargate_profile/README.md b/tests/fargate-profile/README.md
similarity index 97%
rename from examples/fargate_profile/README.md
rename to tests/fargate-profile/README.md
index 604fd3a..bdb1a90 100644
--- a/examples/fargate_profile/README.md
+++ b/tests/fargate-profile/README.md
@@ -1,15 +1,13 @@
-# AWS EKS Cluster with Fargate profiles
-
-Configuration in this directory creates an AWS EKS cluster utilizing Fargate profiles.
+# Fargate Profile
## Usage
-To run this example you need to execute:
+To provision the provided configurations you need to execute:
```bash
$ terraform init
$ terraform plan
-$ terraform apply
+$ terraform apply --auto-approve
```
Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
diff --git a/examples/fargate_profile/main.tf b/tests/fargate-profile/main.tf
similarity index 96%
rename from examples/fargate_profile/main.tf
rename to tests/fargate-profile/main.tf
index 16fe82c..6106022 100644
--- a/examples/fargate_profile/main.tf
+++ b/tests/fargate-profile/main.tf
@@ -5,15 +5,15 @@ provider "aws" {
data "aws_availability_zones" "available" {}
locals {
- name = "ex-${replace(basename(path.cwd), "_", "-")}"
- cluster_version = "1.29"
+ name = "ex-${basename(path.cwd)}"
+ cluster_version = "1.30"
region = "eu-west-1"
vpc_cidr = "10.0.0.0/16"
azs = slice(data.aws_availability_zones.available.names, 0, 3)
tags = {
- Example = local.name
+ Test = local.name
GithubRepo = "terraform-aws-eks"
GithubOrg = "terraform-aws-modules"
}
diff --git a/examples/fargate_profile/outputs.tf b/tests/fargate-profile/outputs.tf
similarity index 100%
rename from examples/fargate_profile/outputs.tf
rename to tests/fargate-profile/outputs.tf
diff --git a/examples/user_data/rendered/al2/self-mng-no-op.sh b/tests/fargate-profile/variables.tf
old mode 100755
new mode 100644
similarity index 100%
rename from examples/user_data/rendered/al2/self-mng-no-op.sh
rename to tests/fargate-profile/variables.tf
diff --git a/tests/fargate-profile/versions.tf b/tests/fargate-profile/versions.tf
new file mode 100644
index 0000000..6f83215
--- /dev/null
+++ b/tests/fargate-profile/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+ required_version = ">= 1.3.2"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 5.40"
+ }
+ }
+}
diff --git a/examples/self_managed_node_group/README.md b/tests/self-managed-node-group/README.md
similarity index 90%
rename from examples/self_managed_node_group/README.md
rename to tests/self-managed-node-group/README.md
index f6f063f..feebef9 100644
--- a/examples/self_managed_node_group/README.md
+++ b/tests/self-managed-node-group/README.md
@@ -1,21 +1,13 @@
-# Self Managed Node Groups Example
-
-Configuration in this directory creates an AWS EKS cluster with various Self Managed Node Groups (AutoScaling Groups) demonstrating the various methods of configuring/customizing:
-
-- A default, "out of the box" self managed node group as supplied by the `self-managed-node-group` sub-module
-- A Bottlerocket self managed node group that demonstrates many of the configuration/customizations offered by the `self-manged-node-group` sub-module for the Bottlerocket OS
-- A self managed node group that demonstrates nearly all of the configurations/customizations offered by the `self-managed-node-group` sub-module
-
-See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for further details.
+# Self-managed Node Group
## Usage
-To run this example you need to execute:
+To provision the provided configurations, you need to execute:
```bash
$ terraform init
$ terraform plan
-$ terraform apply
+$ terraform apply --auto-approve
```
Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
diff --git a/examples/self_managed_node_group/main.tf b/tests/self-managed-node-group/main.tf
similarity index 99%
rename from examples/self_managed_node_group/main.tf
rename to tests/self-managed-node-group/main.tf
index 1d0e80b..7fc6171 100644
--- a/examples/self_managed_node_group/main.tf
+++ b/tests/self-managed-node-group/main.tf
@@ -14,7 +14,7 @@ locals {
azs = slice(data.aws_availability_zones.available.names, 0, 3)
tags = {
- Example = local.name
+ Test = local.name
GithubRepo = "terraform-aws-eks"
GithubOrg = "terraform-aws-modules"
}
diff --git a/examples/self_managed_node_group/outputs.tf b/tests/self-managed-node-group/outputs.tf
similarity index 100%
rename from examples/self_managed_node_group/outputs.tf
rename to tests/self-managed-node-group/outputs.tf
diff --git a/examples/user_data/rendered/al2023/eks-mng-no-op.txt b/tests/self-managed-node-group/variables.tf
old mode 100755
new mode 100644
similarity index 100%
rename from examples/user_data/rendered/al2023/eks-mng-no-op.txt
rename to tests/self-managed-node-group/variables.tf
diff --git a/tests/self-managed-node-group/versions.tf b/tests/self-managed-node-group/versions.tf
new file mode 100644
index 0000000..6f83215
--- /dev/null
+++ b/tests/self-managed-node-group/versions.tf
@@ -0,0 +1,10 @@
+terraform {
+ required_version = ">= 1.3.2"
+
+ required_providers {
+ aws = {
+ source = "hashicorp/aws"
+ version = ">= 5.40"
+ }
+ }
+}
diff --git a/examples/user_data/README.md b/tests/user-data/README.md
similarity index 99%
rename from examples/user_data/README.md
rename to tests/user-data/README.md
index de9b419..3278c7f 100644
--- a/examples/user_data/README.md
+++ b/tests/user-data/README.md
@@ -4,12 +4,12 @@ Configuration in this directory renders various user data outputs used for testing
## Usage
-To run this example you need to execute:
+To provision the provided configurations, you need to execute:
```bash
$ terraform init
$ terraform plan
-$ terraform apply
+$ terraform apply --auto-approve
```
diff --git a/examples/user_data/main.tf b/tests/user-data/main.tf
similarity index 100%
rename from examples/user_data/main.tf
rename to tests/user-data/main.tf
diff --git a/examples/user_data/outputs.tf b/tests/user-data/outputs.tf
similarity index 100%
rename from examples/user_data/outputs.tf
rename to tests/user-data/outputs.tf
diff --git a/examples/user_data/rendered/al2/eks-mng-additional.txt b/tests/user-data/rendered/al2/eks-mng-additional.txt
similarity index 100%
rename from examples/user_data/rendered/al2/eks-mng-additional.txt
rename to tests/user-data/rendered/al2/eks-mng-additional.txt
diff --git a/examples/user_data/rendered/al2/eks-mng-custom-ami-ipv6.sh b/tests/user-data/rendered/al2/eks-mng-custom-ami-ipv6.sh
similarity index 100%
rename from examples/user_data/rendered/al2/eks-mng-custom-ami-ipv6.sh
rename to tests/user-data/rendered/al2/eks-mng-custom-ami-ipv6.sh
diff --git a/examples/user_data/rendered/al2/eks-mng-custom-ami.sh b/tests/user-data/rendered/al2/eks-mng-custom-ami.sh
similarity index 100%
rename from examples/user_data/rendered/al2/eks-mng-custom-ami.sh
rename to tests/user-data/rendered/al2/eks-mng-custom-ami.sh
diff --git a/examples/user_data/rendered/al2/eks-mng-custom-template.sh b/tests/user-data/rendered/al2/eks-mng-custom-template.sh
similarity index 100%
rename from examples/user_data/rendered/al2/eks-mng-custom-template.sh
rename to tests/user-data/rendered/al2/eks-mng-custom-template.sh
diff --git a/examples/user_data/rendered/al2023/self-mng-no-op.txt b/tests/user-data/rendered/al2/eks-mng-no-op.sh
similarity index 100%
rename from examples/user_data/rendered/al2023/self-mng-no-op.txt
rename to tests/user-data/rendered/al2/eks-mng-no-op.sh
diff --git a/examples/user_data/rendered/al2/self-mng-bootstrap-ipv6.sh b/tests/user-data/rendered/al2/self-mng-bootstrap-ipv6.sh
similarity index 100%
rename from examples/user_data/rendered/al2/self-mng-bootstrap-ipv6.sh
rename to tests/user-data/rendered/al2/self-mng-bootstrap-ipv6.sh
diff --git a/examples/user_data/rendered/al2/self-mng-bootstrap.sh b/tests/user-data/rendered/al2/self-mng-bootstrap.sh
similarity index 100%
rename from examples/user_data/rendered/al2/self-mng-bootstrap.sh
rename to tests/user-data/rendered/al2/self-mng-bootstrap.sh
diff --git a/examples/user_data/rendered/al2/self-mng-custom-template.sh b/tests/user-data/rendered/al2/self-mng-custom-template.sh
similarity index 100%
rename from examples/user_data/rendered/al2/self-mng-custom-template.sh
rename to tests/user-data/rendered/al2/self-mng-custom-template.sh
diff --git a/examples/user_data/rendered/bottlerocket/eks-mng-no-op.toml b/tests/user-data/rendered/al2/self-mng-no-op.sh
similarity index 100%
rename from examples/user_data/rendered/bottlerocket/eks-mng-no-op.toml
rename to tests/user-data/rendered/al2/self-mng-no-op.sh
diff --git a/examples/user_data/rendered/al2023/eks-mng-additional.txt b/tests/user-data/rendered/al2023/eks-mng-additional.txt
similarity index 100%
rename from examples/user_data/rendered/al2023/eks-mng-additional.txt
rename to tests/user-data/rendered/al2023/eks-mng-additional.txt
diff --git a/examples/user_data/rendered/al2023/eks-mng-custom-ami.txt b/tests/user-data/rendered/al2023/eks-mng-custom-ami.txt
similarity index 100%
rename from examples/user_data/rendered/al2023/eks-mng-custom-ami.txt
rename to tests/user-data/rendered/al2023/eks-mng-custom-ami.txt
diff --git a/examples/user_data/rendered/al2023/eks-mng-custom-template.txt b/tests/user-data/rendered/al2023/eks-mng-custom-template.txt
similarity index 100%
rename from examples/user_data/rendered/al2023/eks-mng-custom-template.txt
rename to tests/user-data/rendered/al2023/eks-mng-custom-template.txt
diff --git a/examples/user_data/rendered/bottlerocket/self-mng-no-op.toml b/tests/user-data/rendered/al2023/eks-mng-no-op.txt
similarity index 100%
rename from examples/user_data/rendered/bottlerocket/self-mng-no-op.toml
rename to tests/user-data/rendered/al2023/eks-mng-no-op.txt
diff --git a/examples/user_data/rendered/al2023/self-mng-bootstrap.txt b/tests/user-data/rendered/al2023/self-mng-bootstrap.txt
similarity index 100%
rename from examples/user_data/rendered/al2023/self-mng-bootstrap.txt
rename to tests/user-data/rendered/al2023/self-mng-bootstrap.txt
diff --git a/examples/user_data/rendered/al2023/self-mng-custom-template.txt b/tests/user-data/rendered/al2023/self-mng-custom-template.txt
similarity index 100%
rename from examples/user_data/rendered/al2023/self-mng-custom-template.txt
rename to tests/user-data/rendered/al2023/self-mng-custom-template.txt
diff --git a/examples/user_data/rendered/windows/eks-mng-no-op.ps1 b/tests/user-data/rendered/al2023/self-mng-no-op.txt
similarity index 100%
rename from examples/user_data/rendered/windows/eks-mng-no-op.ps1
rename to tests/user-data/rendered/al2023/self-mng-no-op.txt
diff --git a/examples/user_data/rendered/bottlerocket/eks-mng-additional.toml b/tests/user-data/rendered/bottlerocket/eks-mng-additional.toml
similarity index 100%
rename from examples/user_data/rendered/bottlerocket/eks-mng-additional.toml
rename to tests/user-data/rendered/bottlerocket/eks-mng-additional.toml
diff --git a/examples/user_data/rendered/bottlerocket/eks-mng-custom-ami.toml b/tests/user-data/rendered/bottlerocket/eks-mng-custom-ami.toml
similarity index 100%
rename from examples/user_data/rendered/bottlerocket/eks-mng-custom-ami.toml
rename to tests/user-data/rendered/bottlerocket/eks-mng-custom-ami.toml
diff --git a/examples/user_data/rendered/bottlerocket/eks-mng-custom-template.toml b/tests/user-data/rendered/bottlerocket/eks-mng-custom-template.toml
similarity index 100%
rename from examples/user_data/rendered/bottlerocket/eks-mng-custom-template.toml
rename to tests/user-data/rendered/bottlerocket/eks-mng-custom-template.toml
diff --git a/examples/user_data/rendered/windows/self-mng-no-op.ps1 b/tests/user-data/rendered/bottlerocket/eks-mng-no-op.toml
similarity index 100%
rename from examples/user_data/rendered/windows/self-mng-no-op.ps1
rename to tests/user-data/rendered/bottlerocket/eks-mng-no-op.toml
diff --git a/examples/user_data/rendered/bottlerocket/self-mng-bootstrap.toml b/tests/user-data/rendered/bottlerocket/self-mng-bootstrap.toml
similarity index 100%
rename from examples/user_data/rendered/bottlerocket/self-mng-bootstrap.toml
rename to tests/user-data/rendered/bottlerocket/self-mng-bootstrap.toml
diff --git a/examples/user_data/rendered/bottlerocket/self-mng-custom-template.toml b/tests/user-data/rendered/bottlerocket/self-mng-custom-template.toml
similarity index 100%
rename from examples/user_data/rendered/bottlerocket/self-mng-custom-template.toml
rename to tests/user-data/rendered/bottlerocket/self-mng-custom-template.toml
diff --git a/tests/user-data/rendered/bottlerocket/self-mng-no-op.toml b/tests/user-data/rendered/bottlerocket/self-mng-no-op.toml
new file mode 100755
index 0000000..e69de29
diff --git a/examples/user_data/rendered/windows/eks-mng-additional.ps1 b/tests/user-data/rendered/windows/eks-mng-additional.ps1
similarity index 100%
rename from examples/user_data/rendered/windows/eks-mng-additional.ps1
rename to tests/user-data/rendered/windows/eks-mng-additional.ps1
diff --git a/examples/user_data/rendered/windows/eks-mng-custom-ami.ps1 b/tests/user-data/rendered/windows/eks-mng-custom-ami.ps1
similarity index 100%
rename from examples/user_data/rendered/windows/eks-mng-custom-ami.ps1
rename to tests/user-data/rendered/windows/eks-mng-custom-ami.ps1
diff --git a/examples/user_data/rendered/windows/eks-mng-custom-template.ps1 b/tests/user-data/rendered/windows/eks-mng-custom-template.ps1
similarity index 100%
rename from examples/user_data/rendered/windows/eks-mng-custom-template.ps1
rename to tests/user-data/rendered/windows/eks-mng-custom-template.ps1
diff --git a/tests/user-data/rendered/windows/eks-mng-no-op.ps1 b/tests/user-data/rendered/windows/eks-mng-no-op.ps1
new file mode 100755
index 0000000..e69de29
diff --git a/examples/user_data/rendered/windows/self-mng-bootstrap.ps1 b/tests/user-data/rendered/windows/self-mng-bootstrap.ps1
similarity index 100%
rename from examples/user_data/rendered/windows/self-mng-bootstrap.ps1
rename to tests/user-data/rendered/windows/self-mng-bootstrap.ps1
diff --git a/examples/user_data/rendered/windows/self-mng-custom-template.ps1 b/tests/user-data/rendered/windows/self-mng-custom-template.ps1
similarity index 100%
rename from examples/user_data/rendered/windows/self-mng-custom-template.ps1
rename to tests/user-data/rendered/windows/self-mng-custom-template.ps1
diff --git a/tests/user-data/rendered/windows/self-mng-no-op.ps1 b/tests/user-data/rendered/windows/self-mng-no-op.ps1
new file mode 100755
index 0000000..e69de29
diff --git a/examples/user_data/templates/al2023_custom.tpl b/tests/user-data/templates/al2023_custom.tpl
similarity index 100%
rename from examples/user_data/templates/al2023_custom.tpl
rename to tests/user-data/templates/al2023_custom.tpl
diff --git a/examples/user_data/templates/bottlerocket_custom.tpl b/tests/user-data/templates/bottlerocket_custom.tpl
similarity index 100%
rename from examples/user_data/templates/bottlerocket_custom.tpl
rename to tests/user-data/templates/bottlerocket_custom.tpl
diff --git a/examples/user_data/templates/linux_custom.tpl b/tests/user-data/templates/linux_custom.tpl
similarity index 100%
rename from examples/user_data/templates/linux_custom.tpl
rename to tests/user-data/templates/linux_custom.tpl
diff --git a/examples/user_data/templates/windows_custom.tpl b/tests/user-data/templates/windows_custom.tpl
similarity index 100%
rename from examples/user_data/templates/windows_custom.tpl
rename to tests/user-data/templates/windows_custom.tpl
diff --git a/tests/user-data/variables.tf b/tests/user-data/variables.tf
new file mode 100644
index 0000000..e69de29
diff --git a/examples/user_data/versions.tf b/tests/user-data/versions.tf
similarity index 100%
rename from examples/user_data/versions.tf
rename to tests/user-data/versions.tf
diff --git a/variables.tf b/variables.tf
index 639110a..2d75a4b 100644
--- a/variables.tf
+++ b/variables.tf
@@ -499,7 +499,7 @@ variable "cluster_encryption_policy_tags" {
}
variable "dataplane_wait_duration" {
- description = "Duration to wait after the EKS cluster has become active before creating the dataplane components (EKS managed nodegroup(s), self-managed nodegroup(s), Fargate profile(s))"
+ description = "Duration to wait after the EKS cluster has become active before creating the dataplane components (EKS managed node group(s), self-managed node group(s), Fargate profile(s))"
type = string
default = "30s"
}
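
As a usage note for the variable touched in the hunk above (not part of this diff): a minimal sketch of how a consumer might override `dataplane_wait_duration` in their own module call. The module block shape follows the README usage; the cluster name and the `60s` value are illustrative, and the remaining required inputs (VPC, subnets, node groups) are elided.

```hcl
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 20.0"

  cluster_name    = "my-cluster"
  cluster_version = "1.30"

  # Illustrative value: wait 60s (instead of the default "30s") after the
  # cluster becomes active before creating the dataplane components (EKS
  # managed node group(s), self-managed node group(s), Fargate profile(s))
  dataplane_wait_duration = "60s"

  # ... remaining cluster configuration (VPC, subnets, node groups, etc.)
}
```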