mirror of https://github.com/ysoftdevs/terraform-aws-eks.git (synced 2026-01-11 14:30:55 +01:00)

feat!: Removed support for launch configuration and replace count with for_each (#1680)

.github/CONTRIBUTING.md (vendored, 1 changed)
@@ -31,4 +31,3 @@ To generate changelog, Pull Requests or Commits must have semantic and must foll
- `chore:` for chores stuff

The `chore` prefix is skipped during changelog generation. It can be used for a `chore: update changelog` commit message, for example.

.github/images/security_groups.svg (vendored, new file)
File diff suppressed because one or more lines are too long
After: Size 81 KiB

.github/images/user_data.svg (vendored, new file)
File diff suppressed because one or more lines are too long
After: Size 32 KiB

.github/workflows/release.yml (vendored, 1 changed)
@@ -7,6 +7,7 @@ on:
      - main
      - master
    paths:
      - '**/*.tpl'
      - '**/*.py'
      - '**/*.tf'

.github/workflows/stale-actions.yaml (vendored, 2 changed)
@@ -29,4 +29,4 @@ jobs:
          days-before-close: 10
          delete-branch: true
          close-issue-message: This issue was automatically closed because of stale in 10 days
          close-pr-message: This PR was automatically closed because of stale in 10 days

.pre-commit-config.yaml
@@ -1,6 +1,6 @@
 repos:
   - repo: https://github.com/antonbabenko/pre-commit-terraform
-    rev: v1.58.0
+    rev: v1.62.0
     hooks:
       - id: terraform_fmt
       - id: terraform_validate
@@ -17,7 +17,7 @@ repos:
         - '--args=--only=terraform_documented_variables'
         - '--args=--only=terraform_typed_variables'
         - '--args=--only=terraform_module_pinned_source'
-        # - '--args=--only=terraform_naming_convention'
+        - '--args=--only=terraform_naming_convention'
         - '--args=--only=terraform_required_version'
         - '--args=--only=terraform_required_providers'
         - '--args=--only=terraform_standard_module_structure'
@@ -26,3 +26,4 @@ repos:
     rev: v4.0.1
     hooks:
       - id: check-merge-conflict
       - id: end-of-file-fixer

UPGRADE-18.0.md (new file, 550 lines)
@@ -0,0 +1,550 @@
# Upgrade from v17.x to v18.x

Please consult the `examples` directory for reference example configurations. If you find a bug, please open an issue with supporting configuration to reproduce.

## List of backwards incompatible changes

- Launch configuration support has been removed and only launch template is supported going forward. AWS is no longer adding new features back into launch configuration and their docs state [`We strongly recommend that you do not use launch configurations. They do not provide full functionality for Amazon EC2 Auto Scaling or Amazon EC2. We provide information about launch configurations for customers who have not yet migrated from launch configurations to launch templates.`](https://docs.aws.amazon.com/autoscaling/ec2/userguide/LaunchConfiguration.html)
- Support for managing the aws-auth configmap has been removed. This change also removes the dependency on the Kubernetes Terraform provider, the local dependency on aws-iam-authenticator for users, as well as the reliance on the forked http provider to wait and poll on cluster creation. To aid users in this change, an output variable `aws_auth_configmap_yaml` has been provided which renders the aws-auth configmap necessary to support at least the IAM roles used by the module (additional mapRoles/mapUsers definitions are to be provided by users); a sketch of consuming this output follows this list
- Support for managing kubeconfig and its associated `local_file` resources has been removed; users can use the awscli-provided `aws eks update-kubeconfig --name <cluster_name>` to update their local kubeconfig as necessary
- The terminology used in the module has been modified to reflect that used by the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/eks-compute.html).
  - [AWS EKS Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html), `eks_managed_node_groups`, was previously referred to as simply node group, `node_groups`
  - [Self Managed Node Group](https://docs.aws.amazon.com/eks/latest/userguide/worker.html), `self_managed_node_groups`, was previously referred to as worker group, `worker_groups`
  - [AWS Fargate Profile](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html), `fargate_profiles`, remains unchanged in terms of naming and terminology
- The three different node group types supported by AWS and the module have been refactored into standalone sub-modules that are used by the root `eks` module and are also available for individual, standalone consumption if desired.
  - The previous `node_groups` sub-module is now named `eks-managed-node-group` and provisions a single AWS EKS Managed Node Group per sub-module definition (the previous version utilized `for_each` to create 0 or more node groups)
  - Additional changes for the `eks-managed-node-group` sub-module over the previous `node_groups` module include:
    - Variable name changes defined in section `Variable and output changes` below
    - Support has been added for nearly full control of the IAM role created, or for providing the ARN of an existing IAM role
    - Support has been added for nearly full control of the security group created, or for providing the ID of an existing security group
    - User data has been revamped and all user data logic moved to the `_user_data` internal sub-module; the local `userdata.sh.tpl` has been removed entirely
  - The previous `fargate` sub-module is now named `fargate-profile` and provisions a single AWS EKS Fargate Profile per sub-module definition (the previous version utilized `for_each` to create 0 or more profiles)
  - Additional changes for the `fargate-profile` sub-module over the previous `fargate` module include:
    - Variable name changes defined in section `Variable and output changes` below
    - Support has been added for nearly full control of the IAM role created, or for providing the ARN of an existing IAM role
    - Similar to the `eks_managed_node_group_defaults` and `self_managed_node_group_defaults`, a `fargate_profile_defaults` has been provided to allow users to control the default configurations for the Fargate profiles created
  - A sub-module for `self-managed-node-group` has been created and provisions a single self managed node group (autoscaling group) per sub-module definition
  - Additional changes for the `self-managed-node-group` sub-module over the previous `node_groups` variable include:
    - The underlying autoscaling group and launch template have been updated to more closely match those of the [`terraform-aws-autoscaling`](https://github.com/terraform-aws-modules/terraform-aws-autoscaling) module and the features it offers
    - The previous iteration used a count over a list of node group definitions, which was prone to disruptive updates; this has been replaced with a map/`for_each` to align with the EKS managed node group and Fargate profile behavior/style
- The user data configuration supported across the module has been completely revamped. A new `_user_data` internal sub-module has been created to consolidate all user data configuration in one location, which provides better support for testability (via the [`examples/user_data`](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/user_data) example). The new sub-module supports nearly all possible combinations, including the ability for users to provide their own user data template, which will be rendered by the module. See the `examples/user_data` example project for the full range of example configuration possibilities; more details on the logic of the design can be found in the [`modules/_user_data`](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/modules/_user_data_) directory.

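As a hedged illustration of the two removals above (not something the module performs itself), the rendered `aws_auth_configmap_yaml` output can be written to disk with a `local_file` resource and applied out of band alongside `aws eks update-kubeconfig`; the module instance name `eks` and the file name here are assumptions:

```hcl
# Illustrative only: persist the rendered aws-auth ConfigMap for out-of-band application.
# "module.eks" and the output file name are placeholders for your own configuration.
resource "local_file" "aws_auth" {
  content         = module.eks.aws_auth_configmap_yaml
  filename        = "${path.module}/aws-auth.yaml"
  file_permission = "0600"
}

# Then, outside of Terraform:
#   aws eks update-kubeconfig --name <cluster_name>
#   kubectl apply -f aws-auth.yaml
```
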
## Additional changes

### Added

- Support for AWS EKS Addons has been added
- Support for AWS EKS Cluster Identity Provider Configuration has been added
- The AWS Terraform provider minimum required version has been updated to 3.64 to support the changes made and the additional resources supported
- An example `user_data` project has been added to aid in demonstrating, testing, and validating the various methods of configuring user data with the `_user_data` sub-module as well as the root `eks` module
- Template for rendering the aws-auth configmap output - `aws_auth_cm.tpl`
- Template for Bottlerocket OS user data bootstrapping - `bottlerocket_user_data.tpl`

### Modified

- The previous `fargate` example has been renamed to `fargate_profile`
- The previous `irsa` and `instance_refresh` examples have been merged into one example, `irsa_autoscale_refresh`
- The previous `managed_node_groups` example has been renamed to `self_managed_node_group`
- The previously hardcoded EKS OIDC root CA thumbprint value and variable have been replaced with a `tls_certificate` data source that refers to the cluster OIDC issuer url; thumbprint values should remain unchanged, however
- Individual cluster security group resources have been replaced with a single security group resource that takes a map of rules as input. The default ingress/egress rules have had their scope reduced in order to provide the bare minimum of access to permit successful cluster creation and allow users to opt in to any additional network access as needed for a better security posture. This means the `0.0.0.0/0` egress rule has been removed; TCP/443 and TCP/10250 egress rules to the node group security group are used instead (a sketch of adding rules back follows this list)
- The Linux/bash user data template has been updated to include the bare minimum necessary for bootstrapping AWS EKS Optimized AMI derivative nodes, with provisions for providing additional user data and configurations; it was named `userdata.sh.tpl` and is now named `linux_user_data.tpl`
- The Windows user data template has been renamed from `userdata_windows.tpl` to `windows_user_data.tpl`

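Judging by the variable names listed later in this guide, the rule map is keyed by arbitrary names and accepts the usual `aws_security_group_rule`-style arguments. The following is only a hedged sketch of opting back into broader egress; the rule key and CIDR are placeholders:

```hcl
# Hypothetical example: re-open additional egress from the cluster security group.
module "eks" {
  # ... other arguments ...

  cluster_security_group_additional_rules = {
    egress_to_vpc = {
      description = "Cluster egress to the VPC"
      type        = "egress"
      protocol    = "tcp"
      from_port   = 1025
      to_port     = 65535
      cidr_blocks = ["10.0.0.0/16"]
    }
  }
}
```
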
### Removed

- Miscellaneous documents on how to configure Kubernetes cluster internals have been removed. Documentation on how to configure the AWS EKS cluster and the supporting infrastructure resources provided by the module remains supported, while cluster-internal configuration is out of scope for this project
- The previous `bottlerocket` example has been removed in favor of demonstrating the use and configuration of Bottlerocket nodes via the respective `eks_managed_node_group` and `self_managed_node_group` examples
- The previous `launch_template` and `launch_templates_with_managed_node_groups` examples have been removed; only launch templates are now supported (default) and launch configuration support has been removed
- The previous `secrets_encryption` example has been removed; the functionality has been demonstrated in several of the new examples, rendering this standalone example redundant
- The additional, custom IAM role policy for the cluster role has been removed. The permissions are either now provided by the attached AWS managed policies or are no longer required
- The `kubeconfig.tpl` template; kubeconfig management is no longer supported under this module
- The HTTP Terraform provider (forked copy) dependency has been removed

### Variable and output changes

1. Removed variables:

    - `cluster_create_timeout`, `cluster_update_timeout`, and `cluster_delete_timeout` have been replaced with `cluster_timeouts`
    - `kubeconfig_name`
    - `kubeconfig_output_path`
    - `kubeconfig_file_permission`
    - `kubeconfig_api_version`
    - `kubeconfig_aws_authenticator_command`
    - `kubeconfig_aws_authenticator_command_args`
    - `kubeconfig_aws_authenticator_additional_args`
    - `kubeconfig_aws_authenticator_env_variables`
    - `write_kubeconfig`
    - `default_platform`
    - `manage_aws_auth`
    - `aws_auth_additional_labels`
    - `map_accounts`
    - `map_roles`
    - `map_users`
    - `fargate_subnets`
    - `worker_groups_launch_template`
    - `worker_security_group_id`
    - `worker_ami_name_filter`
    - `worker_ami_name_filter_windows`
    - `worker_ami_owner_id`
    - `worker_ami_owner_id_windows`
    - `worker_additional_security_group_ids`
    - `worker_sg_ingress_from_port`
    - `workers_additional_policies`
    - `worker_create_security_group`
    - `worker_create_initial_lifecycle_hooks`
    - `worker_create_cluster_primary_security_group_rules`
    - `cluster_create_endpoint_private_access_sg_rule`
    - `cluster_endpoint_private_access_cidrs`
    - `cluster_endpoint_private_access_sg`
    - `manage_worker_iam_resources`
    - `workers_role_name`
    - `attach_worker_cni_policy`
    - `eks_oidc_root_ca_thumbprint`
    - `create_fargate_pod_execution_role`
    - `fargate_pod_execution_role_name`
    - `cluster_egress_cidrs`
    - `workers_egress_cidrs`
    - `wait_for_cluster_timeout`
    - EKS Managed Node Group sub-module (was `node_groups`)
      - `default_iam_role_arn`
      - `workers_group_defaults`
      - `worker_security_group_id`
      - `node_groups_defaults`
      - `node_groups`
      - `ebs_optimized_not_supported`
    - Fargate profile sub-module (was `fargate`)
      - `create_eks` and `create_fargate_pod_execution_role` have been replaced with simply `create`

2. Renamed variables:

    - `create_eks` -> `create`
    - `subnets` -> `subnet_ids`
    - `cluster_create_security_group` -> `create_cluster_security_group`
    - `cluster_log_retention_in_days` -> `cloudwatch_log_group_retention_in_days`
    - `cluster_log_kms_key_id` -> `cloudwatch_log_group_kms_key_id`
    - `manage_cluster_iam_resources` -> `create_iam_role`
    - `cluster_iam_role_name` -> `iam_role_name`
    - `permissions_boundary` -> `iam_role_permissions_boundary`
    - `iam_path` -> `iam_role_path`
    - `pre_userdata` -> `pre_bootstrap_user_data`
    - `additional_userdata` -> `post_bootstrap_user_data`
    - `worker_groups` -> `self_managed_node_groups`
    - `workers_group_defaults` -> `self_managed_node_group_defaults`
    - `node_groups` -> `eks_managed_node_groups`
    - `node_groups_defaults` -> `eks_managed_node_group_defaults`
    - EKS Managed Node Group sub-module (was `node_groups`)
      - `create_eks` -> `create`
      - `worker_additional_security_group_ids` -> `vpc_security_group_ids`
    - Fargate profile sub-module
      - `fargate_pod_execution_role_name` -> `name`
      - `create_fargate_pod_execution_role` -> `create_iam_role`
      - `subnets` -> `subnet_ids`
      - `iam_path` -> `iam_role_path`
      - `permissions_boundary` -> `iam_role_permissions_boundary`

3. Added variables:

    - `cluster_additional_security_group_ids` added to allow users to add additional security groups to the cluster as needed
    - `cluster_security_group_name`
    - `cluster_security_group_use_name_prefix` added to allow users to use the name exactly as specified or (the default) to use the specified name as a prefix
    - `cluster_security_group_description`
    - `cluster_security_group_additional_rules`
    - `cluster_security_group_tags`
    - `create_cloudwatch_log_group` added in place of the logic that checked if any cluster log types were enabled to allow users to opt in as they see fit
    - `create_node_security_group` added to create a single security group that connects node groups and the cluster in a central location
    - `node_security_group_id`
    - `node_security_group_name`
    - `node_security_group_use_name_prefix`
    - `node_security_group_description`
    - `node_security_group_additional_rules`
    - `node_security_group_tags`
    - `iam_role_arn`
    - `iam_role_use_name_prefix`
    - `iam_role_description`
    - `iam_role_additional_policies`
    - `iam_role_tags`
    - `cluster_addons` (see the sketch after this list)
    - `cluster_identity_providers`
    - `fargate_profile_defaults`
    - EKS Managed Node Group sub-module (was `node_groups`)
      - `platform`
      - `enable_bootstrap_user_data`
      - `pre_bootstrap_user_data`
      - `post_bootstrap_user_data`
      - `bootstrap_extra_args`
      - `user_data_template_path`
      - `create_launch_template`
      - `launch_template_name`
      - `launch_template_use_name_prefix`
      - `description`
      - `ebs_optimized`
      - `ami_id`
      - `key_name`
      - `launch_template_default_version`
      - `update_launch_template_default_version`
      - `disable_api_termination`
      - `kernel_id`
      - `ram_disk_id`
      - `block_device_mappings`
      - `capacity_reservation_specification`
      - `cpu_options`
      - `credit_specification`
      - `elastic_gpu_specifications`
      - `elastic_inference_accelerator`
      - `enclave_options`
      - `instance_market_options`
      - `license_specifications`
      - `metadata_options`
      - `enable_monitoring`
      - `network_interfaces`
      - `placement`
      - `min_size`
      - `max_size`
      - `desired_size`
      - `use_name_prefix`
      - `ami_type`
      - `ami_release_version`
      - `capacity_type`
      - `disk_size`
      - `force_update_version`
      - `instance_types`
      - `labels`
      - `cluster_version`
      - `launch_template_version`
      - `remote_access`
      - `taints`
      - `update_config`
      - `timeouts`
      - `create_security_group`
      - `security_group_name`
      - `security_group_use_name_prefix`
      - `security_group_description`
      - `vpc_id`
      - `security_group_rules`
      - `cluster_security_group_id`
      - `security_group_tags`
      - `create_iam_role`
      - `iam_role_arn`
      - `iam_role_name`
      - `iam_role_use_name_prefix`
      - `iam_role_path`
      - `iam_role_description`
      - `iam_role_permissions_boundary`
      - `iam_role_additional_policies`
      - `iam_role_tags`
    - Fargate profile sub-module (was `fargate`)
      - `iam_role_arn` (used when `create_iam_role` is `false` to bring your own externally created role)
      - `iam_role_name`
      - `iam_role_use_name_prefix`
      - `iam_role_description`
      - `iam_role_additional_policies`
      - `iam_role_tags`
      - `selectors`
      - `timeouts`

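Two of the newly added root variables, `cluster_addons` and `cluster_identity_providers`, cover functionality that previously required separate resources. The snippet below is only a hedged sketch; the addon names and the `resolve_conflicts` setting are assumptions to adapt to your cluster:

```hcl
module "eks" {
  # ... other arguments ...

  # Illustrative only: manage the common EKS addons through the module.
  cluster_addons = {
    coredns = {
      resolve_conflicts = "OVERWRITE"
    }
    kube-proxy = {}
    vpc-cni = {
      resolve_conflicts = "OVERWRITE"
    }
  }
}
```
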
4. Removed outputs:

    - `cluster_version`
    - `kubeconfig`
    - `kubeconfig_filename`
    - `workers_asg_arns`
    - `workers_asg_names`
    - `workers_user_data`
    - `workers_default_ami_id`
    - `workers_default_ami_id_windows`
    - `workers_launch_template_ids`
    - `workers_launch_template_arns`
    - `workers_launch_template_latest_versions`
    - `worker_security_group_id`
    - `worker_iam_instance_profile_arns`
    - `worker_iam_instance_profile_names`
    - `worker_iam_role_name`
    - `worker_iam_role_arn`
    - `fargate_profile_ids`
    - `fargate_profile_arns`
    - `fargate_iam_role_name`
    - `fargate_iam_role_arn`
    - `node_groups`
    - `security_group_rule_cluster_https_worker_ingress`
    - EKS Managed Node Group sub-module (was `node_groups`)
      - `node_groups`
      - `aws_auth_roles`
    - Fargate profile sub-module (was `fargate`)
      - `aws_auth_roles`

5. Renamed outputs:

    - `config_map_aws_auth` -> `aws_auth_configmap_yaml`
    - Fargate profile sub-module (was `fargate`)
      - `fargate_profile_ids` -> `fargate_profile_id`
      - `fargate_profile_arns` -> `fargate_profile_arn`

6. Added outputs:

    - `cluster_platform_version`
    - `cluster_status`
    - `cluster_security_group_arn`
    - `cluster_security_group_id`
    - `node_security_group_arn`
    - `node_security_group_id`
    - `cluster_iam_role_unique_id`
    - `cluster_addons`
    - `cluster_identity_providers`
    - `fargate_profiles`
    - `eks_managed_node_groups`
    - `self_managed_node_groups`
    - EKS Managed Node Group sub-module (was `node_groups`)
      - `launch_template_id`
      - `launch_template_arn`
      - `launch_template_latest_version`
      - `node_group_arn`
      - `node_group_id`
      - `node_group_resources`
      - `node_group_status`
      - `security_group_arn`
      - `security_group_id`
      - `iam_role_name`
      - `iam_role_arn`
      - `iam_role_unique_id`
    - Fargate profile sub-module (was `fargate`)
      - `iam_role_unique_id`
      - `fargate_profile_status`

## Upgrade Migrations

### Before 17.x Example

```hcl
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 17.0"

  cluster_name    = local.name
  cluster_version = local.cluster_version
  cluster_endpoint_private_access = true
  cluster_endpoint_public_access  = true

  vpc_id  = module.vpc.vpc_id
  subnets = module.vpc.private_subnets

  # Managed Node Groups
  node_groups_defaults = {
    ami_type  = "AL2_x86_64"
    disk_size = 50
  }

  node_groups = {
    node_group = {
      min_capacity     = 1
      max_capacity     = 10
      desired_capacity = 1

      instance_types = ["t3.large"]
      capacity_type  = "SPOT"

      update_config = {
        max_unavailable_percentage = 50
      }

      k8s_labels = {
        Environment = "test"
        GithubRepo  = "terraform-aws-eks"
        GithubOrg   = "terraform-aws-modules"
      }

      taints = [
        {
          key    = "dedicated"
          value  = "gpuGroup"
          effect = "NO_SCHEDULE"
        }
      ]

      additional_tags = {
        ExtraTag = "example"
      }
    }
  }

  # Worker groups
  worker_additional_security_group_ids = [aws_security_group.additional.id]

  worker_groups_launch_template = [
    {
      name                    = "worker-group"
      override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
      spot_instance_pools     = 4
      asg_max_size            = 5
      asg_desired_capacity    = 2
      kubelet_extra_args      = "--node-labels=node.kubernetes.io/lifecycle=spot"
      public_ip               = true
    },
  ]

  # Fargate
  fargate_profiles = {
    default = {
      name = "default"
      selectors = [
        {
          namespace = "kube-system"
          labels = {
            k8s-app = "kube-dns"
          }
        },
        {
          namespace = "default"
        }
      ]

      tags = {
        Owner = "test"
      }

      timeouts = {
        create = "20m"
        delete = "20m"
      }
    }
  }

  tags = {
    Environment = "test"
    GithubRepo  = "terraform-aws-eks"
    GithubOrg   = "terraform-aws-modules"
  }
}
```

### After 18.x Example

```hcl
module "cluster_after" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 18.0"

  cluster_name    = local.name
  cluster_version = local.cluster_version
  cluster_endpoint_private_access = true
  cluster_endpoint_public_access  = true

  vpc_id     = module.vpc.vpc_id
  subnet_ids = module.vpc.private_subnets

  eks_managed_node_group_defaults = {
    ami_type  = "AL2_x86_64"
    disk_size = 50
  }

  eks_managed_node_groups = {
    node_group = {
      min_size     = 1
      max_size     = 10
      desired_size = 1

      instance_types = ["t3.large"]
      capacity_type  = "SPOT"

      update_config = {
        max_unavailable_percentage = 50
      }

      labels = {
        Environment = "test"
        GithubRepo  = "terraform-aws-eks"
        GithubOrg   = "terraform-aws-modules"
      }

      taints = [
        {
          key    = "dedicated"
          value  = "gpuGroup"
          effect = "NO_SCHEDULE"
        }
      ]

      tags = {
        ExtraTag = "example"
      }
    }
  }

  self_managed_node_group_defaults = {
    vpc_security_group_ids = [aws_security_group.additional.id]
  }

  self_managed_node_groups = {
    worker_group = {
      name = "worker-group"

      min_size      = 1
      max_size      = 5
      desired_size  = 2
      instance_type = "m4.large"

      bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"

      block_device_mappings = {
        xvda = {
          device_name = "/dev/xvda"
          ebs = {
            delete_on_termination = true
            encrypted             = false
            volume_size           = 100
            volume_type           = "gp2"
          }
        }
      }

      use_mixed_instances_policy = true
      mixed_instances_policy = {
        instances_distribution = {
          spot_instance_pools = 4
        }

        override = [
          { instance_type = "m5.large" },
          { instance_type = "m5a.large" },
          { instance_type = "m5d.large" },
          { instance_type = "m5ad.large" },
        ]
      }
    }
  }

  # Fargate
  fargate_profiles = {
    default = {
      name = "default"

      selectors = [
        {
          namespace = "kube-system"
          labels = {
            k8s-app = "kube-dns"
          }
        },
        {
          namespace = "default"
        }
      ]

      tags = {
        Owner = "test"
      }

      timeouts = {
        create = "20m"
        delete = "20m"
      }
    }
  }

  tags = {
    Environment = "test"
    GithubRepo  = "terraform-aws-eks"
    GithubOrg   = "terraform-aws-modules"
  }
}
```

aws_auth.tf (deleted, 92 lines)
@@ -1,92 +0,0 @@
```hcl
locals {
  auth_launch_template_worker_roles = [
    for index in range(0, var.create_eks ? local.worker_group_launch_template_count : 0) : {
      worker_role_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${element(
        coalescelist(
          aws_iam_instance_profile.workers_launch_template.*.role,
          data.aws_iam_instance_profile.custom_worker_group_launch_template_iam_instance_profile.*.role_name,
          [""]
        ),
        index
      )}"
      platform = lookup(
        var.worker_groups_launch_template[index],
        "platform",
        local.workers_group_defaults["platform"]
      )
    }
  ]

  auth_worker_roles = [
    for index in range(0, var.create_eks ? local.worker_group_launch_configuration_count : 0) : {
      worker_role_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${element(
        coalescelist(
          aws_iam_instance_profile.workers.*.role,
          data.aws_iam_instance_profile.custom_worker_group_iam_instance_profile.*.role_name,
          [""]
        ),
        index,
      )}"
      platform = lookup(
        var.worker_groups[index],
        "platform",
        local.workers_group_defaults["platform"]
      )
    }
  ]

  # Convert to format needed by aws-auth ConfigMap
  configmap_roles = [
    for role in concat(
      local.auth_launch_template_worker_roles,
      local.auth_worker_roles,
      module.node_groups.aws_auth_roles,
      module.fargate.aws_auth_roles,
    ) :
    {
      # Work around https://github.com/kubernetes-sigs/aws-iam-authenticator/issues/153
      # Strip the leading slash off so that Terraform doesn't think it's a regex
      rolearn  = replace(role["worker_role_arn"], replace(var.iam_path, "/^//", ""), "")
      username = role["platform"] == "fargate" ? "system:node:{{SessionName}}" : "system:node:{{EC2PrivateDNSName}}"
      groups = tolist(concat(
        [
          "system:bootstrappers",
          "system:nodes",
        ],
        role["platform"] == "windows" ? ["eks:kube-proxy-windows"] : [],
        role["platform"] == "fargate" ? ["system:node-proxier"] : [],
      ))
    }
  ]
}

resource "kubernetes_config_map" "aws_auth" {
  count = var.create_eks && var.manage_aws_auth ? 1 : 0

  metadata {
    name      = "aws-auth"
    namespace = "kube-system"
    labels = merge(
      {
        "app.kubernetes.io/managed-by" = "Terraform"
        # / are replaced by . because label validator fails in this lib
        # https://github.com/kubernetes/apimachinery/blob/1bdd76d09076d4dc0362456e59c8f551f5f24a72/pkg/util/validation/validation.go#L166
        "terraform.io/module" = "terraform-aws-modules.eks.aws"
      },
      var.aws_auth_additional_labels
    )
  }

  data = {
    mapRoles = yamlencode(
      distinct(concat(
        local.configmap_roles,
        var.map_roles,
      ))
    )
    mapUsers    = yamlencode(var.map_users)
    mapAccounts = yamlencode(var.map_accounts)
  }

  depends_on = [data.http.wait_for_cluster[0]]
}
```

data.tf (deleted, 104 lines)
@@ -1,104 +0,0 @@
data "aws_partition" "current" {}
|
||||
|
||||
data "aws_caller_identity" "current" {}
|
||||
|
||||
data "aws_iam_policy_document" "workers_assume_role_policy" {
|
||||
statement {
|
||||
sid = "EKSWorkerAssumeRole"
|
||||
|
||||
actions = [
|
||||
"sts:AssumeRole",
|
||||
]
|
||||
|
||||
principals {
|
||||
type = "Service"
|
||||
identifiers = [local.ec2_principal]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
data "aws_ami" "eks_worker" {
|
||||
count = contains(local.worker_groups_platforms, "linux") ? 1 : 0
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = [local.worker_ami_name_filter]
|
||||
}
|
||||
|
||||
most_recent = true
|
||||
|
||||
owners = [var.worker_ami_owner_id]
|
||||
}
|
||||
|
||||
data "aws_ami" "eks_worker_windows" {
|
||||
count = contains(local.worker_groups_platforms, "windows") ? 1 : 0
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = [local.worker_ami_name_filter_windows]
|
||||
}
|
||||
|
||||
filter {
|
||||
name = "platform"
|
||||
values = ["windows"]
|
||||
}
|
||||
|
||||
most_recent = true
|
||||
|
||||
owners = [var.worker_ami_owner_id_windows]
|
||||
}
|
||||
|
||||
data "aws_iam_policy_document" "cluster_assume_role_policy" {
|
||||
statement {
|
||||
sid = "EKSClusterAssumeRole"
|
||||
|
||||
actions = [
|
||||
"sts:AssumeRole",
|
||||
]
|
||||
|
||||
principals {
|
||||
type = "Service"
|
||||
identifiers = ["eks.amazonaws.com"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
data "aws_iam_role" "custom_cluster_iam_role" {
|
||||
count = var.manage_cluster_iam_resources ? 0 : 1
|
||||
|
||||
name = var.cluster_iam_role_name
|
||||
}
|
||||
|
||||
data "aws_iam_instance_profile" "custom_worker_group_iam_instance_profile" {
|
||||
count = var.manage_worker_iam_resources ? 0 : local.worker_group_launch_configuration_count
|
||||
|
||||
name = lookup(
|
||||
var.worker_groups[count.index],
|
||||
"iam_instance_profile_name",
|
||||
local.workers_group_defaults["iam_instance_profile_name"],
|
||||
)
|
||||
}
|
||||
|
||||
data "aws_iam_instance_profile" "custom_worker_group_launch_template_iam_instance_profile" {
|
||||
count = var.manage_worker_iam_resources ? 0 : local.worker_group_launch_template_count
|
||||
|
||||
name = lookup(
|
||||
var.worker_groups_launch_template[count.index],
|
||||
"iam_instance_profile_name",
|
||||
local.workers_group_defaults["iam_instance_profile_name"],
|
||||
)
|
||||
}
|
||||
|
||||
data "http" "wait_for_cluster" {
|
||||
count = var.create_eks && var.manage_aws_auth ? 1 : 0
|
||||
|
||||
url = format("%s/healthz", aws_eks_cluster.this[0].endpoint)
|
||||
ca_certificate = base64decode(local.cluster_auth_base64)
|
||||
timeout = var.wait_for_cluster_timeout
|
||||
|
||||
depends_on = [
|
||||
aws_eks_cluster.this,
|
||||
aws_security_group_rule.cluster_private_access_sg_source,
|
||||
aws_security_group_rule.cluster_private_access_cidrs_source,
|
||||
]
|
||||
}
|
||||
@@ -1,98 +0,0 @@
# Autoscaling

To enable worker node autoscaling you will need to do a few things:

- Add the [required tags](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler/cloudprovider/aws#auto-discovery-setup) to the worker group (see the tag sketch after this list)
- Install the cluster-autoscaler
- Give the cluster-autoscaler access via an IAM policy

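The exact tag keys come from the auto-discovery setup linked above; the worker-group shape below is only a hedged sketch using the pre-v18 `worker_groups` syntax, and `local.cluster_name` is a placeholder:

```hcl
# Illustrative only: auto-discovery tags propagated to the worker group's instances.
worker_groups = [
  {
    name = "worker-group"
    tags = [
      {
        key                 = "k8s.io/cluster-autoscaler/enabled"
        value               = "true"
        propagate_at_launch = true
      },
      {
        key                 = "k8s.io/cluster-autoscaler/${local.cluster_name}"
        value               = "owned"
        propagate_at_launch = true
      },
    ]
  },
]
```
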
It's probably easiest to follow the example in [examples/irsa](../examples/irsa), which installs the cluster-autoscaler using [Helm](https://helm.sh/) and uses IRSA to attach a policy.

If you don't want to use IRSA then you will need to attach the IAM policy to the worker node IAM role or add AWS credentials to the cluster-autoscaler environment variables. Here is some example terraform code for the policy:

```hcl
resource "aws_iam_role_policy_attachment" "workers_autoscaling" {
  policy_arn = aws_iam_policy.worker_autoscaling.arn
  role       = module.my_cluster.worker_iam_role_name
}

resource "aws_iam_policy" "worker_autoscaling" {
  name_prefix = "eks-worker-autoscaling-${module.my_cluster.cluster_id}"
  description = "EKS worker node autoscaling policy for cluster ${module.my_cluster.cluster_id}"
  policy      = data.aws_iam_policy_document.worker_autoscaling.json
  path        = var.iam_path
  tags        = var.tags
}

data "aws_iam_policy_document" "worker_autoscaling" {
  statement {
    sid    = "eksWorkerAutoscalingAll"
    effect = "Allow"

    actions = [
      "autoscaling:DescribeAutoScalingGroups",
      "autoscaling:DescribeAutoScalingInstances",
      "autoscaling:DescribeLaunchConfigurations",
      "autoscaling:DescribeTags",
      "ec2:DescribeLaunchTemplateVersions",
    ]

    resources = ["*"]
  }

  statement {
    sid    = "eksWorkerAutoscalingOwn"
    effect = "Allow"

    actions = [
      "autoscaling:SetDesiredCapacity",
      "autoscaling:TerminateInstanceInAutoScalingGroup",
      "autoscaling:UpdateAutoScalingGroup",
    ]

    resources = ["*"]

    condition {
      test     = "StringEquals"
      variable = "autoscaling:ResourceTag/kubernetes.io/cluster/${module.my_cluster.cluster_id}"
      values   = ["owned"]
    }

    condition {
      test     = "StringEquals"
      variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled"
      values   = ["true"]
    }
  }
}
```

Example values for the [helm chart](https://github.com/helm/charts/tree/master/stable/cluster-autoscaler):

```yaml
rbac:
  create: true

cloudProvider: aws
awsRegion: YOUR_AWS_REGION

autoDiscovery:
  clusterName: YOUR_CLUSTER_NAME
  enabled: true

image:
  repository: us.gcr.io/k8s-artifacts-prod/autoscaling/cluster-autoscaler
  tag: v1.16.5
```

To install the chart, simply run helm with the `--values` option:

```
helm install stable/cluster-autoscaler --values=path/to/your/values-file.yaml
```

## Notes

There is an `asg_desired_capacity` variable in the `local.tf` file. It can be used to change the desired worker capacity of the autoscaling group, but it is currently ignored by Terraform to reduce [complexity](https://github.com/terraform-aws-modules/terraform-aws-eks/issues/510#issuecomment-531700442); scaling the cluster nodes up and down is handled by the cluster autoscaler.

The cluster autoscaler major and minor versions must match your cluster. For example, if you are running a 1.16 EKS cluster, set `image.tag=v1.16.5`. Search through their [releases page](https://github.com/kubernetes/autoscaler/releases) for valid version numbers.

@@ -1,23 +0,0 @@
# Enable Docker Bridge Network

The latest versions of the AWS EKS-optimized AMI disable the docker bridge network by default. To enable it, add the `bootstrap_extra_args` parameter to your worker group template.

```hcl
locals {
  worker_groups = [
    {
      # Other parameters omitted for brevity
      bootstrap_extra_args = "--enable-docker-bridge true"
    }
  ]
}
```

Examples of when this would be necessary are:

- You are running Continuous Integration in Kubernetes and building Docker images by either mounting the Docker socket as a volume or using Docker-in-Docker. Without the bridge enabled, internal routing from the inner container can't reach the outside world.

## See More

- [Docker in Docker no longer works without docker0 bridge](https://github.com/awslabs/amazon-eks-ami/issues/183)
- [Add enable-docker-bridge bootstrap argument](https://github.com/awslabs/amazon-eks-ami/pull/187)

docs/faq.md (deleted, 235 lines)
@@ -1,235 +0,0 @@
# Frequently Asked Questions

## How do I customize X on the worker group's settings?

All the options that can be customized for worker groups are listed in [local.tf](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/local.tf) under `workers_group_defaults_defaults`.

Please open Issues or PRs if you think something is missing.

## Why are nodes not being registered?

### Networking

This is often caused by a networking or endpoint configuration issue.

At least one of the cluster's public or private endpoints must be enabled in order to access the cluster. If you require a public endpoint, setting up both (public and private) and restricting the public endpoint via `cluster_endpoint_public_access_cidrs` is recommended. More about communication with an endpoint is available [here](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html).

Nodes need to be able to contact the EKS cluster endpoint. By default, the module only creates a public endpoint. To access the endpoint, the nodes need outgoing internet access:

- Nodes in private subnets: via a NAT gateway or NAT instance, which must be added along with appropriate routing rules.
- Nodes in public subnets: assign public IPs to nodes. Set `public_ip = true` in the `worker_groups` list on this module.

> Important:
> If you apply only the public endpoint and set `cluster_endpoint_public_access_cidrs` to restrict access, remember that EKS nodes also use the public endpoint, so you must allow access from their addresses as well; otherwise your nodes will not work correctly.

The cluster's private endpoint can also be enabled by setting `cluster_endpoint_private_access = true` on this module. Node calls to the endpoint stay within the VPC.

When the private endpoint is enabled, ensure that VPC DNS resolution and hostnames are also enabled:

- If managing the VPC with Terraform: set `enable_dns_hostnames = true` and `enable_dns_support = true` on the `aws_vpc` resource. The [`terraform-aws-module/vpc/aws`](https://github.com/terraform-aws-modules/terraform-aws-vpc/) community module also has these variables.
- Otherwise refer to the [AWS VPC docs](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-dns.html#vpc-dns-updating) and [AWS EKS Cluster Endpoint Access docs](https://docs.aws.amazon.com/eks/latest/userguide/cluster-endpoint.html) for more information.

Nodes also need to be able to connect to other AWS services and to pull container images from repositories. If for some reason you cannot enable public internet access for nodes, you can add VPC endpoints to the relevant services: EC2 API, ECR API, ECR DKR and S3.

### `aws-auth` ConfigMap not present

The module configures the `aws-auth` ConfigMap. This is used by the cluster to grant IAM users and roles RBAC permissions in the cluster, like the IAM role assigned to the worker nodes.

Confirm that the ConfigMap matches the contents of the `config_map_aws_auth` module output. You can retrieve the live config by running the following in your terraform folder:
`kubectl --kubeconfig=kubeconfig_* -n kube-system get cm aws-auth -o yaml`

If the ConfigMap is missing or the contents are incorrect, ensure that you have properly configured the kubernetes provider block by referring to [README.md](https://github.com/terraform-aws-modules/terraform-aws-eks/#usage-example) and run `terraform apply` again.

Users with `manage_aws_auth = false` will need to apply the ConfigMap themselves.

## How can I work with the cluster if I disable the public endpoint?

You have to interact with the cluster from within the VPC that it's associated with, from an instance that's allowed access via the cluster's security group.

Creating a new cluster with the public endpoint disabled is harder to achieve. You will either want to pass in a pre-configured cluster security group or apply the `aws-auth` configmap in a separate action.

## ConfigMap "aws-auth" already exists

This can happen if the kubernetes provider has not been configured for use with the cluster. The kubernetes provider will be accessing your default kubernetes cluster, which already has the map defined. Read [README.md](https://github.com/terraform-aws-modules/terraform-aws-eks/#usage-example) for more details on how to configure the kubernetes provider correctly.

Users upgrading from modules before 8.0.0 will need to import their existing aws-auth ConfigMap into the terraform state. See 8.0.0's [CHANGELOG](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/v8.0.0/CHANGELOG.md#v800---2019-12-11) for more details.

## `Error: Get http://localhost/api/v1/namespaces/kube-system/configmaps/aws-auth: dial tcp 127.0.0.1:80: connect: connection refused`

Usually this means that the kubernetes provider has not been configured and there is no default `~/.kube/config`, so the kubernetes provider is attempting to talk to localhost.

You need to configure the kubernetes provider correctly. See [README.md](https://github.com/terraform-aws-modules/terraform-aws-eks/#usage-example) for more details.

## How can I stop Terraform from removing the EKS tags from my VPC and subnets?

You need to add the tags to the VPC and subnets yourself. See the [basic example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/basic).

An alternative is to use the aws provider's [`ignore_tags` variable](https://www.terraform.io/docs/providers/aws/#ignore\_tags-configuration-block). However, this can also cause terraform to display a perpetual difference. A short sketch follows.

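As a hedged illustration of the `ignore_tags` approach (the region and the key prefix here are assumptions; adjust the prefix to the tags EKS adds in your account):

```hcl
# Illustrative only: tell the AWS provider to ignore externally managed EKS cluster tags.
provider "aws" {
  region = "eu-west-1" # placeholder

  ignore_tags {
    key_prefixes = ["kubernetes.io/cluster/"]
  }
}
```
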
## How do I safely remove old worker groups?

You've added new worker groups. Deleting worker groups from earlier in the list causes Terraform to want to recreate all worker groups. This is a limitation of how Terraform works and of the module using `count` to create the ASGs and other resources.

The safest and easiest option is to set `asg_min_size` and `asg_max_size` to 0 on the worker groups to "remove".

## Why does changing the node or worker group's desired count not do anything?

The module is configured to ignore this value. Unfortunately, Terraform does not support variables within the `lifecycle` block.

The setting is ignored to allow the cluster autoscaler to work correctly, so that `terraform apply` does not accidentally remove running workers.

You can change the desired count via the CLI or console if you're not using the cluster autoscaler.

If you are not using autoscaling and want to control the number of nodes via terraform, set the `min_capacity` and `max_capacity` for node groups or `asg_min_size` and `asg_max_size` for worker groups. Before changing those values, you must satisfy AWS `desired` capacity constraints (which must be between the new min/max values).

When you scale down, AWS will remove a random instance, so you will have to weigh the risks here.

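For reference, this is the general pattern being described; the sketch below is not the module's actual code, and the resource names and assumed `aws_launch_template.workers` / `var.subnet_ids` references are placeholders:

```hcl
# Illustrative only: ignoring desired_capacity so an external autoscaler owns it.
resource "aws_autoscaling_group" "workers" {
  name_prefix         = "eks-workers-" # placeholder
  min_size            = 1
  max_size            = 5
  desired_capacity    = 2
  vpc_zone_identifier = var.subnet_ids # assumed variable

  launch_template {
    id      = aws_launch_template.workers.id # assumed resource
    version = "$Latest"
  }

  lifecycle {
    ignore_changes = [desired_capacity]
  }
}
```
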
## Why are nodes not recreated when the `launch_configuration`/`launch_template` is recreated?

By default the ASG is not configured to be recreated when the launch configuration or template changes. Terraform spins up new instances and then deletes all the old instances in one go, as the AWS provider team have refused to implement rolling updates of autoscaling groups. This is not good for kubernetes stability.

You need to use a process to drain and cycle the workers.

If you are not using the cluster autoscaler:

- Add a new instance
- Drain an old node: `kubectl drain --force --ignore-daemonsets --delete-local-data ip-xxxxxxx.eu-west-1.compute.internal`
- Wait for pods to be Running
- Terminate the old node instance. The ASG will start a new instance
- Repeat the drain and delete process until all old nodes are replaced

If you are using the cluster autoscaler:

- Drain an old node: `kubectl drain --force --ignore-daemonsets --delete-local-data ip-xxxxxxx.eu-west-1.compute.internal`
- Wait for pods to be Running
- The cluster autoscaler will create new nodes when required
- Repeat until all old nodes are drained
- The cluster autoscaler will terminate the old nodes after 10-60 minutes automatically

You can also use a 3rd party tool like Gruntwork's kubergrunt. See the [`eks deploy`](https://github.com/gruntwork-io/kubergrunt#deploy) subcommand.

## How do I create kubernetes resources when creating the cluster?

You do not need to do anything extra since v12.1.0 of the module as long as the following conditions are met:

- `manage_aws_auth = true` on the module (default)
- the kubernetes provider is correctly configured like in the [Usage Example](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/README.md#usage-example). Primarily, the module's `cluster_id` output is used as input to the `aws_eks_cluster*` data sources (a provider sketch follows below).

The `cluster_id` output depends on a `data.http.wait_for_cluster` that polls the EKS cluster's endpoint until it is alive. This blocks initialisation of the kubernetes provider.

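A hedged sketch of that provider wiring, reconstructed from the conditions above rather than copied from the README (the module instance name `eks` is an assumption):

```hcl
# Illustrative only: configure the kubernetes provider from the module's cluster_id output.
data "aws_eks_cluster" "cluster" {
  name = module.eks.cluster_id
}

data "aws_eks_cluster_auth" "cluster" {
  name = module.eks.cluster_id
}

provider "kubernetes" {
  host                   = data.aws_eks_cluster.cluster.endpoint
  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
  token                  = data.aws_eks_cluster_auth.cluster.token
}
```
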
## `aws_auth.tf: At 2:14: Unknown token: 2:14 IDENT`

You are attempting to use a Terraform 0.12 module with Terraform 0.11.

We highly recommend that you upgrade your EKS Terraform config to 0.12 to take advantage of new features in the module.

Alternatively, you can lock your module to a compatible version if you must stay with terraform 0.11:

```hcl
module "eks" {
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 4.0"
  # ...
}
```

## How can I use Windows workers?

To enable Windows support for your EKS cluster, you should apply some configs manually. See [Enabling Windows Support (Windows/MacOS/Linux)](https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support).

Windows worker nodes require an additional cluster role (`eks:kube-proxy-windows`). If you are adding Windows workers to an existing cluster, you should apply the config-map-aws-auth again.

#### Example configuration

Amazon EKS clusters must contain one or more Linux worker nodes to run core system pods that only run on Linux, such as coredns and the VPC resource controller.

1. Build the AWS EKS cluster with the following worker configuration (Linux by default):

```hcl
worker_groups = [
  {
    name                 = "worker-group-linux"
    instance_type        = "m5.large"
    platform             = "linux"
    asg_desired_capacity = 2
  },
]
```

2. Apply the commands from https://docs.aws.amazon.com/eks/latest/userguide/windows-support.html#enable-windows-support (use the tab named `Windows`)
3. Add one more worker group for Windows with the required field `platform = "windows"` and update your cluster. Worker group example:

```hcl
worker_groups = [
  {
    name                 = "worker-group-linux"
    instance_type        = "m5.large"
    platform             = "linux"
    asg_desired_capacity = 2
  },
  {
    name                 = "worker-group-windows"
    instance_type        = "m5.large"
    platform             = "windows"
    asg_desired_capacity = 1
  },
]
```

4. With `kubectl get nodes` you can see the cluster with mixed (Linux/Windows) node support.

## Worker nodes with labels do not join a 1.16+ cluster

Starting in 1.16, kubelet restricts the allowed list of labels in the `kubernetes.io` namespace that can be applied to nodes.

Older configurations used labels like `kubernetes.io/lifecycle=spot`, which is no longer allowed; use `node.kubernetes.io/lifecycle=spot` instead.

Reference the `--node-labels` argument for your version of Kubernetes for the allowed prefixes. [Documentation for 1.16](https://v1-16.docs.kubernetes.io/docs/reference/command-line-tools-reference/kubelet/)

## What is the difference between `node_groups` and `worker_groups`?

`node_groups` are [AWS-managed node groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) (configures "Node Groups" that you can find on the EKS dashboard). This system is supposed to ease some of the lifecycle around upgrading nodes, although upgrades are not automatic and you still need to trigger them manually.

`worker_groups` are [self-managed nodes](https://docs.aws.amazon.com/eks/latest/userguide/worker.html) (provisions a typical "Autoscaling group" on EC2). It gives you full control over nodes in the cluster, like using a custom AMI for the nodes. As AWS says, "with worker groups the customer controls the data plane & AWS controls the control plane".

Both can be used together in the same cluster.

## I'm using both AWS-managed node groups and self-managed worker groups, and pods scheduled on AWS-managed node groups are unable to resolve DNS (even communication between pods)

This happens because CoreDNS can be scheduled on self-managed worker groups and, by default, the Terraform module doesn't create security group rules to ensure communication between pods scheduled on self-managed worker groups and AWS-managed node groups.

You can set `var.worker_create_cluster_primary_security_group_rules` to `true` to create the required rules.

## Dedicated control plane subnets

[AWS recommends](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html) creating dedicated subnets for the EKS-created network interfaces (control plane). The module fully supports this approach. To set it up, configure the module by adding a separate `subnets` list to the `workers_group_defaults` map, or set `subnets` directly in a worker group definition.

```hcl
module "eks" {
  source = "terraform-aws-modules/eks/aws"

  cluster_version = "1.21"
  cluster_name    = "my-cluster"
  vpc_id          = "vpc-1234556abcdef"
  subnets         = ["subnet-abcde123", "subnet-abcde456", "subnet-abcde789"]

  workers_group_defaults = {
    subnets = ["subnet-xyz123", "subnet-xyz456", "subnet-xyz789"]
  }

  worker_groups = [
    {
      instance_type = "m4.large"
      asg_max_size  = 5
    },
    {
      name                 = "worker-group-2"
      subnets              = ["subnet-qwer123"]
      instance_type        = "t3.medium"
      asg_desired_capacity = 1
      public_ip            = true
      ebs_optimized        = true
    }
  ]
}
```

@@ -1,155 +0,0 @@
|
||||
# IAM Permissions
|
||||
|
||||
Following IAM permissions are the minimum permissions needed for your IAM user or IAM role to create an EKS cluster.
|
||||
|
||||
```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Sid": "VisualEditor0",
      "Effect": "Allow",
      "Action": [
        "autoscaling:AttachInstances",
        "autoscaling:CreateAutoScalingGroup",
        "autoscaling:CreateLaunchConfiguration",
        "autoscaling:CreateOrUpdateTags",
        "autoscaling:DeleteAutoScalingGroup",
        "autoscaling:DeleteLaunchConfiguration",
        "autoscaling:DeleteTags",
        "autoscaling:Describe*",
        "autoscaling:DetachInstances",
        "autoscaling:SetDesiredCapacity",
        "autoscaling:UpdateAutoScalingGroup",
        "autoscaling:SuspendProcesses",
        "ec2:AllocateAddress",
        "ec2:AssignPrivateIpAddresses",
        "ec2:Associate*",
        "ec2:AttachInternetGateway",
        "ec2:AttachNetworkInterface",
        "ec2:AuthorizeSecurityGroupEgress",
        "ec2:AuthorizeSecurityGroupIngress",
        "ec2:CreateDefaultSubnet",
        "ec2:CreateDhcpOptions",
        "ec2:CreateEgressOnlyInternetGateway",
        "ec2:CreateInternetGateway",
        "ec2:CreateNatGateway",
        "ec2:CreateNetworkInterface",
        "ec2:CreateRoute",
        "ec2:CreateRouteTable",
        "ec2:CreateSecurityGroup",
        "ec2:CreateSubnet",
        "ec2:CreateTags",
        "ec2:CreateVolume",
        "ec2:CreateVpc",
        "ec2:CreateVpcEndpoint",
        "ec2:DeleteDhcpOptions",
        "ec2:DeleteEgressOnlyInternetGateway",
        "ec2:DeleteInternetGateway",
        "ec2:DeleteNatGateway",
        "ec2:DeleteNetworkInterface",
        "ec2:DeleteRoute",
        "ec2:DeleteRouteTable",
        "ec2:DeleteSecurityGroup",
        "ec2:DeleteSubnet",
        "ec2:DeleteTags",
        "ec2:DeleteVolume",
        "ec2:DeleteVpc",
        "ec2:DeleteVpnGateway",
        "ec2:Describe*",
        "ec2:DetachInternetGateway",
        "ec2:DetachNetworkInterface",
        "ec2:DetachVolume",
        "ec2:Disassociate*",
        "ec2:ModifySubnetAttribute",
        "ec2:ModifyVpcAttribute",
        "ec2:ModifyVpcEndpoint",
        "ec2:ReleaseAddress",
        "ec2:RevokeSecurityGroupEgress",
        "ec2:RevokeSecurityGroupIngress",
        "ec2:UpdateSecurityGroupRuleDescriptionsEgress",
        "ec2:UpdateSecurityGroupRuleDescriptionsIngress",
        "ec2:CreateLaunchTemplate",
        "ec2:CreateLaunchTemplateVersion",
        "ec2:DeleteLaunchTemplate",
        "ec2:DeleteLaunchTemplateVersions",
        "ec2:DescribeLaunchTemplates",
        "ec2:DescribeLaunchTemplateVersions",
        "ec2:GetLaunchTemplateData",
        "ec2:ModifyLaunchTemplate",
        "ec2:RunInstances",
        "eks:CreateCluster",
        "eks:DeleteCluster",
        "eks:DescribeCluster",
        "eks:ListClusters",
        "eks:UpdateClusterConfig",
        "eks:UpdateClusterVersion",
        "eks:DescribeUpdate",
        "eks:TagResource",
        "eks:UntagResource",
        "eks:ListTagsForResource",
        "eks:CreateFargateProfile",
        "eks:DeleteFargateProfile",
        "eks:DescribeFargateProfile",
        "eks:ListFargateProfiles",
        "eks:CreateNodegroup",
        "eks:DeleteNodegroup",
        "eks:DescribeNodegroup",
        "eks:ListNodegroups",
        "eks:UpdateNodegroupConfig",
        "eks:UpdateNodegroupVersion",
        "iam:AddRoleToInstanceProfile",
        "iam:AttachRolePolicy",
        "iam:CreateInstanceProfile",
        "iam:CreateOpenIDConnectProvider",
        "iam:CreateServiceLinkedRole",
        "iam:CreatePolicy",
        "iam:CreatePolicyVersion",
        "iam:CreateRole",
        "iam:DeleteInstanceProfile",
        "iam:DeleteOpenIDConnectProvider",
        "iam:DeletePolicy",
        "iam:DeletePolicyVersion",
        "iam:DeleteRole",
        "iam:DeleteRolePolicy",
        "iam:DeleteServiceLinkedRole",
        "iam:DetachRolePolicy",
        "iam:GetInstanceProfile",
        "iam:GetOpenIDConnectProvider",
        "iam:GetPolicy",
        "iam:GetPolicyVersion",
        "iam:GetRole",
        "iam:GetRolePolicy",
        "iam:List*",
        "iam:PassRole",
        "iam:PutRolePolicy",
        "iam:RemoveRoleFromInstanceProfile",
        "iam:TagOpenIDConnectProvider",
        "iam:TagRole",
        "iam:UntagRole",
        "iam:TagPolicy",
        "iam:TagInstanceProfile",
        "iam:UpdateAssumeRolePolicy",
        // Following permissions are needed if cluster_enabled_log_types is enabled
        "logs:CreateLogGroup",
        "logs:DescribeLogGroups",
        "logs:DeleteLogGroup",
        "logs:ListTagsLogGroup",
        "logs:PutRetentionPolicy",
        // Following permissions for working with secrets_encryption example
        "kms:CreateAlias",
        "kms:CreateGrant",
        "kms:CreateKey",
        "kms:DeleteAlias",
        "kms:DescribeKey",
        "kms:GetKeyPolicy",
        "kms:GetKeyRotationStatus",
        "kms:ListAliases",
        "kms:ListResourceTags",
        "kms:ScheduleKeyDeletion"
      ],
      "Resource": "*"
    }
  ]
}
```
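
If you prefer to manage this policy with Terraform rather than pasting it into the console, a minimal sketch could look like the following. The file name `iam-permissions.json` and the role name are assumptions for illustration only, and the inline `//` comments above must be removed first, since IAM only accepts strict JSON:

```hcl
# Illustrative sketch: create the minimum-permission policy from a local JSON
# file and attach it to an existing IAM role used to run Terraform.
resource "aws_iam_policy" "eks_deployer" {
  name   = "eks-deployer-minimum-permissions"
  policy = file("${path.module}/iam-permissions.json") # hypothetical file, strip // comments first
}

resource "aws_iam_role_policy_attachment" "eks_deployer" {
  role       = "deployer" # name of your existing deployment role (assumption)
  policy_arn = aws_iam_policy.eks_deployer.arn
}
```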
@@ -1,114 +0,0 @@
# Using spot instances

Spot instances usually cost around 30-70% less than an on-demand instance. Using them for your EKS workloads can save a lot of money, but requires some special considerations as they can be terminated with only a 2 minute warning.

You need to install a daemonset to catch the 2 minute warning before termination. This ensures the node is gracefully drained before termination. You can install the [k8s-spot-termination-handler](https://github.com/kube-aws/kube-spot-termination-notice-handler) for this. There's a [Helm chart](https://github.com/helm/charts/tree/master/stable/k8s-spot-termination-handler):

```shell
helm install stable/k8s-spot-termination-handler --namespace kube-system
```

In the following examples, at least 1 worker group that uses on-demand instances is included. This worker group has an added node label that can be used in scheduling. This could be used to schedule any workload not suitable for spot instances, but it is especially important for the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler), as it might end up unscheduled when spot instances are terminated. You can add this to the values of the [cluster-autoscaler helm chart](https://github.com/kubernetes/autoscaler/tree/master/charts/cluster-autoscaler-chart):

```yaml
nodeSelector:
  kubernetes.io/lifecycle: normal
```

Notes:

- The `spot_price` is set to the on-demand price so that the spot instances will run as long as they are cheaper.
- It's best to have a broad range of instance types to ensure there's always some instances to run when prices fluctuate.
- There is an AWS blog article about this [here](https://aws.amazon.com/blogs/compute/run-your-kubernetes-workloads-on-amazon-ec2-spot-instances-with-amazon-eks/).
- Consider using [k8s-spot-rescheduler](https://github.com/pusher/k8s-spot-rescheduler) to move pods from on-demand to spot instances.

## Using Launch Configuration

Example worker group configuration that uses an ASG with launch configuration for each worker group:

```hcl
worker_groups = [
  {
    name                = "on-demand-1"
    instance_type       = "m4.xlarge"
    asg_max_size        = 1
    kubelet_extra_args  = "--node-labels=node.kubernetes.io/lifecycle=normal"
    suspended_processes = ["AZRebalance"]
  },
  {
    name                = "spot-1"
    spot_price          = "0.199"
    instance_type       = "c4.xlarge"
    asg_max_size        = 20
    kubelet_extra_args  = "--node-labels=node.kubernetes.io/lifecycle=spot"
    suspended_processes = ["AZRebalance"]
  },
  {
    name                = "spot-2"
    spot_price          = "0.20"
    instance_type       = "m4.xlarge"
    asg_max_size        = 20
    kubelet_extra_args  = "--node-labels=node.kubernetes.io/lifecycle=spot"
    suspended_processes = ["AZRebalance"]
  }
]
```

## Using Launch Templates

Launch Template support is a recent addition to both AWS and this module. It might not be as tried and tested, but it is more suitable for spot instances as it allows multiple instance types in the same worker group:

```hcl
worker_groups = [
  {
    name                = "on-demand-1"
    instance_type       = "m4.xlarge"
    asg_max_size        = 10
    kubelet_extra_args  = "--node-labels=spot=false"
    suspended_processes = ["AZRebalance"]
  }
]

worker_groups_launch_template = [
  {
    name                    = "spot-1"
    override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
    spot_instance_pools     = 4
    asg_max_size            = 5
    asg_desired_capacity    = 5
    kubelet_extra_args      = "--node-labels=node.kubernetes.io/lifecycle=spot"
    public_ip               = true
  },
]
```

## Using Launch Templates With Both Spot and On Demand

Example launch template that launches 2 on-demand instances of type m5.large and can scale up using both spot and on-demand instances. The `node.kubernetes.io/lifecycle` node label will be set to the value queried from the EC2 metadata service: either "on-demand" or "spot".

`on_demand_percentage_above_base_capacity` is set to 25, so 1 in 4 new nodes, when auto-scaling, will be on-demand instances. If not set, all new nodes will be spot instances. The on-demand instances will be the primary instance type (first in the array if they are not weighted).

```hcl
worker_groups_launch_template = [{
  name                    = "mixed-demand-spot"
  override_instance_types = ["m5.large", "m5a.large", "m4.large"]
  root_encrypted          = true
  root_volume_size        = 50

  asg_min_size                             = 2
  asg_desired_capacity                     = 2
  on_demand_base_capacity                  = 3
  on_demand_percentage_above_base_capacity = 25
  asg_max_size                             = 20
  spot_instance_pools                      = 3

  kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=`curl -s http://169.254.169.254/latest/meta-data/instance-life-cycle`"
}]
```
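
For illustration, using the standard EC2 Auto Scaling mixed instances arithmetic (this is not something the module computes itself, just how AWS interprets the settings above): if the group scales out to 11 instances, the first 3 are on-demand (`on_demand_base_capacity = 3`); of the remaining 8, roughly 25% (2 instances) are on-demand and the other 6 are requested from the spot pools, giving about 5 on-demand and 6 spot instances in total.
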
## Important Notes

An issue with the cluster-autoscaler: https://github.com/kubernetes/autoscaler/issues/1133

AWS has released its own termination handler now: https://github.com/aws/aws-node-termination-handler
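
If you manage cluster tooling with Terraform, a hedged sketch of installing that termination handler through the Helm provider could look like this. The chart repository URL and chart name are assumptions based on AWS's public `eks-charts` repository; verify them against the upstream project before use:

```hcl
# Illustrative only: install aws-node-termination-handler via the Helm provider
resource "helm_release" "aws_node_termination_handler" {
  name       = "aws-node-termination-handler"
  namespace  = "kube-system"
  repository = "https://aws.github.io/eks-charts" # assumed public chart repository
  chart      = "aws-node-termination-handler"
}
```
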
@@ -1,75 +0,0 @@
# AWS EKS cluster running Bottlerocket AMI

Configuration in this directory creates an EKS cluster with a worker group running the [AWS Bottlerocket OS](https://github.com/bottlerocket-os/bottlerocket).

This is a minimalistic example which shows what knobs to turn to make Bottlerocket work.

See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami-bottlerocket.html) for more details.

## Usage

To run this example you need to execute:

```bash
$ terraform init
$ terraform plan
$ terraform apply
```

Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
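
For a quick orientation, the essential knobs are an explicit Bottlerocket AMI and a Bottlerocket-specific TOML user data template (excerpted from `main.tf` below); everything else follows the usual worker group configuration:

```hcl
worker_groups_launch_template = [
  {
    name          = "bottlerocket-nodes"
    ami_id        = data.aws_ami.bottlerocket_ami.id # Bottlerocket AMI instead of the default EKS AMI
    instance_type = "t3a.small"

    # Bottlerocket consumes TOML user data instead of a shell script
    userdata_template_file = "${path.module}/userdata.toml"
    userdata_template_extra_args = {
      enable_admin_container   = false
      enable_control_container = true
      aws_region               = data.aws_region.current.name
    }
  }
]
```
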
<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
## Requirements
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
|
||||
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.56 |
|
||||
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
|
||||
| <a name="requirement_local"></a> [local](#requirement\_local) | >= 1.4 |
|
||||
| <a name="requirement_random"></a> [random](#requirement\_random) | >= 2.1 |
|
||||
| <a name="requirement_tls"></a> [tls](#requirement\_tls) | >= 2.0 |
|
||||
|
||||
## Providers
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.56 |
|
||||
| <a name="provider_random"></a> [random](#provider\_random) | >= 2.1 |
|
||||
| <a name="provider_tls"></a> [tls](#provider\_tls) | >= 2.0 |
|
||||
|
||||
## Modules
|
||||
|
||||
| Name | Source | Version |
|
||||
|------|--------|---------|
|
||||
| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
|
||||
| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
|
||||
|
||||
## Resources
|
||||
|
||||
| Name | Type |
|
||||
|------|------|
|
||||
| [aws_iam_role_policy_attachment.ssm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
|
||||
| [aws_key_pair.nodes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource |
|
||||
| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
|
||||
| [tls_private_key.nodes](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource |
|
||||
| [aws_ami.bottlerocket_ami](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
|
||||
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
|
||||
| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
|
||||
| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
|
||||
| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
|
||||
|
||||
## Inputs
|
||||
|
||||
No inputs.
|
||||
|
||||
## Outputs
|
||||
|
||||
| Name | Description |
|
||||
|------|-------------|
|
||||
| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
|
||||
| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
|
||||
| <a name="output_config_map_aws_auth"></a> [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
|
||||
| <a name="output_kubectl_config"></a> [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
|
||||
| <a name="output_node_groups"></a> [node\_groups](#output\_node\_groups) | Outputs from node groups |
|
||||
<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
@@ -1,159 +0,0 @@
|
||||
provider "aws" {
|
||||
region = local.region
|
||||
}
|
||||
|
||||
locals {
|
||||
name = "bottlerocket-${random_string.suffix.result}"
|
||||
cluster_version = "1.20"
|
||||
region = "eu-west-1"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Module
|
||||
################################################################################
|
||||
|
||||
module "eks" {
|
||||
source = "../.."
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_version = local.cluster_version
|
||||
|
||||
vpc_id = module.vpc.vpc_id
|
||||
subnets = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]]
|
||||
fargate_subnets = [module.vpc.private_subnets[2]]
|
||||
|
||||
cluster_endpoint_private_access = true
|
||||
cluster_endpoint_public_access = true
|
||||
|
||||
write_kubeconfig = false
|
||||
manage_aws_auth = true
|
||||
|
||||
worker_groups_launch_template = [
|
||||
{
|
||||
name = "bottlerocket-nodes"
|
||||
ami_id = data.aws_ami.bottlerocket_ami.id
|
||||
instance_type = "t3a.small"
|
||||
asg_desired_capacity = 2
|
||||
key_name = aws_key_pair.nodes.key_name
|
||||
|
||||
# Since we are using default VPC there is no NAT gateway so we need to
|
||||
# attach public ip to nodes so they can reach k8s API server
|
||||
# do not repeat this at home (i.e. production)
|
||||
public_ip = true
|
||||
|
||||
# This section overrides default userdata template to pass bottlerocket
|
||||
# specific user data
|
||||
userdata_template_file = "${path.module}/userdata.toml"
|
||||
# we are using this section to pass additional arguments for
|
||||
# userdata template rendering
|
||||
userdata_template_extra_args = {
|
||||
enable_admin_container = false
|
||||
enable_control_container = true
|
||||
aws_region = data.aws_region.current.name
|
||||
}
|
||||
# example of k8s/kubelet configuration via additional_userdata
|
||||
additional_userdata = <<EOT
|
||||
[settings.kubernetes.node-labels]
|
||||
ingress = "allowed"
|
||||
EOT
|
||||
}
|
||||
]
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
|
||||
# SSM policy for bottlerocket control container access
|
||||
# https://github.com/bottlerocket-os/bottlerocket/blob/develop/QUICKSTART-EKS.md#enabling-ssm
|
||||
resource "aws_iam_role_policy_attachment" "ssm" {
|
||||
role = module.eks.worker_iam_role_name
|
||||
policy_arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Kubernetes provider configuration
|
||||
################################################################################
|
||||
|
||||
data "aws_eks_cluster" "cluster" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
data "aws_eks_cluster_auth" "cluster" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
provider "kubernetes" {
|
||||
host = data.aws_eks_cluster.cluster.endpoint
|
||||
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
|
||||
token = data.aws_eks_cluster_auth.cluster.token
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Supporting Resources
|
||||
################################################################################
|
||||
|
||||
data "aws_region" "current" {}
|
||||
|
||||
data "aws_ami" "bottlerocket_ami" {
|
||||
most_recent = true
|
||||
owners = ["amazon"]
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["bottlerocket-aws-k8s-${local.cluster_version}-x86_64-*"]
|
||||
}
|
||||
}
|
||||
|
||||
resource "tls_private_key" "nodes" {
|
||||
algorithm = "RSA"
|
||||
}
|
||||
|
||||
resource "aws_key_pair" "nodes" {
|
||||
key_name = "bottlerocket-nodes-${random_string.suffix.result}"
|
||||
public_key = tls_private_key.nodes.public_key_openssh
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Supporting Resources
|
||||
################################################################################
|
||||
|
||||
data "aws_availability_zones" "available" {
|
||||
}
|
||||
|
||||
resource "random_string" "suffix" {
|
||||
length = 8
|
||||
special = false
|
||||
}
|
||||
|
||||
module "vpc" {
|
||||
source = "terraform-aws-modules/vpc/aws"
|
||||
version = "~> 3.0"
|
||||
|
||||
name = local.name
|
||||
cidr = "10.0.0.0/16"
|
||||
azs = data.aws_availability_zones.available.names
|
||||
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
|
||||
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
|
||||
enable_nat_gateway = true
|
||||
single_nat_gateway = true
|
||||
enable_dns_hostnames = true
|
||||
|
||||
public_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
|
||||
private_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/internal-elb" = "1"
|
||||
}
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
output "cluster_endpoint" {
|
||||
description = "Endpoint for EKS control plane."
|
||||
value = module.eks.cluster_endpoint
|
||||
}
|
||||
|
||||
output "cluster_security_group_id" {
|
||||
description = "Security group ids attached to the cluster control plane."
|
||||
value = module.eks.cluster_security_group_id
|
||||
}
|
||||
|
||||
output "kubectl_config" {
|
||||
description = "kubectl config as generated by the module."
|
||||
value = module.eks.kubeconfig
|
||||
}
|
||||
|
||||
output "config_map_aws_auth" {
|
||||
description = "A kubernetes configuration to authenticate to this EKS cluster."
|
||||
value = module.eks.config_map_aws_auth
|
||||
}
|
||||
|
||||
output "node_groups" {
|
||||
description = "Outputs from node groups"
|
||||
value = module.eks.node_groups
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
# https://github.com/bottlerocket-os/bottlerocket/blob/develop/README.md#description-of-settings
|
||||
[settings.kubernetes]
|
||||
api-server = "${endpoint}"
|
||||
cluster-certificate = "${cluster_auth_base64}"
|
||||
cluster-name = "${cluster_name}"
|
||||
${additional_userdata}
|
||||
|
||||
# Hardening based on https://github.com/bottlerocket-os/bottlerocket/blob/develop/SECURITY_GUIDANCE.md
|
||||
|
||||
# Enable kernel lockdown in "integrity" mode.
|
||||
# This prevents modifications to the running kernel, even by privileged users.
|
||||
[settings.kernel]
|
||||
lockdown = "integrity"
|
||||
|
||||
# The admin host container provides SSH access and runs with "superpowers".
|
||||
# It is disabled by default, but can be enabled explicitly.
|
||||
[settings.host-containers.admin]
|
||||
enabled = ${enable_admin_container}
|
||||
|
||||
# The control host container provides out-of-band access via SSM.
|
||||
# It is enabled by default, and can be disabled if you do not expect to use SSM.
|
||||
# This could leave you with no way to access the API and change settings on an existing node!
|
||||
[settings.host-containers.control]
|
||||
enabled = ${enable_control_container}
|
||||
@@ -1,26 +0,0 @@
|
||||
terraform {
|
||||
required_version = ">= 0.13.1"
|
||||
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
version = ">= 3.56"
|
||||
}
|
||||
local = {
|
||||
source = "hashicorp/local"
|
||||
version = ">= 1.4"
|
||||
}
|
||||
kubernetes = {
|
||||
source = "hashicorp/kubernetes"
|
||||
version = ">= 1.11.1"
|
||||
}
|
||||
random = {
|
||||
source = "hashicorp/random"
|
||||
version = ">= 2.1"
|
||||
}
|
||||
tls = {
|
||||
source = "hashicorp/tls"
|
||||
version = ">= 2.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,10 +1,20 @@
|
||||
# Complete AWS EKS Cluster

Configuration in this directory creates EKS cluster with different features shown all-in-one cluster (e.g. Managed Node Groups, Worker Groups, Fargate, Spot instances, AWS Auth enabled).
Configuration in this directory creates an AWS EKS cluster with a broad mix of various features and settings provided by this module:

This example can be used to do a smoke test.

See configurations in other `examples` directories for more specific cases.
- AWS EKS cluster
- Disabled EKS cluster
- Self managed node group
- Externally attached self managed node group
- Disabled self managed node group
- EKS managed node group
- Externally attached EKS managed node group
- Disabled EKS managed node group
- Fargate profile
- Externally attached Fargate profile
- Disabled Fargate profile
- Cluster addons: CoreDNS, Kube-Proxy, and VPC-CNI
- IAM roles for service accounts

## Usage
|
||||
|
||||
@@ -24,39 +34,38 @@ Note that this example may create resources which cost money. Run `terraform des
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
|
||||
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.56 |
|
||||
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
|
||||
| <a name="requirement_local"></a> [local](#requirement\_local) | >= 1.4 |
|
||||
| <a name="requirement_random"></a> [random](#requirement\_random) | >= 2.1 |
|
||||
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.64 |
|
||||
| <a name="requirement_null"></a> [null](#requirement\_null) | >= 3.0 |
|
||||
|
||||
## Providers
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.56 |
|
||||
| <a name="provider_random"></a> [random](#provider\_random) | >= 2.1 |
|
||||
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.64 |
|
||||
| <a name="provider_null"></a> [null](#provider\_null) | >= 3.0 |
|
||||
|
||||
## Modules
|
||||
|
||||
| Name | Source | Version |
|
||||
|------|--------|---------|
|
||||
| <a name="module_disabled_eks"></a> [disabled\_eks](#module\_disabled\_eks) | ../.. | n/a |
|
||||
| <a name="module_disabled_fargate"></a> [disabled\_fargate](#module\_disabled\_fargate) | ../../modules/fargate | n/a |
|
||||
| <a name="module_disabled_node_groups"></a> [disabled\_node\_groups](#module\_disabled\_node\_groups) | ../../modules/node_groups | n/a |
|
||||
| <a name="module_disabled_eks_managed_node_group"></a> [disabled\_eks\_managed\_node\_group](#module\_disabled\_eks\_managed\_node\_group) | ../../modules/eks-managed-node-group | n/a |
|
||||
| <a name="module_disabled_fargate_profile"></a> [disabled\_fargate\_profile](#module\_disabled\_fargate\_profile) | ../../modules/fargate-profile | n/a |
|
||||
| <a name="module_disabled_self_managed_node_group"></a> [disabled\_self\_managed\_node\_group](#module\_disabled\_self\_managed\_node\_group) | ../../modules/self-managed-node-group | n/a |
|
||||
| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
|
||||
| <a name="module_eks_managed_node_group"></a> [eks\_managed\_node\_group](#module\_eks\_managed\_node\_group) | ../../modules/eks-managed-node-group | n/a |
|
||||
| <a name="module_fargate_profile"></a> [fargate\_profile](#module\_fargate\_profile) | ../../modules/fargate-profile | n/a |
|
||||
| <a name="module_self_managed_node_group"></a> [self\_managed\_node\_group](#module\_self\_managed\_node\_group) | ../../modules/self-managed-node-group | n/a |
|
||||
| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
|
||||
|
||||
## Resources
|
||||
|
||||
| Name | Type |
|
||||
|------|------|
|
||||
| [aws_security_group.all_worker_mgmt](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
|
||||
| [aws_security_group.worker_group_mgmt_one](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
|
||||
| [aws_security_group.worker_group_mgmt_two](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
|
||||
| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
|
||||
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
|
||||
| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
|
||||
| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
|
||||
| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
|
||||
| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
|
||||
| [null_resource.patch](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
|
||||
| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
|
||||
|
||||
## Inputs
|
||||
|
||||
@@ -66,9 +75,25 @@ No inputs.
|
||||
|
||||
| Name | Description |
|
||||
|------|-------------|
|
||||
| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
|
||||
| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
|
||||
| <a name="output_config_map_aws_auth"></a> [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
|
||||
| <a name="output_kubectl_config"></a> [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
|
||||
| <a name="output_node_groups"></a> [node\_groups](#output\_node\_groups) | Outputs from node groups |
|
||||
| <a name="output_aws_auth_configmap_yaml"></a> [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
|
||||
| <a name="output_cloudwatch_log_group_arn"></a> [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
|
||||
| <a name="output_cloudwatch_log_group_name"></a> [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
|
||||
| <a name="output_cluster_addons"></a> [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
|
||||
| <a name="output_cluster_arn"></a> [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
|
||||
| <a name="output_cluster_certificate_authority_data"></a> [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
|
||||
| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
|
||||
| <a name="output_cluster_iam_role_arn"></a> [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
|
||||
| <a name="output_cluster_iam_role_name"></a> [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
|
||||
| <a name="output_cluster_iam_role_unique_id"></a> [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
|
||||
| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
|
||||
| <a name="output_cluster_identity_providers"></a> [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
|
||||
| <a name="output_cluster_oidc_issuer_url"></a> [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
|
||||
| <a name="output_cluster_platform_version"></a> [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
|
||||
| <a name="output_cluster_security_group_arn"></a> [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
|
||||
| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
|
||||
| <a name="output_cluster_status"></a> [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
|
||||
| <a name="output_eks_managed_node_groups"></a> [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
|
||||
| <a name="output_fargate_profiles"></a> [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
|
||||
| <a name="output_oidc_provider_arn"></a> [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
|
||||
| <a name="output_self_managed_node_groups"></a> [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
|
||||
<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
|
||||
@@ -3,9 +3,15 @@ provider "aws" {
|
||||
}
|
||||
|
||||
locals {
|
||||
name = "complete-${random_string.suffix.result}"
|
||||
cluster_version = "1.20"
|
||||
name = "ex-${replace(basename(path.cwd), "_", "-")}"
|
||||
cluster_version = "1.21"
|
||||
region = "eu-west-1"
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
|
||||
################################################################################
|
||||
@@ -15,87 +21,102 @@ locals {
|
||||
module "eks" {
|
||||
source = "../.."
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_version = local.cluster_version
|
||||
|
||||
vpc_id = module.vpc.vpc_id
|
||||
subnets = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]]
|
||||
fargate_subnets = [module.vpc.private_subnets[2]]
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_version = local.cluster_version
|
||||
cluster_endpoint_private_access = true
|
||||
cluster_endpoint_public_access = true
|
||||
|
||||
|
||||
|
||||
worker_additional_security_group_ids = [aws_security_group.all_worker_mgmt.id]
|
||||
|
||||
# Worker groups (using Launch Configurations)
|
||||
worker_groups = [
|
||||
{
|
||||
name = "worker-group-1"
|
||||
instance_type = "t3.small"
|
||||
additional_userdata = "echo foo bar"
|
||||
asg_desired_capacity = 2
|
||||
additional_security_group_ids = [aws_security_group.worker_group_mgmt_one.id]
|
||||
},
|
||||
{
|
||||
name = "worker-group-2"
|
||||
instance_type = "t3.medium"
|
||||
additional_userdata = "echo foo bar"
|
||||
additional_security_group_ids = [aws_security_group.worker_group_mgmt_two.id]
|
||||
asg_desired_capacity = 1
|
||||
},
|
||||
]
|
||||
|
||||
# Worker groups (using Launch Templates)
|
||||
worker_groups_launch_template = [
|
||||
{
|
||||
name = "spot-1"
|
||||
override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"]
|
||||
spot_instance_pools = 4
|
||||
asg_max_size = 5
|
||||
asg_desired_capacity = 5
|
||||
kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot"
|
||||
public_ip = true
|
||||
},
|
||||
]
|
||||
|
||||
# Managed Node Groups
|
||||
node_groups_defaults = {
|
||||
ami_type = "AL2_x86_64"
|
||||
disk_size = 50
|
||||
cluster_addons = {
|
||||
coredns = {
|
||||
resolve_conflicts = "OVERWRITE"
|
||||
}
|
||||
kube-proxy = {}
|
||||
vpc-cni = {
|
||||
resolve_conflicts = "OVERWRITE"
|
||||
}
|
||||
}
|
||||
|
||||
node_groups = {
|
||||
example = {
|
||||
desired_capacity = 1
|
||||
max_capacity = 10
|
||||
min_capacity = 1
|
||||
cluster_encryption_config = [{
|
||||
provider_key_arn = aws_kms_key.eks.arn
|
||||
resources = ["secrets"]
|
||||
}]
|
||||
|
||||
vpc_id = module.vpc.vpc_id
|
||||
subnet_ids = module.vpc.private_subnets
|
||||
|
||||
enable_irsa = true
|
||||
|
||||
# Self Managed Node Group(s)
|
||||
self_managed_node_group_defaults = {
|
||||
vpc_security_group_ids = [aws_security_group.additional.id]
|
||||
iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
|
||||
}
|
||||
|
||||
self_managed_node_groups = {
|
||||
spot = {
|
||||
instance_type = "m5.large"
|
||||
instance_market_options = {
|
||||
market_type = "spot"
|
||||
}
|
||||
|
||||
pre_bootstrap_user_data = <<-EOT
|
||||
echo "foo"
|
||||
export FOO=bar
|
||||
EOT
|
||||
|
||||
bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
|
||||
|
||||
post_bootstrap_user_data = <<-EOT
|
||||
cd /tmp
|
||||
sudo yum install -y https://s3.amazonaws.com/ec2-downloads-windows/SSMAgent/latest/linux_amd64/amazon-ssm-agent.rpm
|
||||
sudo systemctl enable amazon-ssm-agent
|
||||
sudo systemctl start amazon-ssm-agent
|
||||
EOT
|
||||
}
|
||||
}
|
||||
|
||||
# EKS Managed Node Group(s)
|
||||
eks_managed_node_group_defaults = {
|
||||
ami_type = "AL2_x86_64"
|
||||
disk_size = 50
|
||||
instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
|
||||
vpc_security_group_ids = [aws_security_group.additional.id]
|
||||
}
|
||||
|
||||
eks_managed_node_groups = {
|
||||
blue = {}
|
||||
green = {
|
||||
min_size = 1
|
||||
max_size = 10
|
||||
desired_size = 1
|
||||
|
||||
instance_types = ["t3.large"]
|
||||
capacity_type = "SPOT"
|
||||
k8s_labels = {
|
||||
labels = {
|
||||
Environment = "test"
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
additional_tags = {
|
||||
ExtraTag = "example"
|
||||
}
|
||||
taints = [
|
||||
{
|
||||
|
||||
taints = {
|
||||
dedicated = {
|
||||
key = "dedicated"
|
||||
value = "gpuGroup"
|
||||
effect = "NO_SCHEDULE"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
update_config = {
|
||||
max_unavailable_percentage = 50 # or set `max_unavailable`
|
||||
}
|
||||
|
||||
tags = {
|
||||
ExtraTag = "example"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Fargate
|
||||
# Fargate Profile(s)
|
||||
fargate_profiles = {
|
||||
default = {
|
||||
name = "default"
|
||||
@@ -122,38 +143,59 @@ module "eks" {
|
||||
}
|
||||
}
|
||||
|
||||
# AWS Auth (kubernetes_config_map)
|
||||
map_roles = [
|
||||
{
|
||||
rolearn = "arn:aws:iam::66666666666:role/role1"
|
||||
username = "role1"
|
||||
groups = ["system:masters"]
|
||||
},
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Sub-Module Usage on Existing/Separate Cluster
|
||||
################################################################################
|
||||
|
||||
module "eks_managed_node_group" {
|
||||
source = "../../modules/eks-managed-node-group"
|
||||
|
||||
name = "separate-eks-mng"
|
||||
cluster_name = module.eks.cluster_id
|
||||
cluster_version = local.cluster_version
|
||||
|
||||
vpc_id = module.vpc.vpc_id
|
||||
subnet_ids = module.vpc.private_subnets
|
||||
|
||||
tags = merge(local.tags, { Separate = "eks-managed-node-group" })
|
||||
}
|
||||
|
||||
module "self_managed_node_group" {
|
||||
source = "../../modules/self-managed-node-group"
|
||||
|
||||
name = "separate-self-mng"
|
||||
cluster_name = module.eks.cluster_id
|
||||
cluster_version = local.cluster_version
|
||||
cluster_endpoint = module.eks.cluster_endpoint
|
||||
cluster_auth_base64 = module.eks.cluster_certificate_authority_data
|
||||
|
||||
instance_type = "m5.large"
|
||||
|
||||
vpc_id = module.vpc.vpc_id
|
||||
subnet_ids = module.vpc.private_subnets
|
||||
vpc_security_group_ids = [
|
||||
module.eks.cluster_primary_security_group_id,
|
||||
module.eks.cluster_security_group_id,
|
||||
]
|
||||
|
||||
map_users = [
|
||||
{
|
||||
userarn = "arn:aws:iam::66666666666:user/user1"
|
||||
username = "user1"
|
||||
groups = ["system:masters"]
|
||||
},
|
||||
{
|
||||
userarn = "arn:aws:iam::66666666666:user/user2"
|
||||
username = "user2"
|
||||
groups = ["system:masters"]
|
||||
},
|
||||
]
|
||||
tags = merge(local.tags, { Separate = "self-managed-node-group" })
|
||||
}
|
||||
|
||||
map_accounts = [
|
||||
"777777777777",
|
||||
"888888888888",
|
||||
]
|
||||
module "fargate_profile" {
|
||||
source = "../../modules/fargate-profile"
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
name = "separate-fargate-profile"
|
||||
cluster_name = module.eks.cluster_id
|
||||
|
||||
subnet_ids = module.vpc.private_subnets
|
||||
selectors = [{
|
||||
namespace = "kube-system"
|
||||
}]
|
||||
|
||||
tags = merge(local.tags, { Separate = "fargate-profile" })
|
||||
}
|
||||
|
||||
################################################################################
|
||||
@@ -163,87 +205,98 @@ module "eks" {
|
||||
module "disabled_eks" {
|
||||
source = "../.."
|
||||
|
||||
create_eks = false
|
||||
create = false
|
||||
}
|
||||
|
||||
module "disabled_fargate" {
|
||||
source = "../../modules/fargate"
|
||||
module "disabled_fargate_profile" {
|
||||
source = "../../modules/fargate-profile"
|
||||
|
||||
create_fargate_pod_execution_role = false
|
||||
create = false
|
||||
}
|
||||
|
||||
module "disabled_node_groups" {
|
||||
source = "../../modules/node_groups"
|
||||
module "disabled_eks_managed_node_group" {
|
||||
source = "../../modules/eks-managed-node-group"
|
||||
|
||||
create_eks = false
|
||||
create = false
|
||||
}
|
||||
|
||||
module "disabled_self_managed_node_group" {
|
||||
source = "../../modules/self-managed-node-group"
|
||||
|
||||
create = false
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Kubernetes provider configuration
|
||||
# aws-auth configmap
|
||||
# Only EKS managed node groups automatically add roles to aws-auth configmap
|
||||
# so we need to ensure fargate profiles and self-managed node roles are added
|
||||
################################################################################
|
||||
|
||||
data "aws_eks_cluster" "cluster" {
|
||||
data "aws_eks_cluster_auth" "this" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
data "aws_eks_cluster_auth" "cluster" {
|
||||
name = module.eks.cluster_id
|
||||
locals {
|
||||
kubeconfig = yamlencode({
|
||||
apiVersion = "v1"
|
||||
kind = "Config"
|
||||
current-context = "terraform"
|
||||
clusters = [{
|
||||
name = module.eks.cluster_id
|
||||
cluster = {
|
||||
certificate-authority-data = module.eks.cluster_certificate_authority_data
|
||||
server = module.eks.cluster_endpoint
|
||||
}
|
||||
}]
|
||||
contexts = [{
|
||||
name = "terraform"
|
||||
context = {
|
||||
cluster = module.eks.cluster_id
|
||||
user = "terraform"
|
||||
}
|
||||
}]
|
||||
users = [{
|
||||
name = "terraform"
|
||||
user = {
|
||||
token = data.aws_eks_cluster_auth.this.token
|
||||
}
|
||||
}]
|
||||
})
|
||||
|
||||
# we have to combine the configmap created by the eks module with the externally created node group/profile sub-modules
|
||||
aws_auth_configmap_yaml = <<-EOT
|
||||
${chomp(module.eks.aws_auth_configmap_yaml)}
|
||||
- rolearn: ${module.eks_managed_node_group.iam_role_arn}
|
||||
username: system:node:{{EC2PrivateDNSName}}
|
||||
groups:
|
||||
- system:bootstrappers
|
||||
- system:nodes
|
||||
- rolearn: ${module.self_managed_node_group.iam_role_arn}
|
||||
username: system:node:{{EC2PrivateDNSName}}
|
||||
groups:
|
||||
- system:bootstrappers
|
||||
- system:nodes
|
||||
- rolearn: ${module.fargate_profile.fargate_profile_arn}
|
||||
username: system:node:{{SessionName}}
|
||||
groups:
|
||||
- system:bootstrappers
|
||||
- system:nodes
|
||||
- system:node-proxier
|
||||
EOT
|
||||
}
|
||||
|
||||
provider "kubernetes" {
|
||||
host = data.aws_eks_cluster.cluster.endpoint
|
||||
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
|
||||
token = data.aws_eks_cluster_auth.cluster.token
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Additional security groups for workers
|
||||
################################################################################
|
||||
|
||||
resource "aws_security_group" "worker_group_mgmt_one" {
|
||||
name_prefix = "worker_group_mgmt_one"
|
||||
vpc_id = module.vpc.vpc_id
|
||||
|
||||
ingress {
|
||||
from_port = 22
|
||||
to_port = 22
|
||||
protocol = "tcp"
|
||||
|
||||
cidr_blocks = [
|
||||
"10.0.0.0/8",
|
||||
]
|
||||
resource "null_resource" "patch" {
|
||||
triggers = {
|
||||
kubeconfig = base64encode(local.kubeconfig)
|
||||
cmd_patch = "kubectl patch configmap/aws-auth --patch \"${local.aws_auth_configmap_yaml}\" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)"
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_security_group" "worker_group_mgmt_two" {
|
||||
name_prefix = "worker_group_mgmt_two"
|
||||
vpc_id = module.vpc.vpc_id
|
||||
|
||||
ingress {
|
||||
from_port = 22
|
||||
to_port = 22
|
||||
protocol = "tcp"
|
||||
|
||||
cidr_blocks = [
|
||||
"192.168.0.0/16",
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_security_group" "all_worker_mgmt" {
|
||||
name_prefix = "all_worker_management"
|
||||
vpc_id = module.vpc.vpc_id
|
||||
|
||||
ingress {
|
||||
from_port = 22
|
||||
to_port = 22
|
||||
protocol = "tcp"
|
||||
|
||||
cidr_blocks = [
|
||||
"10.0.0.0/8",
|
||||
"172.16.0.0/12",
|
||||
"192.168.0.0/16",
|
||||
]
|
||||
provisioner "local-exec" {
|
||||
interpreter = ["/bin/bash", "-c"]
|
||||
environment = {
|
||||
KUBECONFIG = self.triggers.kubeconfig
|
||||
}
|
||||
command = self.triggers.cmd_patch
|
||||
}
|
||||
}
|
||||
|
||||
@@ -251,40 +304,60 @@ resource "aws_security_group" "all_worker_mgmt" {
|
||||
# Supporting resources
|
||||
################################################################################
|
||||
|
||||
data "aws_availability_zones" "available" {
|
||||
}
|
||||
|
||||
resource "random_string" "suffix" {
|
||||
length = 8
|
||||
special = false
|
||||
}
|
||||
|
||||
module "vpc" {
|
||||
source = "terraform-aws-modules/vpc/aws"
|
||||
version = "~> 3.0"
|
||||
|
||||
name = local.name
|
||||
cidr = "10.0.0.0/16"
|
||||
azs = data.aws_availability_zones.available.names
|
||||
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
|
||||
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
|
||||
name = local.name
|
||||
cidr = "10.0.0.0/16"
|
||||
|
||||
azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
|
||||
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
|
||||
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
|
||||
|
||||
enable_nat_gateway = true
|
||||
single_nat_gateway = true
|
||||
enable_dns_hostnames = true
|
||||
|
||||
enable_flow_log = true
|
||||
create_flow_log_cloudwatch_iam_role = true
|
||||
create_flow_log_cloudwatch_log_group = true
|
||||
|
||||
public_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
"kubernetes.io/role/elb" = 1
|
||||
}
|
||||
|
||||
private_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/internal-elb" = "1"
|
||||
"kubernetes.io/role/internal-elb" = 1
|
||||
}
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
resource "aws_security_group" "additional" {
|
||||
name_prefix = "${local.name}-additional"
|
||||
vpc_id = module.vpc.vpc_id
|
||||
|
||||
ingress {
|
||||
from_port = 22
|
||||
to_port = 22
|
||||
protocol = "tcp"
|
||||
cidr_blocks = [
|
||||
"10.0.0.0/8",
|
||||
"172.16.0.0/12",
|
||||
"192.168.0.0/16",
|
||||
]
|
||||
}
|
||||
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
resource "aws_kms_key" "eks" {
|
||||
description = "EKS Secret Encryption Key"
|
||||
deletion_window_in_days = 7
|
||||
enable_key_rotation = true
|
||||
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
@@ -1,24 +1,148 @@
|
||||
################################################################################
|
||||
# Cluster
|
||||
################################################################################
|
||||
|
||||
output "cluster_arn" {
|
||||
description = "The Amazon Resource Name (ARN) of the cluster"
|
||||
value = module.eks.cluster_arn
|
||||
}
|
||||
|
||||
output "cluster_certificate_authority_data" {
|
||||
description = "Base64 encoded certificate data required to communicate with the cluster"
|
||||
value = module.eks.cluster_certificate_authority_data
|
||||
}
|
||||
|
||||
output "cluster_endpoint" {
|
||||
description = "Endpoint for EKS control plane."
|
||||
description = "Endpoint for your Kubernetes API server"
|
||||
value = module.eks.cluster_endpoint
|
||||
}
|
||||
|
||||
output "cluster_id" {
|
||||
description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
|
||||
value = module.eks.cluster_id
|
||||
}
|
||||
|
||||
output "cluster_oidc_issuer_url" {
|
||||
description = "The URL on the EKS cluster for the OpenID Connect identity provider"
|
||||
value = module.eks.cluster_oidc_issuer_url
|
||||
}
|
||||
|
||||
output "cluster_platform_version" {
|
||||
description = "Platform version for the cluster"
|
||||
value = module.eks.cluster_platform_version
|
||||
}
|
||||
|
||||
output "cluster_status" {
|
||||
description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
|
||||
value = module.eks.cluster_status
|
||||
}
|
||||
|
||||
output "cluster_security_group_id" {
|
||||
description = "Security group ids attached to the cluster control plane."
|
||||
description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
|
||||
value = module.eks.cluster_security_group_id
|
||||
}
|
||||
|
||||
output "kubectl_config" {
|
||||
description = "kubectl config as generated by the module."
|
||||
value = module.eks.kubeconfig
|
||||
################################################################################
|
||||
# Security Group
|
||||
################################################################################
|
||||
|
||||
output "cluster_security_group_arn" {
|
||||
description = "Amazon Resource Name (ARN) of the cluster security group"
|
||||
value = module.eks.cluster_security_group_arn
|
||||
}
|
||||
|
||||
output "config_map_aws_auth" {
|
||||
description = "A kubernetes configuration to authenticate to this EKS cluster."
|
||||
value = module.eks.config_map_aws_auth
|
||||
################################################################################
|
||||
# IRSA
|
||||
################################################################################
|
||||
|
||||
output "oidc_provider_arn" {
|
||||
description = "The ARN of the OIDC Provider if `enable_irsa = true`"
|
||||
value = module.eks.oidc_provider_arn
|
||||
}
|
||||
|
||||
output "node_groups" {
|
||||
description = "Outputs from node groups"
|
||||
value = module.eks.node_groups
|
||||
################################################################################
|
||||
# IAM Role
|
||||
################################################################################
|
||||
|
||||
output "cluster_iam_role_name" {
|
||||
description = "IAM role name of the EKS cluster"
|
||||
value = module.eks.cluster_iam_role_name
|
||||
}
|
||||
|
||||
output "cluster_iam_role_arn" {
|
||||
description = "IAM role ARN of the EKS cluster"
|
||||
value = module.eks.cluster_iam_role_arn
|
||||
}
|
||||
|
||||
output "cluster_iam_role_unique_id" {
|
||||
description = "Stable and unique string identifying the IAM role"
|
||||
value = module.eks.cluster_iam_role_unique_id
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Addons
|
||||
################################################################################
|
||||
|
||||
output "cluster_addons" {
|
||||
description = "Map of attribute maps for all EKS cluster addons enabled"
|
||||
value = module.eks.cluster_addons
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Identity Provider
|
||||
################################################################################
|
||||
|
||||
output "cluster_identity_providers" {
|
||||
description = "Map of attribute maps for all EKS identity providers enabled"
|
||||
value = module.eks.cluster_identity_providers
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# CloudWatch Log Group
|
||||
################################################################################
|
||||
|
||||
output "cloudwatch_log_group_name" {
|
||||
description = "Name of cloudwatch log group created"
|
||||
value = module.eks.cloudwatch_log_group_name
|
||||
}
|
||||
|
||||
output "cloudwatch_log_group_arn" {
|
||||
description = "Arn of cloudwatch log group created"
|
||||
value = module.eks.cloudwatch_log_group_arn
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Fargate Profile
|
||||
################################################################################
|
||||
|
||||
output "fargate_profiles" {
|
||||
description = "Map of attribute maps for all EKS Fargate Profiles created"
|
||||
value = module.eks.fargate_profiles
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Managed Node Group
|
||||
################################################################################
|
||||
|
||||
output "eks_managed_node_groups" {
|
||||
description = "Map of attribute maps for all EKS managed node groups created"
|
||||
value = module.eks.eks_managed_node_groups
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Self Managed Node Group
|
||||
################################################################################
|
||||
|
||||
output "self_managed_node_groups" {
|
||||
description = "Map of attribute maps for all self managed node groups created"
|
||||
value = module.eks.self_managed_node_groups
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Additional
|
||||
################################################################################
|
||||
|
||||
output "aws_auth_configmap_yaml" {
|
||||
description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
|
||||
value = module.eks.aws_auth_configmap_yaml
|
||||
}
|
||||
|
||||
@@ -4,19 +4,11 @@ terraform {
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
version = ">= 3.56"
|
||||
version = ">= 3.64"
|
||||
}
|
||||
local = {
|
||||
source = "hashicorp/local"
|
||||
version = ">= 1.4"
|
||||
}
|
||||
kubernetes = {
|
||||
source = "hashicorp/kubernetes"
|
||||
version = ">= 1.11.1"
|
||||
}
|
||||
random = {
|
||||
source = "hashicorp/random"
|
||||
version = ">= 2.1"
|
||||
null = {
|
||||
source = "hashicorp/null"
|
||||
version = ">= 3.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
95
examples/eks_managed_node_group/README.md
Normal file
@@ -0,0 +1,95 @@
|
||||
# EKS Managed Node Group Example

Configuration in this directory creates an AWS EKS cluster with various EKS Managed Node Groups demonstrating the various methods of configuring/customizing:

- A default, "out of the box" EKS managed node group as supplied by AWS EKS
- A default, "out of the box" Bottlerocket EKS managed node group as supplied by AWS EKS
- A Bottlerocket EKS managed node group that supplies additional bootstrap settings
- A Bottlerocket EKS managed node group that demonstrates many of the configuration/customizations offered by the `eks-managed-node-group` sub-module for the Bottlerocket OS
- An EKS managed node group created from a launch template created outside of the module
- An EKS managed node group that utilizes a custom AMI that is an EKS optimized AMI derivative
- An EKS managed node group that demonstrates nearly all of the configurations/customizations offered by the `eks-managed-node-group` sub-module

See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for further details.
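
For orientation before diving into `main.tf`, here is a minimal sketch of how EKS managed node groups are declared through the parent module. The values are illustrative only; the attribute names follow the v18 interface also used in the complete example:

```hcl
module "eks" {
  source = "../.."

  cluster_name    = "example" # illustrative name
  cluster_version = "1.21"

  vpc_id     = module.vpc.vpc_id
  subnet_ids = module.vpc.private_subnets

  eks_managed_node_group_defaults = {
    instance_types = ["m6i.large", "m5.large"]
  }

  eks_managed_node_groups = {
    # An empty map inherits the defaults supplied by AWS EKS and the module
    default = {}

    # Override selected settings per node group
    spot = {
      min_size       = 1
      max_size       = 5
      desired_size   = 1
      capacity_type  = "SPOT"
      instance_types = ["t3.large"]
    }
  }
}
```
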
## Usage

To run this example you need to execute:

```bash
$ terraform init
$ terraform plan
$ terraform apply
```

Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
|
||||
|
||||
<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
## Requirements
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
|
||||
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.64 |
|
||||
| <a name="requirement_null"></a> [null](#requirement\_null) | >= 3.0 |
|
||||
|
||||
## Providers
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.64 |
|
||||
| <a name="provider_null"></a> [null](#provider\_null) | >= 3.0 |
|
||||
|
||||
## Modules
|
||||
|
||||
| Name | Source | Version |
|
||||
|------|--------|---------|
|
||||
| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
|
||||
| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
|
||||
|
||||
## Resources
|
||||
|
||||
| Name | Type |
|
||||
|------|------|
|
||||
| [aws_kms_key.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
|
||||
| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
|
||||
| [aws_launch_template.external](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
|
||||
| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
|
||||
| [null_resource.patch](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
|
||||
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
|
||||
| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
|
||||
| [aws_iam_policy_document.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
|
||||
|
||||
## Inputs
|
||||
|
||||
No inputs.
|
||||
|
||||
## Outputs
|
||||
|
||||
| Name | Description |
|
||||
|------|-------------|
|
||||
| <a name="output_aws_auth_configmap_yaml"></a> [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
|
||||
| <a name="output_cloudwatch_log_group_arn"></a> [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
|
||||
| <a name="output_cloudwatch_log_group_name"></a> [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
|
||||
| <a name="output_cluster_addons"></a> [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
|
||||
| <a name="output_cluster_arn"></a> [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
|
||||
| <a name="output_cluster_certificate_authority_data"></a> [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
|
||||
| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
|
||||
| <a name="output_cluster_iam_role_arn"></a> [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
|
||||
| <a name="output_cluster_iam_role_name"></a> [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
|
||||
| <a name="output_cluster_iam_role_unique_id"></a> [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
|
||||
| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
|
||||
| <a name="output_cluster_identity_providers"></a> [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
|
||||
| <a name="output_cluster_oidc_issuer_url"></a> [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
|
||||
| <a name="output_cluster_platform_version"></a> [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
|
||||
| <a name="output_cluster_primary_security_group_id"></a> [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
|
||||
| <a name="output_cluster_security_group_arn"></a> [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
|
||||
| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
|
||||
| <a name="output_cluster_status"></a> [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
|
||||
| <a name="output_eks_managed_node_groups"></a> [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
|
||||
| <a name="output_fargate_profiles"></a> [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
|
||||
| <a name="output_node_security_group_arn"></a> [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
|
||||
| <a name="output_node_security_group_id"></a> [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
|
||||
| <a name="output_oidc_provider_arn"></a> [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
|
||||
| <a name="output_self_managed_node_groups"></a> [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
|
||||
<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
490
examples/eks_managed_node_group/main.tf
Normal file
@@ -0,0 +1,490 @@
|
||||
provider "aws" {
|
||||
region = local.region
|
||||
}
|
||||
|
||||
locals {
|
||||
name = "ex-${replace(basename(path.cwd), "_", "-")}"
|
||||
cluster_version = "1.21"
|
||||
region = "eu-west-1"
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
|
||||
data "aws_caller_identity" "current" {}
|
||||
|
||||
################################################################################
|
||||
# EKS Module
|
||||
################################################################################
|
||||
|
||||
module "eks" {
|
||||
source = "../.."
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_version = local.cluster_version
|
||||
cluster_service_ipv4_cidr = "172.16.0.0/16"
|
||||
cluster_endpoint_private_access = true
|
||||
cluster_endpoint_public_access = true
|
||||
|
||||
cluster_addons = {
|
||||
coredns = {
|
||||
resolve_conflicts = "OVERWRITE"
|
||||
}
|
||||
kube-proxy = {}
|
||||
vpc-cni = {
|
||||
resolve_conflicts = "OVERWRITE"
|
||||
}
|
||||
}
|
||||
|
||||
cluster_encryption_config = [{
|
||||
provider_key_arn = aws_kms_key.eks.arn
|
||||
resources = ["secrets"]
|
||||
}]
|
||||
|
||||
vpc_id = module.vpc.vpc_id
|
||||
subnet_ids = module.vpc.private_subnets
|
||||
|
||||
enable_irsa = true
|
||||
|
||||
eks_managed_node_group_defaults = {
|
||||
ami_type = "AL2_x86_64"
|
||||
disk_size = 50
|
||||
instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large"]
|
||||
}
|
||||
|
||||
eks_managed_node_groups = {
|
||||
# Default node group - as provided by AWS EKS
|
||||
default_node_group = {}
|
||||
|
||||
# Default node group - as provided by AWS EKS using Bottlerocket
|
||||
bottlerocket_default = {
|
||||
ami_type = "BOTTLEROCKET_x86_64"
|
||||
platform = "bottlerocket"
|
||||
}
|
||||
|
||||
# Adds to the AWS provided user data
|
||||
bottlerocket_add = {
|
||||
ami_type = "BOTTLEROCKET_x86_64"
|
||||
platform = "bottlerocket"
|
||||
|
||||
# this will get added to what AWS provides
|
||||
bootstrap_extra_args = <<-EOT
|
||||
# extra args added
|
||||
[settings.kernel]
|
||||
lockdown = "integrity"
|
||||
EOT
|
||||
}
|
||||
|
||||
# Custom AMI, using module provided bootstrap data
|
||||
bottlerocket_custom = {
|
||||
# Current bottlerocket AMI
|
||||
ami_id = "ami-0ff61e0bcfc81dc94"
|
||||
platform = "bottlerocket"
|
||||
|
||||
# use the module's user data template to bootstrap
|
||||
enable_bootstrap_user_data = true
|
||||
# this will get added to the template
|
||||
bootstrap_extra_args = <<-EOT
|
||||
# extra args added
|
||||
[settings.kernel]
|
||||
lockdown = "integrity"
|
||||
|
||||
[settings.kubernetes.node-labels]
|
||||
"label1" = "foo"
|
||||
"label2" = "bar"
|
||||
|
||||
[settings.kubernetes.node-taints]
|
||||
"dedicated" = "experimental:PreferNoSchedule"
|
||||
"special" = "true:NoSchedule"
|
||||
EOT
|
||||
}
|
||||
|
||||
# Use existing/external launch template
|
||||
external_lt = {
|
||||
create_launch_template = false
|
||||
launch_template_name = aws_launch_template.external.name
|
||||
launch_template_version = aws_launch_template.external.default_version
|
||||
}
|
||||
|
||||
# Use a custom AMI
|
||||
custom_ami = {
|
||||
# Current default AMI used by managed node groups - pseudo "custom"
|
||||
ami_id = "ami-0caf35bc73450c396"
|
||||
|
||||
# This will ensure the bootstrap user data is used to join the node
|
||||
# By default, EKS managed node groups will not append the bootstrap script;
|
||||
# this adds it back in using the default template provided by the module
|
||||
# Note: this assumes the AMI provided is an EKS optimized AMI derivative
|
||||
enable_bootstrap_user_data = true
|
||||
}
|
||||
|
||||
# Complete
|
||||
complete = {
|
||||
name = "complete-eks-mng"
|
||||
use_name_prefix = false
|
||||
|
||||
subnet_ids = module.vpc.private_subnets
|
||||
|
||||
min_size = 1
|
||||
max_size = 7
|
||||
desired_size = 1
|
||||
|
||||
ami_id = "ami-0caf35bc73450c396"
|
||||
enable_bootstrap_user_data = true
|
||||
bootstrap_extra_args = "--container-runtime containerd --kubelet-extra-args '--max-pods=20'"
|
||||
|
||||
pre_bootstrap_user_data = <<-EOT
|
||||
export CONTAINER_RUNTIME="containerd"
|
||||
export USE_MAX_PODS=false
|
||||
EOT
|
||||
|
||||
post_bootstrap_user_data = <<-EOT
|
||||
echo "you are free little kubelet!"
|
||||
EOT
|
||||
|
||||
capacity_type = "SPOT"
|
||||
disk_size = 256
|
||||
force_update_version = true
|
||||
instance_types = ["m6i.large", "m5.large", "m5n.large", "m5zn.large", "m3.large", "m4.large"]
|
||||
labels = {
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
|
||||
taints = [
|
||||
{
|
||||
key = "dedicated"
|
||||
value = "gpuGroup"
|
||||
effect = "NO_SCHEDULE"
|
||||
}
|
||||
]
|
||||
|
||||
update_config = {
|
||||
max_unavailable_percentage = 50 # or set `max_unavailable`
|
||||
}
|
||||
|
||||
description = "EKS managed node group example launch template"
|
||||
|
||||
ebs_optimized = true
|
||||
vpc_security_group_ids = [aws_security_group.additional.id]
|
||||
disable_api_termination = false
|
||||
enable_monitoring = true
|
||||
|
||||
block_device_mappings = {
|
||||
xvda = {
|
||||
device_name = "/dev/xvda"
|
||||
ebs = {
|
||||
volume_size = 75
|
||||
volume_type = "gp3"
|
||||
iops = 3000
|
||||
throughput = 150
|
||||
encrypted = true
|
||||
kms_key_id = aws_kms_key.ebs.arn
|
||||
delete_on_termination = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
metadata_options = {
|
||||
http_endpoint = "enabled"
|
||||
http_tokens = "required"
|
||||
http_put_response_hop_limit = 2
|
||||
}
|
||||
|
||||
create_iam_role = true
|
||||
iam_role_name = "eks-managed-node-group-complete-example"
|
||||
iam_role_use_name_prefix = false
|
||||
iam_role_description = "EKS managed node group complete example role"
|
||||
iam_role_tags = {
|
||||
Purpose = "Protector of the kubelet"
|
||||
}
|
||||
iam_role_additional_policies = [
|
||||
"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
|
||||
]
|
||||
|
||||
create_security_group = true
|
||||
security_group_name = "eks-managed-node-group-complete-example"
|
||||
security_group_use_name_prefix = false
|
||||
security_group_description = "EKS managed node group complete example security group"
|
||||
security_group_rules = {
|
||||
phoneOut = {
|
||||
description = "Hello CloudFlare"
|
||||
protocol = "udp"
|
||||
from_port = 53
|
||||
to_port = 53
|
||||
type = "egress"
|
||||
cidr_blocks = ["1.1.1.1/32"]
|
||||
}
|
||||
phoneHome = {
|
||||
description = "Hello cluster"
|
||||
protocol = "udp"
|
||||
from_port = 53
|
||||
to_port = 53
|
||||
type = "egress"
|
||||
source_cluster_security_group = true # references the cluster security group created by the module
|
||||
}
|
||||
}
|
||||
security_group_tags = {
|
||||
Purpose = "Protector of the kubelet"
|
||||
}
|
||||
|
||||
tags = {
|
||||
ExtraTag = "EKS managed node group complete example"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# aws-auth configmap
|
||||
# Only EKS managed node groups automatically add roles to aws-auth configmap
|
||||
# so we need to ensure Fargate profile and self-managed node group roles are added
|
||||
################################################################################
|
||||
|
||||
data "aws_eks_cluster_auth" "this" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
locals {
|
||||
kubeconfig = yamlencode({
|
||||
apiVersion = "v1"
|
||||
kind = "Config"
|
||||
current-context = "terraform"
|
||||
clusters = [{
|
||||
name = module.eks.cluster_id
|
||||
cluster = {
|
||||
certificate-authority-data = module.eks.cluster_certificate_authority_data
|
||||
server = module.eks.cluster_endpoint
|
||||
}
|
||||
}]
|
||||
contexts = [{
|
||||
name = "terraform"
|
||||
context = {
|
||||
cluster = module.eks.cluster_id
|
||||
user = "terraform"
|
||||
}
|
||||
}]
|
||||
users = [{
|
||||
name = "terraform"
|
||||
user = {
|
||||
token = data.aws_eks_cluster_auth.this.token
|
||||
}
|
||||
}]
|
||||
})
|
||||
}
|
||||
|
||||
resource "null_resource" "patch" {
|
||||
triggers = {
|
||||
kubeconfig = base64encode(local.kubeconfig)
|
||||
cmd_patch = "kubectl patch configmap/aws-auth --patch \"${module.eks.aws_auth_configmap_yaml}\" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)"
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
interpreter = ["/bin/bash", "-c"]
|
||||
environment = {
|
||||
KUBECONFIG = self.triggers.kubeconfig
|
||||
}
|
||||
command = self.triggers.cmd_patch
|
||||
}
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Supporting Resources
|
||||
################################################################################
|
||||
|
||||
module "vpc" {
|
||||
source = "terraform-aws-modules/vpc/aws"
|
||||
version = "~> 3.0"
|
||||
|
||||
name = local.name
|
||||
cidr = "10.0.0.0/16"
|
||||
|
||||
azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
|
||||
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
|
||||
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
|
||||
|
||||
enable_nat_gateway = true
|
||||
single_nat_gateway = true
|
||||
enable_dns_hostnames = true
|
||||
|
||||
enable_flow_log = true
|
||||
create_flow_log_cloudwatch_iam_role = true
|
||||
create_flow_log_cloudwatch_log_group = true
|
||||
|
||||
public_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/elb" = 1
|
||||
}
|
||||
|
||||
private_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/internal-elb" = 1
|
||||
}
|
||||
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
resource "aws_security_group" "additional" {
|
||||
name_prefix = "${local.name}-additional"
|
||||
vpc_id = module.vpc.vpc_id
|
||||
|
||||
ingress {
|
||||
from_port = 22
|
||||
to_port = 22
|
||||
protocol = "tcp"
|
||||
cidr_blocks = [
|
||||
"10.0.0.0/8",
|
||||
"172.16.0.0/12",
|
||||
"192.168.0.0/16",
|
||||
]
|
||||
}
|
||||
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
resource "aws_kms_key" "eks" {
|
||||
description = "EKS Secret Encryption Key"
|
||||
deletion_window_in_days = 7
|
||||
enable_key_rotation = true
|
||||
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
resource "aws_kms_key" "ebs" {
|
||||
description = "Customer managed key to encrypt EKS managed node group volumes"
|
||||
deletion_window_in_days = 7
|
||||
policy = data.aws_iam_policy_document.ebs.json
|
||||
}
|
||||
|
||||
# This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to encrypt, decrypt, and attach encrypted EBS volumes
|
||||
data "aws_iam_policy_document" "ebs" {
|
||||
# Copy of the default KMS key policy that lets you manage the key
|
||||
statement {
|
||||
sid = "Enable IAM User Permissions"
|
||||
actions = ["kms:*"]
|
||||
resources = ["*"]
|
||||
|
||||
principals {
|
||||
type = "AWS"
|
||||
identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
|
||||
}
|
||||
}
|
||||
|
||||
# Required for EKS
|
||||
statement {
|
||||
sid = "Allow service-linked role use of the CMK"
|
||||
actions = [
|
||||
"kms:Encrypt",
|
||||
"kms:Decrypt",
|
||||
"kms:ReEncrypt*",
|
||||
"kms:GenerateDataKey*",
|
||||
"kms:DescribeKey"
|
||||
]
|
||||
resources = ["*"]
|
||||
|
||||
principals {
|
||||
type = "AWS"
|
||||
identifiers = [
|
||||
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
|
||||
module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
statement {
|
||||
sid = "Allow attachment of persistent resources"
|
||||
actions = ["kms:CreateGrant"]
|
||||
resources = ["*"]
|
||||
|
||||
principals {
|
||||
type = "AWS"
|
||||
identifiers = [
|
||||
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
|
||||
module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
|
||||
]
|
||||
}
|
||||
|
||||
condition {
|
||||
test = "Bool"
|
||||
variable = "kms:GrantIsForAWSResource"
|
||||
values = ["true"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# This is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx)
|
||||
# there are several more options one could set, but you probably don't need to modify them
|
||||
# you can take the default and add your custom AMI and/or custom tags
|
||||
#
|
||||
# Trivia: AWS transparently creates a copy of your launch template and then actually uses that copy for the node group. If you DON'T use a custom AMI,
|
||||
# then the default user-data for bootstrapping the cluster is merged into that copy.
|
||||
|
||||
resource "aws_launch_template" "external" {
|
||||
name_prefix = "external-eks-ex-"
|
||||
description = "EKS managed node group external launch template"
|
||||
update_default_version = true
|
||||
|
||||
block_device_mappings {
|
||||
device_name = "/dev/xvda"
|
||||
|
||||
ebs {
|
||||
volume_size = 100
|
||||
volume_type = "gp2"
|
||||
delete_on_termination = true
|
||||
}
|
||||
}
|
||||
|
||||
monitoring {
|
||||
enabled = true
|
||||
}
|
||||
|
||||
network_interfaces {
|
||||
associate_public_ip_address = false
|
||||
delete_on_termination = true
|
||||
}
|
||||
|
||||
# if you want to use a custom AMI
|
||||
# image_id = var.ami_id
|
||||
|
||||
# If you use a custom AMI, you need to supply the bootstrap script via user-data, as EKS DOESN'T merge in its managed user-data in that case
|
||||
# you can add more than the minimum code you see in the template, e.g. install the SSM agent; see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
|
||||
# (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151)
|
||||
# user_data = base64encode(data.template_file.launch_template_userdata.rendered)
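# A minimal, hypothetical sketch of the cloudinit approach suggested above; the
# `templates/userdata.sh.tpl` path and the data source name are illustrative
# assumptions only, not part of this example:
#
# data "cloudinit_config" "custom_ami" {
#   gzip          = false
#   base64_encode = true
#
#   part {
#     content_type = "text/x-shellscript"
#     content = templatefile("${path.module}/templates/userdata.sh.tpl", {
#       cluster_name = local.name
#     })
#   }
# }
#
# user_data = data.cloudinit_config.custom_ami.rendered # already base64 encoded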
|
||||
|
||||
tag_specifications {
|
||||
resource_type = "instance"
|
||||
|
||||
tags = {
|
||||
CustomTag = "Instance custom tag"
|
||||
}
|
||||
}
|
||||
|
||||
tag_specifications {
|
||||
resource_type = "volume"
|
||||
|
||||
tags = {
|
||||
CustomTag = "Volume custom tag"
|
||||
}
|
||||
}
|
||||
|
||||
tag_specifications {
|
||||
resource_type = "network-interface"
|
||||
|
||||
tags = {
|
||||
CustomTag = "EKS example"
|
||||
}
|
||||
}
|
||||
|
||||
tags = {
|
||||
CustomTag = "Launch template custom tag"
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
}
|
||||
}
|
||||
167
examples/eks_managed_node_group/outputs.tf
Normal file
@@ -0,0 +1,167 @@
|
||||
################################################################################
|
||||
# Cluster
|
||||
################################################################################
|
||||
|
||||
output "cluster_arn" {
|
||||
description = "The Amazon Resource Name (ARN) of the cluster"
|
||||
value = module.eks.cluster_arn
|
||||
}
|
||||
|
||||
output "cluster_certificate_authority_data" {
|
||||
description = "Base64 encoded certificate data required to communicate with the cluster"
|
||||
value = module.eks.cluster_certificate_authority_data
|
||||
}
|
||||
|
||||
output "cluster_endpoint" {
|
||||
description = "Endpoint for your Kubernetes API server"
|
||||
value = module.eks.cluster_endpoint
|
||||
}
|
||||
|
||||
output "cluster_id" {
|
||||
description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
|
||||
value = module.eks.cluster_id
|
||||
}
|
||||
|
||||
output "cluster_oidc_issuer_url" {
|
||||
description = "The URL on the EKS cluster for the OpenID Connect identity provider"
|
||||
value = module.eks.cluster_oidc_issuer_url
|
||||
}
|
||||
|
||||
output "cluster_platform_version" {
|
||||
description = "Platform version for the cluster"
|
||||
value = module.eks.cluster_platform_version
|
||||
}
|
||||
|
||||
output "cluster_status" {
|
||||
description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
|
||||
value = module.eks.cluster_status
|
||||
}
|
||||
|
||||
output "cluster_primary_security_group_id" {
|
||||
description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
|
||||
value = module.eks.cluster_primary_security_group_id
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Security Group
|
||||
################################################################################
|
||||
|
||||
output "cluster_security_group_arn" {
|
||||
description = "Amazon Resource Name (ARN) of the cluster security group"
|
||||
value = module.eks.cluster_security_group_arn
|
||||
}
|
||||
|
||||
output "cluster_security_group_id" {
|
||||
description = "ID of the cluster security group"
|
||||
value = module.eks.cluster_security_group_id
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Node Security Group
|
||||
################################################################################
|
||||
|
||||
output "node_security_group_arn" {
|
||||
description = "Amazon Resource Name (ARN) of the node shared security group"
|
||||
value = module.eks.node_security_group_arn
|
||||
}
|
||||
|
||||
output "node_security_group_id" {
|
||||
description = "ID of the node shared security group"
|
||||
value = module.eks.node_security_group_id
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# IRSA
|
||||
################################################################################
|
||||
|
||||
output "oidc_provider_arn" {
|
||||
description = "The ARN of the OIDC Provider if `enable_irsa = true`"
|
||||
value = module.eks.oidc_provider_arn
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# IAM Role
|
||||
################################################################################
|
||||
|
||||
output "cluster_iam_role_name" {
|
||||
description = "IAM role name of the EKS cluster"
|
||||
value = module.eks.cluster_iam_role_name
|
||||
}
|
||||
|
||||
output "cluster_iam_role_arn" {
|
||||
description = "IAM role ARN of the EKS cluster"
|
||||
value = module.eks.cluster_iam_role_arn
|
||||
}
|
||||
|
||||
output "cluster_iam_role_unique_id" {
|
||||
description = "Stable and unique string identifying the IAM role"
|
||||
value = module.eks.cluster_iam_role_unique_id
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Addons
|
||||
################################################################################
|
||||
|
||||
output "cluster_addons" {
|
||||
description = "Map of attribute maps for all EKS cluster addons enabled"
|
||||
value = module.eks.cluster_addons
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Identity Provider
|
||||
################################################################################
|
||||
|
||||
output "cluster_identity_providers" {
|
||||
description = "Map of attribute maps for all EKS identity providers enabled"
|
||||
value = module.eks.cluster_identity_providers
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# CloudWatch Log Group
|
||||
################################################################################
|
||||
|
||||
output "cloudwatch_log_group_name" {
|
||||
description = "Name of cloudwatch log group created"
|
||||
value = module.eks.cloudwatch_log_group_name
|
||||
}
|
||||
|
||||
output "cloudwatch_log_group_arn" {
|
||||
description = "Arn of cloudwatch log group created"
|
||||
value = module.eks.cloudwatch_log_group_arn
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Fargate Profile
|
||||
################################################################################
|
||||
|
||||
output "fargate_profiles" {
|
||||
description = "Map of attribute maps for all EKS Fargate Profiles created"
|
||||
value = module.eks.fargate_profiles
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Managed Node Group
|
||||
################################################################################
|
||||
|
||||
output "eks_managed_node_groups" {
|
||||
description = "Map of attribute maps for all EKS managed node groups created"
|
||||
value = module.eks.eks_managed_node_groups
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Self Managed Node Group
|
||||
################################################################################
|
||||
|
||||
output "self_managed_node_groups" {
|
||||
description = "Map of attribute maps for all self managed node groups created"
|
||||
value = module.eks.self_managed_node_groups
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Additional
|
||||
################################################################################
|
||||
|
||||
output "aws_auth_configmap_yaml" {
|
||||
description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
|
||||
value = module.eks.aws_auth_configmap_yaml
|
||||
}
|
||||
14
examples/eks_managed_node_group/versions.tf
Normal file
@@ -0,0 +1,14 @@
|
||||
terraform {
|
||||
required_version = ">= 0.13.1"
|
||||
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
version = ">= 3.64"
|
||||
}
|
||||
null = {
|
||||
source = "hashicorp/null"
|
||||
version = ">= 3.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,69 +0,0 @@
|
||||
# AWS EKS Cluster with Fargate profiles
|
||||
|
||||
Configuration in this directory creates EKS cluster with Fargate profiles in two different ways:
|
||||
|
||||
- Using a root module, where EKS Cluster and Fargate profiles should be created at once. This is the default behaviour for most users.
|
||||
- Using `modules/fargate` submodule where Fargate profiles should be attached to the existing EKS Cluster.
|
||||
|
||||
## Usage
|
||||
|
||||
To run this example you need to execute:
|
||||
|
||||
```bash
|
||||
$ terraform init
|
||||
$ terraform plan
|
||||
$ terraform apply
|
||||
```
|
||||
|
||||
Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
|
||||
|
||||
<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
## Requirements
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
|
||||
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.56 |
|
||||
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
|
||||
| <a name="requirement_local"></a> [local](#requirement\_local) | >= 1.4 |
|
||||
| <a name="requirement_random"></a> [random](#requirement\_random) | >= 2.1 |
|
||||
| <a name="requirement_tls"></a> [tls](#requirement\_tls) | >= 2.0 |
|
||||
|
||||
## Providers
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.56 |
|
||||
| <a name="provider_random"></a> [random](#provider\_random) | >= 2.1 |
|
||||
|
||||
## Modules
|
||||
|
||||
| Name | Source | Version |
|
||||
|------|--------|---------|
|
||||
| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
|
||||
| <a name="module_fargate_profile_existing_cluster"></a> [fargate\_profile\_existing\_cluster](#module\_fargate\_profile\_existing\_cluster) | ../../modules/fargate | n/a |
|
||||
| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
|
||||
|
||||
## Resources
|
||||
|
||||
| Name | Type |
|
||||
|------|------|
|
||||
| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
|
||||
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
|
||||
| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
|
||||
| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
|
||||
|
||||
## Inputs
|
||||
|
||||
No inputs.
|
||||
|
||||
## Outputs
|
||||
|
||||
| Name | Description |
|
||||
|------|-------------|
|
||||
| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
|
||||
| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
|
||||
| <a name="output_config_map_aws_auth"></a> [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
|
||||
| <a name="output_fargate_profile_arns"></a> [fargate\_profile\_arns](#output\_fargate\_profile\_arns) | Outputs from node groups |
|
||||
| <a name="output_kubectl_config"></a> [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
|
||||
<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
@@ -1,235 +0,0 @@
|
||||
provider "aws" {
|
||||
region = local.region
|
||||
}
|
||||
|
||||
locals {
|
||||
name = "fargate-${random_string.suffix.result}"
|
||||
cluster_version = "1.20"
|
||||
region = "eu-west-1"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Module
|
||||
################################################################################
|
||||
|
||||
module "eks" {
|
||||
source = "../.."
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_version = local.cluster_version
|
||||
|
||||
vpc_id = module.vpc.vpc_id
|
||||
subnets = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]]
|
||||
fargate_subnets = [module.vpc.private_subnets[2]]
|
||||
|
||||
cluster_endpoint_private_access = true
|
||||
cluster_endpoint_public_access = true
|
||||
|
||||
# You require a node group to schedule coredns which is critical for running correctly internal DNS.
|
||||
# If you want to use only fargate you must follow docs `(Optional) Update CoreDNS`
|
||||
# available under https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html
|
||||
node_groups = {
|
||||
example = {
|
||||
desired_capacity = 1
|
||||
|
||||
instance_types = ["t3.large"]
|
||||
k8s_labels = {
|
||||
Example = "managed_node_groups"
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
additional_tags = {
|
||||
ExtraTag = "example"
|
||||
}
|
||||
update_config = {
|
||||
max_unavailable_percentage = 50 # or set `max_unavailable`
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fargate_profiles = {
|
||||
default = {
|
||||
name = "default"
|
||||
selectors = [
|
||||
{
|
||||
namespace = "kube-system"
|
||||
labels = {
|
||||
k8s-app = "kube-dns"
|
||||
}
|
||||
},
|
||||
{
|
||||
namespace = "default"
|
||||
labels = {
|
||||
WorkerType = "fargate"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
tags = {
|
||||
Owner = "default"
|
||||
}
|
||||
|
||||
timeouts = {
|
||||
create = "20m"
|
||||
delete = "20m"
|
||||
}
|
||||
}
|
||||
|
||||
secondary = {
|
||||
name = "secondary"
|
||||
selectors = [
|
||||
{
|
||||
namespace = "default"
|
||||
labels = {
|
||||
Environment = "test"
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
# Using specific subnets instead of the ones configured in EKS (`subnets` and `fargate_subnets`)
|
||||
subnets = [module.vpc.private_subnets[1]]
|
||||
|
||||
tags = {
|
||||
Owner = "secondary"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
manage_aws_auth = false
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
##############################################
|
||||
# Calling submodule with existing EKS cluster
|
||||
##############################################
|
||||
|
||||
module "fargate_profile_existing_cluster" {
|
||||
source = "../../modules/fargate"
|
||||
|
||||
cluster_name = module.eks.cluster_id
|
||||
subnets = [module.vpc.private_subnets[0], module.vpc.private_subnets[2]]
|
||||
|
||||
fargate_profiles = {
|
||||
profile1 = {
|
||||
name = "profile1"
|
||||
selectors = [
|
||||
{
|
||||
namespace = "kube-system"
|
||||
labels = {
|
||||
k8s-app = "kube-dns"
|
||||
}
|
||||
},
|
||||
{
|
||||
namespace = "profile"
|
||||
labels = {
|
||||
WorkerType = "fargate"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
tags = {
|
||||
Owner = "profile1"
|
||||
submodule = "true"
|
||||
}
|
||||
}
|
||||
|
||||
profile2 = {
|
||||
name = "profile2"
|
||||
selectors = [
|
||||
{
|
||||
namespace = "default"
|
||||
labels = {
|
||||
Fargate = "profile2"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
# Using specific subnets instead of the ones configured in EKS (`subnets` and `fargate_subnets`)
|
||||
subnets = [module.vpc.private_subnets[0]]
|
||||
|
||||
tags = {
|
||||
Owner = "profile2"
|
||||
submodule = "true"
|
||||
}
|
||||
|
||||
timeouts = {
|
||||
delete = "20m"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Kubernetes provider configuration
|
||||
################################################################################
|
||||
|
||||
data "aws_eks_cluster" "cluster" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
data "aws_eks_cluster_auth" "cluster" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
provider "kubernetes" {
|
||||
host = data.aws_eks_cluster.cluster.endpoint
|
||||
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
|
||||
token = data.aws_eks_cluster_auth.cluster.token
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Supporting Resources
|
||||
################################################################################
|
||||
|
||||
data "aws_availability_zones" "available" {
|
||||
}
|
||||
|
||||
resource "random_string" "suffix" {
|
||||
length = 8
|
||||
special = false
|
||||
}
|
||||
|
||||
module "vpc" {
|
||||
source = "terraform-aws-modules/vpc/aws"
|
||||
version = "~> 3.0"
|
||||
|
||||
name = local.name
|
||||
cidr = "10.0.0.0/16"
|
||||
azs = data.aws_availability_zones.available.names
|
||||
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
|
||||
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
|
||||
enable_nat_gateway = true
|
||||
single_nat_gateway = true
|
||||
enable_dns_hostnames = true
|
||||
|
||||
public_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
|
||||
private_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/internal-elb" = "1"
|
||||
}
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,24 +0,0 @@
|
||||
output "cluster_endpoint" {
|
||||
description = "Endpoint for EKS control plane."
|
||||
value = module.eks.cluster_endpoint
|
||||
}
|
||||
|
||||
output "cluster_security_group_id" {
|
||||
description = "Security group ids attached to the cluster control plane."
|
||||
value = module.eks.cluster_security_group_id
|
||||
}
|
||||
|
||||
output "kubectl_config" {
|
||||
description = "kubectl config as generated by the module."
|
||||
value = module.eks.kubeconfig
|
||||
}
|
||||
|
||||
output "config_map_aws_auth" {
|
||||
description = "A kubernetes configuration to authenticate to this EKS cluster."
|
||||
value = module.eks.config_map_aws_auth
|
||||
}
|
||||
|
||||
output "fargate_profile_arns" {
|
||||
description = "Outputs from node groups"
|
||||
value = module.eks.fargate_profile_arns
|
||||
}
|
||||
@@ -1,26 +0,0 @@
|
||||
terraform {
|
||||
required_version = ">= 0.13.1"
|
||||
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
version = ">= 3.56"
|
||||
}
|
||||
local = {
|
||||
source = "hashicorp/local"
|
||||
version = ">= 1.4"
|
||||
}
|
||||
kubernetes = {
|
||||
source = "hashicorp/kubernetes"
|
||||
version = ">= 1.11.1"
|
||||
}
|
||||
random = {
|
||||
source = "hashicorp/random"
|
||||
version = ">= 2.1"
|
||||
}
|
||||
tls = {
|
||||
source = "hashicorp/tls"
|
||||
version = ">= 2.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
76
examples/fargate_profile/README.md
Normal file
@@ -0,0 +1,76 @@
|
||||
# AWS EKS Cluster with Fargate profiles
|
||||
|
||||
Configuration in this directory creates an AWS EKS cluster utilizing Fargate profiles.
|
||||
|
||||
## Usage
|
||||
|
||||
To run this example you need to execute:
|
||||
|
||||
```bash
|
||||
$ terraform init
|
||||
$ terraform plan
|
||||
$ terraform apply
|
||||
```
|
||||
|
||||
Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
|
||||
|
||||
<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
## Requirements
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
|
||||
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.64 |
|
||||
|
||||
## Providers
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.64 |
|
||||
|
||||
## Modules
|
||||
|
||||
| Name | Source | Version |
|
||||
|------|--------|---------|
|
||||
| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
|
||||
| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
|
||||
|
||||
## Resources
|
||||
|
||||
| Name | Type |
|
||||
|------|------|
|
||||
| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
|
||||
|
||||
## Inputs
|
||||
|
||||
No inputs.
|
||||
|
||||
## Outputs
|
||||
|
||||
| Name | Description |
|
||||
|------|-------------|
|
||||
| <a name="output_aws_auth_configmap_yaml"></a> [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
|
||||
| <a name="output_cloudwatch_log_group_arn"></a> [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
|
||||
| <a name="output_cloudwatch_log_group_name"></a> [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
|
||||
| <a name="output_cluster_addons"></a> [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
|
||||
| <a name="output_cluster_arn"></a> [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
|
||||
| <a name="output_cluster_certificate_authority_data"></a> [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
|
||||
| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
|
||||
| <a name="output_cluster_iam_role_arn"></a> [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
|
||||
| <a name="output_cluster_iam_role_name"></a> [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
|
||||
| <a name="output_cluster_iam_role_unique_id"></a> [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
|
||||
| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
|
||||
| <a name="output_cluster_identity_providers"></a> [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
|
||||
| <a name="output_cluster_oidc_issuer_url"></a> [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
|
||||
| <a name="output_cluster_platform_version"></a> [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
|
||||
| <a name="output_cluster_primary_security_group_id"></a> [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
|
||||
| <a name="output_cluster_security_group_arn"></a> [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
|
||||
| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
|
||||
| <a name="output_cluster_status"></a> [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
|
||||
| <a name="output_eks_managed_node_groups"></a> [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
|
||||
| <a name="output_fargate_profiles"></a> [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
|
||||
| <a name="output_node_security_group_arn"></a> [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
|
||||
| <a name="output_node_security_group_id"></a> [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
|
||||
| <a name="output_oidc_provider_arn"></a> [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
|
||||
| <a name="output_self_managed_node_groups"></a> [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
|
||||
<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
164
examples/fargate_profile/main.tf
Normal file
@@ -0,0 +1,164 @@
|
||||
provider "aws" {
|
||||
region = local.region
|
||||
}
|
||||
|
||||
locals {
|
||||
name = "ex-${replace(basename(path.cwd), "_", "-")}"
|
||||
cluster_version = "1.21"
|
||||
region = "eu-west-1"
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Module
|
||||
################################################################################
|
||||
|
||||
module "eks" {
|
||||
source = "../.."
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_version = local.cluster_version
|
||||
cluster_endpoint_private_access = true
|
||||
cluster_endpoint_public_access = true
|
||||
|
||||
cluster_addons = {
|
||||
# Note: https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html#fargate-gs-coredns
|
||||
coredns = {
|
||||
resolve_conflicts = "OVERWRITE"
|
||||
}
|
||||
kube-proxy = {}
|
||||
vpc-cni = {
|
||||
resolve_conflicts = "OVERWRITE"
|
||||
}
|
||||
}
|
||||
|
||||
cluster_encryption_config = [{
|
||||
provider_key_arn = aws_kms_key.eks.arn
|
||||
resources = ["secrets"]
|
||||
}]
|
||||
|
||||
vpc_id = module.vpc.vpc_id
|
||||
subnet_ids = module.vpc.private_subnets
|
||||
|
||||
enable_irsa = true
|
||||
|
||||
# You need a node group to schedule CoreDNS, which is critical for internal DNS to work correctly.
|
||||
# If you want to use only Fargate, you must follow the docs section `(Optional) Update CoreDNS`
|
||||
# available under https://docs.aws.amazon.com/eks/latest/userguide/fargate-getting-started.html (a commented sketch of that step follows after this module block)
|
||||
eks_managed_node_groups = {
|
||||
example = {
|
||||
desired_size = 1
|
||||
|
||||
instance_types = ["t3.large"]
|
||||
labels = {
|
||||
Example = "managed_node_groups"
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
tags = {
|
||||
ExtraTag = "example"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fargate_profiles = {
|
||||
default = {
|
||||
name = "default"
|
||||
selectors = [
|
||||
{
|
||||
namespace = "backend"
|
||||
labels = {
|
||||
Application = "backend"
|
||||
}
|
||||
},
|
||||
{
|
||||
namespace = "default"
|
||||
labels = {
|
||||
WorkerType = "fargate"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
tags = {
|
||||
Owner = "default"
|
||||
}
|
||||
|
||||
timeouts = {
|
||||
create = "20m"
|
||||
delete = "20m"
|
||||
}
|
||||
}
|
||||
|
||||
secondary = {
|
||||
name = "secondary"
|
||||
selectors = [
|
||||
{
|
||||
namespace = "default"
|
||||
labels = {
|
||||
Environment = "test"
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
# Using specific subnets instead of the subnets supplied for the cluster itself
|
||||
subnet_ids = [module.vpc.private_subnets[1]]
|
||||
|
||||
tags = {
|
||||
Owner = "secondary"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tags = local.tags
|
||||
}
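
# A hypothetical sketch of the `(Optional) Update CoreDNS` step referenced inside the
# module block above, for clusters that run CoreDNS on Fargate only. It reuses the
# null_resource/local-exec pattern from the eks_managed_node_group example; kubeconfig
# handling is omitted and the patch command is paraphrased from the AWS docs, so verify
# it against the current documentation before use:
#
# resource "null_resource" "coredns_on_fargate" {
#   provisioner "local-exec" {
#     command = <<-EOT
#       kubectl patch deployment coredns -n kube-system --type json \
#         -p='[{"op": "remove", "path": "/spec/template/metadata/annotations/eks.amazonaws.com~1compute-type"}]'
#     EOT
#   }
# }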
|
||||
|
||||
################################################################################
|
||||
# Supporting Resources
|
||||
################################################################################
|
||||
|
||||
module "vpc" {
|
||||
source = "terraform-aws-modules/vpc/aws"
|
||||
version = "~> 3.0"
|
||||
|
||||
name = local.name
|
||||
cidr = "10.0.0.0/16"
|
||||
|
||||
azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
|
||||
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
|
||||
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
|
||||
|
||||
enable_nat_gateway = true
|
||||
single_nat_gateway = true
|
||||
enable_dns_hostnames = true
|
||||
|
||||
enable_flow_log = true
|
||||
create_flow_log_cloudwatch_iam_role = true
|
||||
create_flow_log_cloudwatch_log_group = true
|
||||
|
||||
public_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/elb" = 1
|
||||
}
|
||||
|
||||
private_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/internal-elb" = 1
|
||||
}
|
||||
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
resource "aws_kms_key" "eks" {
|
||||
description = "EKS Secret Encryption Key"
|
||||
deletion_window_in_days = 7
|
||||
enable_key_rotation = true
|
||||
|
||||
tags = local.tags
|
||||
}
|
||||
167
examples/fargate_profile/outputs.tf
Normal file
@@ -0,0 +1,167 @@
|
||||
################################################################################
|
||||
# Cluster
|
||||
################################################################################
|
||||
|
||||
output "cluster_arn" {
|
||||
description = "The Amazon Resource Name (ARN) of the cluster"
|
||||
value = module.eks.cluster_arn
|
||||
}
|
||||
|
||||
output "cluster_certificate_authority_data" {
|
||||
description = "Base64 encoded certificate data required to communicate with the cluster"
|
||||
value = module.eks.cluster_certificate_authority_data
|
||||
}
|
||||
|
||||
output "cluster_endpoint" {
|
||||
description = "Endpoint for your Kubernetes API server"
|
||||
value = module.eks.cluster_endpoint
|
||||
}
|
||||
|
||||
output "cluster_id" {
|
||||
description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
|
||||
value = module.eks.cluster_id
|
||||
}
|
||||
|
||||
output "cluster_oidc_issuer_url" {
|
||||
description = "The URL on the EKS cluster for the OpenID Connect identity provider"
|
||||
value = module.eks.cluster_oidc_issuer_url
|
||||
}
|
||||
|
||||
output "cluster_platform_version" {
|
||||
description = "Platform version for the cluster"
|
||||
value = module.eks.cluster_platform_version
|
||||
}
|
||||
|
||||
output "cluster_status" {
|
||||
description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
|
||||
value = module.eks.cluster_status
|
||||
}
|
||||
|
||||
output "cluster_primary_security_group_id" {
|
||||
description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
|
||||
value = module.eks.cluster_primary_security_group_id
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Security Group
|
||||
################################################################################
|
||||
|
||||
output "cluster_security_group_arn" {
|
||||
description = "Amazon Resource Name (ARN) of the cluster security group"
|
||||
value = module.eks.cluster_security_group_arn
|
||||
}
|
||||
|
||||
output "cluster_security_group_id" {
|
||||
description = "ID of the cluster security group"
|
||||
value = module.eks.cluster_security_group_id
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Node Security Group
|
||||
################################################################################
|
||||
|
||||
output "node_security_group_arn" {
|
||||
description = "Amazon Resource Name (ARN) of the node shared security group"
|
||||
value = module.eks.node_security_group_arn
|
||||
}
|
||||
|
||||
output "node_security_group_id" {
|
||||
description = "ID of the node shared security group"
|
||||
value = module.eks.node_security_group_id
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# IRSA
|
||||
################################################################################
|
||||
|
||||
output "oidc_provider_arn" {
|
||||
description = "The ARN of the OIDC Provider if `enable_irsa = true`"
|
||||
value = module.eks.oidc_provider_arn
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# IAM Role
|
||||
################################################################################
|
||||
|
||||
output "cluster_iam_role_name" {
|
||||
description = "IAM role name of the EKS cluster"
|
||||
value = module.eks.cluster_iam_role_name
|
||||
}
|
||||
|
||||
output "cluster_iam_role_arn" {
|
||||
description = "IAM role ARN of the EKS cluster"
|
||||
value = module.eks.cluster_iam_role_arn
|
||||
}
|
||||
|
||||
output "cluster_iam_role_unique_id" {
|
||||
description = "Stable and unique string identifying the IAM role"
|
||||
value = module.eks.cluster_iam_role_unique_id
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Addons
|
||||
################################################################################
|
||||
|
||||
output "cluster_addons" {
|
||||
description = "Map of attribute maps for all EKS cluster addons enabled"
|
||||
value = module.eks.cluster_addons
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Identity Provider
|
||||
################################################################################
|
||||
|
||||
output "cluster_identity_providers" {
|
||||
description = "Map of attribute maps for all EKS identity providers enabled"
|
||||
value = module.eks.cluster_identity_providers
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# CloudWatch Log Group
|
||||
################################################################################
|
||||
|
||||
output "cloudwatch_log_group_name" {
|
||||
description = "Name of cloudwatch log group created"
|
||||
value = module.eks.cloudwatch_log_group_name
|
||||
}
|
||||
|
||||
output "cloudwatch_log_group_arn" {
|
||||
description = "Arn of cloudwatch log group created"
|
||||
value = module.eks.cloudwatch_log_group_arn
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Fargate Profile
|
||||
################################################################################
|
||||
|
||||
output "fargate_profiles" {
|
||||
description = "Map of attribute maps for all EKS Fargate Profiles created"
|
||||
value = module.eks.fargate_profiles
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Managed Node Group
|
||||
################################################################################
|
||||
|
||||
output "eks_managed_node_groups" {
|
||||
description = "Map of attribute maps for all EKS managed node groups created"
|
||||
value = module.eks.eks_managed_node_groups
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Self Managed Node Group
|
||||
################################################################################
|
||||
|
||||
output "self_managed_node_groups" {
|
||||
description = "Map of attribute maps for all self managed node groups created"
|
||||
value = module.eks.self_managed_node_groups
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Additional
|
||||
################################################################################
|
||||
|
||||
output "aws_auth_configmap_yaml" {
|
||||
description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
|
||||
value = module.eks.aws_auth_configmap_yaml
|
||||
}
|
||||
@@ -4,7 +4,7 @@ terraform {
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
version = ">= 3.56"
|
||||
version = ">= 3.64"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,82 +0,0 @@
|
||||
# Instance refresh example
|
||||
|
||||
This is EKS example using [instance refresh](https://aws.amazon.com/blogs/compute/introducing-instance-refresh-for-ec2-auto-scaling/) feature for worker groups.
|
||||
|
||||
See [the official documentation](https://docs.aws.amazon.com/autoscaling/ec2/userguide/asg-instance-refresh.html) for more details.
|
||||
|
||||
## Usage
|
||||
|
||||
To run this example you need to execute:
|
||||
|
||||
```bash
|
||||
$ terraform init
|
||||
$ terraform plan
|
||||
$ terraform apply
|
||||
```
|
||||
|
||||
Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
|
||||
|
||||
<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
## Requirements
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
|
||||
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.56 |
|
||||
| <a name="requirement_helm"></a> [helm](#requirement\_helm) | >= 2.0 |
|
||||
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
|
||||
| <a name="requirement_local"></a> [local](#requirement\_local) | >= 1.4 |
|
||||
| <a name="requirement_random"></a> [random](#requirement\_random) | >= 2.1 |
|
||||
|
||||
## Providers
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.56 |
|
||||
| <a name="provider_helm"></a> [helm](#provider\_helm) | >= 2.0 |
|
||||
| <a name="provider_random"></a> [random](#provider\_random) | >= 2.1 |
|
||||
|
||||
## Modules
|
||||
|
||||
| Name | Source | Version |
|
||||
|------|--------|---------|
|
||||
| <a name="module_aws_node_termination_handler_role"></a> [aws\_node\_termination\_handler\_role](#module\_aws\_node\_termination\_handler\_role) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | 4.1.0 |
|
||||
| <a name="module_aws_node_termination_handler_sqs"></a> [aws\_node\_termination\_handler\_sqs](#module\_aws\_node\_termination\_handler\_sqs) | terraform-aws-modules/sqs/aws | ~> 3.0.0 |
|
||||
| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
|
||||
| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
|
||||
|
||||
## Resources
|
||||
|
||||
| Name | Type |
|
||||
|------|------|
|
||||
| [aws_autoscaling_lifecycle_hook.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_lifecycle_hook) | resource |
|
||||
| [aws_cloudwatch_event_rule.aws_node_termination_handler_asg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource |
|
||||
| [aws_cloudwatch_event_rule.aws_node_termination_handler_spot](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource |
|
||||
| [aws_cloudwatch_event_target.aws_node_termination_handler_asg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource |
|
||||
| [aws_cloudwatch_event_target.aws_node_termination_handler_spot](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource |
|
||||
| [aws_iam_policy.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
|
||||
| [helm_release.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
|
||||
| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
|
||||
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
|
||||
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
|
||||
| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
|
||||
| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
|
||||
| [aws_iam_policy_document.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
|
||||
| [aws_iam_policy_document.aws_node_termination_handler_events](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
|
||||
| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
|
||||
|
||||
## Inputs
|
||||
|
||||
No inputs.
|
||||
|
||||
## Outputs
|
||||
|
||||
| Name | Description |
|
||||
|------|-------------|
|
||||
| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
|
||||
| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
|
||||
| <a name="output_config_map_aws_auth"></a> [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
|
||||
| <a name="output_kubectl_config"></a> [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
|
||||
| <a name="output_sqs_queue_asg_notification_arn"></a> [sqs\_queue\_asg\_notification\_arn](#output\_sqs\_queue\_asg\_notification\_arn) | SQS queue ASG notification ARN |
|
||||
| <a name="output_sqs_queue_asg_notification_url"></a> [sqs\_queue\_asg\_notification\_url](#output\_sqs\_queue\_asg\_notification\_url) | SQS queue ASG notification URL |
|
||||
<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
@@ -1,306 +0,0 @@
|
||||
provider "aws" {
|
||||
region = local.region
|
||||
}
|
||||
|
||||
locals {
|
||||
name = "instance_refresh-${random_string.suffix.result}"
|
||||
cluster_version = "1.20"
|
||||
region = "eu-west-1"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Module
|
||||
################################################################################
|
||||
|
||||
# Based on the official aws-node-termination-handler setup guide at https://github.com/aws/aws-node-termination-handler#infrastructure-setup
|
||||
|
||||
provider "helm" {
|
||||
kubernetes {
|
||||
host = data.aws_eks_cluster.cluster.endpoint
|
||||
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
|
||||
token = data.aws_eks_cluster_auth.cluster.token
|
||||
}
|
||||
}
|
||||
|
||||
data "aws_caller_identity" "current" {}
|
||||
|
||||
data "aws_iam_policy_document" "aws_node_termination_handler" {
|
||||
statement {
|
||||
effect = "Allow"
|
||||
actions = [
|
||||
"ec2:DescribeInstances",
|
||||
"autoscaling:DescribeAutoScalingInstances",
|
||||
"autoscaling:DescribeTags",
|
||||
]
|
||||
resources = [
|
||||
"*",
|
||||
]
|
||||
}
|
||||
statement {
|
||||
effect = "Allow"
|
||||
actions = [
|
||||
"autoscaling:CompleteLifecycleAction",
|
||||
]
|
||||
resources = module.eks.workers_asg_arns
|
||||
}
|
||||
statement {
|
||||
effect = "Allow"
|
||||
actions = [
|
||||
"sqs:DeleteMessage",
|
||||
"sqs:ReceiveMessage"
|
||||
]
|
||||
resources = [
|
||||
module.aws_node_termination_handler_sqs.sqs_queue_arn
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_iam_policy" "aws_node_termination_handler" {
|
||||
name = "${local.name}-aws-node-termination-handler"
|
||||
policy = data.aws_iam_policy_document.aws_node_termination_handler.json
|
||||
}
|
||||
|
||||
data "aws_region" "current" {}
|
||||
|
||||
data "aws_iam_policy_document" "aws_node_termination_handler_events" {
|
||||
statement {
|
||||
effect = "Allow"
|
||||
principals {
|
||||
type = "Service"
|
||||
identifiers = [
|
||||
"events.amazonaws.com",
|
||||
"sqs.amazonaws.com",
|
||||
]
|
||||
}
|
||||
actions = [
|
||||
"sqs:SendMessage",
|
||||
]
|
||||
resources = [
|
||||
"arn:aws:sqs:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:${local.name}",
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
module "aws_node_termination_handler_sqs" {
|
||||
source = "terraform-aws-modules/sqs/aws"
|
||||
version = "~> 3.0.0"
|
||||
name = local.name
|
||||
message_retention_seconds = 300
|
||||
policy = data.aws_iam_policy_document.aws_node_termination_handler_events.json
|
||||
}
|
||||
|
||||
resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_asg" {
|
||||
name = "${local.name}-asg-termination"
|
||||
description = "Node termination event rule"
|
||||
event_pattern = jsonencode(
|
||||
{
|
||||
"source" : [
|
||||
"aws.autoscaling"
|
||||
],
|
||||
"detail-type" : [
|
||||
"EC2 Instance-terminate Lifecycle Action"
|
||||
]
|
||||
"resources" : module.eks.workers_asg_arns
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
resource "aws_cloudwatch_event_target" "aws_node_termination_handler_asg" {
|
||||
target_id = "${local.name}-asg-termination"
|
||||
rule = aws_cloudwatch_event_rule.aws_node_termination_handler_asg.name
|
||||
arn = module.aws_node_termination_handler_sqs.sqs_queue_arn
|
||||
}
|
||||
|
||||
resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_spot" {
|
||||
name = "${local.name}-spot-termination"
|
||||
description = "Node termination event rule"
|
||||
event_pattern = jsonencode(
|
||||
{
|
||||
"source" : [
|
||||
"aws.ec2"
|
||||
],
|
||||
"detail-type" : [
|
||||
"EC2 Spot Instance Interruption Warning"
|
||||
]
|
||||
"resources" : module.eks.workers_asg_arns
|
||||
}
|
||||
)
|
||||
}
|
||||
|
||||
resource "aws_cloudwatch_event_target" "aws_node_termination_handler_spot" {
|
||||
target_id = "${local.name}-spot-termination"
|
||||
rule = aws_cloudwatch_event_rule.aws_node_termination_handler_spot.name
|
||||
arn = module.aws_node_termination_handler_sqs.sqs_queue_arn
|
||||
}
|
||||
|
||||
module "aws_node_termination_handler_role" {
|
||||
source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
|
||||
version = "4.1.0"
|
||||
create_role = true
|
||||
role_description = "IRSA role for ANTH, cluster ${local.name}"
|
||||
role_name_prefix = local.name
|
||||
provider_url = replace(module.eks.cluster_oidc_issuer_url, "https://", "")
|
||||
role_policy_arns = [aws_iam_policy.aws_node_termination_handler.arn]
|
||||
oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:aws-node-termination-handler"]
|
||||
}
|
||||
|
||||
resource "helm_release" "aws_node_termination_handler" {
|
||||
depends_on = [
|
||||
module.eks
|
||||
]
|
||||
|
||||
name = "aws-node-termination-handler"
|
||||
namespace = "kube-system"
|
||||
repository = "https://aws.github.io/eks-charts"
|
||||
chart = "aws-node-termination-handler"
|
||||
version = "0.15.0"
|
||||
create_namespace = true
|
||||
|
||||
set {
|
||||
name = "awsRegion"
|
||||
value = data.aws_region.current.name
|
||||
}
|
||||
set {
|
||||
name = "serviceAccount.name"
|
||||
value = "aws-node-termination-handler"
|
||||
}
|
||||
set {
|
||||
name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
|
||||
value = module.aws_node_termination_handler_role.iam_role_arn
|
||||
type = "string"
|
||||
}
|
||||
set {
|
||||
name = "enableSqsTerminationDraining"
|
||||
value = "true"
|
||||
}
|
||||
set {
|
||||
name = "enableSpotInterruptionDraining"
|
||||
value = "true"
|
||||
}
|
||||
set {
|
||||
name = "queueURL"
|
||||
value = module.aws_node_termination_handler_sqs.sqs_queue_id
|
||||
}
|
||||
set {
|
||||
name = "logLevel"
|
||||
value = "debug"
|
||||
}
|
||||
}
|
||||
|
||||
# Creating the lifecycle-hook outside of the ASG resource's `initial_lifecycle_hook`
|
||||
# ensures that node termination does not require the lifecycle action to be completed,
|
||||
# and thus allows the ASG to be destroyed cleanly.
|
||||
resource "aws_autoscaling_lifecycle_hook" "aws_node_termination_handler" {
|
||||
count = length(module.eks.workers_asg_names)
|
||||
name = "aws-node-termination-handler"
|
||||
autoscaling_group_name = module.eks.workers_asg_names[count.index]
|
||||
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
|
||||
heartbeat_timeout = 300
|
||||
default_result = "CONTINUE"
|
||||
}
|
||||
|
||||
module "eks" {
|
||||
source = "../.."
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_version = local.cluster_version
|
||||
|
||||
vpc_id = module.vpc.vpc_id
|
||||
subnets = module.vpc.private_subnets
|
||||
|
||||
cluster_endpoint_private_access = true
|
||||
cluster_endpoint_public_access = true
|
||||
|
||||
enable_irsa = true
|
||||
worker_groups_launch_template = [
|
||||
{
|
||||
name = "refresh"
|
||||
asg_max_size = 2
|
||||
asg_desired_capacity = 2
|
||||
instance_refresh_enabled = true
|
||||
instance_refresh_instance_warmup = 60
|
||||
public_ip = true
|
||||
metadata_http_put_response_hop_limit = 3
|
||||
update_default_version = true
|
||||
instance_refresh_triggers = ["tag"]
|
||||
tags = [
|
||||
{
|
||||
key = "aws-node-termination-handler/managed"
|
||||
value = ""
|
||||
propagate_at_launch = true
|
||||
},
|
||||
{
|
||||
key = "foo"
|
||||
value = "buzz"
|
||||
propagate_at_launch = true
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Kubernetes provider configuration
|
||||
################################################################################
|
||||
|
||||
data "aws_eks_cluster" "cluster" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
data "aws_eks_cluster_auth" "cluster" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
provider "kubernetes" {
|
||||
host = data.aws_eks_cluster.cluster.endpoint
|
||||
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
|
||||
token = data.aws_eks_cluster_auth.cluster.token
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Supporting Resources
|
||||
################################################################################
|
||||
|
||||
data "aws_availability_zones" "available" {
|
||||
}
|
||||
|
||||
resource "random_string" "suffix" {
|
||||
length = 8
|
||||
special = false
|
||||
}
|
||||
|
||||
module "vpc" {
|
||||
source = "terraform-aws-modules/vpc/aws"
|
||||
version = "~> 3.0"
|
||||
|
||||
name = local.name
|
||||
cidr = "10.0.0.0/16"
|
||||
azs = data.aws_availability_zones.available.names
|
||||
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
|
||||
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
|
||||
enable_nat_gateway = true
|
||||
single_nat_gateway = true
|
||||
enable_dns_hostnames = true
|
||||
|
||||
public_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
|
||||
private_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/internal-elb" = "1"
|
||||
}
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
@@ -1,29 +0,0 @@
output "cluster_endpoint" {
  description = "Endpoint for EKS control plane."
  value       = module.eks.cluster_endpoint
}

output "cluster_security_group_id" {
  description = "Security group ids attached to the cluster control plane."
  value       = module.eks.cluster_security_group_id
}

output "kubectl_config" {
  description = "kubectl config as generated by the module."
  value       = module.eks.kubeconfig
}

output "config_map_aws_auth" {
  description = "A kubernetes configuration to authenticate to this EKS cluster."
  value       = module.eks.config_map_aws_auth
}

output "sqs_queue_asg_notification_arn" {
  description = "SQS queue ASG notification ARN"
  value       = module.aws_node_termination_handler_sqs.sqs_queue_arn
}

output "sqs_queue_asg_notification_url" {
  description = "SQS queue ASG notification URL"
  value       = module.aws_node_termination_handler_sqs.sqs_queue_id
}
@@ -1,26 +0,0 @@
terraform {
  required_version = ">= 0.13.1"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 3.56"
    }
    local = {
      source  = "hashicorp/local"
      version = ">= 1.4"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 1.11.1"
    }
    random = {
      source  = "hashicorp/random"
      version = ">= 2.1"
    }
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.0"
    }
  }
}
@@ -1,70 +0,0 @@
|
||||
# IAM Roles for Service Accounts
|
||||
|
||||
This example shows how to create an IAM role to be used for a Kubernetes `ServiceAccount`. It will create a policy and role to be used by the [cluster-autoscaler](https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler) using the [public Helm chart](https://github.com/kubernetes/autoscaler/tree/master/charts/cluster-autoscaler).
|
||||
|
||||
See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) for more details.
|
||||
|
||||
## Usage
|
||||
|
||||
To run this example you need to execute:
|
||||
|
||||
```bash
|
||||
$ terraform init
|
||||
$ terraform plan
|
||||
$ terraform apply
|
||||
```
|
||||
|
||||
Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
|
||||
|
||||
<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
## Requirements
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
|
||||
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.56 |
|
||||
| <a name="requirement_helm"></a> [helm](#requirement\_helm) | >= 2.0 |
|
||||
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
|
||||
| <a name="requirement_local"></a> [local](#requirement\_local) | >= 1.4 |
|
||||
| <a name="requirement_random"></a> [random](#requirement\_random) | >= 2.1 |
|
||||
|
||||
## Providers
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.56 |
|
||||
| <a name="provider_helm"></a> [helm](#provider\_helm) | >= 2.0 |
|
||||
| <a name="provider_random"></a> [random](#provider\_random) | >= 2.1 |
|
||||
|
||||
## Modules
|
||||
|
||||
| Name | Source | Version |
|
||||
|------|--------|---------|
|
||||
| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
|
||||
| <a name="module_iam_assumable_role_admin"></a> [iam\_assumable\_role\_admin](#module\_iam\_assumable\_role\_admin) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | ~> 4.0 |
|
||||
| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
|
||||
|
||||
## Resources
|
||||
|
||||
| Name | Type |
|
||||
|------|------|
|
||||
| [aws_iam_policy.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
|
||||
| [helm_release.cluster-autoscaler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
|
||||
| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
|
||||
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
|
||||
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
|
||||
| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
|
||||
| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
|
||||
| [aws_iam_policy_document.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
|
||||
| [aws_region.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/region) | data source |
|
||||
|
||||
## Inputs
|
||||
|
||||
No inputs.
|
||||
|
||||
## Outputs
|
||||
|
||||
| Name | Description |
|
||||
|------|-------------|
|
||||
| <a name="output_aws_account_id"></a> [aws\_account\_id](#output\_aws\_account\_id) | IAM AWS account id |
|
||||
<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
@@ -1,114 +0,0 @@
|
||||
data "aws_caller_identity" "current" {}
|
||||
|
||||
data "aws_region" "current" {}
|
||||
|
||||
locals {
|
||||
k8s_service_account_namespace = "kube-system"
|
||||
k8s_service_account_name = "cluster-autoscaler-aws"
|
||||
}
|
||||
|
||||
provider "helm" {
|
||||
kubernetes {
|
||||
host = data.aws_eks_cluster.cluster.endpoint
|
||||
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
|
||||
token = data.aws_eks_cluster_auth.cluster.token
|
||||
}
|
||||
}
|
||||
|
||||
resource "helm_release" "cluster-autoscaler" {
|
||||
depends_on = [
|
||||
module.eks
|
||||
]
|
||||
|
||||
name = "cluster-autoscaler"
|
||||
namespace = local.k8s_service_account_namespace
|
||||
repository = "https://kubernetes.github.io/autoscaler"
|
||||
chart = "cluster-autoscaler"
|
||||
version = "9.10.7"
|
||||
create_namespace = false
|
||||
|
||||
set {
|
||||
name = "awsRegion"
|
||||
value = data.aws_region.current.name
|
||||
}
|
||||
set {
|
||||
name = "rbac.serviceAccount.name"
|
||||
value = local.k8s_service_account_name
|
||||
}
|
||||
set {
|
||||
name = "rbac.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
|
||||
value = module.iam_assumable_role_admin.iam_role_arn
|
||||
type = "string"
|
||||
}
|
||||
set {
|
||||
name = "autoDiscovery.clusterName"
|
||||
value = local.name
|
||||
}
|
||||
set {
|
||||
name = "autoDiscovery.enabled"
|
||||
value = "true"
|
||||
}
|
||||
set {
|
||||
name = "rbac.create"
|
||||
value = "true"
|
||||
}
|
||||
}
|
||||
|
||||
module "iam_assumable_role_admin" {
|
||||
source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
|
||||
version = "~> 4.0"
|
||||
|
||||
create_role = true
|
||||
role_name = "cluster-autoscaler"
|
||||
provider_url = replace(module.eks.cluster_oidc_issuer_url, "https://", "")
|
||||
role_policy_arns = [aws_iam_policy.cluster_autoscaler.arn]
|
||||
oidc_fully_qualified_subjects = ["system:serviceaccount:${local.k8s_service_account_namespace}:${local.k8s_service_account_name}"]
|
||||
}
|
||||
|
||||
resource "aws_iam_policy" "cluster_autoscaler" {
|
||||
name_prefix = "cluster-autoscaler"
|
||||
description = "EKS cluster-autoscaler policy for cluster ${module.eks.cluster_id}"
|
||||
policy = data.aws_iam_policy_document.cluster_autoscaler.json
|
||||
}
|
||||
|
||||
data "aws_iam_policy_document" "cluster_autoscaler" {
|
||||
statement {
|
||||
sid = "clusterAutoscalerAll"
|
||||
effect = "Allow"
|
||||
|
||||
actions = [
|
||||
"autoscaling:DescribeAutoScalingGroups",
|
||||
"autoscaling:DescribeAutoScalingInstances",
|
||||
"autoscaling:DescribeLaunchConfigurations",
|
||||
"autoscaling:DescribeTags",
|
||||
"ec2:DescribeLaunchTemplateVersions",
|
||||
]
|
||||
|
||||
resources = ["*"]
|
||||
}
|
||||
|
||||
statement {
|
||||
sid = "clusterAutoscalerOwn"
|
||||
effect = "Allow"
|
||||
|
||||
actions = [
|
||||
"autoscaling:SetDesiredCapacity",
|
||||
"autoscaling:TerminateInstanceInAutoScalingGroup",
|
||||
"autoscaling:UpdateAutoScalingGroup",
|
||||
]
|
||||
|
||||
resources = ["*"]
|
||||
|
||||
condition {
|
||||
test = "StringEquals"
|
||||
variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/${module.eks.cluster_id}"
|
||||
values = ["owned"]
|
||||
}
|
||||
|
||||
condition {
|
||||
test = "StringEquals"
|
||||
variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled"
|
||||
values = ["true"]
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,115 +0,0 @@
|
||||
provider "aws" {
|
||||
region = local.region
|
||||
}
|
||||
|
||||
locals {
|
||||
name = "irsa-${random_string.suffix.result}"
|
||||
cluster_version = "1.20"
|
||||
region = "eu-west-1"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Module
|
||||
################################################################################
|
||||
|
||||
module "eks" {
|
||||
source = "../.."
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_version = local.cluster_version
|
||||
|
||||
vpc_id = module.vpc.vpc_id
|
||||
subnets = module.vpc.private_subnets
|
||||
|
||||
cluster_endpoint_private_access = true
|
||||
cluster_endpoint_public_access = true
|
||||
|
||||
enable_irsa = true
|
||||
|
||||
worker_groups = [
|
||||
{
|
||||
name = "worker-group-1"
|
||||
instance_type = "t3.medium"
|
||||
asg_desired_capacity = 1
|
||||
asg_max_size = 4
|
||||
tags = [
|
||||
{
|
||||
"key" = "k8s.io/cluster-autoscaler/enabled"
|
||||
"propagate_at_launch" = "false"
|
||||
"value" = "true"
|
||||
},
|
||||
{
|
||||
"key" = "k8s.io/cluster-autoscaler/${local.name}"
|
||||
"propagate_at_launch" = "false"
|
||||
"value" = "owned"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Kubernetes provider configuration
|
||||
################################################################################
|
||||
|
||||
data "aws_eks_cluster" "cluster" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
data "aws_eks_cluster_auth" "cluster" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
provider "kubernetes" {
|
||||
host = data.aws_eks_cluster.cluster.endpoint
|
||||
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
|
||||
token = data.aws_eks_cluster_auth.cluster.token
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Supporting Resources
|
||||
################################################################################
|
||||
|
||||
data "aws_availability_zones" "available" {
|
||||
}
|
||||
|
||||
resource "random_string" "suffix" {
|
||||
length = 8
|
||||
special = false
|
||||
}
|
||||
|
||||
module "vpc" {
|
||||
source = "terraform-aws-modules/vpc/aws"
|
||||
version = "~> 3.0"
|
||||
|
||||
name = local.name
|
||||
cidr = "10.0.0.0/16"
|
||||
azs = data.aws_availability_zones.available.names
|
||||
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
|
||||
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
|
||||
enable_nat_gateway = true
|
||||
single_nat_gateway = true
|
||||
enable_dns_hostnames = true
|
||||
|
||||
public_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
|
||||
private_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/internal-elb" = "1"
|
||||
}
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +0,0 @@
output "aws_account_id" {
  description = "IAM AWS account id"
  value       = data.aws_caller_identity.current.account_id
}
@@ -1,26 +0,0 @@
terraform {
  required_version = ">= 0.13.1"

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = ">= 3.56"
    }
    local = {
      source  = "hashicorp/local"
      version = ">= 1.4"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = ">= 1.11.1"
    }
    random = {
      source  = "hashicorp/random"
      version = ">= 2.1"
    }
    helm = {
      source  = "hashicorp/helm"
      version = ">= 2.0"
    }
  }
}
102
examples/irsa_autoscale_refresh/README.md
Normal file
@@ -0,0 +1,102 @@
# IRSA, Cluster Autoscaler, and Instance Refresh example

Configuration in this directory creates an AWS EKS cluster with:
- [IAM Roles for Service Accounts (IRSA)](https://docs.aws.amazon.com/eks/latest/userguide/iam-roles-for-service-accounts.html) enabled
- [Cluster Autoscaler](https://github.com/kubernetes/autoscaler/blob/master/cluster-autoscaler/cloudprovider/aws/README.md) provisioned via a Helm Chart manifest
- [Instance Refresh](https://aws.amazon.com/blogs/compute/introducing-instance-refresh-for-ec2-auto-scaling/) feature for self managed node groups
- [Node Termination Handler](https://github.com/aws/aws-node-termination-handler) provisioned via a Helm Chart manifest

## Usage

To run this example you need to execute:

```bash
$ terraform init
$ terraform plan
$ terraform apply
```

Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.

<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
## Requirements

| Name | Version |
|------|---------|
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.64 |
| <a name="requirement_helm"></a> [helm](#requirement\_helm) | >= 2.0 |
| <a name="requirement_null"></a> [null](#requirement\_null) | >= 3.0 |

## Providers

| Name | Version |
|------|---------|
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.64 |
| <a name="provider_helm"></a> [helm](#provider\_helm) | >= 2.0 |
| <a name="provider_null"></a> [null](#provider\_null) | >= 3.0 |

## Modules

| Name | Source | Version |
|------|--------|---------|
| <a name="module_aws_node_termination_handler_role"></a> [aws\_node\_termination\_handler\_role](#module\_aws\_node\_termination\_handler\_role) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | ~> 4.0 |
| <a name="module_aws_node_termination_handler_sqs"></a> [aws\_node\_termination\_handler\_sqs](#module\_aws\_node\_termination\_handler\_sqs) | terraform-aws-modules/sqs/aws | ~> 3.0 |
| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
| <a name="module_iam_assumable_role_cluster_autoscaler"></a> [iam\_assumable\_role\_cluster\_autoscaler](#module\_iam\_assumable\_role\_cluster\_autoscaler) | terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc | ~> 4.0 |
| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |

## Resources

| Name | Type |
|------|------|
| [aws_autoscaling_lifecycle_hook.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/autoscaling_lifecycle_hook) | resource |
| [aws_cloudwatch_event_rule.aws_node_termination_handler_asg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource |
| [aws_cloudwatch_event_rule.aws_node_termination_handler_spot](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_rule) | resource |
| [aws_cloudwatch_event_target.aws_node_termination_handler_asg](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource |
| [aws_cloudwatch_event_target.aws_node_termination_handler_spot](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/cloudwatch_event_target) | resource |
| [aws_iam_policy.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [aws_iam_policy.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_policy) | resource |
| [helm_release.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
| [helm_release.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/helm/latest/docs/resources/release) | resource |
| [null_resource.apply](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
| [aws_iam_policy_document.aws_node_termination_handler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.aws_node_termination_handler_sqs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
| [aws_iam_policy_document.cluster_autoscaler](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |

## Inputs

No inputs.

## Outputs

| Name | Description |
|------|-------------|
| <a name="output_aws_auth_configmap_yaml"></a> [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
| <a name="output_cloudwatch_log_group_arn"></a> [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
| <a name="output_cloudwatch_log_group_name"></a> [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
| <a name="output_cluster_addons"></a> [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
| <a name="output_cluster_arn"></a> [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
| <a name="output_cluster_certificate_authority_data"></a> [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
| <a name="output_cluster_iam_role_arn"></a> [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
| <a name="output_cluster_iam_role_name"></a> [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
| <a name="output_cluster_iam_role_unique_id"></a> [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
| <a name="output_cluster_identity_providers"></a> [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
| <a name="output_cluster_oidc_issuer_url"></a> [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
| <a name="output_cluster_platform_version"></a> [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
| <a name="output_cluster_primary_security_group_id"></a> [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
| <a name="output_cluster_security_group_arn"></a> [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
| <a name="output_cluster_status"></a> [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
| <a name="output_eks_managed_node_groups"></a> [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
| <a name="output_fargate_profiles"></a> [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
| <a name="output_node_security_group_arn"></a> [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
| <a name="output_node_security_group_id"></a> [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
| <a name="output_oidc_provider_arn"></a> [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
| <a name="output_self_managed_node_groups"></a> [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
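The `aws_auth_configmap_yaml` output listed above is what this example later patches into the cluster with `kubectl` (see `main.tf` below). As a minimal sketch of an alternative, assuming the `hashicorp/local` provider were added to the configuration (it is not part of this example), the rendered configmap could instead be written to disk for a manual apply:

```hcl
# Sketch only (assumes the hashicorp/local provider): write the module's
# rendered aws-auth configmap to a local file so it can be applied out of
# band with `kubectl apply -f aws-auth.yaml`.
resource "local_file" "aws_auth" {
  content  = module.eks.aws_auth_configmap_yaml
  filename = "${path.module}/aws-auth.yaml"
}
```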
294
examples/irsa_autoscale_refresh/charts.tf
Normal file
@@ -0,0 +1,294 @@
|
||||
provider "helm" {
|
||||
kubernetes {
|
||||
host = module.eks.cluster_endpoint
|
||||
cluster_ca_certificate = base64decode(module.eks.cluster_certificate_authority_data)
|
||||
token = data.aws_eks_cluster_auth.cluster.token
|
||||
}
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Cluster Autoscaler
|
||||
# Based on the official docs at
|
||||
# https://github.com/kubernetes/autoscaler/tree/master/cluster-autoscaler
|
||||
################################################################################
|
||||
|
||||
resource "helm_release" "cluster_autoscaler" {
|
||||
name = "cluster-autoscaler"
|
||||
namespace = "kube-system"
|
||||
repository = "https://kubernetes.github.io/autoscaler"
|
||||
chart = "cluster-autoscaler"
|
||||
version = "9.10.8"
|
||||
create_namespace = false
|
||||
|
||||
set {
|
||||
name = "awsRegion"
|
||||
value = local.region
|
||||
}
|
||||
|
||||
set {
|
||||
name = "rbac.serviceAccount.name"
|
||||
value = "cluster-autoscaler-aws"
|
||||
}
|
||||
|
||||
set {
|
||||
name = "rbac.serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
|
||||
value = module.iam_assumable_role_cluster_autoscaler.iam_role_arn
|
||||
type = "string"
|
||||
}
|
||||
|
||||
set {
|
||||
name = "autoDiscovery.clusterName"
|
||||
value = local.name
|
||||
}
|
||||
|
||||
set {
|
||||
name = "autoDiscovery.enabled"
|
||||
value = "true"
|
||||
}
|
||||
|
||||
set {
|
||||
name = "rbac.create"
|
||||
value = "true"
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
module.eks.cluster_id,
|
||||
null_resource.apply,
|
||||
]
|
||||
}
|
||||
|
||||
module "iam_assumable_role_cluster_autoscaler" {
|
||||
source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
|
||||
version = "~> 4.0"
|
||||
|
||||
create_role = true
|
||||
role_name_prefix = "cluster-autoscaler"
|
||||
role_description = "IRSA role for cluster autoscaler"
|
||||
|
||||
provider_url = replace(module.eks.cluster_oidc_issuer_url, "https://", "")
|
||||
role_policy_arns = [aws_iam_policy.cluster_autoscaler.arn]
|
||||
oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:cluster-autoscaler-aws"]
|
||||
oidc_fully_qualified_audiences = ["sts.amazonaws.com"]
|
||||
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
resource "aws_iam_policy" "cluster_autoscaler" {
|
||||
name = "KarpenterControllerPolicy-refresh"
|
||||
policy = data.aws_iam_policy_document.cluster_autoscaler.json
|
||||
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
data "aws_iam_policy_document" "cluster_autoscaler" {
|
||||
statement {
|
||||
sid = "clusterAutoscalerAll"
|
||||
actions = [
|
||||
"autoscaling:DescribeAutoScalingGroups",
|
||||
"autoscaling:DescribeAutoScalingInstances",
|
||||
"autoscaling:DescribeLaunchConfigurations",
|
||||
"autoscaling:DescribeTags",
|
||||
"ec2:DescribeLaunchTemplateVersions",
|
||||
]
|
||||
resources = ["*"]
|
||||
}
|
||||
|
||||
statement {
|
||||
sid = "clusterAutoscalerOwn"
|
||||
actions = [
|
||||
"autoscaling:SetDesiredCapacity",
|
||||
"autoscaling:TerminateInstanceInAutoScalingGroup",
|
||||
"autoscaling:UpdateAutoScalingGroup",
|
||||
]
|
||||
resources = ["*"]
|
||||
|
||||
condition {
|
||||
test = "StringEquals"
|
||||
variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/${module.eks.cluster_id}"
|
||||
values = ["owned"]
|
||||
}
|
||||
|
||||
condition {
|
||||
test = "StringEquals"
|
||||
variable = "autoscaling:ResourceTag/k8s.io/cluster-autoscaler/enabled"
|
||||
values = ["true"]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Node Termination Handler
|
||||
# Based on the official docs at
|
||||
# https://github.com/aws/aws-node-termination-handler
|
||||
################################################################################
|
||||
|
||||
resource "helm_release" "aws_node_termination_handler" {
|
||||
name = "aws-node-termination-handler"
|
||||
namespace = "kube-system"
|
||||
repository = "https://aws.github.io/eks-charts"
|
||||
chart = "aws-node-termination-handler"
|
||||
version = "0.16.0"
|
||||
create_namespace = false
|
||||
|
||||
set {
|
||||
name = "awsRegion"
|
||||
value = local.region
|
||||
}
|
||||
|
||||
set {
|
||||
name = "serviceAccount.name"
|
||||
value = "aws-node-termination-handler"
|
||||
}
|
||||
|
||||
set {
|
||||
name = "serviceAccount.annotations.eks\\.amazonaws\\.com/role-arn"
|
||||
value = module.aws_node_termination_handler_role.iam_role_arn
|
||||
type = "string"
|
||||
}
|
||||
|
||||
set {
|
||||
name = "enableSqsTerminationDraining"
|
||||
value = "true"
|
||||
}
|
||||
|
||||
set {
|
||||
name = "enableSpotInterruptionDraining"
|
||||
value = "true"
|
||||
}
|
||||
|
||||
set {
|
||||
name = "queueURL"
|
||||
value = module.aws_node_termination_handler_sqs.sqs_queue_id
|
||||
}
|
||||
|
||||
set {
|
||||
name = "logLevel"
|
||||
value = "debug"
|
||||
}
|
||||
|
||||
depends_on = [
|
||||
module.eks.cluster_id,
|
||||
null_resource.apply,
|
||||
]
|
||||
}
|
||||
|
||||
module "aws_node_termination_handler_role" {
|
||||
source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc"
|
||||
version = "~> 4.0"
|
||||
|
||||
create_role = true
|
||||
role_name_prefix = "node-termination-handler"
|
||||
role_description = "IRSA role for node termination handler"
|
||||
|
||||
provider_url = replace(module.eks.cluster_oidc_issuer_url, "https://", "")
|
||||
role_policy_arns = [aws_iam_policy.aws_node_termination_handler.arn]
|
||||
oidc_fully_qualified_subjects = ["system:serviceaccount:kube-system:aws-node-termination-handler"]
|
||||
oidc_fully_qualified_audiences = ["sts.amazonaws.com"]
|
||||
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
resource "aws_iam_policy" "aws_node_termination_handler" {
|
||||
name = "${local.name}-aws-node-termination-handler"
|
||||
policy = data.aws_iam_policy_document.aws_node_termination_handler.json
|
||||
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
data "aws_iam_policy_document" "aws_node_termination_handler" {
|
||||
statement {
|
||||
actions = [
|
||||
"ec2:DescribeInstances",
|
||||
"autoscaling:DescribeAutoScalingInstances",
|
||||
"autoscaling:DescribeTags",
|
||||
]
|
||||
resources = ["*"]
|
||||
}
|
||||
|
||||
statement {
|
||||
actions = ["autoscaling:CompleteLifecycleAction"]
|
||||
resources = [for group in module.eks.self_managed_node_groups : group.autoscaling_group_arn]
|
||||
}
|
||||
|
||||
statement {
|
||||
actions = [
|
||||
"sqs:DeleteMessage",
|
||||
"sqs:ReceiveMessage"
|
||||
]
|
||||
resources = [module.aws_node_termination_handler_sqs.sqs_queue_arn]
|
||||
}
|
||||
}
|
||||
|
||||
module "aws_node_termination_handler_sqs" {
|
||||
source = "terraform-aws-modules/sqs/aws"
|
||||
version = "~> 3.0"
|
||||
|
||||
name = local.name
|
||||
message_retention_seconds = 300
|
||||
policy = data.aws_iam_policy_document.aws_node_termination_handler_sqs.json
|
||||
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
data "aws_iam_policy_document" "aws_node_termination_handler_sqs" {
|
||||
statement {
|
||||
actions = ["sqs:SendMessage"]
|
||||
resources = ["arn:aws:sqs:${local.region}:${data.aws_caller_identity.current.account_id}:${local.name}"]
|
||||
|
||||
principals {
|
||||
type = "Service"
|
||||
identifiers = [
|
||||
"events.amazonaws.com",
|
||||
"sqs.amazonaws.com",
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_asg" {
|
||||
name = "${local.name}-asg-termination"
|
||||
description = "Node termination event rule"
|
||||
|
||||
event_pattern = jsonencode({
|
||||
"source" : ["aws.autoscaling"],
|
||||
"detail-type" : ["EC2 Instance-terminate Lifecycle Action"]
|
||||
"resources" : [for group in module.eks.self_managed_node_groups : group.autoscaling_group_arn]
|
||||
})
|
||||
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
resource "aws_cloudwatch_event_target" "aws_node_termination_handler_asg" {
|
||||
target_id = "${local.name}-asg-termination"
|
||||
rule = aws_cloudwatch_event_rule.aws_node_termination_handler_asg.name
|
||||
arn = module.aws_node_termination_handler_sqs.sqs_queue_arn
|
||||
}
|
||||
|
||||
resource "aws_cloudwatch_event_rule" "aws_node_termination_handler_spot" {
|
||||
name = "${local.name}-spot-termination"
|
||||
description = "Node termination event rule"
|
||||
event_pattern = jsonencode({
|
||||
"source" : ["aws.ec2"],
|
||||
"detail-type" : ["EC2 Spot Instance Interruption Warning"]
|
||||
"resources" : [for group in module.eks.self_managed_node_groups : group.autoscaling_group_arn]
|
||||
})
|
||||
}
|
||||
|
||||
resource "aws_cloudwatch_event_target" "aws_node_termination_handler_spot" {
|
||||
target_id = "${local.name}-spot-termination"
|
||||
rule = aws_cloudwatch_event_rule.aws_node_termination_handler_spot.name
|
||||
arn = module.aws_node_termination_handler_sqs.sqs_queue_arn
|
||||
}
|
||||
|
||||
# Creating the lifecycle-hook outside of the ASG resource's `initial_lifecycle_hook`
|
||||
# ensures that node termination does not require the lifecycle action to be completed,
|
||||
# and thus allows the ASG to be destroyed cleanly.
|
||||
resource "aws_autoscaling_lifecycle_hook" "aws_node_termination_handler" {
|
||||
for_each = module.eks.self_managed_node_groups
|
||||
|
||||
name = "aws-node-termination-handler-${each.value.autoscaling_group_name}"
|
||||
autoscaling_group_name = each.value.autoscaling_group_name
|
||||
lifecycle_transition = "autoscaling:EC2_INSTANCE_TERMINATING"
|
||||
heartbeat_timeout = 300
|
||||
default_result = "CONTINUE"
|
||||
}
|
||||
203
examples/irsa_autoscale_refresh/main.tf
Normal file
@@ -0,0 +1,203 @@
provider "aws" {
  region = local.region
}

locals {
  name            = "ex-${replace(basename(path.cwd), "_", "-")}"
  cluster_version = "1.21"
  region          = "eu-west-1"

  tags = {
    Example    = local.name
    GithubRepo = "terraform-aws-eks"
    GithubOrg  = "terraform-aws-modules"
  }
}

data "aws_caller_identity" "current" {}

data "aws_eks_cluster_auth" "cluster" {
  name = module.eks.cluster_id
}

################################################################################
# EKS Module
################################################################################

module "eks" {
  source = "../.."

  cluster_name                    = local.name
  cluster_version                 = local.cluster_version
  cluster_endpoint_private_access = true
  cluster_endpoint_public_access  = true

  vpc_id     = module.vpc.vpc_id
  subnet_ids = module.vpc.private_subnets

  enable_irsa = true

  # Self Managed Node Group(s)
  self_managed_node_groups = {
    refresh = {
      max_size     = 5
      desired_size = 1

      instance_type = "m5.large"

      instance_refresh = {
        strategy = "Rolling"
        preferences = {
          checkpoint_delay       = 600
          checkpoint_percentages = [35, 70, 100]
          instance_warmup        = 300
          min_healthy_percentage = 50
        }
        triggers = ["tag"]
      }

      propogate_tags = [{
        key                 = "aws-node-termination-handler/managed"
        value               = true
        propagate_at_launch = true
      }]
    }

    mixed_instance = {
      use_mixed_instances_policy = true
      mixed_instances_policy = {
        instances_distribution = {
          on_demand_base_capacity                  = 0
          on_demand_percentage_above_base_capacity = 10
          spot_allocation_strategy                 = "capacity-optimized"
        }

        override = [
          {
            instance_type     = "m5.large"
            weighted_capacity = "1"
          },
          {
            instance_type     = "m6i.large"
            weighted_capacity = "2"
          },
        ]
      }

      propogate_tags = [{
        key                 = "aws-node-termination-handler/managed"
        value               = true
        propagate_at_launch = true
      }]
    }

    spot = {
      instance_type = "m5.large"
      instance_market_options = {
        market_type = "spot"
      }

      bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"

      propogate_tags = [{
        key                 = "aws-node-termination-handler/managed"
        value               = true
        propagate_at_launch = true
      }]
    }
  }

  tags = merge(local.tags, { Foo = "bar" })
}

################################################################################
# aws-auth configmap
# Only EKS managed node groups automatically add roles to aws-auth configmap
# so we need to ensure fargate profiles and self-managed node roles are added
################################################################################

data "aws_eks_cluster_auth" "this" {
  name = module.eks.cluster_id
}

locals {
  kubeconfig = yamlencode({
    apiVersion      = "v1"
    kind            = "Config"
    current-context = "terraform"
    clusters = [{
      name = module.eks.cluster_id
      cluster = {
        certificate-authority-data = module.eks.cluster_certificate_authority_data
        server                     = module.eks.cluster_endpoint
      }
    }]
    contexts = [{
      name = "terraform"
      context = {
        cluster = module.eks.cluster_id
        user    = "terraform"
      }
    }]
    users = [{
      name = "terraform"
      user = {
        token = data.aws_eks_cluster_auth.this.token
      }
    }]
  })
}

resource "null_resource" "apply" {
  triggers = {
    kubeconfig = base64encode(local.kubeconfig)
    cmd_patch  = <<-EOT
      kubectl create configmap aws-auth -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
      kubectl patch configmap/aws-auth --patch "${module.eks.aws_auth_configmap_yaml}" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
    EOT
  }

  provisioner "local-exec" {
    interpreter = ["/bin/bash", "-c"]
    environment = {
      KUBECONFIG = self.triggers.kubeconfig
    }
    command = self.triggers.cmd_patch
  }
}

################################################################################
# Supporting Resources
################################################################################

module "vpc" {
  source  = "terraform-aws-modules/vpc/aws"
  version = "~> 3.0"

  name = local.name
  cidr = "10.0.0.0/16"

  azs             = ["${local.region}a", "${local.region}b", "${local.region}c"]
  private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
  public_subnets  = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]

  enable_nat_gateway   = true
  single_nat_gateway   = true
  enable_dns_hostnames = true

  enable_flow_log                      = true
  create_flow_log_cloudwatch_iam_role  = true
  create_flow_log_cloudwatch_log_group = true

  public_subnet_tags = {
    "kubernetes.io/cluster/${local.name}" = "shared"
    "kubernetes.io/role/elb"              = 1
  }

  private_subnet_tags = {
    "kubernetes.io/cluster/${local.name}" = "shared"
    "kubernetes.io/role/internal-elb"     = 1
  }

  tags = local.tags
}
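The Cluster Autoscaler IAM policy in `charts.tf` above scopes its write actions to ASGs tagged with `k8s.io/cluster-autoscaler/enabled` and `k8s.io/cluster-autoscaler/<cluster name>`. A minimal sketch, not part of this commit, of propagating those discovery tags through the `propogate_tags` attribute shown in `main.tf` (tag keys and values here simply mirror the policy conditions):

```hcl
# Sketch only: tag a self managed node group so Cluster Autoscaler
# auto-discovery and the scoped IAM policy above can match its ASG.
module "eks" {
  source = "../.."

  # ... remaining configuration as in main.tf above ...

  self_managed_node_groups = {
    refresh = {
      # ... existing settings from the example ...

      propogate_tags = [
        {
          key                 = "k8s.io/cluster-autoscaler/enabled"
          value               = "true"
          propagate_at_launch = true
        },
        {
          key                 = "k8s.io/cluster-autoscaler/${local.name}"
          value               = "owned"
          propagate_at_launch = true
        },
      ]
    }
  }
}
```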
167
examples/irsa_autoscale_refresh/outputs.tf
Normal file
@@ -0,0 +1,167 @@
################################################################################
# Cluster
################################################################################

output "cluster_arn" {
  description = "The Amazon Resource Name (ARN) of the cluster"
  value       = module.eks.cluster_arn
}

output "cluster_certificate_authority_data" {
  description = "Base64 encoded certificate data required to communicate with the cluster"
  value       = module.eks.cluster_certificate_authority_data
}

output "cluster_endpoint" {
  description = "Endpoint for your Kubernetes API server"
  value       = module.eks.cluster_endpoint
}

output "cluster_id" {
  description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
  value       = module.eks.cluster_id
}

output "cluster_oidc_issuer_url" {
  description = "The URL on the EKS cluster for the OpenID Connect identity provider"
  value       = module.eks.cluster_oidc_issuer_url
}

output "cluster_platform_version" {
  description = "Platform version for the cluster"
  value       = module.eks.cluster_platform_version
}

output "cluster_status" {
  description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
  value       = module.eks.cluster_status
}

output "cluster_primary_security_group_id" {
  description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
  value       = module.eks.cluster_primary_security_group_id
}

################################################################################
# Security Group
################################################################################

output "cluster_security_group_arn" {
  description = "Amazon Resource Name (ARN) of the cluster security group"
  value       = module.eks.cluster_security_group_arn
}

output "cluster_security_group_id" {
  description = "ID of the cluster security group"
  value       = module.eks.cluster_security_group_id
}

################################################################################
# Node Security Group
################################################################################

output "node_security_group_arn" {
  description = "Amazon Resource Name (ARN) of the node shared security group"
  value       = module.eks.node_security_group_arn
}

output "node_security_group_id" {
  description = "ID of the node shared security group"
  value       = module.eks.node_security_group_id
}

################################################################################
# IRSA
################################################################################

output "oidc_provider_arn" {
  description = "The ARN of the OIDC Provider if `enable_irsa = true`"
  value       = module.eks.oidc_provider_arn
}

################################################################################
# IAM Role
################################################################################

output "cluster_iam_role_name" {
  description = "IAM role name of the EKS cluster"
  value       = module.eks.cluster_iam_role_name
}

output "cluster_iam_role_arn" {
  description = "IAM role ARN of the EKS cluster"
  value       = module.eks.cluster_iam_role_arn
}

output "cluster_iam_role_unique_id" {
  description = "Stable and unique string identifying the IAM role"
  value       = module.eks.cluster_iam_role_unique_id
}

################################################################################
# EKS Addons
################################################################################

output "cluster_addons" {
  description = "Map of attribute maps for all EKS cluster addons enabled"
  value       = module.eks.cluster_addons
}

################################################################################
# EKS Identity Provider
################################################################################

output "cluster_identity_providers" {
  description = "Map of attribute maps for all EKS identity providers enabled"
  value       = module.eks.cluster_identity_providers
}

################################################################################
# CloudWatch Log Group
################################################################################

output "cloudwatch_log_group_name" {
  description = "Name of cloudwatch log group created"
  value       = module.eks.cloudwatch_log_group_name
}

output "cloudwatch_log_group_arn" {
  description = "Arn of cloudwatch log group created"
  value       = module.eks.cloudwatch_log_group_arn
}

################################################################################
# Fargate Profile
################################################################################

output "fargate_profiles" {
  description = "Map of attribute maps for all EKS Fargate Profiles created"
  value       = module.eks.fargate_profiles
}

################################################################################
# EKS Managed Node Group
################################################################################

output "eks_managed_node_groups" {
  description = "Map of attribute maps for all EKS managed node groups created"
  value       = module.eks.eks_managed_node_groups
}

################################################################################
# Self Managed Node Group
################################################################################

output "self_managed_node_groups" {
  description = "Map of attribute maps for all self managed node groups created"
  value       = module.eks.self_managed_node_groups
}

################################################################################
|
||||
# Additional
|
||||
################################################################################
|
||||
|
||||
output "aws_auth_configmap_yaml" {
|
||||
description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
|
||||
value = module.eks.aws_auth_configmap_yaml
|
||||
}
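# A minimal sketch of one way to consume this output outside of Terraform (assumes Terraform >= 0.14
# for `output -raw` and a kubectl context already pointing at the cluster):
#   terraform output -raw aws_auth_configmap_yaml | kubectl apply -f -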
|
||||
examples/irsa_autoscale_refresh/versions.tf (new file, 18 lines)
@@ -0,0 +1,18 @@
|
||||
terraform {
|
||||
required_version = ">= 0.13.1"
|
||||
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
version = ">= 3.64"
|
||||
}
|
||||
null = {
|
||||
source = "hashicorp/null"
|
||||
version = ">= 3.0"
|
||||
}
|
||||
helm = {
|
||||
source = "hashicorp/helm"
|
||||
version = ">= 2.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,65 +0,0 @@
|
||||
# Launch templates example
|
||||
|
||||
This is an EKS example using a workers launch template with the worker groups feature.
|
||||
|
||||
See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/worker.html) for more details.
|
||||
|
||||
## Usage
|
||||
|
||||
To run this example you need to execute:
|
||||
|
||||
```bash
|
||||
$ terraform init
|
||||
$ terraform plan
|
||||
$ terraform apply
|
||||
```
|
||||
|
||||
Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
|
||||
|
||||
<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
## Requirements
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
|
||||
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.56 |
|
||||
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
|
||||
| <a name="requirement_local"></a> [local](#requirement\_local) | >= 1.4 |
|
||||
| <a name="requirement_random"></a> [random](#requirement\_random) | >= 2.1 |
|
||||
|
||||
## Providers
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.56 |
|
||||
| <a name="provider_random"></a> [random](#provider\_random) | >= 2.1 |
|
||||
|
||||
## Modules
|
||||
|
||||
| Name | Source | Version |
|
||||
|------|--------|---------|
|
||||
| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
|
||||
| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
|
||||
|
||||
## Resources
|
||||
|
||||
| Name | Type |
|
||||
|------|------|
|
||||
| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
|
||||
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
|
||||
| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
|
||||
| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
|
||||
|
||||
## Inputs
|
||||
|
||||
No inputs.
|
||||
|
||||
## Outputs
|
||||
|
||||
| Name | Description |
|
||||
|------|-------------|
|
||||
| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
|
||||
| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
|
||||
| <a name="output_config_map_aws_auth"></a> [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
|
||||
| <a name="output_kubectl_config"></a> [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
|
||||
<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
@@ -1,134 +0,0 @@
|
||||
provider "aws" {
|
||||
region = local.region
|
||||
}
|
||||
|
||||
locals {
|
||||
name = "launch_template-${random_string.suffix.result}"
|
||||
cluster_version = "1.20"
|
||||
region = "eu-west-1"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Module
|
||||
################################################################################
|
||||
|
||||
module "eks" {
|
||||
source = "../.."
|
||||
cluster_name = local.name
|
||||
cluster_version = local.cluster_version
|
||||
vpc_id = module.vpc.vpc_id
|
||||
subnets = module.vpc.private_subnets
|
||||
cluster_endpoint_private_access = true
|
||||
cluster_endpoint_public_access = true
|
||||
|
||||
worker_groups_launch_template = [
|
||||
{
|
||||
name = "worker-group-1"
|
||||
instance_type = "t3.small"
|
||||
asg_desired_capacity = 2
|
||||
public_ip = true
|
||||
tags = [{
|
||||
key = "ExtraTag"
|
||||
value = "TagValue"
|
||||
propagate_at_launch = true
|
||||
}]
|
||||
},
|
||||
{
|
||||
name = "worker-group-2"
|
||||
instance_type = "t3.medium"
|
||||
asg_desired_capacity = 1
|
||||
public_ip = true
|
||||
ebs_optimized = true
|
||||
},
|
||||
{
|
||||
name = "worker-group-3"
|
||||
instance_type = "t2.large"
|
||||
asg_desired_capacity = 1
|
||||
public_ip = true
|
||||
elastic_inference_accelerator = "eia2.medium"
|
||||
},
|
||||
{
|
||||
name = "worker-group-4"
|
||||
instance_type = "t3.small"
|
||||
asg_desired_capacity = 1
|
||||
public_ip = true
|
||||
root_volume_size = 150
|
||||
root_volume_type = "gp3"
|
||||
root_volume_throughput = 300
|
||||
additional_ebs_volumes = [
|
||||
{
|
||||
block_device_name = "/dev/xvdb"
|
||||
volume_size = 100
|
||||
volume_type = "gp3"
|
||||
throughput = 150
|
||||
},
|
||||
]
|
||||
},
|
||||
]
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Kubernetes provider configuration
|
||||
################################################################################
|
||||
|
||||
data "aws_eks_cluster" "cluster" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
data "aws_eks_cluster_auth" "cluster" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
provider "kubernetes" {
|
||||
host = data.aws_eks_cluster.cluster.endpoint
|
||||
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
|
||||
token = data.aws_eks_cluster_auth.cluster.token
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Supporting Resources
|
||||
################################################################################
|
||||
|
||||
data "aws_availability_zones" "available" {
|
||||
}
|
||||
|
||||
resource "random_string" "suffix" {
|
||||
length = 8
|
||||
special = false
|
||||
}
|
||||
|
||||
module "vpc" {
|
||||
source = "terraform-aws-modules/vpc/aws"
|
||||
version = "~> 3.0"
|
||||
|
||||
name = local.name
|
||||
cidr = "10.0.0.0/16"
|
||||
azs = data.aws_availability_zones.available.names
|
||||
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
|
||||
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
|
||||
enable_nat_gateway = true
|
||||
single_nat_gateway = true
|
||||
enable_dns_hostnames = true
|
||||
|
||||
public_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
|
||||
private_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/internal-elb" = "1"
|
||||
}
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
output "cluster_endpoint" {
|
||||
description = "Endpoint for EKS control plane."
|
||||
value = module.eks.cluster_endpoint
|
||||
}
|
||||
|
||||
output "cluster_security_group_id" {
|
||||
description = "Security group ids attached to the cluster control plane."
|
||||
value = module.eks.cluster_security_group_id
|
||||
}
|
||||
|
||||
output "kubectl_config" {
|
||||
description = "kubectl config as generated by the module."
|
||||
value = module.eks.kubeconfig
|
||||
}
|
||||
|
||||
output "config_map_aws_auth" {
|
||||
description = "A kubernetes configuration to authenticate to this EKS cluster."
|
||||
value = module.eks.config_map_aws_auth
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
yum update -y
|
||||
@@ -1,22 +0,0 @@
|
||||
terraform {
|
||||
required_version = ">= 0.13.1"
|
||||
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
version = ">= 3.56"
|
||||
}
|
||||
local = {
|
||||
source = "hashicorp/local"
|
||||
version = ">= 1.4"
|
||||
}
|
||||
kubernetes = {
|
||||
source = "hashicorp/kubernetes"
|
||||
version = ">= 1.11.1"
|
||||
}
|
||||
random = {
|
||||
source = "hashicorp/random"
|
||||
version = ">= 2.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,70 +0,0 @@
|
||||
# Launch template with managed groups example
|
||||
|
||||
This is an EKS example using a custom workers launch template with the managed node groups feature, in two different ways:

- Using an existing launch template defined outside the module
- Using a launch template created by the module, with user customization
|
||||
|
||||
See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html) for more details.
|
||||
|
||||
## Usage
|
||||
|
||||
To run this example you need to execute:
|
||||
|
||||
```bash
|
||||
$ terraform init
|
||||
$ terraform plan
|
||||
$ terraform apply
|
||||
```
|
||||
|
||||
Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
|
||||
|
||||
<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
## Requirements
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
|
||||
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.56 |
|
||||
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
|
||||
| <a name="requirement_local"></a> [local](#requirement\_local) | >= 1.4 |
|
||||
| <a name="requirement_random"></a> [random](#requirement\_random) | >= 2.1 |
|
||||
|
||||
## Providers
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.56 |
|
||||
| <a name="provider_random"></a> [random](#provider\_random) | >= 2.1 |
|
||||
|
||||
## Modules
|
||||
|
||||
| Name | Source | Version |
|
||||
|------|--------|---------|
|
||||
| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
|
||||
| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
|
||||
|
||||
## Resources
|
||||
|
||||
| Name | Type |
|
||||
|------|------|
|
||||
| [aws_iam_service_linked_role.autoscaling](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_service_linked_role) | resource |
|
||||
| [aws_launch_template.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/launch_template) | resource |
|
||||
| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
|
||||
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
|
||||
| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
|
||||
| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
|
||||
|
||||
## Inputs
|
||||
|
||||
No inputs.
|
||||
|
||||
## Outputs
|
||||
|
||||
| Name | Description |
|
||||
|------|-------------|
|
||||
| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
|
||||
| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
|
||||
| <a name="output_config_map_aws_auth"></a> [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
|
||||
| <a name="output_kubectl_config"></a> [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
|
||||
<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
@@ -1,78 +0,0 @@
|
||||
# If you have used ASGs before, this role was already auto-created and you need to import it into the Terraform state
|
||||
resource "aws_iam_service_linked_role" "autoscaling" {
|
||||
aws_service_name = "autoscaling.amazonaws.com"
|
||||
description = "Default Service-Linked Role enables access to AWS Services and Resources used or managed by Auto Scaling"
|
||||
custom_suffix = "lt_with_managed_node_groups" # the full name is "AWSServiceRoleForAutoScaling_lt_with_managed_node_groups" < 64 characters
|
||||
}
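# A minimal sketch of that import, using a hypothetical account ID (111122223333); adjust the ARN
# (including any custom suffix) to match the service-linked role that already exists in your account:
#   terraform import aws_iam_service_linked_role.autoscaling \
#     arn:aws:iam::111122223333:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling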
|
||||
|
||||
#data "aws_caller_identity" "current" {}
|
||||
#
|
||||
## This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes
|
||||
#data "aws_iam_policy_document" "ebs_decryption" {
|
||||
# # Copy of default KMS policy that lets you manage it
|
||||
# statement {
|
||||
# sid = "Enable IAM User Permissions"
|
||||
# effect = "Allow"
|
||||
#
|
||||
# principals {
|
||||
# type = "AWS"
|
||||
# identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
|
||||
# }
|
||||
#
|
||||
# actions = [
|
||||
# "kms:*"
|
||||
# ]
|
||||
#
|
||||
# resources = ["*"]
|
||||
# }
|
||||
#
|
||||
# # Required for EKS
|
||||
# statement {
|
||||
# sid = "Allow service-linked role use of the CMK"
|
||||
# effect = "Allow"
|
||||
#
|
||||
# principals {
|
||||
# type = "AWS"
|
||||
# identifiers = [
|
||||
# "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
|
||||
# module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
|
||||
# ]
|
||||
# }
|
||||
#
|
||||
# actions = [
|
||||
# "kms:Encrypt",
|
||||
# "kms:Decrypt",
|
||||
# "kms:ReEncrypt*",
|
||||
# "kms:GenerateDataKey*",
|
||||
# "kms:DescribeKey"
|
||||
# ]
|
||||
#
|
||||
# resources = ["*"]
|
||||
# }
|
||||
#
|
||||
# statement {
|
||||
# sid = "Allow attachment of persistent resources"
|
||||
# effect = "Allow"
|
||||
#
|
||||
# principals {
|
||||
# type = "AWS"
|
||||
# identifiers = [
|
||||
# "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
|
||||
# module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
|
||||
# ]
|
||||
# }
|
||||
#
|
||||
# actions = [
|
||||
# "kms:CreateGrant"
|
||||
# ]
|
||||
#
|
||||
# resources = ["*"]
|
||||
#
|
||||
# condition {
|
||||
# test = "Bool"
|
||||
# variable = "kms:GrantIsForAWSResource"
|
||||
# values = ["true"]
|
||||
# }
|
||||
#
|
||||
# }
|
||||
#}
|
||||
@@ -1,98 +0,0 @@
|
||||
#data "template_file" "launch_template_userdata" {
|
||||
# template = file("${path.module}/templates/userdata.sh.tpl")
|
||||
#
|
||||
# vars = {
|
||||
# cluster_name = local.name
|
||||
# endpoint = module.eks.cluster_endpoint
|
||||
# cluster_auth_base64 = module.eks.cluster_certificate_authority_data
|
||||
#
|
||||
# bootstrap_extra_args = ""
|
||||
# kubelet_extra_args = ""
|
||||
# }
|
||||
#}
|
||||
|
||||
# This is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx).
# There are several more options one could set, but you probably don't need to modify them;
# you can take the default and add your custom AMI and/or custom tags.
#
# Trivia: AWS transparently creates a copy of your LaunchTemplate and then actually uses that copy for the node group. If you DON'T use a custom AMI,
# the default user-data for bootstrapping a cluster is merged into the copy.
|
||||
|
||||
resource "aws_launch_template" "default" {
|
||||
name_prefix = "eks-example-"
|
||||
description = "Default Launch-Template"
|
||||
update_default_version = true
|
||||
|
||||
block_device_mappings {
|
||||
device_name = "/dev/xvda"
|
||||
|
||||
ebs {
|
||||
volume_size = 100
|
||||
volume_type = "gp2"
|
||||
delete_on_termination = true
|
||||
# encrypted = true
|
||||
|
||||
# Enable this if you want to encrypt your node root volumes with a KMS/CMK. Encryption of PVCs is handled via a k8s StorageClass though.
# You then also need to attach data.aws_iam_policy_document.ebs_decryption.json from disk_encryption_policy.tf to the KMS/CMK key!
|
||||
# kms_key_id = var.kms_key_arn
|
||||
}
|
||||
}
|
||||
|
||||
monitoring {
|
||||
enabled = true
|
||||
}
|
||||
|
||||
network_interfaces {
|
||||
associate_public_ip_address = false
|
||||
delete_on_termination = true
|
||||
security_groups = [module.eks.worker_security_group_id]
|
||||
}
|
||||
|
||||
# if you want to use a custom AMI
|
||||
# image_id = var.ami_id
|
||||
|
||||
# If you use a custom AMI, you need to supply the bootstrap script via user-data, as EKS DOESN'T merge its managed user-data then.
# You can add more than the minimum code you see in the template, e.g. install the SSM agent, see https://github.com/aws/containers-roadmap/issues/593#issuecomment-577181345
|
||||
#
|
||||
# (optionally you can use https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/cloudinit_config to render the script, example: https://github.com/terraform-aws-modules/terraform-aws-eks/pull/997#issuecomment-705286151)
|
||||
|
||||
# user_data = base64encode(
|
||||
# data.template_file.launch_template_userdata.rendered,
|
||||
# )
|
||||
|
||||
# Supplying custom tags to EKS instances is another use-case for LaunchTemplates
|
||||
tag_specifications {
|
||||
resource_type = "instance"
|
||||
|
||||
tags = {
|
||||
CustomTag = "Instance custom tag"
|
||||
}
|
||||
}
|
||||
|
||||
# Supplying custom tags to EKS instances' root volumes is another use-case for LaunchTemplates (doesn't add tags to volumes dynamically provisioned via PVC)
|
||||
tag_specifications {
|
||||
resource_type = "volume"
|
||||
|
||||
tags = {
|
||||
CustomTag = "Volume custom tag"
|
||||
}
|
||||
}
|
||||
|
||||
# Supplying custom tags to EKS instances' ENIs is another use-case for LaunchTemplates
|
||||
tag_specifications {
|
||||
resource_type = "network-interface"
|
||||
|
||||
tags = {
|
||||
CustomTag = "EKS example"
|
||||
}
|
||||
}
|
||||
|
||||
# Tag the LT itself
|
||||
tags = {
|
||||
CustomTag = "Launch template custom tag"
|
||||
}
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
}
|
||||
}
|
||||
@@ -1,149 +0,0 @@
|
||||
provider "aws" {
|
||||
region = local.region
|
||||
}
|
||||
|
||||
locals {
|
||||
name = "lt_with_mng-${random_string.suffix.result}"
|
||||
cluster_version = "1.20"
|
||||
region = "eu-west-1"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Module
|
||||
################################################################################
|
||||
|
||||
module "eks" {
|
||||
source = "../.."
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_version = local.cluster_version
|
||||
|
||||
vpc_id = module.vpc.vpc_id
|
||||
subnets = module.vpc.private_subnets
|
||||
|
||||
cluster_endpoint_private_access = true
|
||||
cluster_endpoint_public_access = true
|
||||
|
||||
node_groups = {
|
||||
# use an already defined launch template
|
||||
example1 = {
|
||||
name_prefix = "example1"
|
||||
desired_capacity = 1
|
||||
max_capacity = 15
|
||||
min_capacity = 1
|
||||
|
||||
launch_template_id = aws_launch_template.default.id
|
||||
launch_template_version = aws_launch_template.default.default_version
|
||||
|
||||
instance_types = ["t3.small"]
|
||||
|
||||
additional_tags = {
|
||||
ExtraTag = "example1"
|
||||
}
|
||||
}
|
||||
# create launch template
|
||||
example2 = {
|
||||
create_launch_template = true
|
||||
desired_capacity = 1
|
||||
max_capacity = 10
|
||||
min_capacity = 1
|
||||
|
||||
disk_size = 50
|
||||
disk_type = "gp3"
|
||||
disk_throughput = 150
|
||||
disk_iops = 3000
|
||||
|
||||
instance_types = ["t3.large"]
|
||||
capacity_type = "SPOT"
|
||||
|
||||
bootstrap_env = {
|
||||
CONTAINER_RUNTIME = "containerd"
|
||||
USE_MAX_PODS = false
|
||||
}
|
||||
kubelet_extra_args = "--max-pods=110"
|
||||
k8s_labels = {
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
additional_tags = {
|
||||
ExtraTag = "example2"
|
||||
}
|
||||
taints = [
|
||||
{
|
||||
key = "dedicated"
|
||||
value = "gpuGroup"
|
||||
effect = "NO_SCHEDULE"
|
||||
}
|
||||
]
|
||||
update_config = {
|
||||
max_unavailable_percentage = 50 # or set `max_unavailable`
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Kubernetes provider configuration
|
||||
################################################################################
|
||||
|
||||
data "aws_eks_cluster" "cluster" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
data "aws_eks_cluster_auth" "cluster" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
provider "kubernetes" {
|
||||
host = data.aws_eks_cluster.cluster.endpoint
|
||||
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
|
||||
token = data.aws_eks_cluster_auth.cluster.token
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Supporting Resources
|
||||
################################################################################
|
||||
|
||||
data "aws_availability_zones" "available" {
|
||||
}
|
||||
|
||||
resource "random_string" "suffix" {
|
||||
length = 8
|
||||
special = false
|
||||
}
|
||||
|
||||
module "vpc" {
|
||||
source = "terraform-aws-modules/vpc/aws"
|
||||
version = "~> 3.0"
|
||||
|
||||
name = local.name
|
||||
cidr = "10.0.0.0/16"
|
||||
azs = data.aws_availability_zones.available.names
|
||||
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
|
||||
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
|
||||
enable_nat_gateway = true
|
||||
single_nat_gateway = true
|
||||
enable_dns_hostnames = true
|
||||
|
||||
public_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
|
||||
private_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/internal-elb" = "1"
|
||||
}
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
output "cluster_endpoint" {
|
||||
description = "Endpoint for EKS control plane."
|
||||
value = module.eks.cluster_endpoint
|
||||
}
|
||||
|
||||
output "cluster_security_group_id" {
|
||||
description = "Security group ids attached to the cluster control plane."
|
||||
value = module.eks.cluster_security_group_id
|
||||
}
|
||||
|
||||
output "kubectl_config" {
|
||||
description = "kubectl config as generated by the module."
|
||||
value = module.eks.kubeconfig
|
||||
}
|
||||
|
||||
output "config_map_aws_auth" {
|
||||
description = "A kubernetes configuration to authenticate to this EKS cluster."
|
||||
value = module.eks.config_map_aws_auth
|
||||
}
|
||||
@@ -1,12 +0,0 @@
|
||||
MIME-Version: 1.0
|
||||
Content-Type: multipart/mixed; boundary="//"
|
||||
|
||||
--//
|
||||
Content-Type: text/x-shellscript; charset="us-ascii"
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
# Bootstrap and join the cluster
|
||||
/etc/eks/bootstrap.sh --b64-cluster-ca '${cluster_auth_base64}' --apiserver-endpoint '${endpoint}' ${bootstrap_extra_args} --kubelet-extra-args "${kubelet_extra_args}" '${cluster_name}'
|
||||
|
||||
--//--
|
||||
@@ -1,22 +0,0 @@
|
||||
terraform {
|
||||
required_version = ">= 0.13.1"
|
||||
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
version = ">= 3.56"
|
||||
}
|
||||
local = {
|
||||
source = "hashicorp/local"
|
||||
version = ">= 1.4"
|
||||
}
|
||||
kubernetes = {
|
||||
source = "hashicorp/kubernetes"
|
||||
version = ">= 1.11.1"
|
||||
}
|
||||
random = {
|
||||
source = "hashicorp/random"
|
||||
version = ">= 2.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,73 +0,0 @@
|
||||
# Managed groups example
|
||||
|
||||
This is an EKS example using the managed node groups feature in two different ways:

- Using SPOT instances in a node group
- Using ON_DEMAND instances in a node group
|
||||
|
||||
See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for more details.
|
||||
|
||||
## Usage
|
||||
|
||||
To run this example you need to execute:
|
||||
|
||||
```bash
|
||||
$ terraform init
|
||||
$ terraform plan
|
||||
$ terraform apply
|
||||
```
|
||||
|
||||
Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
|
||||
|
||||
<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
## Requirements
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
|
||||
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.56 |
|
||||
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
|
||||
| <a name="requirement_local"></a> [local](#requirement\_local) | >= 1.4 |
|
||||
| <a name="requirement_random"></a> [random](#requirement\_random) | >= 2.1 |
|
||||
|
||||
## Providers
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.56 |
|
||||
| <a name="provider_random"></a> [random](#provider\_random) | >= 2.1 |
|
||||
|
||||
## Modules
|
||||
|
||||
| Name | Source | Version |
|
||||
|------|--------|---------|
|
||||
| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
|
||||
| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
|
||||
|
||||
## Resources
|
||||
|
||||
| Name | Type |
|
||||
|------|------|
|
||||
| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
|
||||
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
|
||||
| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
|
||||
| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
|
||||
|
||||
## Inputs
|
||||
|
||||
| Name | Description | Type | Default | Required |
|
||||
|------|-------------|------|---------|:--------:|
|
||||
| <a name="input_map_accounts"></a> [map\_accounts](#input\_map\_accounts) | Additional AWS account numbers to add to the aws-auth configmap. | `list(string)` | <pre>[<br> "777777777777",<br> "888888888888"<br>]</pre> | no |
|
||||
| <a name="input_map_roles"></a> [map\_roles](#input\_map\_roles) | Additional IAM roles to add to the aws-auth configmap. | <pre>list(object({<br> rolearn = string<br> username = string<br> groups = list(string)<br> }))</pre> | <pre>[<br> {<br> "groups": [<br> "system:masters"<br> ],<br> "rolearn": "arn:aws:iam::66666666666:role/role1",<br> "username": "role1"<br> }<br>]</pre> | no |
|
||||
| <a name="input_map_users"></a> [map\_users](#input\_map\_users) | Additional IAM users to add to the aws-auth configmap. | <pre>list(object({<br> userarn = string<br> username = string<br> groups = list(string)<br> }))</pre> | <pre>[<br> {<br> "groups": [<br> "system:masters"<br> ],<br> "userarn": "arn:aws:iam::66666666666:user/user1",<br> "username": "user1"<br> },<br> {<br> "groups": [<br> "system:masters"<br> ],<br> "userarn": "arn:aws:iam::66666666666:user/user2",<br> "username": "user2"<br> }<br>]</pre> | no |
|
||||
|
||||
## Outputs
|
||||
|
||||
| Name | Description |
|
||||
|------|-------------|
|
||||
| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
|
||||
| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
|
||||
| <a name="output_config_map_aws_auth"></a> [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
|
||||
| <a name="output_kubectl_config"></a> [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
|
||||
| <a name="output_node_groups"></a> [node\_groups](#output\_node\_groups) | Outputs from node groups |
|
||||
<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
@@ -1,148 +0,0 @@
|
||||
provider "aws" {
|
||||
region = local.region
|
||||
}
|
||||
|
||||
locals {
|
||||
name = "managed_node_groups-${random_string.suffix.result}"
|
||||
cluster_version = "1.20"
|
||||
region = "eu-west-1"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Module
|
||||
################################################################################
|
||||
|
||||
module "eks" {
|
||||
source = "../.."
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_version = local.cluster_version
|
||||
|
||||
vpc_id = module.vpc.vpc_id
|
||||
subnets = module.vpc.private_subnets
|
||||
|
||||
cluster_endpoint_private_access = true
|
||||
cluster_endpoint_public_access = true
|
||||
|
||||
node_groups_defaults = {
|
||||
ami_type = "AL2_x86_64"
|
||||
disk_size = 50
|
||||
}
|
||||
|
||||
node_groups = {
|
||||
example = {
|
||||
desired_capacity = 1
|
||||
max_capacity = 10
|
||||
min_capacity = 1
|
||||
|
||||
instance_types = ["t3.large"]
|
||||
capacity_type = "SPOT"
|
||||
k8s_labels = {
|
||||
Example = "managed_node_groups"
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
additional_tags = {
|
||||
ExtraTag = "example"
|
||||
}
|
||||
taints = [
|
||||
{
|
||||
key = "dedicated"
|
||||
value = "gpuGroup"
|
||||
effect = "NO_SCHEDULE"
|
||||
}
|
||||
]
|
||||
update_config = {
|
||||
max_unavailable_percentage = 50 # or set `max_unavailable`
|
||||
}
|
||||
}
|
||||
example2 = {
|
||||
desired_capacity = 1
|
||||
max_capacity = 10
|
||||
min_capacity = 1
|
||||
|
||||
instance_types = ["t3.medium"]
|
||||
k8s_labels = {
|
||||
Example = "managed_node_groups"
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
additional_tags = {
|
||||
ExtraTag = "example2"
|
||||
}
|
||||
update_config = {
|
||||
max_unavailable_percentage = 50 # or set `max_unavailable`
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
map_roles = var.map_roles
|
||||
map_users = var.map_users
|
||||
map_accounts = var.map_accounts
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Kubernetes provider configuration
|
||||
################################################################################
|
||||
|
||||
data "aws_eks_cluster" "cluster" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
data "aws_eks_cluster_auth" "cluster" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
provider "kubernetes" {
|
||||
host = data.aws_eks_cluster.cluster.endpoint
|
||||
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
|
||||
token = data.aws_eks_cluster_auth.cluster.token
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Supporting Resources
|
||||
################################################################################
|
||||
|
||||
data "aws_availability_zones" "available" {
|
||||
}
|
||||
|
||||
resource "random_string" "suffix" {
|
||||
length = 8
|
||||
special = false
|
||||
}
|
||||
|
||||
module "vpc" {
|
||||
source = "terraform-aws-modules/vpc/aws"
|
||||
version = "~> 3.0"
|
||||
|
||||
name = local.name
|
||||
cidr = "10.0.0.0/16"
|
||||
azs = data.aws_availability_zones.available.names
|
||||
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
|
||||
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
|
||||
enable_nat_gateway = true
|
||||
single_nat_gateway = true
|
||||
enable_dns_hostnames = true
|
||||
|
||||
public_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
|
||||
private_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/internal-elb" = "1"
|
||||
}
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
output "cluster_endpoint" {
|
||||
description = "Endpoint for EKS control plane."
|
||||
value = module.eks.cluster_endpoint
|
||||
}
|
||||
|
||||
output "cluster_security_group_id" {
|
||||
description = "Security group ids attached to the cluster control plane."
|
||||
value = module.eks.cluster_security_group_id
|
||||
}
|
||||
|
||||
output "kubectl_config" {
|
||||
description = "kubectl config as generated by the module."
|
||||
value = module.eks.kubeconfig
|
||||
}
|
||||
|
||||
output "config_map_aws_auth" {
|
||||
description = "A kubernetes configuration to authenticate to this EKS cluster."
|
||||
value = module.eks.config_map_aws_auth
|
||||
}
|
||||
|
||||
output "node_groups" {
|
||||
description = "Outputs from node groups"
|
||||
value = module.eks.node_groups
|
||||
}
|
||||
@@ -1,48 +0,0 @@
|
||||
variable "map_accounts" {
|
||||
description = "Additional AWS account numbers to add to the aws-auth configmap."
|
||||
type = list(string)
|
||||
|
||||
default = [
|
||||
"777777777777",
|
||||
"888888888888",
|
||||
]
|
||||
}
|
||||
|
||||
variable "map_roles" {
|
||||
description = "Additional IAM roles to add to the aws-auth configmap."
|
||||
type = list(object({
|
||||
rolearn = string
|
||||
username = string
|
||||
groups = list(string)
|
||||
}))
|
||||
|
||||
default = [
|
||||
{
|
||||
rolearn = "arn:aws:iam::66666666666:role/role1"
|
||||
username = "role1"
|
||||
groups = ["system:masters"]
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
variable "map_users" {
|
||||
description = "Additional IAM users to add to the aws-auth configmap."
|
||||
type = list(object({
|
||||
userarn = string
|
||||
username = string
|
||||
groups = list(string)
|
||||
}))
|
||||
|
||||
default = [
|
||||
{
|
||||
userarn = "arn:aws:iam::66666666666:user/user1"
|
||||
username = "user1"
|
||||
groups = ["system:masters"]
|
||||
},
|
||||
{
|
||||
userarn = "arn:aws:iam::66666666666:user/user2"
|
||||
username = "user2"
|
||||
groups = ["system:masters"]
|
||||
},
|
||||
]
|
||||
}
|
||||
@@ -1,22 +0,0 @@
|
||||
terraform {
|
||||
required_version = ">= 0.13.1"
|
||||
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
version = ">= 3.56"
|
||||
}
|
||||
local = {
|
||||
source = "hashicorp/local"
|
||||
version = ">= 1.4"
|
||||
}
|
||||
kubernetes = {
|
||||
source = "hashicorp/kubernetes"
|
||||
version = ">= 1.11.1"
|
||||
}
|
||||
random = {
|
||||
source = "hashicorp/random"
|
||||
version = ">= 2.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,66 +0,0 @@
|
||||
# Secrets encryption example

This is an EKS example using the [secrets encryption](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/) feature.
|
||||
|
||||
See [the official blog](https://aws.amazon.com/blogs/containers/using-eks-encryption-provider-support-for-defense-in-depth/) for more details.
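For orientation before the full configuration below, here is a minimal sketch of wiring a customer managed KMS key into the cluster; the resource and argument names mirror the `main.tf` of this example:

```hcl
resource "aws_kms_key" "eks" {
  description         = "EKS Secret Encryption Key"
  enable_key_rotation = true
}

module "eks" {
  source = "../.."

  # ... cluster, VPC and worker configuration as in main.tf ...

  cluster_encryption_config = [
    {
      provider_key_arn = aws_kms_key.eks.arn
      resources        = ["secrets"]
    }
  ]
}
```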
|
||||
|
||||
## Usage
|
||||
|
||||
To run this example you need to execute:
|
||||
|
||||
```bash
|
||||
$ terraform init
|
||||
$ terraform plan
|
||||
$ terraform apply
|
||||
```
|
||||
|
||||
Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
|
||||
|
||||
<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
## Requirements
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
|
||||
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.56 |
|
||||
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
|
||||
| <a name="requirement_local"></a> [local](#requirement\_local) | >= 1.4 |
|
||||
| <a name="requirement_random"></a> [random](#requirement\_random) | >= 2.1 |
|
||||
|
||||
## Providers
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.56 |
|
||||
| <a name="provider_random"></a> [random](#provider\_random) | >= 2.1 |
|
||||
|
||||
## Modules
|
||||
|
||||
| Name | Source | Version |
|
||||
|------|--------|---------|
|
||||
| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
|
||||
| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
|
||||
|
||||
## Resources
|
||||
|
||||
| Name | Type |
|
||||
|------|------|
|
||||
| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
|
||||
| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
|
||||
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
|
||||
| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
|
||||
| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
|
||||
|
||||
## Inputs
|
||||
|
||||
No inputs.
|
||||
|
||||
## Outputs
|
||||
|
||||
| Name | Description |
|
||||
|------|-------------|
|
||||
| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
|
||||
| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
|
||||
| <a name="output_config_map_aws_auth"></a> [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
|
||||
| <a name="output_kubectl_config"></a> [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
|
||||
<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
@@ -1,126 +0,0 @@
|
||||
provider "aws" {
|
||||
region = local.region
|
||||
}
|
||||
|
||||
locals {
|
||||
name = "secrets_encryption-${random_string.suffix.result}"
|
||||
cluster_version = "1.20"
|
||||
region = "eu-west-1"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Module
|
||||
################################################################################
|
||||
|
||||
module "eks" {
|
||||
source = "../.."
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_version = local.cluster_version
|
||||
|
||||
vpc_id = module.vpc.vpc_id
|
||||
subnets = module.vpc.private_subnets
|
||||
|
||||
cluster_endpoint_private_access = true
|
||||
cluster_endpoint_public_access = true
|
||||
|
||||
|
||||
cluster_encryption_config = [
|
||||
{
|
||||
provider_key_arn = aws_kms_key.eks.arn
|
||||
resources = ["secrets"]
|
||||
}
|
||||
]
|
||||
|
||||
worker_groups = [
|
||||
{
|
||||
name = "worker-group-1"
|
||||
instance_type = "t3.small"
|
||||
additional_userdata = "echo foo bar"
|
||||
asg_desired_capacity = 2
|
||||
},
|
||||
]
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Kubernetes provider configuration
|
||||
################################################################################
|
||||
|
||||
data "aws_eks_cluster" "cluster" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
data "aws_eks_cluster_auth" "cluster" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
provider "kubernetes" {
|
||||
host = data.aws_eks_cluster.cluster.endpoint
|
||||
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
|
||||
token = data.aws_eks_cluster_auth.cluster.token
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# KMS for encrypting secrets
|
||||
################################################################################
|
||||
|
||||
resource "aws_kms_key" "eks" {
|
||||
description = "EKS Secret Encryption Key"
|
||||
deletion_window_in_days = 7
|
||||
enable_key_rotation = true
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
################################################################################
|
||||
# Supporting Resources
|
||||
################################################################################
|
||||
|
||||
data "aws_availability_zones" "available" {
|
||||
}
|
||||
|
||||
resource "random_string" "suffix" {
|
||||
length = 8
|
||||
special = false
|
||||
}
|
||||
|
||||
module "vpc" {
|
||||
source = "terraform-aws-modules/vpc/aws"
|
||||
version = "~> 3.0"
|
||||
|
||||
name = local.name
|
||||
cidr = "10.0.0.0/16"
|
||||
azs = data.aws_availability_zones.available.names
|
||||
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
|
||||
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
|
||||
enable_nat_gateway = true
|
||||
single_nat_gateway = true
|
||||
enable_dns_hostnames = true
|
||||
|
||||
public_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/elb" = "1"
|
||||
}
|
||||
|
||||
private_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/internal-elb" = "1"
|
||||
}
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
@@ -1,19 +0,0 @@
|
||||
output "cluster_endpoint" {
|
||||
description = "Endpoint for EKS control plane."
|
||||
value = module.eks.cluster_endpoint
|
||||
}
|
||||
|
||||
output "cluster_security_group_id" {
|
||||
description = "Security group ids attached to the cluster control plane."
|
||||
value = module.eks.cluster_security_group_id
|
||||
}
|
||||
|
||||
output "kubectl_config" {
|
||||
description = "kubectl config as generated by the module."
|
||||
value = module.eks.kubeconfig
|
||||
}
|
||||
|
||||
output "config_map_aws_auth" {
|
||||
description = "A kubernetes configuration to authenticate to this EKS cluster."
|
||||
value = module.eks.config_map_aws_auth
|
||||
}
|
||||
@@ -1,22 +0,0 @@
|
||||
terraform {
|
||||
required_version = ">= 0.13.1"
|
||||
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
version = ">= 3.56"
|
||||
}
|
||||
local = {
|
||||
source = "hashicorp/local"
|
||||
version = ">= 1.4"
|
||||
}
|
||||
kubernetes = {
|
||||
source = "hashicorp/kubernetes"
|
||||
version = ">= 1.11.1"
|
||||
}
|
||||
random = {
|
||||
source = "hashicorp/random"
|
||||
version = ">= 2.1"
|
||||
}
|
||||
}
|
||||
}
|
||||
examples/self_managed_node_group/README.md (new file, 95 lines)
@@ -0,0 +1,95 @@
|
||||
# Self Managed Node Groups Example
|
||||
|
||||
Configuration in this directory creates an AWS EKS cluster with several Self Managed Node Groups (AutoScaling Groups) demonstrating the different methods of configuring/customizing them:
|
||||
|
||||
- A default, "out of the box" self managed node group as supplied by the `self-managed-node-group` sub-module
|
||||
- A Bottlerocket self managed node group that demonstrates many of the configurations/customizations offered by the `self-managed-node-group` sub-module for the Bottlerocket OS
|
||||
- A self managed node group that demonstrates nearly all of the configurations/customizations offered by the `self-managed-node-group` sub-module
|
||||
|
||||
See the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for further details.
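As a quick orientation before the full `main.tf` below, a minimal sketch of the `self_managed_node_groups` input; the group name and sizes here are illustrative only:

```hcl
module "eks" {
  source = "../.."

  # ... cluster_name, cluster_version, vpc_id, subnet_ids, etc. ...

  self_managed_node_groups = {
    # Default node group - provisioned entirely from module defaults
    default_node_group = {}

    # Hypothetical minimal custom group
    small = {
      name          = "small-self-mng"
      instance_type = "t3.medium"
      min_size      = 1
      max_size      = 3
      desired_size  = 1
    }
  }
}
```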
|
||||
|
||||
## Usage
|
||||
|
||||
To run this example you need to execute:
|
||||
|
||||
```bash
|
||||
$ terraform init
|
||||
$ terraform plan
|
||||
$ terraform apply
|
||||
```
|
||||
|
||||
Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
|
||||
|
||||
<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
## Requirements
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
|
||||
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.64 |
|
||||
| <a name="requirement_null"></a> [null](#requirement\_null) | >= 3.0 |
|
||||
| <a name="requirement_tls"></a> [tls](#requirement\_tls) | >= 2.2 |
|
||||
|
||||
## Providers
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.64 |
|
||||
| <a name="provider_null"></a> [null](#provider\_null) | >= 3.0 |
|
||||
| <a name="provider_tls"></a> [tls](#provider\_tls) | >= 2.2 |
|
||||
|
||||
## Modules
|
||||
|
||||
| Name | Source | Version |
|
||||
|------|--------|---------|
|
||||
| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | n/a |
|
||||
| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
|
||||
|
||||
## Resources
|
||||
|
||||
| Name | Type |
|
||||
|------|------|
|
||||
| [aws_key_pair.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource |
|
||||
| [aws_kms_key.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
|
||||
| [aws_kms_key.eks](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/kms_key) | resource |
|
||||
| [aws_security_group.additional](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource |
|
||||
| [null_resource.apply](https://registry.terraform.io/providers/hashicorp/null/latest/docs/resources/resource) | resource |
|
||||
| [tls_private_key.this](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource |
|
||||
| [aws_ami.bottlerocket_ami](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
|
||||
| [aws_caller_identity.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/caller_identity) | data source |
|
||||
| [aws_eks_cluster_auth.this](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
|
||||
| [aws_iam_policy_document.ebs](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source |
|
||||
|
||||
## Inputs
|
||||
|
||||
No inputs.
|
||||
|
||||
## Outputs
|
||||
|
||||
| Name | Description |
|
||||
|------|-------------|
|
||||
| <a name="output_aws_auth_configmap_yaml"></a> [aws\_auth\_configmap\_yaml](#output\_aws\_auth\_configmap\_yaml) | Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles |
|
||||
| <a name="output_cloudwatch_log_group_arn"></a> [cloudwatch\_log\_group\_arn](#output\_cloudwatch\_log\_group\_arn) | Arn of cloudwatch log group created |
|
||||
| <a name="output_cloudwatch_log_group_name"></a> [cloudwatch\_log\_group\_name](#output\_cloudwatch\_log\_group\_name) | Name of cloudwatch log group created |
|
||||
| <a name="output_cluster_addons"></a> [cluster\_addons](#output\_cluster\_addons) | Map of attribute maps for all EKS cluster addons enabled |
|
||||
| <a name="output_cluster_arn"></a> [cluster\_arn](#output\_cluster\_arn) | The Amazon Resource Name (ARN) of the cluster |
|
||||
| <a name="output_cluster_certificate_authority_data"></a> [cluster\_certificate\_authority\_data](#output\_cluster\_certificate\_authority\_data) | Base64 encoded certificate data required to communicate with the cluster |
|
||||
| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for your Kubernetes API server |
|
||||
| <a name="output_cluster_iam_role_arn"></a> [cluster\_iam\_role\_arn](#output\_cluster\_iam\_role\_arn) | IAM role ARN of the EKS cluster |
|
||||
| <a name="output_cluster_iam_role_name"></a> [cluster\_iam\_role\_name](#output\_cluster\_iam\_role\_name) | IAM role name of the EKS cluster |
|
||||
| <a name="output_cluster_iam_role_unique_id"></a> [cluster\_iam\_role\_unique\_id](#output\_cluster\_iam\_role\_unique\_id) | Stable and unique string identifying the IAM role |
|
||||
| <a name="output_cluster_id"></a> [cluster\_id](#output\_cluster\_id) | The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready |
|
||||
| <a name="output_cluster_identity_providers"></a> [cluster\_identity\_providers](#output\_cluster\_identity\_providers) | Map of attribute maps for all EKS identity providers enabled |
|
||||
| <a name="output_cluster_oidc_issuer_url"></a> [cluster\_oidc\_issuer\_url](#output\_cluster\_oidc\_issuer\_url) | The URL on the EKS cluster for the OpenID Connect identity provider |
|
||||
| <a name="output_cluster_platform_version"></a> [cluster\_platform\_version](#output\_cluster\_platform\_version) | Platform version for the cluster |
|
||||
| <a name="output_cluster_primary_security_group_id"></a> [cluster\_primary\_security\_group\_id](#output\_cluster\_primary\_security\_group\_id) | Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console |
|
||||
| <a name="output_cluster_security_group_arn"></a> [cluster\_security\_group\_arn](#output\_cluster\_security\_group\_arn) | Amazon Resource Name (ARN) of the cluster security group |
|
||||
| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | ID of the cluster security group |
|
||||
| <a name="output_cluster_status"></a> [cluster\_status](#output\_cluster\_status) | Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED` |
|
||||
| <a name="output_eks_managed_node_groups"></a> [eks\_managed\_node\_groups](#output\_eks\_managed\_node\_groups) | Map of attribute maps for all EKS managed node groups created |
|
||||
| <a name="output_fargate_profiles"></a> [fargate\_profiles](#output\_fargate\_profiles) | Map of attribute maps for all EKS Fargate Profiles created |
|
||||
| <a name="output_node_security_group_arn"></a> [node\_security\_group\_arn](#output\_node\_security\_group\_arn) | Amazon Resource Name (ARN) of the node shared security group |
|
||||
| <a name="output_node_security_group_id"></a> [node\_security\_group\_id](#output\_node\_security\_group\_id) | ID of the node shared security group |
|
||||
| <a name="output_oidc_provider_arn"></a> [oidc\_provider\_arn](#output\_oidc\_provider\_arn) | The ARN of the OIDC Provider if `enable_irsa = true` |
|
||||
| <a name="output_self_managed_node_groups"></a> [self\_managed\_node\_groups](#output\_self\_managed\_node\_groups) | Map of attribute maps for all self managed node groups created |
|
||||
<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
392
examples/self_managed_node_group/main.tf
Normal file
392
examples/self_managed_node_group/main.tf
Normal file
@@ -0,0 +1,392 @@
|
||||
provider "aws" {
|
||||
region = local.region
|
||||
}
|
||||
|
||||
locals {
|
||||
name = "ex-${replace(basename(path.cwd), "_", "-")}"
|
||||
cluster_version = "1.21"
|
||||
region = "eu-west-1"
|
||||
|
||||
tags = {
|
||||
Example = local.name
|
||||
GithubRepo = "terraform-aws-eks"
|
||||
GithubOrg = "terraform-aws-modules"
|
||||
}
|
||||
}
|
||||
|
||||
data "aws_caller_identity" "current" {}
|
||||
|
||||
################################################################################
|
||||
# EKS Module
|
||||
################################################################################
|
||||
|
||||
module "eks" {
|
||||
source = "../.."
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_version = local.cluster_version
|
||||
cluster_endpoint_private_access = true
|
||||
cluster_endpoint_public_access = true
|
||||
|
||||
cluster_addons = {
|
||||
coredns = {
|
||||
resolve_conflicts = "OVERWRITE"
|
||||
}
|
||||
kube-proxy = {}
|
||||
vpc-cni = {
|
||||
resolve_conflicts = "OVERWRITE"
|
||||
}
|
||||
}
|
||||
|
||||
cluster_encryption_config = [{
|
||||
provider_key_arn = aws_kms_key.eks.arn
|
||||
resources = ["secrets"]
|
||||
}]
|
||||
|
||||
vpc_id = module.vpc.vpc_id
|
||||
subnet_ids = module.vpc.private_subnets
|
||||
|
||||
enable_irsa = true
|
||||
|
||||
self_managed_node_group_defaults = {
|
||||
disk_size = 50
|
||||
}
|
||||
|
||||
self_managed_node_groups = {
|
||||
# Default node group - as provisioned by the module defaults
|
||||
default_node_group = {}
|
||||
|
||||
# Bottlerocket node group
|
||||
bottlerocket = {
|
||||
name = "bottlerocket-self-mng"
|
||||
|
||||
platform = "bottlerocket"
|
||||
ami_id = data.aws_ami.bottlerocket_ami.id
|
||||
instance_type = "m5.large"
|
||||
desired_size = 2
|
||||
key_name = aws_key_pair.this.key_name
|
||||
|
||||
iam_role_additional_policies = ["arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore"]
|
||||
|
||||
bootstrap_extra_args = <<-EOT
|
||||
# The admin host container provides SSH access and runs with "superpowers".
|
||||
# It is disabled by default, and is also disabled explicitly here.
|
||||
[settings.host-containers.admin]
|
||||
enabled = false
|
||||
|
||||
# The control host container provides out-of-band access via SSM.
|
||||
# It is enabled by default, and can be disabled if you do not expect to use SSM.
|
||||
# This could leave you with no way to access the API and change settings on an existing node!
|
||||
[settings.host-containers.control]
|
||||
enabled = true
|
||||
|
||||
[settings.kubernetes.node-labels]
|
||||
ingress = "allowed"
|
||||
EOT
|
||||
}
|
||||
|
||||
# Complete
|
||||
complete = {
|
||||
name = "complete-self-mng"
|
||||
use_name_prefix = false
|
||||
|
||||
subnet_ids = module.vpc.public_subnets
|
||||
|
||||
min_size = 1
|
||||
max_size = 7
|
||||
desired_size = 1
|
||||
|
||||
ami_id = "ami-0caf35bc73450c396"
|
||||
bootstrap_extra_args = "--kubelet-extra-args '--max-pods=110'"
|
||||
|
||||
pre_bootstrap_user_data = <<-EOT
|
||||
export CONTAINER_RUNTIME="containerd"
|
||||
export USE_MAX_PODS=false
|
||||
EOT
|
||||
|
||||
post_bootstrap_user_data = <<-EOT
|
||||
echo "you are free little kubelet!"
|
||||
EOT
|
||||
|
||||
disk_size = 256
|
||||
instance_type = "m6i.large"
|
||||
|
||||
launch_template_name = "self-managed-ex"
|
||||
launch_template_use_name_prefix = true
|
||||
launch_template_description = "Self managed node group example launch template"
|
||||
|
||||
ebs_optimized = true
|
||||
vpc_security_group_ids = [aws_security_group.additional.id]
|
||||
enable_monitoring = true
|
||||
|
||||
block_device_mappings = {
|
||||
xvda = {
|
||||
device_name = "/dev/xvda"
|
||||
ebs = {
|
||||
volume_size = 75
|
||||
volume_type = "gp3"
|
||||
iops = 3000
|
||||
throughput = 150
|
||||
encrypted = true
|
||||
kms_key_id = aws_kms_key.ebs.arn
|
||||
delete_on_termination = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
metadata_options = {
|
||||
http_endpoint = "enabled"
|
||||
http_tokens = "required"
|
||||
http_put_response_hop_limit = 2
|
||||
}
|
||||
|
||||
create_iam_role = true
|
||||
iam_role_name = "self-managed-node-group-complete-example"
|
||||
iam_role_use_name_prefix = false
|
||||
iam_role_description = "Self managed node group complete example role"
|
||||
iam_role_tags = {
|
||||
Purpose = "Protector of the kubelet"
|
||||
}
|
||||
iam_role_additional_policies = [
|
||||
"arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
|
||||
]
|
||||
|
||||
create_security_group = true
|
||||
security_group_name = "self-managed-node-group-complete-example"
|
||||
security_group_use_name_prefix = false
|
||||
security_group_description = "Self managed node group complete example security group"
|
||||
security_group_rules = {
|
||||
phoneOut = {
|
||||
description = "Hello CloudFlare"
|
||||
protocol = "udp"
|
||||
from_port = 53
|
||||
to_port = 53
|
||||
type = "egress"
|
||||
cidr_blocks = ["1.1.1.1/32"]
|
||||
}
|
||||
phoneHome = {
|
||||
description = "Hello cluster"
|
||||
protocol = "udp"
|
||||
from_port = 53
|
||||
to_port = 53
|
||||
type = "egress"
|
||||
source_cluster_security_group = true # bit of reflection lookup
|
||||
}
|
||||
}
|
||||
security_group_tags = {
|
||||
Purpose = "Protector of the kubelet"
|
||||
}
|
||||
|
||||
timeouts = {
|
||||
create = "80m"
|
||||
update = "80m"
|
||||
delete = "80m"
|
||||
}
|
||||
|
||||
tags = {
|
||||
ExtraTag = "Self managed node group complete example"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# aws-auth configmap
|
||||
# Only EKS managed node groups automatically add roles to aws-auth configmap
|
||||
# so we need to ensure fargate profiles and self-managed node roles are added
|
||||
################################################################################
|
||||
|
||||
data "aws_eks_cluster_auth" "this" {
|
||||
name = module.eks.cluster_id
|
||||
}
|
||||
|
||||
locals {
|
||||
kubeconfig = yamlencode({
|
||||
apiVersion = "v1"
|
||||
kind = "Config"
|
||||
current-context = "terraform"
|
||||
clusters = [{
|
||||
name = module.eks.cluster_id
|
||||
cluster = {
|
||||
certificate-authority-data = module.eks.cluster_certificate_authority_data
|
||||
server = module.eks.cluster_endpoint
|
||||
}
|
||||
}]
|
||||
contexts = [{
|
||||
name = "terraform"
|
||||
context = {
|
||||
cluster = module.eks.cluster_id
|
||||
user = "terraform"
|
||||
}
|
||||
}]
|
||||
users = [{
|
||||
name = "terraform"
|
||||
user = {
|
||||
token = data.aws_eks_cluster_auth.this.token
|
||||
}
|
||||
}]
|
||||
})
|
||||
}
|
||||
|
||||
resource "null_resource" "apply" {
|
||||
triggers = {
|
||||
kubeconfig = base64encode(local.kubeconfig)
|
||||
cmd_patch = <<-EOT
|
||||
kubectl create configmap aws-auth -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
|
||||
kubectl patch configmap/aws-auth --patch "${module.eks.aws_auth_configmap_yaml}" -n kube-system --kubeconfig <(echo $KUBECONFIG | base64 --decode)
|
||||
EOT
|
||||
}
|
||||
|
||||
provisioner "local-exec" {
|
||||
interpreter = ["/bin/bash", "-c"]
|
||||
environment = {
|
||||
KUBECONFIG = self.triggers.kubeconfig
|
||||
}
|
||||
command = self.triggers.cmd_patch
|
||||
}
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Supporting Resources
|
||||
################################################################################
|
||||
|
||||
module "vpc" {
|
||||
source = "terraform-aws-modules/vpc/aws"
|
||||
version = "~> 3.0"
|
||||
|
||||
name = local.name
|
||||
cidr = "10.0.0.0/16"
|
||||
|
||||
azs = ["${local.region}a", "${local.region}b", "${local.region}c"]
|
||||
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
|
||||
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
|
||||
|
||||
enable_nat_gateway = true
|
||||
single_nat_gateway = true
|
||||
enable_dns_hostnames = true
|
||||
|
||||
enable_flow_log = true
|
||||
create_flow_log_cloudwatch_iam_role = true
|
||||
create_flow_log_cloudwatch_log_group = true
|
||||
|
||||
public_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/elb" = 1
|
||||
}
|
||||
|
||||
private_subnet_tags = {
|
||||
"kubernetes.io/cluster/${local.name}" = "shared"
|
||||
"kubernetes.io/role/internal-elb" = 1
|
||||
}
|
||||
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
resource "aws_security_group" "additional" {
|
||||
name_prefix = "${local.name}-additional"
|
||||
vpc_id = module.vpc.vpc_id
|
||||
|
||||
ingress {
|
||||
from_port = 22
|
||||
to_port = 22
|
||||
protocol = "tcp"
|
||||
cidr_blocks = [
|
||||
"10.0.0.0/8",
|
||||
"172.16.0.0/12",
|
||||
"192.168.0.0/16",
|
||||
]
|
||||
}
|
||||
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
resource "aws_kms_key" "eks" {
|
||||
description = "EKS Secret Encryption Key"
|
||||
deletion_window_in_days = 7
|
||||
enable_key_rotation = true
|
||||
|
||||
tags = local.tags
|
||||
}
|
||||
|
||||
data "aws_ami" "bottlerocket_ami" {
|
||||
most_recent = true
|
||||
owners = ["amazon"]
|
||||
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["bottlerocket-aws-k8s-${local.cluster_version}-x86_64-*"]
|
||||
}
|
||||
}
|
||||
|
||||
resource "tls_private_key" "this" {
|
||||
algorithm = "RSA"
|
||||
}
|
||||
|
||||
resource "aws_key_pair" "this" {
|
||||
key_name = local.name
|
||||
public_key = tls_private_key.this.public_key_openssh
|
||||
}
|
||||
|
||||
resource "aws_kms_key" "ebs" {
|
||||
description = "Customer managed key to encrypt self managed node group volumes"
|
||||
deletion_window_in_days = 7
|
||||
policy = data.aws_iam_policy_document.ebs.json
|
||||
}
|
||||
|
||||
# This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes
|
||||
data "aws_iam_policy_document" "ebs" {
|
||||
# Copy of default KMS policy that lets you manage it
|
||||
statement {
|
||||
sid = "Enable IAM User Permissions"
|
||||
actions = ["kms:*"]
|
||||
resources = ["*"]
|
||||
|
||||
principals {
|
||||
type = "AWS"
|
||||
identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"]
|
||||
}
|
||||
}
|
||||
|
||||
# Required for EKS
|
||||
statement {
|
||||
sid = "Allow service-linked role use of the CMK"
|
||||
actions = [
|
||||
"kms:Encrypt",
|
||||
"kms:Decrypt",
|
||||
"kms:ReEncrypt*",
|
||||
"kms:GenerateDataKey*",
|
||||
"kms:DescribeKey"
|
||||
]
|
||||
resources = ["*"]
|
||||
|
||||
principals {
|
||||
type = "AWS"
|
||||
identifiers = [
|
||||
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
|
||||
module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
statement {
|
||||
sid = "Allow attachment of persistent resources"
|
||||
actions = ["kms:CreateGrant"]
|
||||
resources = ["*"]
|
||||
|
||||
principals {
|
||||
type = "AWS"
|
||||
identifiers = [
|
||||
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes
|
||||
module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs
|
||||
]
|
||||
}
|
||||
|
||||
condition {
|
||||
test = "Bool"
|
||||
variable = "kms:GrantIsForAWSResource"
|
||||
values = ["true"]
|
||||
}
|
||||
}
|
||||
}
|
||||
167
examples/self_managed_node_group/outputs.tf
Normal file
167
examples/self_managed_node_group/outputs.tf
Normal file
@@ -0,0 +1,167 @@
|
||||
################################################################################
|
||||
# Cluster
|
||||
################################################################################
|
||||
|
||||
output "cluster_arn" {
|
||||
description = "The Amazon Resource Name (ARN) of the cluster"
|
||||
value = module.eks.cluster_arn
|
||||
}
|
||||
|
||||
output "cluster_certificate_authority_data" {
|
||||
description = "Base64 encoded certificate data required to communicate with the cluster"
|
||||
value = module.eks.cluster_certificate_authority_data
|
||||
}
|
||||
|
||||
output "cluster_endpoint" {
|
||||
description = "Endpoint for your Kubernetes API server"
|
||||
value = module.eks.cluster_endpoint
|
||||
}
|
||||
|
||||
output "cluster_id" {
|
||||
description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready"
|
||||
value = module.eks.cluster_id
|
||||
}
|
||||
|
||||
output "cluster_oidc_issuer_url" {
|
||||
description = "The URL on the EKS cluster for the OpenID Connect identity provider"
|
||||
value = module.eks.cluster_oidc_issuer_url
|
||||
}
|
||||
|
||||
output "cluster_platform_version" {
|
||||
description = "Platform version for the cluster"
|
||||
value = module.eks.cluster_platform_version
|
||||
}
|
||||
|
||||
output "cluster_status" {
|
||||
description = "Status of the EKS cluster. One of `CREATING`, `ACTIVE`, `DELETING`, `FAILED`"
|
||||
value = module.eks.cluster_status
|
||||
}
|
||||
|
||||
output "cluster_primary_security_group_id" {
|
||||
description = "Cluster security group that was created by Amazon EKS for the cluster. Managed node groups use this security group for control-plane-to-data-plane communication. Referred to as 'Cluster security group' in the EKS console"
|
||||
value = module.eks.cluster_primary_security_group_id
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Security Group
|
||||
################################################################################
|
||||
|
||||
output "cluster_security_group_arn" {
|
||||
description = "Amazon Resource Name (ARN) of the cluster security group"
|
||||
value = module.eks.cluster_security_group_arn
|
||||
}
|
||||
|
||||
output "cluster_security_group_id" {
|
||||
description = "ID of the cluster security group"
|
||||
value = module.eks.cluster_security_group_id
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Node Security Group
|
||||
################################################################################
|
||||
|
||||
output "node_security_group_arn" {
|
||||
description = "Amazon Resource Name (ARN) of the node shared security group"
|
||||
value = module.eks.node_security_group_arn
|
||||
}
|
||||
|
||||
output "node_security_group_id" {
|
||||
description = "ID of the node shared security group"
|
||||
value = module.eks.node_security_group_id
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# IRSA
|
||||
################################################################################
|
||||
|
||||
output "oidc_provider_arn" {
|
||||
description = "The ARN of the OIDC Provider if `enable_irsa = true`"
|
||||
value = module.eks.oidc_provider_arn
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# IAM Role
|
||||
################################################################################
|
||||
|
||||
output "cluster_iam_role_name" {
|
||||
description = "IAM role name of the EKS cluster"
|
||||
value = module.eks.cluster_iam_role_name
|
||||
}
|
||||
|
||||
output "cluster_iam_role_arn" {
|
||||
description = "IAM role ARN of the EKS cluster"
|
||||
value = module.eks.cluster_iam_role_arn
|
||||
}
|
||||
|
||||
output "cluster_iam_role_unique_id" {
|
||||
description = "Stable and unique string identifying the IAM role"
|
||||
value = module.eks.cluster_iam_role_unique_id
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Addons
|
||||
################################################################################
|
||||
|
||||
output "cluster_addons" {
|
||||
description = "Map of attribute maps for all EKS cluster addons enabled"
|
||||
value = module.eks.cluster_addons
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Identity Provider
|
||||
################################################################################
|
||||
|
||||
output "cluster_identity_providers" {
|
||||
description = "Map of attribute maps for all EKS identity providers enabled"
|
||||
value = module.eks.cluster_identity_providers
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# CloudWatch Log Group
|
||||
################################################################################
|
||||
|
||||
output "cloudwatch_log_group_name" {
|
||||
description = "Name of cloudwatch log group created"
|
||||
value = module.eks.cloudwatch_log_group_name
|
||||
}
|
||||
|
||||
output "cloudwatch_log_group_arn" {
|
||||
description = "Arn of cloudwatch log group created"
|
||||
value = module.eks.cloudwatch_log_group_arn
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Fargate Profile
|
||||
################################################################################
|
||||
|
||||
output "fargate_profiles" {
|
||||
description = "Map of attribute maps for all EKS Fargate Profiles created"
|
||||
value = module.eks.fargate_profiles
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# EKS Managed Node Group
|
||||
################################################################################
|
||||
|
||||
output "eks_managed_node_groups" {
|
||||
description = "Map of attribute maps for all EKS managed node groups created"
|
||||
value = module.eks.eks_managed_node_groups
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Self Managed Node Group
|
||||
################################################################################
|
||||
|
||||
output "self_managed_node_groups" {
|
||||
description = "Map of attribute maps for all self managed node groups created"
|
||||
value = module.eks.self_managed_node_groups
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# Additional
|
||||
################################################################################
|
||||
|
||||
output "aws_auth_configmap_yaml" {
|
||||
description = "Formatted yaml output for base aws-auth configmap containing roles used in cluster node groups/fargate profiles"
|
||||
value = module.eks.aws_auth_configmap_yaml
|
||||
}
|
||||
18
examples/self_managed_node_group/versions.tf
Normal file
18
examples/self_managed_node_group/versions.tf
Normal file
@@ -0,0 +1,18 @@
|
||||
terraform {
|
||||
required_version = ">= 0.13.1"
|
||||
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
version = ">= 3.64"
|
||||
}
|
||||
null = {
|
||||
source = "hashicorp/null"
|
||||
version = ">= 3.0"
|
||||
}
|
||||
tls = {
|
||||
source = "hashicorp/tls"
|
||||
version = ">= 2.2"
|
||||
}
|
||||
}
|
||||
}
|
||||
78
examples/user_data/README.md
Normal file
78
examples/user_data/README.md
Normal file
@@ -0,0 +1,78 @@
|
||||
# Internal User Data Module
|
||||
|
||||
Configurations in this directory render various user data outputs used for testing and validating the internal `_user_data` sub-module.
|
||||
|
||||
## Usage
|
||||
|
||||
To run this example you need to execute:
|
||||
|
||||
```bash
|
||||
$ terraform init
|
||||
$ terraform plan
|
||||
$ terraform apply
|
||||
```
|
||||
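After `terraform apply`, the rendered (base64 decoded) user data for any of the variants can be inspected through its corresponding output defined in `outputs.tf`, for example:

```bash
# Print the rendered user data for one of the EKS managed node group variants
$ terraform output eks_mng_linux_additional
```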
|
||||
<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
## Requirements
|
||||
|
||||
| Name | Version |
|
||||
|------|---------|
|
||||
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
|
||||
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.64 |
|
||||
|
||||
## Providers
|
||||
|
||||
No providers.
|
||||
|
||||
## Modules
|
||||
|
||||
| Name | Source | Version |
|
||||
|------|--------|---------|
|
||||
| <a name="module_eks_mng_bottlerocket_additional"></a> [eks\_mng\_bottlerocket\_additional](#module\_eks\_mng\_bottlerocket\_additional) | ../../modules/_user_data | n/a |
|
||||
| <a name="module_eks_mng_bottlerocket_custom_ami"></a> [eks\_mng\_bottlerocket\_custom\_ami](#module\_eks\_mng\_bottlerocket\_custom\_ami) | ../../modules/_user_data | n/a |
|
||||
| <a name="module_eks_mng_bottlerocket_custom_template"></a> [eks\_mng\_bottlerocket\_custom\_template](#module\_eks\_mng\_bottlerocket\_custom\_template) | ../../modules/_user_data | n/a |
|
||||
| <a name="module_eks_mng_bottlerocket_no_op"></a> [eks\_mng\_bottlerocket\_no\_op](#module\_eks\_mng\_bottlerocket\_no\_op) | ../../modules/_user_data | n/a |
|
||||
| <a name="module_eks_mng_linux_additional"></a> [eks\_mng\_linux\_additional](#module\_eks\_mng\_linux\_additional) | ../../modules/_user_data | n/a |
|
||||
| <a name="module_eks_mng_linux_custom_ami"></a> [eks\_mng\_linux\_custom\_ami](#module\_eks\_mng\_linux\_custom\_ami) | ../../modules/_user_data | n/a |
|
||||
| <a name="module_eks_mng_linux_custom_template"></a> [eks\_mng\_linux\_custom\_template](#module\_eks\_mng\_linux\_custom\_template) | ../../modules/_user_data | n/a |
|
||||
| <a name="module_eks_mng_linux_no_op"></a> [eks\_mng\_linux\_no\_op](#module\_eks\_mng\_linux\_no\_op) | ../../modules/_user_data | n/a |
|
||||
| <a name="module_self_mng_bottlerocket_bootstrap"></a> [self\_mng\_bottlerocket\_bootstrap](#module\_self\_mng\_bottlerocket\_bootstrap) | ../../modules/_user_data | n/a |
|
||||
| <a name="module_self_mng_bottlerocket_custom_template"></a> [self\_mng\_bottlerocket\_custom\_template](#module\_self\_mng\_bottlerocket\_custom\_template) | ../../modules/_user_data | n/a |
|
||||
| <a name="module_self_mng_bottlerocket_no_op"></a> [self\_mng\_bottlerocket\_no\_op](#module\_self\_mng\_bottlerocket\_no\_op) | ../../modules/_user_data | n/a |
|
||||
| <a name="module_self_mng_linux_bootstrap"></a> [self\_mng\_linux\_bootstrap](#module\_self\_mng\_linux\_bootstrap) | ../../modules/_user_data | n/a |
|
||||
| <a name="module_self_mng_linux_custom_template"></a> [self\_mng\_linux\_custom\_template](#module\_self\_mng\_linux\_custom\_template) | ../../modules/_user_data | n/a |
|
||||
| <a name="module_self_mng_linux_no_op"></a> [self\_mng\_linux\_no\_op](#module\_self\_mng\_linux\_no\_op) | ../../modules/_user_data | n/a |
|
||||
| <a name="module_self_mng_windows_bootstrap"></a> [self\_mng\_windows\_bootstrap](#module\_self\_mng\_windows\_bootstrap) | ../../modules/_user_data | n/a |
|
||||
| <a name="module_self_mng_windows_custom_template"></a> [self\_mng\_windows\_custom\_template](#module\_self\_mng\_windows\_custom\_template) | ../../modules/_user_data | n/a |
|
||||
| <a name="module_self_mng_windows_no_op"></a> [self\_mng\_windows\_no\_op](#module\_self\_mng\_windows\_no\_op) | ../../modules/_user_data | n/a |
|
||||
|
||||
## Resources
|
||||
|
||||
No resources.
|
||||
|
||||
## Inputs
|
||||
|
||||
No inputs.
|
||||
|
||||
## Outputs
|
||||
|
||||
| Name | Description |
|
||||
|------|-------------|
|
||||
| <a name="output_eks_mng_bottlerocket_additional"></a> [eks\_mng\_bottlerocket\_additional](#output\_eks\_mng\_bottlerocket\_additional) | Base64 decoded user data rendered for the provided inputs |
|
||||
| <a name="output_eks_mng_bottlerocket_custom_ami"></a> [eks\_mng\_bottlerocket\_custom\_ami](#output\_eks\_mng\_bottlerocket\_custom\_ami) | Base64 decoded user data rendered for the provided inputs |
|
||||
| <a name="output_eks_mng_bottlerocket_custom_template"></a> [eks\_mng\_bottlerocket\_custom\_template](#output\_eks\_mng\_bottlerocket\_custom\_template) | Base64 decoded user data rendered for the provided inputs |
|
||||
| <a name="output_eks_mng_bottlerocket_no_op"></a> [eks\_mng\_bottlerocket\_no\_op](#output\_eks\_mng\_bottlerocket\_no\_op) | Base64 decoded user data rendered for the provided inputs |
|
||||
| <a name="output_eks_mng_linux_additional"></a> [eks\_mng\_linux\_additional](#output\_eks\_mng\_linux\_additional) | Base64 decoded user data rendered for the provided inputs |
|
||||
| <a name="output_eks_mng_linux_custom_ami"></a> [eks\_mng\_linux\_custom\_ami](#output\_eks\_mng\_linux\_custom\_ami) | Base64 decoded user data rendered for the provided inputs |
|
||||
| <a name="output_eks_mng_linux_custom_template"></a> [eks\_mng\_linux\_custom\_template](#output\_eks\_mng\_linux\_custom\_template) | Base64 decoded user data rendered for the provided inputs |
|
||||
| <a name="output_eks_mng_linux_no_op"></a> [eks\_mng\_linux\_no\_op](#output\_eks\_mng\_linux\_no\_op) | Base64 decoded user data rendered for the provided inputs |
|
||||
| <a name="output_self_mng_bottlerocket_bootstrap"></a> [self\_mng\_bottlerocket\_bootstrap](#output\_self\_mng\_bottlerocket\_bootstrap) | Base64 decoded user data rendered for the provided inputs |
|
||||
| <a name="output_self_mng_bottlerocket_custom_template"></a> [self\_mng\_bottlerocket\_custom\_template](#output\_self\_mng\_bottlerocket\_custom\_template) | Base64 decoded user data rendered for the provided inputs |
|
||||
| <a name="output_self_mng_bottlerocket_no_op"></a> [self\_mng\_bottlerocket\_no\_op](#output\_self\_mng\_bottlerocket\_no\_op) | Base64 decoded user data rendered for the provided inputs |
|
||||
| <a name="output_self_mng_linux_bootstrap"></a> [self\_mng\_linux\_bootstrap](#output\_self\_mng\_linux\_bootstrap) | Base64 decoded user data rendered for the provided inputs |
|
||||
| <a name="output_self_mng_linux_custom_template"></a> [self\_mng\_linux\_custom\_template](#output\_self\_mng\_linux\_custom\_template) | Base64 decoded user data rendered for the provided inputs |
|
||||
| <a name="output_self_mng_linux_no_op"></a> [self\_mng\_linux\_no\_op](#output\_self\_mng\_linux\_no\_op) | Base64 decoded user data rendered for the provided inputs |
|
||||
| <a name="output_self_mng_windows_bootstrap"></a> [self\_mng\_windows\_bootstrap](#output\_self\_mng\_windows\_bootstrap) | Base64 decoded user data rendered for the provided inputs |
|
||||
| <a name="output_self_mng_windows_custom_template"></a> [self\_mng\_windows\_custom\_template](#output\_self\_mng\_windows\_custom\_template) | Base64 decoded user data rendered for the provided inputs |
|
||||
| <a name="output_self_mng_windows_no_op"></a> [self\_mng\_windows\_no\_op](#output\_self\_mng\_windows\_no\_op) | Base64 decoded user data rendered for the provided inputs |
|
||||
<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
|
||||
289
examples/user_data/main.tf
Normal file
289
examples/user_data/main.tf
Normal file
@@ -0,0 +1,289 @@
|
||||
locals {
|
||||
name = "ex-${replace(basename(path.cwd), "_", "-")}"
|
||||
|
||||
cluster_endpoint = "https://012345678903AB2BAE5D1E0BFE0E2B50.gr7.us-east-1.eks.amazonaws.com"
|
||||
cluster_auth_base64 = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM1ekNDQWMrZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKbXFqQ1VqNGdGR2w3ZW5PeWthWnZ2RjROOTVOUEZCM2o0cGhVZUsrWGFtN2ZSQnZya0d6OGxKZmZEZWF2b2plTwpQK2xOZFlqdHZncmxCUEpYdHZIZmFzTzYxVzdIZmdWQ2EvamdRM2w3RmkvL1dpQmxFOG9oWUZkdWpjc0s1SXM2CnNkbk5KTTNYUWN2TysrSitkV09NT2ZlNzlsSWdncmdQLzgvRU9CYkw3eUY1aU1hS3lsb1RHL1V3TlhPUWt3ZUcKblBNcjdiUmdkQ1NCZTlXYXowOGdGRmlxV2FOditsTDhsODBTdFZLcWVNVlUxbjQyejVwOVpQRTd4T2l6L0xTNQpYV2lXWkVkT3pMN0xBWGVCS2gzdkhnczFxMkI2d1BKZnZnS1NzWllQRGFpZTloT1NNOUJkNFNPY3JrZTRYSVBOCkVvcXVhMlYrUDRlTWJEQzhMUkVWRDdCdVZDdWdMTldWOTBoL3VJUy9WU2VOcEdUOGVScE5DakszSjc2aFlsWm8KWjNGRG5QWUY0MWpWTHhiOXF0U1ROdEp6amYwWXBEYnFWci9xZzNmQWlxbVorMzd3YWM1eHlqMDZ4cmlaRUgzZgpUM002d2lCUEVHYVlGeWN5TmNYTk5aYW9DWDJVL0N1d2JsUHAKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQ=="
|
||||
cluster_service_ipv4_cidr = "172.16.0.0/16"
|
||||
}
|
||||
|
||||
################################################################################
|
||||
# User Data Module
|
||||
################################################################################
|
||||
|
||||
# EKS managed node group - linux
|
||||
module "eks_mng_linux_no_op" {
|
||||
source = "../../modules/_user_data"
|
||||
}
|
||||
|
||||
module "eks_mng_linux_additional" {
|
||||
source = "../../modules/_user_data"
|
||||
|
||||
pre_bootstrap_user_data = <<-EOT
|
||||
echo "foo"
|
||||
export FOO=bar
|
||||
EOT
|
||||
|
||||
bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
|
||||
|
||||
post_bootstrap_user_data = <<-EOT
|
||||
echo "All done"
|
||||
EOT
|
||||
}
|
||||
|
||||
module "eks_mng_linux_custom_ami" {
|
||||
source = "../../modules/_user_data"
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_endpoint = local.cluster_endpoint
|
||||
cluster_auth_base64 = local.cluster_auth_base64
|
||||
cluster_service_ipv4_cidr = local.cluster_service_ipv4_cidr
|
||||
|
||||
enable_bootstrap_user_data = true
|
||||
|
||||
pre_bootstrap_user_data = <<-EOT
|
||||
echo "foo"
|
||||
export FOO=bar
|
||||
EOT
|
||||
|
||||
bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
|
||||
|
||||
post_bootstrap_user_data = <<-EOT
|
||||
echo "All done"
|
||||
EOT
|
||||
}
|
||||
|
||||
|
||||
module "eks_mng_linux_custom_template" {
|
||||
source = "../../modules/_user_data"
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_endpoint = local.cluster_endpoint
|
||||
cluster_auth_base64 = local.cluster_auth_base64
|
||||
|
||||
user_data_template_path = "${path.module}/templates/linux_custom.tpl"
|
||||
|
||||
pre_bootstrap_user_data = <<-EOT
|
||||
echo "foo"
|
||||
export FOO=bar
|
||||
EOT
|
||||
|
||||
bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
|
||||
|
||||
post_bootstrap_user_data = <<-EOT
|
||||
echo "All done"
|
||||
EOT
|
||||
}
|
||||
|
||||
# EKS managed node group - bottlerocket
|
||||
module "eks_mng_bottlerocket_no_op" {
|
||||
source = "../../modules/_user_data"
|
||||
|
||||
platform = "bottlerocket"
|
||||
}
|
||||
|
||||
module "eks_mng_bottlerocket_additional" {
|
||||
source = "../../modules/_user_data"
|
||||
|
||||
platform = "bottlerocket"
|
||||
|
||||
bootstrap_extra_args = <<-EOT
|
||||
# extra args added
|
||||
[settings.kernel]
|
||||
lockdown = "integrity"
|
||||
EOT
|
||||
}
|
||||
|
||||
module "eks_mng_bottlerocket_custom_ami" {
|
||||
source = "../../modules/_user_data"
|
||||
|
||||
platform = "bottlerocket"
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_endpoint = local.cluster_endpoint
|
||||
cluster_auth_base64 = local.cluster_auth_base64
|
||||
|
||||
enable_bootstrap_user_data = true
|
||||
|
||||
bootstrap_extra_args = <<-EOT
|
||||
# extra args added
|
||||
[settings.kernel]
|
||||
lockdown = "integrity"
|
||||
EOT
|
||||
}
|
||||
|
||||
module "eks_mng_bottlerocket_custom_template" {
|
||||
source = "../../modules/_user_data"
|
||||
|
||||
platform = "bottlerocket"
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_endpoint = local.cluster_endpoint
|
||||
cluster_auth_base64 = local.cluster_auth_base64
|
||||
|
||||
user_data_template_path = "${path.module}/templates/bottlerocket_custom.tpl"
|
||||
|
||||
bootstrap_extra_args = <<-EOT
|
||||
# extra args added
|
||||
[settings.kernel]
|
||||
lockdown = "integrity"
|
||||
EOT
|
||||
}
|
||||
|
||||
# Self managed node group - linux
|
||||
module "self_mng_linux_no_op" {
|
||||
source = "../../modules/_user_data"
|
||||
|
||||
is_eks_managed_node_group = false
|
||||
}
|
||||
|
||||
module "self_mng_linux_bootstrap" {
|
||||
source = "../../modules/_user_data"
|
||||
|
||||
enable_bootstrap_user_data = true
|
||||
is_eks_managed_node_group = false
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_endpoint = local.cluster_endpoint
|
||||
cluster_auth_base64 = local.cluster_auth_base64
|
||||
|
||||
pre_bootstrap_user_data = <<-EOT
|
||||
echo "foo"
|
||||
export FOO=bar
|
||||
EOT
|
||||
|
||||
bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
|
||||
|
||||
post_bootstrap_user_data = <<-EOT
|
||||
echo "All done"
|
||||
EOT
|
||||
}
|
||||
|
||||
module "self_mng_linux_custom_template" {
|
||||
source = "../../modules/_user_data"
|
||||
|
||||
enable_bootstrap_user_data = true
|
||||
is_eks_managed_node_group = false
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_endpoint = local.cluster_endpoint
|
||||
cluster_auth_base64 = local.cluster_auth_base64
|
||||
|
||||
user_data_template_path = "${path.module}/templates/linux_custom.tpl"
|
||||
|
||||
pre_bootstrap_user_data = <<-EOT
|
||||
echo "foo"
|
||||
export FOO=bar
|
||||
EOT
|
||||
|
||||
bootstrap_extra_args = "--kubelet-extra-args '--node-labels=node.kubernetes.io/lifecycle=spot'"
|
||||
|
||||
post_bootstrap_user_data = <<-EOT
|
||||
echo "All done"
|
||||
EOT
|
||||
}
|
||||
|
||||
# Self managed node group - bottlerocket
|
||||
module "self_mng_bottlerocket_no_op" {
|
||||
source = "../../modules/_user_data"
|
||||
|
||||
platform = "bottlerocket"
|
||||
|
||||
is_eks_managed_node_group = false
|
||||
}
|
||||
|
||||
module "self_mng_bottlerocket_bootstrap" {
|
||||
source = "../../modules/_user_data"
|
||||
|
||||
platform = "bottlerocket"
|
||||
|
||||
enable_bootstrap_user_data = true
|
||||
is_eks_managed_node_group = false
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_endpoint = local.cluster_endpoint
|
||||
cluster_auth_base64 = local.cluster_auth_base64
|
||||
|
||||
bootstrap_extra_args = <<-EOT
|
||||
# extra args added
|
||||
[settings.kernel]
|
||||
lockdown = "integrity"
|
||||
EOT
|
||||
}
|
||||
|
||||
module "self_mng_bottlerocket_custom_template" {
|
||||
source = "../../modules/_user_data"
|
||||
|
||||
platform = "bottlerocket"
|
||||
|
||||
enable_bootstrap_user_data = true
|
||||
is_eks_managed_node_group = false
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_endpoint = local.cluster_endpoint
|
||||
cluster_auth_base64 = local.cluster_auth_base64
|
||||
|
||||
user_data_template_path = "${path.module}/templates/bottlerocket_custom.tpl"
|
||||
|
||||
bootstrap_extra_args = <<-EOT
|
||||
# extra args added
|
||||
[settings.kernel]
|
||||
lockdown = "integrity"
|
||||
EOT
|
||||
}
|
||||
|
||||
# Self managed node group - windows
|
||||
module "self_mng_windows_no_op" {
|
||||
source = "../../modules/_user_data"
|
||||
|
||||
platform = "windows"
|
||||
|
||||
is_eks_managed_node_group = false
|
||||
}
|
||||
|
||||
module "self_mng_windows_bootstrap" {
|
||||
source = "../../modules/_user_data"
|
||||
|
||||
platform = "windows"
|
||||
|
||||
enable_bootstrap_user_data = true
|
||||
is_eks_managed_node_group = false
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_endpoint = local.cluster_endpoint
|
||||
cluster_auth_base64 = local.cluster_auth_base64
|
||||
|
||||
pre_bootstrap_user_data = <<-EOT
|
||||
[string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
|
||||
EOT
|
||||
# I don't know if this is the right way on Windows, but it's just a string check here anyway
|
||||
bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot"
|
||||
|
||||
post_bootstrap_user_data = <<-EOT
|
||||
[string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
|
||||
EOT
|
||||
}
|
||||
|
||||
module "self_mng_windows_custom_template" {
|
||||
source = "../../modules/_user_data"
|
||||
|
||||
platform = "windows"
|
||||
|
||||
enable_bootstrap_user_data = true
|
||||
is_eks_managed_node_group = false
|
||||
|
||||
cluster_name = local.name
|
||||
cluster_endpoint = local.cluster_endpoint
|
||||
cluster_auth_base64 = local.cluster_auth_base64
|
||||
|
||||
user_data_template_path = "${path.module}/templates/windows_custom.tpl"
|
||||
|
||||
pre_bootstrap_user_data = <<-EOT
|
||||
[string]$Something = 'IDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
|
||||
EOT
|
||||
# I don't know if this is the right way on Windows, but it's just a string check here anyway
|
||||
bootstrap_extra_args = "-KubeletExtraArgs --node-labels=node.kubernetes.io/lifecycle=spot"
|
||||
|
||||
post_bootstrap_user_data = <<-EOT
|
||||
[string]$Something = 'IStillDoNotKnowAnyPowerShell ¯\_(ツ)_/¯'
|
||||
EOT
|
||||
}
|
||||
89
examples/user_data/outputs.tf
Normal file
89
examples/user_data/outputs.tf
Normal file
@@ -0,0 +1,89 @@
|
||||
# EKS managed node group - linux
|
||||
output "eks_mng_linux_no_op" {
|
||||
description = "Base64 decoded user data rendered for the provided inputs"
|
||||
value = base64decode(module.eks_mng_linux_no_op.user_data)
|
||||
}
|
||||
|
||||
output "eks_mng_linux_additional" {
|
||||
description = "Base64 decoded user data rendered for the provided inputs"
|
||||
value = base64decode(module.eks_mng_linux_additional.user_data)
|
||||
}
|
||||
|
||||
output "eks_mng_linux_custom_ami" {
|
||||
description = "Base64 decoded user data rendered for the provided inputs"
|
||||
value = base64decode(module.eks_mng_linux_custom_ami.user_data)
|
||||
}
|
||||
|
||||
output "eks_mng_linux_custom_template" {
|
||||
description = "Base64 decoded user data rendered for the provided inputs"
|
||||
value = base64decode(module.eks_mng_linux_custom_template.user_data)
|
||||
}
|
||||
|
||||
# EKS managed node group - bottlerocket
|
||||
output "eks_mng_bottlerocket_no_op" {
|
||||
description = "Base64 decoded user data rendered for the provided inputs"
|
||||
value = base64decode(module.eks_mng_bottlerocket_no_op.user_data)
|
||||
}
|
||||
|
||||
output "eks_mng_bottlerocket_additional" {
|
||||
description = "Base64 decoded user data rendered for the provided inputs"
|
||||
value = base64decode(module.eks_mng_bottlerocket_additional.user_data)
|
||||
}
|
||||
|
||||
output "eks_mng_bottlerocket_custom_ami" {
|
||||
description = "Base64 decoded user data rendered for the provided inputs"
|
||||
value = base64decode(module.eks_mng_bottlerocket_custom_ami.user_data)
|
||||
}
|
||||
|
||||
output "eks_mng_bottlerocket_custom_template" {
|
||||
description = "Base64 decoded user data rendered for the provided inputs"
|
||||
value = base64decode(module.eks_mng_bottlerocket_custom_template.user_data)
|
||||
}
|
||||
|
||||
# Self managed node group - linux
|
||||
output "self_mng_linux_no_op" {
|
||||
description = "Base64 decoded user data rendered for the provided inputs"
|
||||
value = base64decode(module.self_mng_linux_no_op.user_data)
|
||||
}
|
||||
|
||||
output "self_mng_linux_bootstrap" {
|
||||
description = "Base64 decoded user data rendered for the provided inputs"
|
||||
value = base64decode(module.self_mng_linux_bootstrap.user_data)
|
||||
}
|
||||
|
||||
output "self_mng_linux_custom_template" {
|
||||
description = "Base64 decoded user data rendered for the provided inputs"
|
||||
value = base64decode(module.self_mng_linux_custom_template.user_data)
|
||||
}
|
||||
|
||||
# Self managed node group - bottlerocket
|
||||
output "self_mng_bottlerocket_no_op" {
|
||||
description = "Base64 decoded user data rendered for the provided inputs"
|
||||
value = base64decode(module.self_mng_bottlerocket_no_op.user_data)
|
||||
}
|
||||
|
||||
output "self_mng_bottlerocket_bootstrap" {
|
||||
description = "Base64 decoded user data rendered for the provided inputs"
|
||||
value = base64decode(module.self_mng_bottlerocket_bootstrap.user_data)
|
||||
}
|
||||
|
||||
output "self_mng_bottlerocket_custom_template" {
|
||||
description = "Base64 decoded user data rendered for the provided inputs"
|
||||
value = base64decode(module.self_mng_bottlerocket_custom_template.user_data)
|
||||
}
|
||||
|
||||
# Self managed node group - windows
|
||||
output "self_mng_windows_no_op" {
|
||||
description = "Base64 decoded user data rendered for the provided inputs"
|
||||
value = base64decode(module.self_mng_windows_no_op.user_data)
|
||||
}
|
||||
|
||||
output "self_mng_windows_bootstrap" {
|
||||
description = "Base64 decoded user data rendered for the provided inputs"
|
||||
value = base64decode(module.self_mng_windows_bootstrap.user_data)
|
||||
}
|
||||
|
||||
output "self_mng_windows_custom_template" {
|
||||
description = "Base64 decoded user data rendered for the provided inputs"
|
||||
value = base64decode(module.self_mng_windows_custom_template.user_data)
|
||||
}
|
||||
7
examples/user_data/templates/bottlerocket_custom.tpl
Normal file
7
examples/user_data/templates/bottlerocket_custom.tpl
Normal file
@@ -0,0 +1,7 @@
|
||||
# Custom user data template provided for rendering
|
||||
[settings.kubernetes]
|
||||
"cluster-name" = "${cluster_name}"
|
||||
"api-server" = "${cluster_endpoint}"
|
||||
"cluster-certificate" = "${cluster_auth_base64}"
|
||||
|
||||
${bootstrap_extra_args~}
|
||||
10
examples/user_data/templates/linux_custom.tpl
Normal file
10
examples/user_data/templates/linux_custom.tpl
Normal file
@@ -0,0 +1,10 @@
|
||||
#!/bin/bash
|
||||
set -ex
|
||||
|
||||
${pre_bootstrap_user_data ~}
|
||||
|
||||
# Custom user data template provided for rendering
|
||||
B64_CLUSTER_CA=${cluster_auth_base64}
|
||||
API_SERVER_URL=${cluster_endpoint}
|
||||
/etc/eks/bootstrap.sh ${cluster_name} ${bootstrap_extra_args} --b64-cluster-ca $B64_CLUSTER_CA --apiserver-endpoint $API_SERVER_URL
|
||||
${post_bootstrap_user_data ~}
|
||||
10
examples/user_data/templates/windows_custom.tpl
Normal file
10
examples/user_data/templates/windows_custom.tpl
Normal file
@@ -0,0 +1,10 @@
|
||||
# Custom user data template provided for rendering
|
||||
<powershell>
|
||||
${pre_bootstrap_user_data ~}
|
||||
[string]$EKSBinDir = "$env:ProgramFiles\Amazon\EKS"
|
||||
[string]$EKSBootstrapScriptName = 'Start-EKSBootstrap.ps1'
|
||||
[string]$EKSBootstrapScriptFile = "$EKSBinDir\$EKSBootstrapScriptName"
|
||||
& $EKSBootstrapScriptFile -EKSClusterName ${cluster_name} -APIServerEndpoint ${cluster_endpoint} -Base64ClusterCA ${cluster_auth_base64} ${bootstrap_extra_args} 3>&1 4>&1 5>&1 6>&1
|
||||
$LastError = if ($?) { 0 } else { $Error[0].Exception.HResult }
|
||||
${post_bootstrap_user_data ~}
|
||||
</powershell>
|
||||
10
examples/user_data/versions.tf
Normal file
10
examples/user_data/versions.tf
Normal file
@@ -0,0 +1,10 @@
|
||||
terraform {
|
||||
required_version = ">= 0.13.1"
|
||||
|
||||
required_providers {
|
||||
aws = {
|
||||
source = "hashicorp/aws"
|
||||
version = ">= 3.64"
|
||||
}
|
||||
}
|
||||
}
|
||||
16
fargate.tf
16
fargate.tf
@@ -1,16 +0,0 @@
|
||||
module "fargate" {
|
||||
source = "./modules/fargate"
|
||||
|
||||
create_eks = var.create_eks
|
||||
create_fargate_pod_execution_role = var.create_fargate_pod_execution_role
|
||||
|
||||
cluster_name = local.cluster_name
|
||||
fargate_pod_execution_role_name = var.fargate_pod_execution_role_name
|
||||
permissions_boundary = var.permissions_boundary
|
||||
iam_path = var.iam_path
|
||||
subnets = coalescelist(var.fargate_subnets, var.subnets, [""])
|
||||
|
||||
fargate_profiles = var.fargate_profiles
|
||||
|
||||
tags = var.tags
|
||||
}
|
||||
23
irsa.tf
23
irsa.tf
@@ -1,23 +0,0 @@
|
||||
# Enable IAM Roles for EKS Service-Accounts (IRSA).
|
||||
|
||||
# The Root CA Thumbprint for an OpenID Connect Identity Provider is currently
|
||||
# being passed as a default value which is the same for all regions and
|
||||
# is valid until (Jun 28 17:39:16 2034 GMT).
|
||||
# https://crt.sh/?q=9E99A48A9960B14926BB7F3B02E22DA2B0AB7280
|
||||
# https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_providers_create_oidc_verify-thumbprint.html
|
||||
# https://github.com/terraform-providers/terraform-provider-aws/issues/10104
|
||||
|
||||
resource "aws_iam_openid_connect_provider" "oidc_provider" {
|
||||
count = var.enable_irsa && var.create_eks ? 1 : 0
|
||||
|
||||
client_id_list = local.client_id_list
|
||||
thumbprint_list = [var.eks_oidc_root_ca_thumbprint]
|
||||
url = local.cluster_oidc_issuer_url
|
||||
|
||||
tags = merge(
|
||||
{
|
||||
Name = "${var.cluster_name}-eks-irsa"
|
||||
},
|
||||
var.tags
|
||||
)
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
resource "local_file" "kubeconfig" {
|
||||
count = var.write_kubeconfig && var.create_eks ? 1 : 0
|
||||
|
||||
content = local.kubeconfig
|
||||
filename = substr(var.kubeconfig_output_path, -1, 1) == "/" ? "${var.kubeconfig_output_path}kubeconfig_${var.cluster_name}" : var.kubeconfig_output_path
|
||||
file_permission = var.kubeconfig_file_permission
|
||||
directory_permission = "0755"
|
||||
}
|
||||
263
locals.tf
263
locals.tf
@@ -1,263 +0,0 @@
|
||||
locals {
|
||||
|
||||
# EKS Cluster
|
||||
cluster_id = coalescelist(aws_eks_cluster.this[*].id, [""])[0]
|
||||
cluster_arn = coalescelist(aws_eks_cluster.this[*].arn, [""])[0]
|
||||
cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0]
|
||||
cluster_endpoint = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0]
|
||||
cluster_auth_base64 = coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0]
|
||||
cluster_oidc_issuer_url = flatten(concat(aws_eks_cluster.this[*].identity[*].oidc[0].issuer, [""]))[0]
|
||||
cluster_primary_security_group_id = coalescelist(aws_eks_cluster.this[*].vpc_config[0].cluster_security_group_id, [""])[0]
|
||||
|
||||
cluster_security_group_id = var.cluster_create_security_group ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id
|
||||
cluster_iam_role_name = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.name) : var.cluster_iam_role_name
|
||||
cluster_iam_role_arn = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.arn) : join("", data.aws_iam_role.custom_cluster_iam_role.*.arn)
|
||||
|
||||
# Worker groups
|
||||
worker_security_group_id = var.worker_create_security_group ? join("", aws_security_group.workers.*.id) : var.worker_security_group_id
|
||||
|
||||
default_iam_role_id = concat(aws_iam_role.workers.*.id, [""])[0]
|
||||
default_ami_id_linux = local.workers_group_defaults.ami_id != "" ? local.workers_group_defaults.ami_id : concat(data.aws_ami.eks_worker.*.id, [""])[0]
|
||||
default_ami_id_windows = local.workers_group_defaults.ami_id_windows != "" ? local.workers_group_defaults.ami_id_windows : concat(data.aws_ami.eks_worker_windows.*.id, [""])[0]
|
||||
|
||||
worker_group_launch_configuration_count = length(var.worker_groups)
|
||||
worker_group_launch_template_count = length(var.worker_groups_launch_template)
|
||||
|
||||
worker_groups_platforms = [for x in concat(var.worker_groups, var.worker_groups_launch_template) : try(x.platform, var.workers_group_defaults["platform"], var.default_platform)]
|
||||
|
||||
worker_ami_name_filter = coalesce(var.worker_ami_name_filter, "amazon-eks-node-${coalesce(var.cluster_version, "cluster_version")}-v*")
|
||||
worker_ami_name_filter_windows = coalesce(var.worker_ami_name_filter_windows, "Windows_Server-2019-English-Core-EKS_Optimized-${coalesce(var.cluster_version, "cluster_version")}-*")
|
||||
|
||||
ec2_principal = "ec2.${data.aws_partition.current.dns_suffix}"
|
||||
sts_principal = "sts.${data.aws_partition.current.dns_suffix}"
|
||||
client_id_list = distinct(compact(concat([local.sts_principal], var.openid_connect_audiences)))
|
||||
policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
|
||||
|
||||
workers_group_defaults_defaults = {
|
||||
name = "count.index" # Name of the worker group. Literal count.index will never be used but if name is not set, the count.index interpolation will be used.
|
||||
tags = [] # A list of maps defining extra tags to be applied to the worker group autoscaling group and volumes.
|
||||
ami_id = "" # AMI ID for the eks linux based workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI based on platform.
|
||||
ami_id_windows = "" # AMI ID for the eks windows based workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI based on platform.
|
||||
asg_desired_capacity = "1" # Desired worker capacity in the autoscaling group. Changing this value will not affect the autoscaling group's desired capacity because the cluster-autoscaler manages scaling of the nodes: it adds nodes when pods are pending and removes them when they are no longer required by modifying the autoscaling group's desired_capacity. Note that an issue exists where changing asg_min_size also modifies asg_desired_capacity.
|
||||
asg_max_size = "3" # Maximum worker capacity in the autoscaling group.
|
||||
asg_min_size = "1" # Minimum worker capacity in the autoscaling group. NOTE: Changing this parameter affects asg_desired_capacity; for example, raising it to 2 changes asg_desired_capacity to 2, but lowering it back to 1 will not change asg_desired_capacity.
|
||||
asg_force_delete = false # Enable forced deletion for the autoscaling group.
|
||||
asg_initial_lifecycle_hooks = [] # Initial lifecycle hooks for the autoscaling group.
|
||||
default_cooldown = null # The amount of time, in seconds, after a scaling activity completes before another scaling activity can start.
|
||||
health_check_type = null # Controls how health checking is done. Valid values are "EC2" or "ELB".
|
||||
health_check_grace_period = null # Time in seconds after instance comes into service before checking health.
|
||||
instance_type = "m4.large" # Size of the worker instances.
|
||||
instance_store_virtual_name = "ephemeral0" # "virtual_name" of the instance store volume.
|
||||
spot_price = "" # Cost of spot instance.
|
||||
placement_tenancy = "" # The tenancy of the instance. Valid values are "default" or "dedicated".
|
||||
root_volume_size = "100" # Root volume size of worker instances.
|
||||
root_volume_type = "gp2" # Root volume type of worker instances; can be "standard", "gp3", "gp2", or "io1"
|
||||
root_iops = "0" # The amount of provisioned IOPS. This must be set with a volume_type of "io1".
|
||||
root_volume_throughput = null # The amount of throughput to provision for a gp3 volume.
|
||||
key_name = "" # The key pair name that should be used for the instances in the autoscaling group
|
||||
pre_userdata = "" # Userdata to prepend to the default userdata.
|
||||
userdata_template_file = "" # alternate template to use for userdata
|
||||
userdata_template_extra_args = {} # Additional arguments to use when expanding the userdata template file
|
||||
bootstrap_extra_args = "" # Extra arguments passed to the bootstrap.sh script from the EKS AMI (Amazon Machine Image).
|
||||
additional_userdata = "" # userdata to append to the default userdata.
|
||||
ebs_optimized = true # sets whether to use ebs optimization on supported types.
|
||||
enable_monitoring = true # Enables/disables detailed monitoring.
|
||||
enclave_support = false # Enables/disables enclave support
|
||||
public_ip = false # Associate a public ip address with a worker
|
||||
kubelet_extra_args = "" # This string is passed directly to kubelet if set. Useful for adding labels or taints.
|
||||
subnets = var.subnets # A list of subnets to place the worker nodes in. i.e. ["subnet-123", "subnet-456", "subnet-789"]
|
||||
additional_security_group_ids = [] # A list of additional security group ids to include in worker launch config
|
||||
protect_from_scale_in = false # Prevent AWS from scaling in, so that cluster-autoscaler is solely responsible.
|
||||
iam_instance_profile_name = "" # A custom IAM instance profile name. Used when manage_worker_iam_resources is set to false. Incompatible with iam_role_id.
|
||||
iam_role_id = "local.default_iam_role_id" # A custom IAM role id. Incompatible with iam_instance_profile_name. Literal local.default_iam_role_id will never be used but if iam_role_id is not set, the local.default_iam_role_id interpolation will be used.
|
||||
suspended_processes = ["AZRebalance"] # A list of processes to suspend. i.e. ["AZRebalance", "HealthCheck", "ReplaceUnhealthy"]
|
||||
target_group_arns = null # A list of Application LoadBalancer (ALB) target group ARNs to be associated to the autoscaling group
|
||||
load_balancers = null # A list of Classic Load Balancer (CLB) names to be associated with the autoscaling group
|
||||
enabled_metrics = [] # A list of metrics to be collected i.e. ["GroupMinSize", "GroupMaxSize", "GroupDesiredCapacity"]
|
||||
placement_group = null # The name of the placement group into which to launch the instances, if any.
|
||||
service_linked_role_arn = "" # Arn of custom service linked role that Auto Scaling group will use. Useful when you have encrypted EBS
|
||||
termination_policies = [] # A list of policies to decide how the instances in the auto scale group should be terminated.
|
||||
platform = var.default_platform # Platform of workers. Either "linux" or "windows".
|
||||
    additional_ebs_volumes = [] # A list of additional volumes to be attached to the instances on this Auto Scaling group. Each volume should be an object with the following: block_device_name (required), volume_size, volume_type, iops, throughput, encrypted, kms_key_id (only on launch-template), delete_on_termination, snapshot_id. Optional values are grabbed from root volume or from defaults
    additional_instance_store_volumes = [] # A list of additional instance store (local disk) volumes to be attached to the instances on this Auto Scaling group. Each volume should be an object with the following: block_device_name (required), virtual_name.
    warm_pool = null # If this block is configured, add a Warm Pool to the specified Auto Scaling group.
    timeouts = {} # A map of timeouts for create/update/delete operations
    snapshot_id = null # A custom snapshot ID.

    # Settings for launch templates
    root_block_device_name = concat(data.aws_ami.eks_worker.*.root_device_name, [""])[0] # Root device name for Linux workers. If not provided, will assume default Linux AMI was used.
    root_block_device_name_windows = concat(data.aws_ami.eks_worker_windows.*.root_device_name, [""])[0] # Root device name for Windows workers. If not provided, will assume default Windows AMI was used.
    root_kms_key_id = "" # The KMS key to use when encrypting the root storage device
    launch_template_id = null # The id of the launch template used for managed node_groups
    launch_template_version = "$Latest" # The latest version of the launch template to use in the autoscaling group
    update_default_version = false # Update the autoscaling group launch template's default version upon each update
    launch_template_placement_tenancy = "default" # The placement tenancy for instances
    launch_template_placement_group = null # The name of the placement group into which to launch the instances, if any.
    root_encrypted = false # Whether the volume should be encrypted or not
    eni_delete = true # Delete the Elastic Network Interface (ENI) on termination (if set to false you will have to manually delete before destroying)
    interface_type = null # The type of network interface. To create an Elastic Fabric Adapter (EFA), specify 'efa'.
    cpu_credits = "standard" # T2/T3 unlimited mode, can be 'standard' or 'unlimited'. Used 'standard' mode as default to avoid paying higher costs
    market_type = null
    metadata_http_endpoint = "enabled" # The state of the metadata service: enabled, disabled.
    metadata_http_tokens = "optional" # If session tokens are required: optional, required.
    metadata_http_put_response_hop_limit = null # The desired HTTP PUT response hop limit for instance metadata requests.
    # Settings for launch templates with mixed instances policy
    override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"] # A list of override instance types for mixed instances policy
    on_demand_allocation_strategy = null # Strategy to use when launching on-demand instances. Valid values: prioritized.
    on_demand_base_capacity = "0" # Absolute minimum amount of desired capacity that must be fulfilled by on-demand instances
    on_demand_percentage_above_base_capacity = "0" # Percentage split between on-demand and Spot instances above the base on-demand capacity
    spot_allocation_strategy = "lowest-price" # Valid options are 'lowest-price' and 'capacity-optimized'. If 'lowest-price', the Auto Scaling group launches instances using the Spot pools with the lowest price, and evenly allocates your instances across the number of Spot pools. If 'capacity-optimized', the Auto Scaling group launches instances using Spot pools that are optimally chosen based on the available Spot capacity.
    spot_instance_pools = 10 # Number of Spot pools per availability zone to allocate capacity. EC2 Auto Scaling selects the cheapest Spot pools and evenly allocates Spot capacity across the number of Spot pools that you specify.
    spot_max_price = "" # Maximum price per unit hour that the user is willing to pay for the Spot instances. Default is the on-demand price
    max_instance_lifetime = 0 # Maximum number of seconds instances can run in the ASG. 0 is unlimited.
    elastic_inference_accelerator = null # Type of elastic inference accelerator to be attached. Example values are eia1.medium, eia2.large, etc.
    instance_refresh_enabled = false # Enable instance refresh for the worker autoscaling group.
    instance_refresh_strategy = "Rolling" # Strategy to use for instance refresh. Default is 'Rolling', which is the only valid value.
    instance_refresh_min_healthy_percentage = 90 # The amount of capacity in the ASG that must remain healthy during an instance refresh, as a percentage of the ASG's desired capacity.
    instance_refresh_instance_warmup = null # The number of seconds until a newly launched instance is configured and ready to use. Defaults to the ASG's health check grace period.
    instance_refresh_triggers = [] # Set of additional property names that will trigger an Instance Refresh. A refresh will always be triggered by a change in any of launch_configuration, launch_template, or mixed_instances_policy.
    capacity_rebalance = false # Enable capacity rebalance
  }

  workers_group_defaults = merge(
    local.workers_group_defaults_defaults,
    var.workers_group_defaults,
  )

  ebs_optimized_not_supported = [
    "c1.medium",
    "c3.8xlarge",
    "c3.large",
    "c5d.12xlarge",
    "c5d.24xlarge",
    "c5d.metal",
    "cc2.8xlarge",
    "cr1.8xlarge",
    "g2.8xlarge",
    "g4dn.metal",
    "hs1.8xlarge",
    "i2.8xlarge",
    "m1.medium",
    "m1.small",
    "m2.xlarge",
    "m3.large",
    "m3.medium",
    "m5ad.16xlarge",
    "m5ad.8xlarge",
    "m5dn.metal",
    "m5n.metal",
    "r3.8xlarge",
    "r3.large",
    "r5ad.16xlarge",
    "r5ad.8xlarge",
    "r5dn.metal",
    "r5n.metal",
    "t1.micro",
    "t2.2xlarge",
    "t2.large",
    "t2.medium",
    "t2.micro",
    "t2.nano",
    "t2.small",
    "t2.xlarge"
  ]

  kubeconfig = var.create_eks ? templatefile("${path.module}/templates/kubeconfig.tpl", {
    kubeconfig_name = coalesce(var.kubeconfig_name, "eks_${var.cluster_name}")
    endpoint = local.cluster_endpoint
    cluster_auth_base64 = local.cluster_auth_base64
    aws_authenticator_kubeconfig_apiversion = var.kubeconfig_api_version
    aws_authenticator_command = var.kubeconfig_aws_authenticator_command
    aws_authenticator_command_args = coalescelist(var.kubeconfig_aws_authenticator_command_args, ["token", "-i", local.cluster_name])
    aws_authenticator_additional_args = var.kubeconfig_aws_authenticator_additional_args
    aws_authenticator_env_variables = var.kubeconfig_aws_authenticator_env_variables
  }) : ""

  launch_configuration_userdata_rendered = [
    for index in range(var.create_eks ? local.worker_group_launch_configuration_count : 0) : templatefile(
      lookup(
        var.worker_groups[index],
        "userdata_template_file",
        lookup(var.worker_groups[index], "platform", local.workers_group_defaults["platform"]) == "windows"
        ? "${path.module}/templates/userdata_windows.tpl"
        : "${path.module}/templates/userdata.sh.tpl"
      ),
      merge({
        platform = lookup(var.worker_groups[index], "platform", local.workers_group_defaults["platform"])
        cluster_name = local.cluster_name
        endpoint = local.cluster_endpoint
        cluster_auth_base64 = local.cluster_auth_base64
        pre_userdata = lookup(
          var.worker_groups[index],
          "pre_userdata",
          local.workers_group_defaults["pre_userdata"],
        )
        additional_userdata = lookup(
          var.worker_groups[index],
          "additional_userdata",
          local.workers_group_defaults["additional_userdata"],
        )
        bootstrap_extra_args = lookup(
          var.worker_groups[index],
          "bootstrap_extra_args",
          local.workers_group_defaults["bootstrap_extra_args"],
        )
        kubelet_extra_args = lookup(
          var.worker_groups[index],
          "kubelet_extra_args",
          local.workers_group_defaults["kubelet_extra_args"],
        )
        },
        lookup(
          var.worker_groups[index],
          "userdata_template_extra_args",
          local.workers_group_defaults["userdata_template_extra_args"]
        )
      )
    )
  ]

  launch_template_userdata_rendered = [
    for index in range(var.create_eks ? local.worker_group_launch_template_count : 0) : templatefile(
      lookup(
        var.worker_groups_launch_template[index],
        "userdata_template_file",
        lookup(var.worker_groups_launch_template[index], "platform", local.workers_group_defaults["platform"]) == "windows"
        ? "${path.module}/templates/userdata_windows.tpl"
        : "${path.module}/templates/userdata.sh.tpl"
      ),
      merge({
        platform = lookup(var.worker_groups_launch_template[index], "platform", local.workers_group_defaults["platform"])
        cluster_name = local.cluster_name
        endpoint = local.cluster_endpoint
        cluster_auth_base64 = local.cluster_auth_base64
        pre_userdata = lookup(
          var.worker_groups_launch_template[index],
          "pre_userdata",
          local.workers_group_defaults["pre_userdata"],
        )
        additional_userdata = lookup(
          var.worker_groups_launch_template[index],
          "additional_userdata",
          local.workers_group_defaults["additional_userdata"],
        )
        bootstrap_extra_args = lookup(
          var.worker_groups_launch_template[index],
          "bootstrap_extra_args",
          local.workers_group_defaults["bootstrap_extra_args"],
        )
        kubelet_extra_args = lookup(
          var.worker_groups_launch_template[index],
          "kubelet_extra_args",
          local.workers_group_defaults["kubelet_extra_args"],
        )
        },
        lookup(
          var.worker_groups_launch_template[index],
          "userdata_template_extra_args",
          local.workers_group_defaults["userdata_template_extra_args"]
        )
      )
    )
  ]
}
319
main.tf
319
main.tf
@@ -1,24 +1,20 @@
resource "aws_cloudwatch_log_group" "this" {
  count = length(var.cluster_enabled_log_types) > 0 && var.create_eks ? 1 : 0
data "aws_partition" "current" {}

  name = "/aws/eks/${var.cluster_name}/cluster"
  retention_in_days = var.cluster_log_retention_in_days
  kms_key_id = var.cluster_log_kms_key_id

  tags = var.tags
}
################################################################################
# Cluster
################################################################################

resource "aws_eks_cluster" "this" {
  count = var.create_eks ? 1 : 0
  count = var.create ? 1 : 0

  name = var.cluster_name
  enabled_cluster_log_types = var.cluster_enabled_log_types
  role_arn = local.cluster_iam_role_arn
  role_arn = try(aws_iam_role.this[0].arn, var.iam_role_arn)
  version = var.cluster_version
  enabled_cluster_log_types = var.cluster_enabled_log_types

  vpc_config {
    security_group_ids = compact([local.cluster_security_group_id])
    subnet_ids = var.subnets
    security_group_ids = distinct(concat(var.cluster_additional_security_group_ids, [local.cluster_security_group_id]))
    subnet_ids = var.subnet_ids
    endpoint_private_access = var.cluster_endpoint_private_access
    endpoint_public_access = var.cluster_endpoint_public_access
    public_access_cidrs = var.cluster_endpoint_public_access_cidrs
@@ -45,188 +41,221 @@ resource "aws_eks_cluster" "this" {
  )

  timeouts {
    create = var.cluster_create_timeout
    delete = var.cluster_delete_timeout
    update = var.cluster_update_timeout
    create = lookup(var.cluster_timeouts, "create", null)
    delete = lookup(var.cluster_timeouts, "delete", null)
    update = lookup(var.cluster_timeouts, "update", null)
  }

  depends_on = [
    aws_security_group_rule.cluster_egress_internet,
    aws_security_group_rule.cluster_https_worker_ingress,
    aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy,
    aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy,
    aws_iam_role_policy_attachment.cluster_AmazonEKSVPCResourceControllerPolicy,
    aws_iam_role_policy_attachment.this,
    aws_security_group_rule.cluster,
    aws_security_group_rule.node,
    aws_cloudwatch_log_group.this
  ]
}

resource "aws_security_group" "cluster" {
  count = var.cluster_create_security_group && var.create_eks ? 1 : 0
resource "aws_cloudwatch_log_group" "this" {
  count = var.create && var.create_cloudwatch_log_group ? 1 : 0

  name_prefix = var.cluster_name
  description = "EKS cluster security group."
  name = "/aws/eks/${var.cluster_name}/cluster"
  retention_in_days = var.cloudwatch_log_group_retention_in_days
  kms_key_id = var.cloudwatch_log_group_kms_key_id

  tags = var.tags
}

################################################################################
# Cluster Security Group
# Defaults follow https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html
################################################################################

locals {
  cluster_sg_name = coalesce(var.cluster_security_group_name, "${var.cluster_name}-cluster")
  create_cluster_sg = var.create && var.create_cluster_security_group

  cluster_security_group_id = local.create_cluster_sg ? aws_security_group.cluster[0].id : var.cluster_security_group_id

  cluster_security_group_rules = {
    ingress_nodes_443 = {
      description = "Node groups to cluster API"
      protocol = "tcp"
      from_port = 443
      to_port = 443
      type = "ingress"
      source_node_security_group = true
    }
    egress_nodes_443 = {
      description = "Cluster API to node groups"
      protocol = "tcp"
      from_port = 443
      to_port = 443
      type = "egress"
      source_node_security_group = true
    }
    egress_nodes_kubelet = {
      description = "Cluster API to node kubelets"
      protocol = "tcp"
      from_port = 10250
      to_port = 10250
      type = "egress"
      source_node_security_group = true
    }
  }
}

resource "aws_security_group" "cluster" {
  count = local.create_cluster_sg ? 1 : 0

  name = var.cluster_security_group_use_name_prefix ? null : local.cluster_sg_name
  name_prefix = var.cluster_security_group_use_name_prefix ? "${local.cluster_sg_name}-" : null
  description = var.cluster_security_group_description
  vpc_id = var.vpc_id

  tags = merge(
    var.tags,
    {
      "Name" = "${var.cluster_name}-eks_cluster_sg"
    },
    { "Name" = local.cluster_sg_name },
    var.cluster_security_group_tags
  )
}

resource "aws_security_group_rule" "cluster_egress_internet" {
  count = var.cluster_create_security_group && var.create_eks ? 1 : 0
resource "aws_security_group_rule" "cluster" {
  for_each = local.create_cluster_sg ? merge(local.cluster_security_group_rules, var.cluster_security_group_additional_rules) : {}

  description = "Allow cluster egress access to the Internet."
  protocol = "-1"
  security_group_id = local.cluster_security_group_id
  cidr_blocks = var.cluster_egress_cidrs
  from_port = 0
  to_port = 0
  type = "egress"
  # Required
  security_group_id = aws_security_group.cluster[0].id
  protocol = each.value.protocol
  from_port = each.value.from_port
  to_port = each.value.to_port
  type = each.value.type

  # Optional
  description = try(each.value.description, null)
  cidr_blocks = try(each.value.cidr_blocks, null)
  ipv6_cidr_blocks = try(each.value.ipv6_cidr_blocks, null)
  prefix_list_ids = try(each.value.prefix_list_ids, [])
  self = try(each.value.self, null)
  source_security_group_id = try(
    each.value.source_security_group_id,
    try(each.value.source_node_security_group, false) ? local.node_security_group_id : null
  )
}

resource "aws_security_group_rule" "cluster_https_worker_ingress" {
  count = var.cluster_create_security_group && var.create_eks && var.worker_create_security_group ? 1 : 0
################################################################################
# IRSA
# Note - this is different from EKS identity provider
################################################################################

  description = "Allow pods to communicate with the EKS cluster API."
  protocol = "tcp"
  security_group_id = local.cluster_security_group_id
  source_security_group_id = local.worker_security_group_id
  from_port = 443
  to_port = 443
  type = "ingress"
data "tls_certificate" "this" {
  count = var.create && var.enable_irsa ? 1 : 0

  url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer
}

resource "aws_security_group_rule" "cluster_private_access_cidrs_source" {
  for_each = var.create_eks && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access && var.cluster_endpoint_private_access_cidrs != null ? toset(var.cluster_endpoint_private_access_cidrs) : []
resource "aws_iam_openid_connect_provider" "oidc_provider" {
  count = var.create && var.enable_irsa ? 1 : 0

  description = "Allow private K8S API ingress from custom CIDR source."
  type = "ingress"
  from_port = 443
  to_port = 443
  protocol = "tcp"
  cidr_blocks = [each.value]
  client_id_list = distinct(compact(concat(["sts.${data.aws_partition.current.dns_suffix}"], var.openid_connect_audiences)))
  thumbprint_list = [data.tls_certificate.this[0].certificates[0].sha1_fingerprint]
  url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer

  security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
  tags = merge(
    { Name = "${var.cluster_name}-eks-irsa" },
    var.tags
  )
}

resource "aws_security_group_rule" "cluster_private_access_sg_source" {
  count = var.create_eks && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access && var.cluster_endpoint_private_access_sg != null ? length(var.cluster_endpoint_private_access_sg) : 0
################################################################################
# IAM Role
################################################################################

  description = "Allow private K8S API ingress from custom Security Groups source."
  type = "ingress"
  from_port = 443
  to_port = 443
  protocol = "tcp"
  source_security_group_id = var.cluster_endpoint_private_access_sg[count.index]

  security_group_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
locals {
  iam_role_name = coalesce(var.iam_role_name, "${var.cluster_name}-cluster")
  policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
}

resource "aws_iam_role" "cluster" {
  count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
data "aws_iam_policy_document" "assume_role_policy" {
  count = var.create && var.create_iam_role ? 1 : 0

  name_prefix = var.cluster_iam_role_name != "" ? null : var.cluster_name
  name = var.cluster_iam_role_name != "" ? var.cluster_iam_role_name : null
  assume_role_policy = data.aws_iam_policy_document.cluster_assume_role_policy.json
  permissions_boundary = var.permissions_boundary
  path = var.iam_path
  statement {
    sid = "EKSClusterAssumeRole"
    actions = ["sts:AssumeRole"]

    principals {
      type = "Service"
      identifiers = ["eks.amazonaws.com"]
    }
  }
}

resource "aws_iam_role" "this" {
  count = var.create && var.create_iam_role ? 1 : 0

  name = var.iam_role_use_name_prefix ? null : local.iam_role_name
  name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}-" : null
  path = var.iam_role_path
  description = var.iam_role_description

  assume_role_policy = data.aws_iam_policy_document.assume_role_policy[0].json
  permissions_boundary = var.iam_role_permissions_boundary
  force_detach_policies = true

  tags = var.tags
  tags = merge(var.tags, var.iam_role_tags)
}

resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" {
  count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
# Policies attached ref https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group
resource "aws_iam_role_policy_attachment" "this" {
  for_each = var.create && var.create_iam_role ? toset(compact(distinct(concat([
    "${local.policy_arn_prefix}/AmazonEKSClusterPolicy",
    "${local.policy_arn_prefix}/AmazonEKSVPCResourceController",
  ], var.iam_role_additional_policies)))) : toset([])

  policy_arn = "${local.policy_arn_prefix}/AmazonEKSClusterPolicy"
  role = local.cluster_iam_role_name
  policy_arn = each.value
  role = aws_iam_role.this[0].name
}

resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSServicePolicy" {
  count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
################################################################################
# EKS Addons
################################################################################

  policy_arn = "${local.policy_arn_prefix}/AmazonEKSServicePolicy"
  role = local.cluster_iam_role_name
}
resource "aws_eks_addon" "this" {
  for_each = { for k, v in var.cluster_addons : k => v if var.create }

resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSVPCResourceControllerPolicy" {
  count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
  cluster_name = aws_eks_cluster.this[0].name
  addon_name = try(each.value.name, each.key)

  policy_arn = "${local.policy_arn_prefix}/AmazonEKSVPCResourceController"
  role = local.cluster_iam_role_name
}
  addon_version = lookup(each.value, "addon_version", null)
  resolve_conflicts = lookup(each.value, "resolve_conflicts", null)
  service_account_role_arn = lookup(each.value, "service_account_role_arn", null)

/*
  Adding a policy to cluster IAM role that allow permissions
  required to create AWSServiceRoleForElasticLoadBalancing service-linked role by EKS during ELB provisioning
*/

data "aws_iam_policy_document" "cluster_elb_sl_role_creation" {
  count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0

  statement {
    effect = "Allow"
    actions = [
      "ec2:DescribeAccountAttributes",
      "ec2:DescribeInternetGateways",
      "ec2:DescribeAddresses"
  lifecycle {
    ignore_changes = [
      modified_at
    ]
    resources = ["*"]
  }
}

resource "aws_iam_policy" "cluster_elb_sl_role_creation" {
  count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0

  name_prefix = "${var.cluster_name}-elb-sl-role-creation"
  description = "Permissions for EKS to create AWSServiceRoleForElasticLoadBalancing service-linked role"
  policy = data.aws_iam_policy_document.cluster_elb_sl_role_creation[0].json
  path = var.iam_path

  tags = var.tags
}

resource "aws_iam_role_policy_attachment" "cluster_elb_sl_role_creation" {
  count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
################################################################################
# EKS Identity Provider
# Note - this is different from IRSA
################################################################################

  policy_arn = aws_iam_policy.cluster_elb_sl_role_creation[0].arn
  role = local.cluster_iam_role_name
}
resource "aws_eks_identity_provider_config" "this" {
  for_each = { for k, v in var.cluster_identity_providers : k => v if var.create }

/*
  Adding a policy to cluster IAM role that deny permissions to logs:CreateLogGroup
  it is not needed since we create the log group ourselves in this module, and it is causing trouble during cleanup/deletion
*/
  cluster_name = aws_eks_cluster.this[0].name

data "aws_iam_policy_document" "cluster_deny_log_group" {
  count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0

  statement {
    effect = "Deny"
    actions = [
      "logs:CreateLogGroup"
    ]
    resources = ["*"]
  oidc {
    client_id = each.value.client_id
    groups_claim = lookup(each.value, "groups_claim", null)
    groups_prefix = lookup(each.value, "groups_prefix", null)
    identity_provider_config_name = try(each.value.identity_provider_config_name, each.key)
    issuer_url = each.value.issuer_url
    required_claims = lookup(each.value, "required_claims", null)
    username_claim = lookup(each.value, "username_claim", null)
    username_prefix = lookup(each.value, "username_prefix", null)
  }
}

resource "aws_iam_policy" "cluster_deny_log_group" {
  count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0

  name_prefix = "${var.cluster_name}-deny-log-group"
  description = "Deny CreateLogGroup"
  policy = data.aws_iam_policy_document.cluster_deny_log_group[0].json
  path = var.iam_path

  tags = var.tags
}

resource "aws_iam_role_policy_attachment" "cluster_deny_log_group" {
  count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0

  policy_arn = aws_iam_policy.cluster_deny_log_group[0].arn
  role = local.cluster_iam_role_name
}
123
modules/_user_data/README.md
Normal file
123
modules/_user_data/README.md
Normal file
@@ -0,0 +1,123 @@
# Internal User Data Module

Configuration in this directory renders the appropriate user data for the given inputs. There are a number of different ways that user data can be utilized, and this internal module is designed to aid in making that flexibility possible, as well as to provide a means for out-of-band testing and validation.

See the [`examples/user_data/` directory](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/user_data) for various examples of using the module.

## Combinations

At a high level, AWS EKS users have two methods for launching nodes within this EKS module (ignoring Fargate profiles):

1. EKS managed node group
2. Self managed node group

### EKS Managed Node Group

When using an EKS managed node group, users have 2 primary routes for interacting with the bootstrap user data:

1. If the EKS managed node group does **NOT** utilize a custom AMI, then users can elect to supply additional user data that is pre-pended before the EKS managed node group bootstrap user data. You can read more about this process from the [AWS supplied documentation](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data)

   - Users can use the following variables to facilitate this process:

   ```hcl
   pre_bootstrap_user_data = "..."
   bootstrap_extra_args = "..."
   ```

2. If the EKS managed node group does utilize a custom AMI, then per the [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami), users will need to supply the necessary bootstrap configuration via user data to ensure that the node is configured to register with the cluster when launched. There are two routes that users can utilize to facilitate this bootstrapping process:
   - If the AMI used is a derivative of the [AWS EKS Optimized AMI](https://github.com/awslabs/amazon-eks-ami), users can opt in to using a template provided by the module that provides the minimum necessary configuration to bootstrap the node when launched, with the option to add additional pre and post bootstrap user data as well as bootstrap additional args that are supplied to the [AWS EKS bootstrap.sh script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh)
     - Users can use the following variables to facilitate this process:
     ```hcl
     enable_bootstrap_user_data = true # to opt in to using the module supplied bootstrap user data template
     pre_bootstrap_user_data = "..."
     bootstrap_extra_args = "..."
     post_bootstrap_user_data = "..."
     ```
   - If the AMI is not an AWS EKS Optimized AMI derivative, or if users wish to have more control over the user data that is supplied to the node when launched, users have the ability to supply their own user data template that will be rendered instead of the module supplied template. Note - only the variables that are supplied to the `templatefile()` for the respective platform/OS are available for use in the supplied template, otherwise users will need to pre-render/pre-populate the template before supplying the final template to the module for rendering as user data.
     - Users can use the following variables to facilitate this process:
     ```hcl
     user_data_template_path = "./your/user_data.sh" # user supplied bootstrap user data template
     pre_bootstrap_user_data = "..."
     bootstrap_extra_args = "..."
     post_bootstrap_user_data = "..."
     ```

| ℹ️ When using bottlerocket as the desired platform, since the user data for bottlerocket is TOML, all configurations are merged in the one file supplied as user data. Therefore, `pre_bootstrap_user_data` and `post_bootstrap_user_data` are not valid since the bottlerocket OS handles when various settings are applied. If you wish to supply additional configuration settings when using bottlerocket, supply them via the `bootstrap_extra_args` variable. For the linux platform, `bootstrap_extra_args` are settings that will be supplied to the [AWS EKS Optimized AMI bootstrap script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh#L14) such as kubelet extra args, etc. See the [bottlerocket GitHub repository documentation](https://github.com/bottlerocket-os/bottlerocket#description-of-settings) for more details on what settings can be supplied via the `bootstrap_extra_args` variable. |
| :--- |

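As a rough illustration, the kind of TOML that could be passed through `bootstrap_extra_args` when `platform` is `bottlerocket` might look like the sketch below. The specific settings shown are only examples of Bottlerocket settings (see the Bottlerocket documentation linked above); they are not values required or defined by this module:

```hcl
bootstrap_extra_args = <<-EOT
  # Illustrative Bottlerocket settings only - merged into the rendered TOML user data
  [settings.kernel]
  lockdown = "integrity"

  [settings.kubernetes.node-labels]
  "node.kubernetes.io/lifecycle" = "spot"
EOT
```
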
### Self Managed Node Group

When using a self managed node group, the options presented to users are very similar to the second option listed above for EKS managed node groups. Since self managed node groups require users to provide the bootstrap user data, there is no concept of appending to user data that AWS provides; users can either elect to use the user data template provided for their platform/OS by the module or provide their own user data template for rendering by the module.

- If the AMI used is a derivative of the [AWS EKS Optimized AMI](https://github.com/awslabs/amazon-eks-ami), users can opt in to using a template provided by the module that provides the minimum necessary configuration to bootstrap the node when launched, with the option to add additional pre and post bootstrap user data as well as bootstrap additional args that are supplied to the [AWS EKS bootstrap.sh script](https://github.com/awslabs/amazon-eks-ami/blob/master/files/bootstrap.sh)
  - Users can use the following variables to facilitate this process:
  ```hcl
  enable_bootstrap_user_data = true # to opt in to using the module supplied bootstrap user data template
  pre_bootstrap_user_data = "..."
  bootstrap_extra_args = "..."
  post_bootstrap_user_data = "..."
  ```
- If the AMI is not an AWS EKS Optimized AMI derivative, or if users wish to have more control over the user data that is supplied to the node upon launch, users have the ability to supply their own user data template that will be rendered instead of the module supplied template. Note - only the variables that are supplied to the `templatefile()` for the respective platform/OS are available for use in the supplied template, otherwise users will need to pre-render/pre-populate the template before supplying the final template to the module for rendering as user data.
  - Users can use the following variables to facilitate this process:
  ```hcl
  user_data_template_path = "./your/user_data.sh" # user supplied bootstrap user data template
  pre_bootstrap_user_data = "..."
  bootstrap_extra_args = "..."
  post_bootstrap_user_data = "..."
  ```

### Logic Diagram

The rough flow of logic that is encapsulated within the `_user_data` internal module can be represented by the following diagram to better highlight the various manners in which user data can be populated.

<p align="center">
  <img src="https://raw.githubusercontent.com/terraform-aws-modules/terraform-aws-eks/master/.github/images/user_data.svg" alt="User Data" width="60%">
</p>

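For out-of-band testing and validation, the internal module can also be invoked directly. The sketch below is illustrative only; the relative `source` path and the cluster attribute values are assumptions for this example, and the maintained examples live in the `examples/user_data/` directory linked above:

```hcl
module "self_managed_node_group_user_data" {
  source = "../../modules/_user_data" # assumed relative path for illustration

  create   = true
  platform = "linux"

  # Self managed node groups render the full bootstrap user data themselves
  is_eks_managed_node_group  = false
  enable_bootstrap_user_data = true

  # Example cluster attributes - replace with real values from your cluster
  cluster_name        = "my-cluster"
  cluster_endpoint    = "https://0123456789ABCDEF.gr7.eu-west-1.eks.amazonaws.com"
  cluster_auth_base64 = "LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0t..."

  pre_bootstrap_user_data  = "echo 'pre bootstrap'"
  bootstrap_extra_args     = "--kubelet-extra-args '--max-pods=20'"
  post_bootstrap_user_data = "echo 'post bootstrap'"
}
```

The rendered, base64 encoded user data is then available as `module.self_managed_node_group_user_data.user_data` for inspection or for wiring into a launch template.
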
<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
## Requirements

| Name | Version |
|------|---------|
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
| <a name="requirement_cloudinit"></a> [cloudinit](#requirement\_cloudinit) | >= 2.0 |

## Providers

| Name | Version |
|------|---------|
| <a name="provider_cloudinit"></a> [cloudinit](#provider\_cloudinit) | >= 2.0 |

## Modules

No modules.

## Resources

| Name | Type |
|------|------|
| [cloudinit_config.linux_eks_managed_node_group](https://registry.terraform.io/providers/hashicorp/cloudinit/latest/docs/data-sources/config) | data source |

## Inputs

| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| <a name="input_bootstrap_extra_args"></a> [bootstrap\_extra\_args](#input\_bootstrap\_extra\_args) | Additional arguments passed to the bootstrap script. When `platform` = `bottlerocket`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data | `string` | `""` | no |
| <a name="input_cluster_auth_base64"></a> [cluster\_auth\_base64](#input\_cluster\_auth\_base64) | Base64 encoded CA of associated EKS cluster | `string` | `""` | no |
| <a name="input_cluster_endpoint"></a> [cluster\_endpoint](#input\_cluster\_endpoint) | Endpoint of associated EKS cluster | `string` | `""` | no |
| <a name="input_cluster_name"></a> [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster | `string` | `""` | no |
| <a name="input_cluster_service_ipv4_cidr"></a> [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks | `string` | `null` | no |
| <a name="input_create"></a> [create](#input\_create) | Determines whether to create user-data or not | `bool` | `true` | no |
| <a name="input_enable_bootstrap_user_data"></a> [enable\_bootstrap\_user\_data](#input\_enable\_bootstrap\_user\_data) | Determines whether the bootstrap configurations are populated within the user data template | `bool` | `false` | no |
| <a name="input_is_eks_managed_node_group"></a> [is\_eks\_managed\_node\_group](#input\_is\_eks\_managed\_node\_group) | Determines whether the user data is used on nodes in an EKS managed node group. Used to determine if user data will be appended or not | `bool` | `true` | no |
| <a name="input_platform"></a> [platform](#input\_platform) | Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based | `string` | `"linux"` | no |
| <a name="input_post_bootstrap_user_data"></a> [post\_bootstrap\_user\_data](#input\_post\_bootstrap\_user\_data) | User data that is appended to the user data script after the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no |
| <a name="input_pre_bootstrap_user_data"></a> [pre\_bootstrap\_user\_data](#input\_pre\_bootstrap\_user\_data) | User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket` | `string` | `""` | no |
| <a name="input_user_data_template_path"></a> [user\_data\_template\_path](#input\_user\_data\_template\_path) | Path to a local, custom user data template file to use when rendering user data | `string` | `""` | no |

## Outputs

| Name | Description |
|------|-------------|
| <a name="output_user_data"></a> [user\_data](#output\_user\_data) | Base64 encoded user data rendered for the provided inputs |
<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
78
modules/_user_data/main.tf
Normal file
78
modules/_user_data/main.tf
Normal file
@@ -0,0 +1,78 @@
locals {
  int_linux_default_user_data = var.create && var.platform == "linux" && (var.enable_bootstrap_user_data || var.user_data_template_path != "") ? base64encode(templatefile(
    coalesce(var.user_data_template_path, "${path.module}/../../templates/linux_user_data.tpl"),
    {
      # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami
      enable_bootstrap_user_data = var.enable_bootstrap_user_data
      # Required to bootstrap node
      cluster_name = var.cluster_name
      cluster_endpoint = var.cluster_endpoint
      cluster_auth_base64 = var.cluster_auth_base64
      # Optional
      cluster_service_ipv4_cidr = var.cluster_service_ipv4_cidr != null ? var.cluster_service_ipv4_cidr : ""
      bootstrap_extra_args = var.bootstrap_extra_args
      pre_bootstrap_user_data = var.pre_bootstrap_user_data
      post_bootstrap_user_data = var.post_bootstrap_user_data
    }
  )) : ""
  platform = {
    bottlerocket = {
      user_data = var.create && var.platform == "bottlerocket" && (var.enable_bootstrap_user_data || var.user_data_template_path != "" || var.bootstrap_extra_args != "") ? base64encode(templatefile(
        coalesce(var.user_data_template_path, "${path.module}/../../templates/bottlerocket_user_data.tpl"),
        {
          # https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-custom-ami
          enable_bootstrap_user_data = var.enable_bootstrap_user_data
          # Required to bootstrap node
          cluster_name = var.cluster_name
          cluster_endpoint = var.cluster_endpoint
          cluster_auth_base64 = var.cluster_auth_base64
          # Optional - is appended if using EKS managed node group without custom AMI
          # cluster_service_ipv4_cidr = var.cluster_service_ipv4_cidr # Not supported yet: https://github.com/bottlerocket-os/bottlerocket/issues/1866
          bootstrap_extra_args = var.bootstrap_extra_args
        }
      )) : ""
    }
    linux = {
      user_data = try(data.cloudinit_config.linux_eks_managed_node_group[0].rendered, local.int_linux_default_user_data)

    }
    windows = {
      user_data = var.create && var.platform == "windows" && var.enable_bootstrap_user_data ? base64encode(templatefile(
        coalesce(var.user_data_template_path, "${path.module}/../../templates/windows_user_data.tpl"),
        {
          # Required to bootstrap node
          cluster_name = var.cluster_name
          cluster_endpoint = var.cluster_endpoint
          cluster_auth_base64 = var.cluster_auth_base64
          # Optional - is appended if using EKS managed node group without custom AMI
          # cluster_service_ipv4_cidr = var.cluster_service_ipv4_cidr # Not supported yet: https://github.com/awslabs/amazon-eks-ami/issues/805
          bootstrap_extra_args = var.bootstrap_extra_args
          pre_bootstrap_user_data = var.pre_bootstrap_user_data
          post_bootstrap_user_data = var.post_bootstrap_user_data
        }
      )) : ""
    }
  }
}

# https://github.com/aws/containers-roadmap/issues/596#issuecomment-675097667
# An important note is that user data must be in MIME multi-part archive format,
# as by default, EKS will merge the bootstrapping command required for nodes to join the
# cluster with your user data. If you use a custom AMI in your launch template,
# this merging will NOT happen and you are responsible for nodes joining the cluster.
# See docs for more details -> https://docs.aws.amazon.com/eks/latest/userguide/launch-templates.html#launch-template-user-data

data "cloudinit_config" "linux_eks_managed_node_group" {
  count = var.create && var.platform == "linux" && var.is_eks_managed_node_group && !var.enable_bootstrap_user_data && var.pre_bootstrap_user_data != "" && var.user_data_template_path == "" ? 1 : 0

  base64_encode = true
  gzip = false
  boundary = "//"

  # Prepend to existing user data supplied by AWS EKS
  part {
    content_type = "text/x-shellscript"
    content = var.pre_bootstrap_user_data
  }
}
4
modules/_user_data/outputs.tf
Normal file
4
modules/_user_data/outputs.tf
Normal file
@@ -0,0 +1,4 @@
output "user_data" {
  description = "Base64 encoded user data rendered for the provided inputs"
  value = try(local.platform[var.platform].user_data, "")
}
71
modules/_user_data/variables.tf
Normal file
71
modules/_user_data/variables.tf
Normal file
@@ -0,0 +1,71 @@
variable "create" {
  description = "Determines whether to create user-data or not"
  type = bool
  default = true
}

variable "platform" {
  description = "Identifies if the OS platform is `bottlerocket`, `linux`, or `windows` based"
  type = string
  default = "linux"
}

variable "enable_bootstrap_user_data" {
  description = "Determines whether the bootstrap configurations are populated within the user data template"
  type = bool
  default = false
}

variable "is_eks_managed_node_group" {
  description = "Determines whether the user data is used on nodes in an EKS managed node group. Used to determine if user data will be appended or not"
  type = bool
  default = true
}

variable "cluster_name" {
  description = "Name of the EKS cluster"
  type = string
  default = ""
}

variable "cluster_endpoint" {
  description = "Endpoint of associated EKS cluster"
  type = string
  default = ""
}

variable "cluster_auth_base64" {
  description = "Base64 encoded CA of associated EKS cluster"
  type = string
  default = ""
}

variable "cluster_service_ipv4_cidr" {
  description = "The CIDR block to assign Kubernetes service IP addresses from. If you don't specify a block, Kubernetes assigns addresses from either the 10.100.0.0/16 or 172.20.0.0/16 CIDR blocks"
  type = string
  default = null
}

variable "pre_bootstrap_user_data" {
  description = "User data that is injected into the user data script ahead of the EKS bootstrap script. Not used when `platform` = `bottlerocket`"
  type = string
  default = ""
}

variable "post_bootstrap_user_data" {
  description = "User data that is appended to the user data script after the EKS bootstrap script. Not used when `platform` = `bottlerocket`"
  type = string
  default = ""
}

variable "bootstrap_extra_args" {
  description = "Additional arguments passed to the bootstrap script. When `platform` = `bottlerocket`; these are additional [settings](https://github.com/bottlerocket-os/bottlerocket#settings) that are provided to the Bottlerocket user data"
  type = string
  default = ""
}

variable "user_data_template_path" {
  description = "Path to a local, custom user data template file to use when rendering user data"
  type = string
  default = ""
}
10
modules/_user_data/versions.tf
Normal file
10
modules/_user_data/versions.tf
Normal file
@@ -0,0 +1,10 @@
terraform {
  required_version = ">= 0.13.1"

  required_providers {
    cloudinit = {
      source = "hashicorp/cloudinit"
      version = ">= 2.0"
    }
  }
}