From 2bdf7d7dd6e4705fdfa267bed40e147bd9287a21 Mon Sep 17 00:00:00 2001 From: Anton Babenko Date: Thu, 16 Sep 2021 11:35:44 +0200 Subject: [PATCH] refactor: Refactoring to match the rest of terraform-aws-modules (#1583) --- .pre-commit-config.yaml | 32 ++- README.md | 144 +++++----- aws_auth.tf | 7 +- data.tf | 16 +- examples/README.md | 11 - examples/_bootstrap/README.md | 60 +++++ examples/_bootstrap/main.tf | 50 ++++ examples/_bootstrap/outputs.tf | 14 + examples/_bootstrap/variables.tf | 0 .../versions.tf | 3 +- examples/basic/main.tf | 138 ---------- examples/basic/variables.tf | 52 ---- examples/bottlerocket/README.md | 74 +++++- examples/bottlerocket/data.tf | 22 -- examples/bottlerocket/main.tf | 72 +++-- examples/{basic => bottlerocket}/outputs.tf | 12 +- examples/bottlerocket/variables.tf | 17 -- .../versions.tf | 5 +- examples/complete/README.md | 73 +++++ examples/complete/main.tf | 247 +++++++++++++++++ examples/complete/outputs.tf | 29 ++ examples/complete/variables.tf | 1 + examples/{basic => complete}/versions.tf | 2 +- examples/create_false/main.tf | 30 --- examples/create_false/variables.tf | 3 - examples/fargate/README.md | 68 +++++ examples/fargate/main.tf | 251 ++++++++++++------ examples/fargate/outputs.tf | 5 - examples/fargate/variables.tf | 52 ---- examples/fargate/versions.tf | 2 +- examples/instance_refresh/main.tf | 12 +- examples/instance_refresh/outputs.tf | 5 - examples/instance_refresh/variables.tf | 7 +- .../irsa/cluster-autoscaler-chart-values.yaml | 2 +- examples/irsa/irsa.tf | 5 +- examples/irsa/main.tf | 5 +- examples/irsa/outputs.tf | 3 +- examples/irsa/variables.tf | 3 - examples/launch_templates/main.tf | 5 +- examples/launch_templates/outputs.tf | 6 - examples/launch_templates/variables.tf | 4 - .../disk_encryption_policy.tf | 142 +++++----- .../launchtemplate.tf | 24 +- .../main.tf | 5 +- .../outputs.tf | 6 - .../variables.tf | 13 +- examples/managed_node_groups/main.tf | 5 +- examples/managed_node_groups/outputs.tf | 5 - examples/managed_node_groups/variables.tf | 4 - examples/secrets_encryption/main.tf | 5 +- examples/secrets_encryption/outputs.tf | 5 - examples/secrets_encryption/variables.tf | 4 - examples/secrets_encryption/versions.tf | 2 +- examples/spot_instances/main.tf | 61 ----- examples/spot_instances/variables.tf | 4 - fargate.tf | 29 +- irsa.tf | 5 +- kubectl.tf | 3 +- local.tf => locals.tf | 89 +++---- cluster.tf => main.tf | 68 +++-- modules/fargate/README.md | 15 +- modules/fargate/data.tf | 17 -- modules/fargate/fargate.tf | 33 --- modules/fargate/locals.tf | 10 - modules/fargate/main.tf | 67 +++++ modules/fargate/variables.tf | 36 +-- modules/node_groups/README.md | 20 +- .../node_groups/{node_groups.tf => main.tf} | 2 +- modules/node_groups/variables.tf | 10 +- modules/node_groups/versions.tf | 3 +- node_groups.tf | 19 +- outputs.tf | 12 +- variables.tf | 23 +- versions.tf | 1 + workers.tf | 69 +++-- workers_launch_template.tf | 22 +- 76 files changed, 1350 insertions(+), 1037 deletions(-) delete mode 100644 examples/README.md create mode 100644 examples/_bootstrap/README.md create mode 100644 examples/_bootstrap/main.tf create mode 100644 examples/_bootstrap/outputs.tf create mode 100644 examples/_bootstrap/variables.tf rename examples/{spot_instances => _bootstrap}/versions.tf (71%) delete mode 100644 examples/basic/main.tf delete mode 100644 examples/basic/variables.tf delete mode 100644 examples/bottlerocket/data.tf rename examples/{basic => bottlerocket}/outputs.tf (82%) rename examples/{create_false => 
bottlerocket}/versions.tf (52%) create mode 100644 examples/complete/README.md create mode 100644 examples/complete/main.tf create mode 100644 examples/complete/outputs.tf create mode 100644 examples/complete/variables.tf rename examples/{basic => complete}/versions.tf (85%) delete mode 100644 examples/create_false/main.tf delete mode 100644 examples/create_false/variables.tf create mode 100644 examples/fargate/README.md rename examples/{spot_instances => launch_templates_with_managed_node_groups}/outputs.tf (88%) delete mode 100644 examples/spot_instances/main.tf delete mode 100644 examples/spot_instances/variables.tf rename local.tf => locals.tf (83%) rename cluster.tf => main.tf (79%) delete mode 100644 modules/fargate/data.tf delete mode 100644 modules/fargate/fargate.tf delete mode 100644 modules/fargate/locals.tf create mode 100644 modules/fargate/main.tf rename modules/node_groups/{node_groups.tf => main.tf} (98%) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6396765..4fd440d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,8 +1,26 @@ repos: -- repo: git://github.com/antonbabenko/pre-commit-terraform - rev: v1.50.0 - hooks: - - id: terraform_fmt - - id: terraform_docs - - id: terraform_validate - - id: terraform_tflint + - repo: git://github.com/antonbabenko/pre-commit-terraform + rev: v1.50.0 + hooks: + - id: terraform_fmt + - id: terraform_validate + - id: terraform_docs + - id: terraform_tflint + args: + - '--args=--only=terraform_deprecated_interpolation' + - '--args=--only=terraform_deprecated_index' + - '--args=--only=terraform_unused_declarations' + - '--args=--only=terraform_comment_syntax' + - '--args=--only=terraform_documented_outputs' + - '--args=--only=terraform_documented_variables' + - '--args=--only=terraform_typed_variables' + - '--args=--only=terraform_module_pinned_source' +# - '--args=--only=terraform_naming_convention' + - '--args=--only=terraform_required_version' + - '--args=--only=terraform_required_providers' + - '--args=--only=terraform_standard_module_structure' + - '--args=--only=terraform_workspace_remote' + - repo: git://github.com/pre-commit/pre-commit-hooks + rev: v4.0.1 + hooks: + - id: check-merge-conflict diff --git a/README.md b/README.md index 06b05eb..505a872 100644 --- a/README.md +++ b/README.md @@ -1,61 +1,54 @@ -# terraform-aws-eks +# AWS EKS Terraform module [![Lint Status](https://github.com/terraform-aws-modules/terraform-aws-eks/workflows/Lint/badge.svg)](https://github.com/terraform-aws-modules/terraform-aws-eks/actions) [![LICENSE](https://img.shields.io/github/license/terraform-aws-modules/terraform-aws-eks)](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/LICENSE) -A terraform module to create a managed Kubernetes cluster on AWS EKS. Available -through the [Terraform registry](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws). -Inspired by and adapted from [this doc](https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html) -and its [source code](https://github.com/terraform-providers/terraform-provider-aws/tree/master/examples/eks-getting-started). -Read the [AWS docs on EKS to get connected to the k8s dashboard](https://docs.aws.amazon.com/eks/latest/userguide/dashboard-tutorial.html). +Terraform module which creates Kubernetes cluster resources on AWS EKS. 
-## Assumptions
+## Features
+
+- Create an EKS cluster
+- All node types are supported:
+  - [Managed Node Groups](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html)
+  - [Self-managed Nodes](https://docs.aws.amazon.com/eks/latest/userguide/worker.html)
+  - [Fargate](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html)
+- Support for AWS EKS Optimized or custom AMIs
+- Create or manage security groups that allow communication and coordination
 
-* You want to create an EKS cluster and an autoscaling group of workers for the cluster.
-* You want these resources to exist within security groups that allow communication and coordination. These can be user provided or created within the module.
-* You've created a Virtual Private Cloud (VPC) and subnets where you intend to put the EKS resources. The VPC satisfies [EKS requirements](https://docs.aws.amazon.com/eks/latest/userguide/network_reqs.html).
 
 ## Important note
 
-The `cluster_version` is the required variable. Kubernetes is evolving a lot, and each major version includes new features, fixes, or changes.
+Kubernetes is evolving a lot, and each minor version includes new features, fixes, or changes.
 
-**Always check [Kubernetes Release Notes](https://kubernetes.io/docs/setup/release/notes/) before updating the major version.**
+**Always check [Kubernetes Release Notes](https://kubernetes.io/docs/setup/release/notes/) before updating the major version, and [CHANGELOG.md](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/CHANGELOG.md) for all changes in this EKS module.**
 
-You also need to ensure your applications and add ons are updated, or workloads could fail after the upgrade is complete. For action, you may need to take before upgrading, see the steps in the [EKS documentation](https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html).
+You also need to ensure that your applications and add-ons are updated, or workloads could fail after the upgrade is complete. For actions you may need to take before upgrading, see the steps in the [EKS documentation](https://docs.aws.amazon.com/eks/latest/userguide/update-cluster.html).
 
-An example of harming update was the removal of several commonly used, but deprecated APIs, in Kubernetes 1.16. More information on the API removals, see the [Kubernetes blog post](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/).
-
-By default, this module manages the `aws-auth` configmap for you (`manage_aws_auth=true`). To avoid the following [issue](https://github.com/aws/containers-roadmap/issues/654) where the EKS creation is `ACTIVE` but not ready. We implemented a "retry" logic with a fork of the http provider https://github.com/terraform-aws-modules/terraform-provider-http. This fork adds the support of a self-signed CA certificate. The original PR can be found at https://github.com/hashicorp/terraform-provider-http/pull/29.
-
-Setting `instance_refresh_enabled` to true will recreate your worker nodes without draining them first. It is recommended to install [aws-node-termination-handler](https://github.com/aws/aws-node-termination-handler) for proper node draining. Find the complete example here [instance_refresh](examples/instance_refresh).
 
 ## Usage example
 
-A full example leveraging other community modules is contained in the [examples/basic directory](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/basic). 
-
 ```hcl
-data "aws_eks_cluster" "cluster" {
-  name = module.my-cluster.cluster_id
+data "aws_eks_cluster" "eks" {
+  name = module.eks.cluster_id
 }
 
-data "aws_eks_cluster_auth" "cluster" {
-  name = module.my-cluster.cluster_id
+data "aws_eks_cluster_auth" "eks" {
+  name = module.eks.cluster_id
 }
 
 provider "kubernetes" {
-  host                   = data.aws_eks_cluster.cluster.endpoint
-  cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data)
-  token                  = data.aws_eks_cluster_auth.cluster.token
-  load_config_file       = false
-  version                = "~> 1.9"
+  host                   = data.aws_eks_cluster.eks.endpoint
+  cluster_ca_certificate = base64decode(data.aws_eks_cluster.eks.certificate_authority[0].data)
+  token                  = data.aws_eks_cluster_auth.eks.token
 }
 
-module "my-cluster" {
+module "eks" {
   source          = "terraform-aws-modules/eks/aws"
+
+  cluster_version = "1.21"
   cluster_name    = "my-cluster"
-  cluster_version = "1.17"
-  subnets         = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
   vpc_id          = "vpc-1234556abcdef"
+  subnets         = ["subnet-abcde012", "subnet-bcde012a", "subnet-fghi345a"]
 
   worker_groups = [
     {
@@ -65,56 +58,50 @@ module "my-cluster" {
   ]
 }
 ```
-## Conditional creation
 
-Sometimes you need to have a way to create EKS resources conditionally but Terraform does not allow to use `count` inside `module` block, so the solution is to specify argument `create_eks`.
+There is also a [complete example](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/complete) which shows a large set of features available in the module.
 
-Using this feature _and_ having `manage_aws_auth=true` (the default) requires to set up the kubernetes provider in a way that allows the data sources to not exist.
 
-```hcl
-data "aws_eks_cluster" "cluster" {
-  count = var.create_eks ? 1 : 0
-  name  = module.eks.cluster_id
-}
+## Submodules
 
-data "aws_eks_cluster_auth" "cluster" {
-  count = var.create_eks ? 1 : 0
-  name  = module.eks.cluster_id
-}
+The root module calls these modules, which can also be used separately to create independent resources:
 
-# In case of not creating the cluster, this will be an incompletely configured, unused provider, which poses no problem.
-provider "kubernetes" {
-  host                   = element(concat(data.aws_eks_cluster.cluster[*].endpoint, [""]), 0)
-  cluster_ca_certificate = base64decode(element(concat(data.aws_eks_cluster.cluster[*].certificate_authority.0.data, [""]), 0))
-  token                  = element(concat(data.aws_eks_cluster_auth.cluster[*].token, [""]), 0)
-  load_config_file       = false
-  version                = "1.10"
-}
+- [fargate](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/modules/fargate) - creates Fargate profiles, see [examples/fargate](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate) for detailed examples.
+
 
-# This cluster will not be created
-module "eks" {
-  source = "terraform-aws-modules/eks/aws"
-  create_eks = false
-  # ... omitted
-}
-```
+## Notes
 
-## Other documentation
+- By default, this module manages the `aws-auth` configmap for you (`manage_aws_auth=true`). To avoid the [issue](https://github.com/aws/containers-roadmap/issues/654) where the EKS cluster reports `ACTIVE` before it is actually ready, we implemented "retry" logic with a [fork of the http provider](https://github.com/terraform-aws-modules/terraform-provider-http). This fork adds support for a self-signed CA certificate. The original PR can be found [here](https://github.com/hashicorp/terraform-provider-http/pull/29). 
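When `create_eks = false` is combined with `manage_aws_auth = true` (the default), the kubernetes provider still has to tolerate data sources that do not exist, as the removed "Conditional creation" section above described. A minimal sketch of that wiring, assuming the same `var.create_eks` flag and substituting `try()` for the original `element(concat(...))` chains:

```hcl
data "aws_eks_cluster" "cluster" {
  count = var.create_eks ? 1 : 0
  name  = module.eks.cluster_id
}

data "aws_eks_cluster_auth" "cluster" {
  count = var.create_eks ? 1 : 0
  name  = module.eks.cluster_id
}

# When the cluster is not created, this is an incompletely configured,
# unused provider, which poses no problem.
provider "kubernetes" {
  host                   = try(data.aws_eks_cluster.cluster[0].endpoint, "")
  cluster_ca_certificate = base64decode(try(data.aws_eks_cluster.cluster[0].certificate_authority[0].data, ""))
  token                  = try(data.aws_eks_cluster_auth.cluster[0].token, "")
}
```

With `count = 0` on both data sources, every `try()` falls back to an empty string, so the provider stays syntactically valid but is never used.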
-
-* [Autoscaling](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/autoscaling.md): How to enable worker node autoscaling.
-* [Enable Docker Bridge Network](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/enable-docker-bridge-network.md): How to enable the docker bridge network when using the EKS-optimized AMI, which disables it by default.
-* [Spot instances](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/spot-instances.md): How to use spot instances with this module.
-* [IAM Permissions](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/iam-permissions.md): Minimum IAM permissions needed to setup EKS Cluster.
-* [FAQ](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md): Frequently Asked Questions
+- Setting `instance_refresh_enabled = true` will recreate your worker nodes without draining them first. It is recommended to install [aws-node-termination-handler](https://github.com/aws/aws-node-termination-handler) for proper node draining. Find the complete example in [instance_refresh](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/instance_refresh).
 
-## Doc generation
+## Documentation
 
-Code formatting and documentation for variables and outputs is generated using [pre-commit-terraform hooks](https://github.com/antonbabenko/pre-commit-terraform) which uses [terraform-docs](https://github.com/segmentio/terraform-docs).
+### Official docs
 
-Follow [these instructions](https://github.com/antonbabenko/pre-commit-terraform#how-to-install) to install pre-commit locally.
+- [Amazon Elastic Kubernetes Service (Amazon EKS)](https://docs.aws.amazon.com/eks/latest/userguide/).
 
-And install `terraform-docs` with `go get github.com/segmentio/terraform-docs` or `brew install terraform-docs`.
+### Module docs
+
+- [Autoscaling](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/autoscaling.md): How to enable worker node autoscaling.
+- [Enable Docker Bridge Network](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/enable-docker-bridge-network.md): How to enable the docker bridge network when using the EKS-optimized AMI, which disables it by default.
+- [Spot instances](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/spot-instances.md): How to use spot instances with this module.
+- [IAM Permissions](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/iam-permissions.md): Minimum IAM permissions needed to set up an EKS cluster.
+- [FAQ](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/faq.md): Frequently Asked Questions.
+
+
+## Examples
+
+There are detailed examples showing how certain features of this module can be used in a straightforward way. Make sure to check and run them before opening an issue. [Here](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/iam-permissions.md) you can find the list of the minimum IAM permissions required to create an EKS cluster.
+
+- [Complete](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/complete) - Create an EKS cluster with all available worker types in various combinations, demonstrating many of the supported features. 
+- [Bottlerocket](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/bottlerocket) - Create an EKS cluster using the [Bottlerocket AMI](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami-bottlerocket.html).
+- [Fargate](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/fargate) - Create an EKS cluster with [Fargate profiles](https://docs.aws.amazon.com/eks/latest/userguide/fargate.html), and attach Fargate profiles to an existing EKS cluster.
+
 
 ## Contributing
 
@@ -122,16 +109,10 @@ Report issues/questions/feature requests on in the [issues](https://github.com/t
 Full contributing [guidelines are covered here](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/.github/CONTRIBUTING.md).
 
-## Change log
-
-- The [changelog](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/CHANGELOG.md) captures all important release notes from v11.0.0
-- For older release notes, refer to [changelog.pre-v11.0.0.md](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/CHANGELOG.pre-v11.0.0.md)
 
 ## Authors
 
-Created by [Brandon O'Connor](https://github.com/brandoconnor) - brandon@atscale.run.
-Maintained by [Max Williams](https://github.com/max-rocket-internet) and [Thierno IB. BARRY](https://github.com/barryib).
-Many thanks to [the contributors listed here](https://github.com/terraform-aws-modules/terraform-aws-eks/graphs/contributors)!
+This module was originally created by [Brandon O'Connor](https://github.com/brandoconnor), and has since been maintained by [Max Williams](https://github.com/max-rocket-internet), [Thierno IB. BARRY](https://github.com/barryib) and many more [contributors listed here](https://github.com/terraform-aws-modules/terraform-aws-eks/graphs/contributors)!
 
 ## License
 
 Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
@@ -144,6 +125,7 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
 |------|---------|
 | [terraform](#requirement\_terraform) | >= 0.13.1 |
 | [aws](#requirement\_aws) | >= 3.56.0 |
+| [cloudinit](#requirement\_cloudinit) | >= 2.0 |
 | [http](#requirement\_http) | >= 2.4.1 |
 | [kubernetes](#requirement\_kubernetes) | >= 1.11.1 |
 | [local](#requirement\_local) | >= 1.4 |
@@ -236,13 +218,14 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf
 | [cluster\_iam\_role\_name](#input\_cluster\_iam\_role\_name) | IAM role name for the cluster. If manage\_cluster\_iam\_resources is set to false, set this to reuse an existing IAM role. If manage\_cluster\_iam\_resources is set to true, set this to force the created role name. | `string` | `""` | no |
 | [cluster\_log\_kms\_key\_id](#input\_cluster\_log\_kms\_key\_id) | If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html) | `string` | `""` | no |
 | [cluster\_log\_retention\_in\_days](#input\_cluster\_log\_retention\_in\_days) | Number of days to retain log events. Default retention - 90 days. | `number` | `90` | no |
-| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster. Also used as a prefix in names of related resources. | `string` | n/a | yes |
+| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster. Also used as a prefix in names of related resources. 
| `string` | `""` | no | | [cluster\_security\_group\_id](#input\_cluster\_security\_group\_id) | If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers | `string` | `""` | no | | [cluster\_service\_ipv4\_cidr](#input\_cluster\_service\_ipv4\_cidr) | service ipv4 cidr for the kubernetes cluster | `string` | `null` | no | | [cluster\_tags](#input\_cluster\_tags) | A map of tags to add to just the eks resource. | `map(string)` | `{}` | no | -| [cluster\_version](#input\_cluster\_version) | Kubernetes version to use for the EKS cluster. | `string` | n/a | yes | +| [cluster\_version](#input\_cluster\_version) | Kubernetes version to use for the EKS cluster. | `string` | `null` | no | | [create\_eks](#input\_create\_eks) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no | | [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the EKS Fargate pod execution IAM role should be created. | `bool` | `true` | no | +| [default\_platform](#input\_default\_platform) | Default platform name. Valid options are `linux` and `windows`. | `string` | `"linux"` | no | | [eks\_oidc\_root\_ca\_thumbprint](#input\_eks\_oidc\_root\_ca\_thumbprint) | Thumbprint of Root CA for EKS OIDC, Valid until 2037 | `string` | `"9e99a48a9960b14926bb7f3b02e22da2b0ab7280"` | no | | [enable\_irsa](#input\_enable\_irsa) | Whether to create OpenID Connect Provider for EKS to enable IRSA | `bool` | `false` | no | | [fargate\_pod\_execution\_role\_name](#input\_fargate\_pod\_execution\_role\_name) | The IAM Role that provides permissions for the EKS Fargate Profile. | `string` | `null` | no | @@ -266,10 +249,9 @@ Apache 2 Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraf | [node\_groups\_defaults](#input\_node\_groups\_defaults) | Map of values to be applied to all node groups. See `node_groups` module's documentation for more details | `any` | `{}` | no | | [openid\_connect\_audiences](#input\_openid\_connect\_audiences) | List of OpenID Connect audience client IDs to add to the IRSA provider. | `list(string)` | `[]` | no | | [permissions\_boundary](#input\_permissions\_boundary) | If provided, all IAM roles will be created with this permissions boundary attached. | `string` | `null` | no | -| [subnets](#input\_subnets) | A list of subnets to place the EKS cluster and workers within. | `list(string)` | n/a | yes | +| [subnets](#input\_subnets) | A list of subnets to place the EKS cluster and workers within. | `list(string)` | `[]` | no | | [tags](#input\_tags) | A map of tags to add to all resources. Tags added to launch configuration or templates override these values for ASG Tags only. | `map(string)` | `{}` | no | -| [timeouts](#input\_timeouts) | A map of timeouts for create/update/delete operations. | `map(string)` | `{}` | no | -| [vpc\_id](#input\_vpc\_id) | VPC where the cluster and workers will be deployed. | `string` | n/a | yes | +| [vpc\_id](#input\_vpc\_id) | VPC where the cluster and workers will be deployed. | `string` | `null` | no | | [wait\_for\_cluster\_timeout](#input\_wait\_for\_cluster\_timeout) | A timeout (in seconds) to wait for cluster to be available. 
| `number` | `300` | no | | [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no | | [worker\_ami\_name\_filter](#input\_worker\_ami\_name\_filter) | Name filter for AWS EKS worker AMI. If not provided, the latest official AMI for the specified 'cluster\_version' is used. | `string` | `""` | no | diff --git a/aws_auth.tf b/aws_auth.tf index 6eb5632..6de6976 100644 --- a/aws_auth.tf +++ b/aws_auth.tf @@ -18,7 +18,7 @@ locals { ] auth_worker_roles = [ - for index in range(0, var.create_eks ? local.worker_group_count : 0) : { + for index in range(0, var.create_eks ? local.worker_group_launch_configuration_count : 0) : { worker_role_arn = "arn:${data.aws_partition.current.partition}:iam::${data.aws_caller_identity.current.account_id}:role/${element( coalescelist( aws_iam_instance_profile.workers.*.role, @@ -61,8 +61,7 @@ locals { } resource "kubernetes_config_map" "aws_auth" { - count = var.create_eks && var.manage_aws_auth ? 1 : 0 - depends_on = [data.http.wait_for_cluster[0]] + count = var.create_eks && var.manage_aws_auth ? 1 : 0 metadata { name = "aws-auth" @@ -88,4 +87,6 @@ resource "kubernetes_config_map" "aws_auth" { mapUsers = yamlencode(var.map_users) mapAccounts = yamlencode(var.map_accounts) } + + depends_on = [data.http.wait_for_cluster[0]] } diff --git a/data.tf b/data.tf index bc80e74..fe3b6e3 100644 --- a/data.tf +++ b/data.tf @@ -18,7 +18,7 @@ data "aws_iam_policy_document" "workers_assume_role_policy" { } data "aws_ami" "eks_worker" { - count = local.worker_has_linux_ami ? 1 : 0 + count = contains(local.worker_groups_platforms, "linux") ? 1 : 0 filter { name = "name" @@ -31,7 +31,7 @@ data "aws_ami" "eks_worker" { } data "aws_ami" "eks_worker_windows" { - count = local.worker_has_windows_ami ? 1 : 0 + count = contains(local.worker_groups_platforms, "windows") ? 1 : 0 filter { name = "name" @@ -65,11 +65,13 @@ data "aws_iam_policy_document" "cluster_assume_role_policy" { data "aws_iam_role" "custom_cluster_iam_role" { count = var.manage_cluster_iam_resources ? 0 : 1 - name = var.cluster_iam_role_name + + name = var.cluster_iam_role_name } data "aws_iam_instance_profile" "custom_worker_group_iam_instance_profile" { - count = var.manage_worker_iam_resources ? 0 : local.worker_group_count + count = var.manage_worker_iam_resources ? 0 : local.worker_group_launch_configuration_count + name = lookup( var.worker_groups[count.index], "iam_instance_profile_name", @@ -79,6 +81,7 @@ data "aws_iam_instance_profile" "custom_worker_group_iam_instance_profile" { data "aws_iam_instance_profile" "custom_worker_group_launch_template_iam_instance_profile" { count = var.manage_worker_iam_resources ? 0 : local.worker_group_launch_template_count + name = lookup( var.worker_groups_launch_template[count.index], "iam_instance_profile_name", @@ -87,9 +90,10 @@ data "aws_iam_instance_profile" "custom_worker_group_launch_template_iam_instanc } data "http" "wait_for_cluster" { - count = var.create_eks && var.manage_aws_auth ? 1 : 0 + count = var.create_eks && var.manage_aws_auth ? 
1 : 0
+
   url            = format("%s/healthz", aws_eks_cluster.this[0].endpoint)
-  ca_certificate = base64decode(coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0])
+  ca_certificate = base64decode(local.cluster_auth_base64)
   timeout        = var.wait_for_cluster_timeout
 
   depends_on = [
diff --git a/examples/README.md b/examples/README.md
deleted file mode 100644
index 7ee5beb..0000000
--- a/examples/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Examples
-
-These serve a few purposes:
-
-1. Shows developers how to use the module in a straightforward way as integrated with other terraform community supported modules.
-2. Serves as the test infrastructure for CI on the project.
-3. Provides a simple way to play with the Kubernetes cluster you create.
-
-## IAM Permissions
-
-You can see the minimum IAM Permissions required [here](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/iam-permissions.md).
diff --git a/examples/_bootstrap/README.md b/examples/_bootstrap/README.md
new file mode 100644
index 0000000..28e34f9
--- /dev/null
+++ b/examples/_bootstrap/README.md
@@ -0,0 +1,60 @@
+# Various bootstrap resources required for other EKS examples
+
+Configuration in this directory creates some resources required in other EKS examples (such as a VPC).
+
+The resources created here are free (no NAT gateways) and can reside in a test AWS account.
+
+## Usage
+
+To run this example you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [aws](#requirement\_aws) | >= 3.22.0 |
+| [kubernetes](#requirement\_kubernetes) | >= 1.11 |
+| [random](#requirement\_random) | >= 2.1 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 3.22.0 |
+| [random](#provider\_random) | >= 2.1 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
+| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
+
+## Inputs
+
+No inputs. 
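Other examples consume these bootstrap outputs through a `terraform_remote_state` data source. For reference, a minimal sketch matching the wiring used in `examples/complete` and `examples/fargate`, assuming local state written to `../_bootstrap`:

```hcl
data "terraform_remote_state" "bootstrap" {
  backend = "local"

  config = {
    path = "../_bootstrap/terraform.tfstate"
  }
}

locals {
  # Outputs published by this directory: region, cluster_name and vpc
  region       = data.terraform_remote_state.bootstrap.outputs.region
  cluster_name = data.terraform_remote_state.bootstrap.outputs.cluster_name
  vpc          = data.terraform_remote_state.bootstrap.outputs.vpc
}
```

Run `terraform apply` here first so the state file exists before the dependent examples are planned.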
+ +## Outputs + +| Name | Description | +|------|-------------| +| [cluster\_name](#output\_cluster\_name) | Name of EKS Cluster used in tags for subnets | +| [region](#output\_region) | AWS region | +| [vpc](#output\_vpc) | Complete output of VPC module | + diff --git a/examples/_bootstrap/main.tf b/examples/_bootstrap/main.tf new file mode 100644 index 0000000..8492741 --- /dev/null +++ b/examples/_bootstrap/main.tf @@ -0,0 +1,50 @@ +provider "aws" { + region = local.region +} + +locals { + region = "eu-west-1" + name = "bootstrap-example" + vpc_cidr = "10.0.0.0/16" + + cluster_name = "test-eks-${random_string.suffix.result}" +} + +data "aws_availability_zones" "available" {} + +resource "random_string" "suffix" { + length = 8 + special = false +} + +################################################################################ +# Supporting Resources +################################################################################ + +module "vpc" { + source = "terraform-aws-modules/vpc/aws" + version = "~> 3.0" + + name = local.name + cidr = "10.0.0.0/16" + + azs = data.aws_availability_zones.available.names + public_subnets = [for k, v in data.aws_availability_zones.available.names : cidrsubnet(local.vpc_cidr, 8, k)] + private_subnets = [for k, v in data.aws_availability_zones.available.names : cidrsubnet(local.vpc_cidr, 8, k + 10)] + + # NAT Gateway is disabled in the examples primarily to save costs and be able to recreate VPC faster. + enable_nat_gateway = false + single_nat_gateway = false + + enable_dns_hostnames = true + + public_subnet_tags = { + "kubernetes.io/cluster/${local.cluster_name}" = "shared" + "kubernetes.io/role/elb" = "1" + } + + private_subnet_tags = { + "kubernetes.io/cluster/${local.cluster_name}" = "shared" + "kubernetes.io/role/internal-elb" = "1" + } +} diff --git a/examples/_bootstrap/outputs.tf b/examples/_bootstrap/outputs.tf new file mode 100644 index 0000000..87a2e49 --- /dev/null +++ b/examples/_bootstrap/outputs.tf @@ -0,0 +1,14 @@ +output "region" { + description = "AWS region" + value = local.region +} + +output "cluster_name" { + description = "Name of EKS Cluster used in tags for subnets" + value = local.cluster_name +} + +output "vpc" { + description = "Complete output of VPC module" + value = module.vpc +} diff --git a/examples/_bootstrap/variables.tf b/examples/_bootstrap/variables.tf new file mode 100644 index 0000000..e69de29 diff --git a/examples/spot_instances/versions.tf b/examples/_bootstrap/versions.tf similarity index 71% rename from examples/spot_instances/versions.tf rename to examples/_bootstrap/versions.tf index 6e29ae8..5539f13 100644 --- a/examples/spot_instances/versions.tf +++ b/examples/_bootstrap/versions.tf @@ -3,8 +3,7 @@ terraform { required_providers { aws = ">= 3.22.0" - local = ">= 1.4" random = ">= 2.1" - kubernetes = "~> 1.11" + kubernetes = ">= 1.11" } } diff --git a/examples/basic/main.tf b/examples/basic/main.tf deleted file mode 100644 index c6257a2..0000000 --- a/examples/basic/main.tf +++ /dev/null @@ -1,138 +0,0 @@ -provider "aws" { - region = var.region -} - -data "aws_eks_cluster" "cluster" { - name = module.eks.cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks.cluster_id -} - -provider "kubernetes" { - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token - load_config_file = false -} - -data "aws_availability_zones" "available" { -} - -locals { 
- cluster_name = "test-eks-${random_string.suffix.result}" -} - -resource "random_string" "suffix" { - length = 8 - special = false -} - -resource "aws_security_group" "worker_group_mgmt_one" { - name_prefix = "worker_group_mgmt_one" - vpc_id = module.vpc.vpc_id - - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - - cidr_blocks = [ - "10.0.0.0/8", - ] - } -} - -resource "aws_security_group" "worker_group_mgmt_two" { - name_prefix = "worker_group_mgmt_two" - vpc_id = module.vpc.vpc_id - - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - - cidr_blocks = [ - "192.168.0.0/16", - ] - } -} - -resource "aws_security_group" "all_worker_mgmt" { - name_prefix = "all_worker_management" - vpc_id = module.vpc.vpc_id - - ingress { - from_port = 22 - to_port = 22 - protocol = "tcp" - - cidr_blocks = [ - "10.0.0.0/8", - "172.16.0.0/12", - "192.168.0.0/16", - ] - } -} - -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 2.47" - - name = "test-vpc" - cidr = "10.0.0.0/16" - azs = data.aws_availability_zones.available.names - private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"] - public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] - enable_nat_gateway = true - single_nat_gateway = true - enable_dns_hostnames = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" - } -} - -module "eks" { - source = "../.." - cluster_name = local.cluster_name - cluster_version = "1.20" - subnets = module.vpc.private_subnets - - tags = { - Environment = "test" - GithubRepo = "terraform-aws-eks" - GithubOrg = "terraform-aws-modules" - } - - vpc_id = module.vpc.vpc_id - - worker_groups = [ - { - name = "worker-group-1" - instance_type = "t3.small" - additional_userdata = "echo foo bar" - asg_desired_capacity = 2 - additional_security_group_ids = [aws_security_group.worker_group_mgmt_one.id] - }, - { - name = "worker-group-2" - instance_type = "t3.medium" - additional_userdata = "echo foo bar" - additional_security_group_ids = [aws_security_group.worker_group_mgmt_two.id] - asg_desired_capacity = 1 - }, - ] - - worker_additional_security_group_ids = [aws_security_group.all_worker_mgmt.id] - map_roles = var.map_roles - map_users = var.map_users - map_accounts = var.map_accounts -} diff --git a/examples/basic/variables.tf b/examples/basic/variables.tf deleted file mode 100644 index 7085aea..0000000 --- a/examples/basic/variables.tf +++ /dev/null @@ -1,52 +0,0 @@ -variable "region" { - default = "us-west-2" -} - -variable "map_accounts" { - description = "Additional AWS account numbers to add to the aws-auth configmap." - type = list(string) - - default = [ - "777777777777", - "888888888888", - ] -} - -variable "map_roles" { - description = "Additional IAM roles to add to the aws-auth configmap." - type = list(object({ - rolearn = string - username = string - groups = list(string) - })) - - default = [ - { - rolearn = "arn:aws:iam::66666666666:role/role1" - username = "role1" - groups = ["system:masters"] - }, - ] -} - -variable "map_users" { - description = "Additional IAM users to add to the aws-auth configmap." 
-  type = list(object({
-    userarn  = string
-    username = string
-    groups   = list(string)
-  }))
-
-  default = [
-    {
-      userarn  = "arn:aws:iam::66666666666:user/user1"
-      username = "user1"
-      groups   = ["system:masters"]
-    },
-    {
-      userarn  = "arn:aws:iam::66666666666:user/user2"
-      username = "user2"
-      groups   = ["system:masters"]
-    },
-  ]
-}
diff --git a/examples/bottlerocket/README.md b/examples/bottlerocket/README.md
index cc73eb7..f51a0b6 100644
--- a/examples/bottlerocket/README.md
+++ b/examples/bottlerocket/README.md
@@ -1,7 +1,71 @@
-# AWS Bottlerocket based nodes
+# AWS EKS cluster running Bottlerocket AMI
 
-This is a minimalistic example that shows how to use functionality of this module to deploy
-nodes based on [AWS Bottlerocket container OS](https://github.com/bottlerocket-os/bottlerocket)
+Configuration in this directory creates an EKS cluster with nodes running [AWS Bottlerocket OS](https://github.com/bottlerocket-os/bottlerocket).
 
-Example is minimalistic by purpose - it shows what knobs to turn to make Bottlerocket work.
-Do not use default VPC for your workloads deployment.
\ No newline at end of file
+This is a minimalistic example which shows what knobs to turn to make Bottlerocket work.
+
+See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/eks-optimized-ami-bottlerocket.html) for more details.
+
+## Usage
+
+To run this example you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [aws](#requirement\_aws) | >= 3.22.0 |
+| [random](#requirement\_random) | >= 2.1 |
+| [tls](#requirement\_tls) | >= 2.0 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 3.22.0 |
+| [random](#provider\_random) | >= 2.1 |
+| [tls](#provider\_tls) | >= 2.0 |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [eks](#module\_eks) | ../.. |  |
+
+## Resources
+
+| Name | Type |
+|------|------|
+| [aws_iam_role_policy_attachment.ssm](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource |
+| [aws_key_pair.nodes](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/key_pair) | resource |
+| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
+| [tls_private_key.nodes](https://registry.terraform.io/providers/hashicorp/tls/latest/docs/resources/private_key) | resource |
+| [aws_ami.bottlerocket_ami](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/ami) | data source |
+| [aws_subnet_ids.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/subnet_ids) | data source |
+| [aws_vpc.default](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc) | data source |
+
+## Inputs
+
+No inputs.
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
+| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
+| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. 
| +| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. | +| [node\_groups](#output\_node\_groups) | Outputs from node groups | +| [region](#output\_region) | AWS region. | + diff --git a/examples/bottlerocket/data.tf b/examples/bottlerocket/data.tf deleted file mode 100644 index bf380b3..0000000 --- a/examples/bottlerocket/data.tf +++ /dev/null @@ -1,22 +0,0 @@ -data "aws_ami" "bottlerocket_ami" { - most_recent = true - owners = ["amazon"] - filter { - name = "name" - values = ["bottlerocket-aws-k8s-${var.k8s_version}-x86_64-*"] - } -} - -data "aws_region" "current" {} - -data "aws_vpc" "default" { - default = true -} - -data "aws_subnet_ids" "default" { - vpc_id = data.aws_vpc.default.id -} - -data "aws_iam_policy" "ssm" { - arn = "arn:aws:iam::aws:policy/AmazonSSMManagedInstanceCore" -} \ No newline at end of file diff --git a/examples/bottlerocket/main.tf b/examples/bottlerocket/main.tf index b254af0..9631ef5 100644 --- a/examples/bottlerocket/main.tf +++ b/examples/bottlerocket/main.tf @@ -1,31 +1,27 @@ -terraform { - required_version = ">= 0.13.0" +provider "aws" { + region = local.region } -resource "tls_private_key" "nodes" { - algorithm = "RSA" -} - -resource "aws_key_pair" "nodes" { - key_name = "bottlerocket-nodes" - public_key = tls_private_key.nodes.public_key_openssh +locals { + region = "eu-west-1" + k8s_version = "1.21" } module "eks" { - source = "../.." - cluster_name = "bottlerocket" - cluster_version = var.k8s_version - subnets = data.aws_subnet_ids.default.ids + source = "../.." - vpc_id = data.aws_vpc.default.id + cluster_name = "bottlerocket-${random_string.suffix.result}" + cluster_version = local.k8s_version + + vpc_id = data.aws_vpc.default.id + subnets = data.aws_subnet_ids.default.ids write_kubeconfig = false manage_aws_auth = false worker_groups_launch_template = [ { - name = "bottlerocket-nodes" - # passing bottlerocket ami id + name = "bottlerocket-nodes" ami_id = data.aws_ami.bottlerocket_ami.id instance_type = "t3a.small" asg_desired_capacity = 2 @@ -42,9 +38,9 @@ module "eks" { # we are using this section to pass additional arguments for # userdata template rendering userdata_template_extra_args = { - enable_admin_container = var.enable_admin_container - enable_control_container = var.enable_control_container - aws_region = data.aws_region.current.name + enable_admin_container = false + enable_control_container = true + aws_region = local.region } # example of k8s/kubelet configuration via additional_userdata additional_userdata = < +## Requirements + +| Name | Version | +|------|---------| +| [terraform](#requirement\_terraform) | >= 0.13.1 | +| [aws](#requirement\_aws) | >= 3.22.0 | +| [kubernetes](#requirement\_kubernetes) | >= 1.11 | +| [local](#requirement\_local) | >= 1.4 | +| [random](#requirement\_random) | >= 2.1 | + +## Providers + +| Name | Version | +|------|---------| +| [aws](#provider\_aws) | >= 3.22.0 | +| [terraform](#provider\_terraform) | n/a | + +## Modules + +| Name | Source | Version | +|------|--------|---------| +| [disabled\_eks](#module\_disabled\_eks) | ../.. | | +| [disabled\_fargate](#module\_disabled\_fargate) | ../../modules/fargate | | +| [disabled\_node\_groups](#module\_disabled\_node\_groups) | ../../modules/node_groups | | +| [eks](#module\_eks) | ../.. 
| | + +## Resources + +| Name | Type | +|------|------| +| [aws_security_group.all_worker_mgmt](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_security_group.worker_group_mgmt_one](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_security_group.worker_group_mgmt_two](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/security_group) | resource | +| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | +| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | +| [terraform_remote_state.bootstrap](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/data-sources/remote_state) | data source | + +## Inputs + +No inputs. + +## Outputs + +| Name | Description | +|------|-------------| +| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. | +| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. | +| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. | +| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. | +| [node\_groups](#output\_node\_groups) | Outputs from node groups | +| [region](#output\_region) | AWS region. | + diff --git a/examples/complete/main.tf b/examples/complete/main.tf new file mode 100644 index 0000000..c0e2c8b --- /dev/null +++ b/examples/complete/main.tf @@ -0,0 +1,247 @@ +provider "aws" { + region = local.region +} + +module "eks" { + source = "../.." 
+ + cluster_name = local.cluster_name + cluster_version = "1.21" + + vpc_id = local.vpc.vpc_id + subnets = [local.vpc.private_subnets[0], local.vpc.public_subnets[1]] + fargate_subnets = [local.vpc.private_subnets[2]] + + worker_additional_security_group_ids = [aws_security_group.all_worker_mgmt.id] + + # Worker groups (using Launch Configurations) + worker_groups = [ + { + name = "worker-group-1" + instance_type = "t3.small" + additional_userdata = "echo foo bar" + asg_desired_capacity = 2 + additional_security_group_ids = [aws_security_group.worker_group_mgmt_one.id] + }, + { + name = "worker-group-2" + instance_type = "t3.medium" + additional_userdata = "echo foo bar" + additional_security_group_ids = [aws_security_group.worker_group_mgmt_two.id] + asg_desired_capacity = 1 + }, + ] + + # Worker groups (using Launch Templates) + worker_groups_launch_template = [ + { + name = "spot-1" + override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"] + spot_instance_pools = 4 + asg_max_size = 5 + asg_desired_capacity = 5 + kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot" + public_ip = true + }, + ] + + # Managed Node Groups + node_groups_defaults = { + ami_type = "AL2_x86_64" + disk_size = 50 + } + + node_groups = { + example = { + desired_capacity = 1 + max_capacity = 10 + min_capacity = 1 + + instance_types = ["t3.large"] + capacity_type = "SPOT" + k8s_labels = { + Environment = "test" + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } + additional_tags = { + ExtraTag = "example" + } + taints = [ + { + key = "dedicated" + value = "gpuGroup" + effect = "NO_SCHEDULE" + } + ] + update_config = { + max_unavailable_percentage = 50 # or set `max_unavailable` + } + } + } + + # Fargate + fargate_profiles = { + default = { + name = "default" + selectors = [ + { + namespace = "kube-system" + labels = { + k8s-app = "kube-dns" + } + }, + { + namespace = "default" + } + ] + + tags = { + Owner = "test" + } + } + } + + # AWS Auth (kubernetes_config_map) + map_roles = [ + { + rolearn = "arn:aws:iam::66666666666:role/role1" + username = "role1" + groups = ["system:masters"] + }, + ] + + map_users = [ + { + userarn = "arn:aws:iam::66666666666:user/user1" + username = "user1" + groups = ["system:masters"] + }, + { + userarn = "arn:aws:iam::66666666666:user/user2" + username = "user2" + groups = ["system:masters"] + }, + ] + + map_accounts = [ + "777777777777", + "888888888888", + ] + + tags = { + Environment = "test" + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } +} + +#################### +# Disabled creation +#################### + +module "disabled_eks" { + source = "../.." 
+ + create_eks = false +} + +module "disabled_fargate" { + source = "../../modules/fargate" + + create_fargate_pod_execution_role = false +} + +module "disabled_node_groups" { + source = "../../modules/node_groups" + + create_eks = false +} + +############# +# Kubernetes +############# + +data "aws_eks_cluster" "cluster" { + name = module.eks.cluster_id +} + +data "aws_eks_cluster_auth" "cluster" { + name = module.eks.cluster_id +} + +provider "kubernetes" { + host = data.aws_eks_cluster.cluster.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) + token = data.aws_eks_cluster_auth.cluster.token +} + +################################################################################ +# Supporting resources +################################################################################ + +resource "aws_security_group" "worker_group_mgmt_one" { + name_prefix = "worker_group_mgmt_one" + vpc_id = local.vpc.vpc_id + + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + + cidr_blocks = [ + "10.0.0.0/8", + ] + } +} + +resource "aws_security_group" "worker_group_mgmt_two" { + name_prefix = "worker_group_mgmt_two" + vpc_id = local.vpc.vpc_id + + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + + cidr_blocks = [ + "192.168.0.0/16", + ] + } +} + +resource "aws_security_group" "all_worker_mgmt" { + name_prefix = "all_worker_management" + vpc_id = local.vpc.vpc_id + + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + + cidr_blocks = [ + "10.0.0.0/8", + "172.16.0.0/12", + "192.168.0.0/16", + ] + } +} + + +################################################################################ +# Supporting resources (managed in "_bootstrap" directory) +################################################################################ + +data "terraform_remote_state" "bootstrap" { + backend = "local" + + config = { + path = "../_bootstrap/terraform.tfstate" + } +} + +locals { + region = data.terraform_remote_state.bootstrap.outputs.region + cluster_name = data.terraform_remote_state.bootstrap.outputs.cluster_name + vpc = data.terraform_remote_state.bootstrap.outputs.vpc +} diff --git a/examples/complete/outputs.tf b/examples/complete/outputs.tf new file mode 100644 index 0000000..3564102 --- /dev/null +++ b/examples/complete/outputs.tf @@ -0,0 +1,29 @@ +output "region" { + description = "AWS region." + value = local.region +} + +output "cluster_endpoint" { + description = "Endpoint for EKS control plane." + value = module.eks.cluster_endpoint +} + +output "cluster_security_group_id" { + description = "Security group ids attached to the cluster control plane." + value = module.eks.cluster_security_group_id +} + +output "kubectl_config" { + description = "kubectl config as generated by the module." + value = module.eks.kubeconfig +} + +output "config_map_aws_auth" { + description = "A kubernetes configuration to authenticate to this EKS cluster." 
+  value       = module.eks.config_map_aws_auth
+}
+
+output "node_groups" {
+  description = "Outputs from node groups"
+  value       = module.eks.node_groups
+}
diff --git a/examples/complete/variables.tf b/examples/complete/variables.tf
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/examples/complete/variables.tf
@@ -0,0 +1 @@
+
diff --git a/examples/basic/versions.tf b/examples/complete/versions.tf
similarity index 85%
rename from examples/basic/versions.tf
rename to examples/complete/versions.tf
index 6e29ae8..120d873 100644
--- a/examples/basic/versions.tf
+++ b/examples/complete/versions.tf
@@ -5,6 +5,6 @@ terraform {
     aws        = ">= 3.22.0"
     local      = ">= 1.4"
     random     = ">= 2.1"
-    kubernetes = "~> 1.11"
+    kubernetes = ">= 1.11"
   }
 }
diff --git a/examples/create_false/main.tf b/examples/create_false/main.tf
deleted file mode 100644
index 0afffcd..0000000
--- a/examples/create_false/main.tf
+++ /dev/null
@@ -1,30 +0,0 @@
-provider "aws" {
-  region = var.region
-}
-
-data "aws_eks_cluster" "cluster" {
-  count = 0
-  name  = module.eks.cluster_id
-}
-
-data "aws_eks_cluster_auth" "cluster" {
-  count = 0
-  name  = module.eks.cluster_id
-}
-
-provider "kubernetes" {
-  host                   = element(concat(data.aws_eks_cluster.cluster[*].endpoint, [""]), 0)
-  cluster_ca_certificate = base64decode(element(concat(data.aws_eks_cluster.cluster[*].certificate_authority.0.data, [""]), 0))
-  token                  = element(concat(data.aws_eks_cluster_auth.cluster[*].token, [""]), 0)
-  load_config_file       = false
-}
-
-module "eks" {
-  source          = "../.."
-  create_eks      = false
-  cluster_version = ""
-
-  vpc_id       = ""
-  cluster_name = ""
-  subnets      = []
-}
diff --git a/examples/create_false/variables.tf b/examples/create_false/variables.tf
deleted file mode 100644
index 81b8dbe..0000000
--- a/examples/create_false/variables.tf
+++ /dev/null
@@ -1,3 +0,0 @@
-variable "region" {
-  default = "us-west-2"
-}
diff --git a/examples/fargate/README.md b/examples/fargate/README.md
new file mode 100644
index 0000000..6abd31b
--- /dev/null
+++ b/examples/fargate/README.md
@@ -0,0 +1,68 @@
+# AWS EKS Cluster with Fargate profiles
+
+Configuration in this directory creates an EKS cluster with Fargate profiles in two different ways:
+- Using the root module, where the EKS cluster and Fargate profiles are created together. This is the default behaviour for most users.
+- Using the `modules/fargate` submodule, where Fargate profiles are attached to a barebone EKS cluster.
+
+## Usage
+
+To run this example you need to execute:
+
+```bash
+$ terraform init
+$ terraform plan
+$ terraform apply
+```
+
+Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
+
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| [terraform](#requirement\_terraform) | >= 0.13.1 |
+| [aws](#requirement\_aws) | >= 3.22.0 |
+| [kubernetes](#requirement\_kubernetes) | >= 1.11 |
+| [local](#requirement\_local) | >= 1.4 |
+| [random](#requirement\_random) | >= 2.1 |
+
+## Providers
+
+| Name | Version |
+|------|---------|
+| [aws](#provider\_aws) | >= 3.22.0 |
+| [terraform](#provider\_terraform) | n/a |
+
+## Modules
+
+| Name | Source | Version |
+|------|--------|---------|
+| [barebone\_eks](#module\_barebone\_eks) | ../.. |  |
+| [eks](#module\_eks) | ../.. 
| | +| [fargate\_profile\_existing\_cluster](#module\_fargate\_profile\_existing\_cluster) | ../../modules/fargate | | + +## Resources + +| Name | Type | +|------|------| +| [aws_eks_cluster.barebone](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | +| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source | +| [aws_eks_cluster_auth.barebone](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | +| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source | +| [terraform_remote_state.bootstrap](https://registry.terraform.io/providers/hashicorp/terraform/latest/docs/data-sources/remote_state) | data source | + +## Inputs + +No inputs. + +## Outputs + +| Name | Description | +|------|-------------| +| [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. | +| [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. | +| [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. | +| [fargate\_profile\_arns](#output\_fargate\_profile\_arns) | Outputs from node groups | +| [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. | + diff --git a/examples/fargate/main.tf b/examples/fargate/main.tf index 254793f..0901843 100644 --- a/examples/fargate/main.tf +++ b/examples/fargate/main.tf @@ -1,76 +1,16 @@ -terraform { - required_version = ">= 0.12.6" -} - provider "aws" { - region = var.region -} - -data "aws_eks_cluster" "cluster" { - name = module.eks.cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks.cluster_id -} - -provider "kubernetes" { - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token - load_config_file = false -} - -data "aws_availability_zones" "available" { -} - -locals { - cluster_name = "test-eks-${random_string.suffix.result}" -} - -resource "random_string" "suffix" { - length = 8 - special = false -} - -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 2.47" - - name = "test-vpc" - cidr = "172.16.0.0/16" - azs = data.aws_availability_zones.available.names - private_subnets = ["172.16.1.0/24", "172.16.2.0/24", "172.16.3.0/24"] - public_subnets = ["172.16.4.0/24", "172.16.5.0/24", "172.16.6.0/24"] - enable_nat_gateway = true - single_nat_gateway = true - enable_dns_hostnames = true - - public_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/elb" = "1" - } - - private_subnet_tags = { - "kubernetes.io/cluster/${local.cluster_name}" = "shared" - "kubernetes.io/role/internal-elb" = "1" - } + region = local.region } module "eks" { - source = "../.." + source = "../.." 
+ cluster_name = local.cluster_name - cluster_version = "1.20" - subnets = [module.vpc.private_subnets[0], module.vpc.public_subnets[1]] - fargate_subnets = [module.vpc.private_subnets[2]] + cluster_version = "1.21" - tags = { - Environment = "test" - GithubRepo = "terraform-aws-eks" - GithubOrg = "terraform-aws-modules" - } - - vpc_id = module.vpc.vpc_id + vpc_id = local.vpc.vpc_id + subnets = [local.vpc.private_subnets[0], local.vpc.public_subnets[1]] + fargate_subnets = [local.vpc.private_subnets[2]] fargate_profiles = { default = { @@ -84,25 +24,178 @@ module "eks" { }, { namespace = "default" - # Kubernetes labels for selection - # labels = { - # Environment = "test" - # GithubRepo = "terraform-aws-eks" - # GithubOrg = "terraform-aws-modules" - # } + labels = { + WorkerType = "fargate" + } } ] - # using specific subnets instead of all the ones configured in eks - # subnets = ["subnet-0ca3e3d1234a56c78"] + tags = { + Owner = "default" + } + } + + secondary = { + name = "secondary" + selectors = [ + { + namespace = "default" + labels = { + Environment = "test" + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } + } + ] + + # Using specific subnets instead of the ones configured in EKS (`subnets` and `fargate_subnets`) + subnets = [local.vpc.private_subnets[1]] tags = { - Owner = "test" + Owner = "secondary" } } } - map_roles = var.map_roles - map_users = var.map_users - map_accounts = var.map_accounts + manage_aws_auth = false + + tags = { + Environment = "test" + GithubRepo = "terraform-aws-eks" + GithubOrg = "terraform-aws-modules" + } +} + +############################################## +# Calling submodule with existing EKS cluster +############################################## + +module "fargate_profile_existing_cluster" { + source = "../../modules/fargate" + + cluster_name = module.barebone_eks.cluster_id + subnets = [local.vpc.private_subnets[0], local.vpc.private_subnets[1]] + + fargate_profiles = { + profile1 = { + name = "profile1" + selectors = [ + { + namespace = "kube-system" + labels = { + k8s-app = "kube-dns" + } + }, + { + namespace = "profile" + labels = { + WorkerType = "fargate" + } + } + ] + + tags = { + Owner = "profile1" + } + } + + profile2 = { + name = "profile2" + selectors = [ + { + namespace = "default" + labels = { + Fargate = "profile2" + } + } + ] + + # Using specific subnets instead of the ones configured in EKS (`subnets` and `fargate_subnets`) + subnets = [local.vpc.private_subnets[1]] + + tags = { + Owner = "profile2" + } + } + } + + tags = { + DoYouLoveFargate = "Yes" + } +} + +############# +# Kubernetes +############# + +data "aws_eks_cluster" "cluster" { + name = module.eks.cluster_id +} + +data "aws_eks_cluster_auth" "cluster" { + name = module.eks.cluster_id +} + +provider "kubernetes" { + host = data.aws_eks_cluster.cluster.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) + token = data.aws_eks_cluster_auth.cluster.token +} + +############################################################ +# Barebone EKS Cluster where submodules can add extra stuff +############################################################ + +module "barebone_eks" { + source = "../.." 
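+  # Intentionally minimal: no node groups, worker groups, or Fargate
+  # profiles are defined here, so that submodules such as `modules/fargate`
+  # (see `fargate_profile_existing_cluster` above) can attach their own
+  # capacity to this cluster.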
+ + cluster_name = "barebone-${local.cluster_name}" + cluster_version = "1.21" + + vpc_id = local.vpc.vpc_id + subnets = local.vpc.private_subnets + + tags = { + Environment = "test" + Barebone = "yes_please" + } +} + +############# +# Kubernetes +############# + +data "aws_eks_cluster" "barebone" { + name = module.barebone_eks.cluster_id +} + +data "aws_eks_cluster_auth" "barebone" { + name = module.barebone_eks.cluster_id +} + +provider "kubernetes" { + alias = "barebone" + + host = data.aws_eks_cluster.barebone.endpoint + cluster_ca_certificate = base64decode(data.aws_eks_cluster.barebone.certificate_authority[0].data) + token = data.aws_eks_cluster_auth.barebone.token +} + + +################################################################################ +# Supporting resources (managed in "_bootstrap" directory) +################################################################################ + +data "terraform_remote_state" "bootstrap" { + backend = "local" + + config = { + path = "../_bootstrap/terraform.tfstate" + } +} + +locals { + region = data.terraform_remote_state.bootstrap.outputs.region + cluster_name = data.terraform_remote_state.bootstrap.outputs.cluster_name + vpc = data.terraform_remote_state.bootstrap.outputs.vpc } diff --git a/examples/fargate/outputs.tf b/examples/fargate/outputs.tf index 59aa57a..b7f23ee 100644 --- a/examples/fargate/outputs.tf +++ b/examples/fargate/outputs.tf @@ -18,11 +18,6 @@ output "config_map_aws_auth" { value = module.eks.config_map_aws_auth } -output "region" { - description = "AWS region." - value = var.region -} - output "fargate_profile_arns" { description = "Outputs from node groups" value = module.eks.fargate_profile_arns diff --git a/examples/fargate/variables.tf b/examples/fargate/variables.tf index 7085aea..e69de29 100644 --- a/examples/fargate/variables.tf +++ b/examples/fargate/variables.tf @@ -1,52 +0,0 @@ -variable "region" { - default = "us-west-2" -} - -variable "map_accounts" { - description = "Additional AWS account numbers to add to the aws-auth configmap." - type = list(string) - - default = [ - "777777777777", - "888888888888", - ] -} - -variable "map_roles" { - description = "Additional IAM roles to add to the aws-auth configmap." - type = list(object({ - rolearn = string - username = string - groups = list(string) - })) - - default = [ - { - rolearn = "arn:aws:iam::66666666666:role/role1" - username = "role1" - groups = ["system:masters"] - }, - ] -} - -variable "map_users" { - description = "Additional IAM users to add to the aws-auth configmap." 
- type = list(object({ - userarn = string - username = string - groups = list(string) - })) - - default = [ - { - userarn = "arn:aws:iam::66666666666:user/user1" - username = "user1" - groups = ["system:masters"] - }, - { - userarn = "arn:aws:iam::66666666666:user/user2" - username = "user2" - groups = ["system:masters"] - }, - ] -} diff --git a/examples/fargate/versions.tf b/examples/fargate/versions.tf index 6e29ae8..120d873 100644 --- a/examples/fargate/versions.tf +++ b/examples/fargate/versions.tf @@ -5,6 +5,6 @@ terraform { aws = ">= 3.22.0" local = ">= 1.4" random = ">= 2.1" - kubernetes = "~> 1.11" + kubernetes = ">= 1.11" } } diff --git a/examples/instance_refresh/main.tf b/examples/instance_refresh/main.tf index 1883ecc..54735fc 100644 --- a/examples/instance_refresh/main.tf +++ b/examples/instance_refresh/main.tf @@ -1,7 +1,7 @@ # Based on the official aws-node-termination-handler setup guide at https://github.com/aws/aws-node-termination-handler#infrastructure-setup provider "aws" { - region = var.region + region = local.region } data "aws_caller_identity" "current" {} @@ -16,15 +16,14 @@ data "aws_eks_cluster_auth" "cluster" { provider "kubernetes" { host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) token = data.aws_eks_cluster_auth.cluster.token - load_config_file = false } provider "helm" { kubernetes { host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) token = data.aws_eks_cluster_auth.cluster.token } } @@ -34,6 +33,7 @@ data "aws_availability_zones" "available" { locals { cluster_name = "test-refresh-${random_string.suffix.result}" + region = "eu-west-1" } resource "random_string" "suffix" { @@ -102,7 +102,7 @@ data "aws_iam_policy_document" "aws_node_termination_handler_events" { "sqs:SendMessage", ] resources = [ - "arn:aws:sqs:${var.region}:${data.aws_caller_identity.current.account_id}:${local.cluster_name}", + "arn:aws:sqs:${local.region}:${data.aws_caller_identity.current.account_id}:${local.cluster_name}", ] } } @@ -184,7 +184,7 @@ resource "helm_release" "aws_node_termination_handler" { set { name = "awsRegion" - value = var.region + value = local.region } set { name = "serviceAccount.name" diff --git a/examples/instance_refresh/outputs.tf b/examples/instance_refresh/outputs.tf index 3b981f1..a3dd033 100644 --- a/examples/instance_refresh/outputs.tf +++ b/examples/instance_refresh/outputs.tf @@ -18,11 +18,6 @@ output "config_map_aws_auth" { value = module.eks.config_map_aws_auth } -output "region" { - description = "AWS region." - value = var.region -} - output "sqs_queue_asg_notification_arn" { description = "SQS queue ASG notification ARN" value = module.aws_node_termination_handler_sqs.sqs_queue_arn diff --git a/examples/instance_refresh/variables.tf b/examples/instance_refresh/variables.tf index 96fc26d..60b2433 100644 --- a/examples/instance_refresh/variables.tf +++ b/examples/instance_refresh/variables.tf @@ -1,18 +1,17 @@ -variable "region" { - default = "us-west-2" -} - variable "aws_node_termination_handler_chart_version" { description = "Version of the aws-node-termination-handler Helm chart to install." 
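+  # Declaring an explicit `type` documents the expected value and lets
+  # Terraform validate input at plan time.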
+ type = string default = "0.15.0" } variable "namespace" { description = "Namespace for the aws-node-termination-handler." + type = string default = "kube-system" } variable "serviceaccount" { description = "Serviceaccount for the aws-node-termination-handler." + type = string default = "aws-node-termination-handler" } diff --git a/examples/irsa/cluster-autoscaler-chart-values.yaml b/examples/irsa/cluster-autoscaler-chart-values.yaml index ccde436..4e5494d 100644 --- a/examples/irsa/cluster-autoscaler-chart-values.yaml +++ b/examples/irsa/cluster-autoscaler-chart-values.yaml @@ -1,4 +1,4 @@ -awsRegion: us-west-2 +awsRegion: eu-west-1 rbac: create: true diff --git a/examples/irsa/irsa.tf b/examples/irsa/irsa.tf index c841d4f..7bb9f7f 100644 --- a/examples/irsa/irsa.tf +++ b/examples/irsa/irsa.tf @@ -1,6 +1,7 @@ module "iam_assumable_role_admin" { - source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc" - version = "3.6.0" + source = "terraform-aws-modules/iam/aws//modules/iam-assumable-role-with-oidc" + version = "3.6.0" + create_role = true role_name = "cluster-autoscaler" provider_url = replace(module.eks.cluster_oidc_issuer_url, "https://", "") diff --git a/examples/irsa/main.tf b/examples/irsa/main.tf index e6c9fa4..c9ea505 100644 --- a/examples/irsa/main.tf +++ b/examples/irsa/main.tf @@ -1,5 +1,5 @@ provider "aws" { - region = var.region + region = "eu-west-1" } data "aws_eks_cluster" "cluster" { @@ -12,9 +12,8 @@ data "aws_eks_cluster_auth" "cluster" { provider "kubernetes" { host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) token = data.aws_eks_cluster_auth.cluster.token - load_config_file = false } data "aws_availability_zones" "available" {} diff --git a/examples/irsa/outputs.tf b/examples/irsa/outputs.tf index ef2ab95..796e8ee 100644 --- a/examples/irsa/outputs.tf +++ b/examples/irsa/outputs.tf @@ -1,3 +1,4 @@ output "aws_account_id" { - value = data.aws_caller_identity.current.account_id + description = "IAM AWS account id" + value = data.aws_caller_identity.current.account_id } diff --git a/examples/irsa/variables.tf b/examples/irsa/variables.tf index 81b8dbe..e69de29 100644 --- a/examples/irsa/variables.tf +++ b/examples/irsa/variables.tf @@ -1,3 +0,0 @@ -variable "region" { - default = "us-west-2" -} diff --git a/examples/launch_templates/main.tf b/examples/launch_templates/main.tf index ad71e13..68fc205 100644 --- a/examples/launch_templates/main.tf +++ b/examples/launch_templates/main.tf @@ -1,5 +1,5 @@ provider "aws" { - region = var.region + region = "eu-west-1" } data "aws_eks_cluster" "cluster" { @@ -12,9 +12,8 @@ data "aws_eks_cluster_auth" "cluster" { provider "kubernetes" { host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) token = data.aws_eks_cluster_auth.cluster.token - load_config_file = false } data "aws_availability_zones" "available" { diff --git a/examples/launch_templates/outputs.tf b/examples/launch_templates/outputs.tf index a0788af..359db3a 100644 --- a/examples/launch_templates/outputs.tf +++ b/examples/launch_templates/outputs.tf @@ -17,9 +17,3 @@ output "config_map_aws_auth" { description = "A kubernetes configuration to authenticate to this EKS 
cluster." value = module.eks.config_map_aws_auth } - -output "region" { - description = "AWS region." - value = var.region -} - diff --git a/examples/launch_templates/variables.tf b/examples/launch_templates/variables.tf index f69e500..e69de29 100644 --- a/examples/launch_templates/variables.tf +++ b/examples/launch_templates/variables.tf @@ -1,4 +0,0 @@ -variable "region" { - default = "us-west-2" -} - diff --git a/examples/launch_templates_with_managed_node_groups/disk_encryption_policy.tf b/examples/launch_templates_with_managed_node_groups/disk_encryption_policy.tf index 0f51fb1..0362193 100644 --- a/examples/launch_templates_with_managed_node_groups/disk_encryption_policy.tf +++ b/examples/launch_templates_with_managed_node_groups/disk_encryption_policy.tf @@ -4,74 +4,74 @@ resource "aws_iam_service_linked_role" "autoscaling" { description = "Default Service-Linked Role enables access to AWS Services and Resources used or managed by Auto Scaling" } -data "aws_caller_identity" "current" {} - -# This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes -data "aws_iam_policy_document" "ebs_decryption" { - # Copy of default KMS policy that lets you manage it - statement { - sid = "Enable IAM User Permissions" - effect = "Allow" - - principals { - type = "AWS" - identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] - } - - actions = [ - "kms:*" - ] - - resources = ["*"] - } - - # Required for EKS - statement { - sid = "Allow service-linked role use of the CMK" - effect = "Allow" - - principals { - type = "AWS" - identifiers = [ - "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes - module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs - ] - } - - actions = [ - "kms:Encrypt", - "kms:Decrypt", - "kms:ReEncrypt*", - "kms:GenerateDataKey*", - "kms:DescribeKey" - ] - - resources = ["*"] - } - - statement { - sid = "Allow attachment of persistent resources" - effect = "Allow" - - principals { - type = "AWS" - identifiers = [ - "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes - module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs - ] - } - - actions = [ - "kms:CreateGrant" - ] - - resources = ["*"] - - condition { - test = "Bool" - variable = "kms:GrantIsForAWSResource" - values = ["true"] - } - - } -} +#data "aws_caller_identity" "current" {} +# +## This policy is required for the KMS key used for EKS root volumes, so the cluster is allowed to enc/dec/attach encrypted EBS volumes +#data "aws_iam_policy_document" "ebs_decryption" { +# # Copy of default KMS policy that lets you manage it +# statement { +# sid = "Enable IAM User Permissions" +# effect = "Allow" +# +# principals { +# type = "AWS" +# identifiers = ["arn:aws:iam::${data.aws_caller_identity.current.account_id}:root"] +# } +# +# actions = [ +# "kms:*" +# ] +# +# resources = ["*"] +# } +# +# # Required for EKS +# statement { +# sid = "Allow service-linked role use of the CMK" +# effect = "Allow" +# +# principals { +# type = "AWS" +# identifiers = [ +# 
"arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes +# module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs +# ] +# } +# +# actions = [ +# "kms:Encrypt", +# "kms:Decrypt", +# "kms:ReEncrypt*", +# "kms:GenerateDataKey*", +# "kms:DescribeKey" +# ] +# +# resources = ["*"] +# } +# +# statement { +# sid = "Allow attachment of persistent resources" +# effect = "Allow" +# +# principals { +# type = "AWS" +# identifiers = [ +# "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/aws-service-role/autoscaling.amazonaws.com/AWSServiceRoleForAutoScaling", # required for the ASG to manage encrypted volumes for nodes +# module.eks.cluster_iam_role_arn, # required for the cluster / persistentvolume-controller to create encrypted PVCs +# ] +# } +# +# actions = [ +# "kms:CreateGrant" +# ] +# +# resources = ["*"] +# +# condition { +# test = "Bool" +# variable = "kms:GrantIsForAWSResource" +# values = ["true"] +# } +# +# } +#} diff --git a/examples/launch_templates_with_managed_node_groups/launchtemplate.tf b/examples/launch_templates_with_managed_node_groups/launchtemplate.tf index 2494a7a..a393770 100644 --- a/examples/launch_templates_with_managed_node_groups/launchtemplate.tf +++ b/examples/launch_templates_with_managed_node_groups/launchtemplate.tf @@ -1,15 +1,15 @@ -data "template_file" "launch_template_userdata" { - template = file("${path.module}/templates/userdata.sh.tpl") - - vars = { - cluster_name = local.cluster_name - endpoint = module.eks.cluster_endpoint - cluster_auth_base64 = module.eks.cluster_certificate_authority_data - - bootstrap_extra_args = "" - kubelet_extra_args = "" - } -} +#data "template_file" "launch_template_userdata" { +# template = file("${path.module}/templates/userdata.sh.tpl") +# +# vars = { +# cluster_name = local.cluster_name +# endpoint = module.eks.cluster_endpoint +# cluster_auth_base64 = module.eks.cluster_certificate_authority_data +# +# bootstrap_extra_args = "" +# kubelet_extra_args = "" +# } +#} # This is based on the LT that EKS would create if no custom one is specified (aws ec2 describe-launch-template-versions --launch-template-id xxx) # there are several more options one could set but you probably dont need to modify them diff --git a/examples/launch_templates_with_managed_node_groups/main.tf b/examples/launch_templates_with_managed_node_groups/main.tf index 8d99dcf..78c2b31 100644 --- a/examples/launch_templates_with_managed_node_groups/main.tf +++ b/examples/launch_templates_with_managed_node_groups/main.tf @@ -1,5 +1,5 @@ provider "aws" { - region = var.region + region = "eu-west-1" } data "aws_eks_cluster" "cluster" { @@ -12,9 +12,8 @@ data "aws_eks_cluster_auth" "cluster" { provider "kubernetes" { host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) token = data.aws_eks_cluster_auth.cluster.token - load_config_file = false } data "aws_availability_zones" "available" { diff --git a/examples/spot_instances/outputs.tf b/examples/launch_templates_with_managed_node_groups/outputs.tf similarity index 88% rename from examples/spot_instances/outputs.tf rename to examples/launch_templates_with_managed_node_groups/outputs.tf index a0788af..359db3a 100644 
--- a/examples/spot_instances/outputs.tf +++ b/examples/launch_templates_with_managed_node_groups/outputs.tf @@ -17,9 +17,3 @@ output "config_map_aws_auth" { description = "A kubernetes configuration to authenticate to this EKS cluster." value = module.eks.config_map_aws_auth } - -output "region" { - description = "AWS region." - value = var.region -} - diff --git a/examples/launch_templates_with_managed_node_groups/variables.tf b/examples/launch_templates_with_managed_node_groups/variables.tf index 6dcb269..351ffdb 100644 --- a/examples/launch_templates_with_managed_node_groups/variables.tf +++ b/examples/launch_templates_with_managed_node_groups/variables.tf @@ -1,15 +1,6 @@ -variable "region" { - default = "eu-central-1" -} - variable "instance_type" { + description = "Instance type" # Smallest recommended, where ~1.1Gb of 2Gb memory is available for the Kubernetes pods after ‘warming up’ Docker, Kubelet, and OS - default = "t3.small" type = string -} - -variable "kms_key_arn" { - default = "" - description = "KMS key ARN to use if you want to encrypt EKS node root volumes" - type = string + default = "t3.small" } diff --git a/examples/managed_node_groups/main.tf b/examples/managed_node_groups/main.tf index b619314..8262239 100644 --- a/examples/managed_node_groups/main.tf +++ b/examples/managed_node_groups/main.tf @@ -1,5 +1,5 @@ provider "aws" { - region = var.region + region = "eu-west-1" } data "aws_eks_cluster" "cluster" { @@ -12,9 +12,8 @@ data "aws_eks_cluster_auth" "cluster" { provider "kubernetes" { host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) token = data.aws_eks_cluster_auth.cluster.token - load_config_file = false } data "aws_availability_zones" "available" { diff --git a/examples/managed_node_groups/outputs.tf b/examples/managed_node_groups/outputs.tf index 7010db2..10a3a96 100644 --- a/examples/managed_node_groups/outputs.tf +++ b/examples/managed_node_groups/outputs.tf @@ -18,11 +18,6 @@ output "config_map_aws_auth" { value = module.eks.config_map_aws_auth } -output "region" { - description = "AWS region." - value = var.region -} - output "node_groups" { description = "Outputs from node groups" value = module.eks.node_groups diff --git a/examples/managed_node_groups/variables.tf b/examples/managed_node_groups/variables.tf index 7085aea..57853d8 100644 --- a/examples/managed_node_groups/variables.tf +++ b/examples/managed_node_groups/variables.tf @@ -1,7 +1,3 @@ -variable "region" { - default = "us-west-2" -} - variable "map_accounts" { description = "Additional AWS account numbers to add to the aws-auth configmap." 
type = list(string) diff --git a/examples/secrets_encryption/main.tf b/examples/secrets_encryption/main.tf index 9aebd4c..76fa2b2 100644 --- a/examples/secrets_encryption/main.tf +++ b/examples/secrets_encryption/main.tf @@ -1,5 +1,5 @@ provider "aws" { - region = var.region + region = "eu-west-1" } data "aws_eks_cluster" "cluster" { @@ -12,9 +12,8 @@ data "aws_eks_cluster_auth" "cluster" { provider "kubernetes" { host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) + cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data) token = data.aws_eks_cluster_auth.cluster.token - load_config_file = false } data "aws_availability_zones" "available" { diff --git a/examples/secrets_encryption/outputs.tf b/examples/secrets_encryption/outputs.tf index 51ddb02..359db3a 100644 --- a/examples/secrets_encryption/outputs.tf +++ b/examples/secrets_encryption/outputs.tf @@ -17,8 +17,3 @@ output "config_map_aws_auth" { description = "A kubernetes configuration to authenticate to this EKS cluster." value = module.eks.config_map_aws_auth } - -output "region" { - description = "AWS region." - value = var.region -} diff --git a/examples/secrets_encryption/variables.tf b/examples/secrets_encryption/variables.tf index 7085aea..57853d8 100644 --- a/examples/secrets_encryption/variables.tf +++ b/examples/secrets_encryption/variables.tf @@ -1,7 +1,3 @@ -variable "region" { - default = "us-west-2" -} - variable "map_accounts" { description = "Additional AWS account numbers to add to the aws-auth configmap." type = list(string) diff --git a/examples/secrets_encryption/versions.tf b/examples/secrets_encryption/versions.tf index 6e29ae8..120d873 100644 --- a/examples/secrets_encryption/versions.tf +++ b/examples/secrets_encryption/versions.tf @@ -5,6 +5,6 @@ terraform { aws = ">= 3.22.0" local = ">= 1.4" random = ">= 2.1" - kubernetes = "~> 1.11" + kubernetes = ">= 1.11" } } diff --git a/examples/spot_instances/main.tf b/examples/spot_instances/main.tf deleted file mode 100644 index fb2ad23..0000000 --- a/examples/spot_instances/main.tf +++ /dev/null @@ -1,61 +0,0 @@ -provider "aws" { - region = var.region -} - -data "aws_eks_cluster" "cluster" { - name = module.eks.cluster_id -} - -data "aws_eks_cluster_auth" "cluster" { - name = module.eks.cluster_id -} - -provider "kubernetes" { - host = data.aws_eks_cluster.cluster.endpoint - cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority.0.data) - token = data.aws_eks_cluster_auth.cluster.token - load_config_file = false -} - -data "aws_availability_zones" "available" { -} - -locals { - cluster_name = "test-eks-spot-${random_string.suffix.result}" -} - -resource "random_string" "suffix" { - length = 8 - special = false -} - -module "vpc" { - source = "terraform-aws-modules/vpc/aws" - version = "~> 2.47" - - name = "test-vpc-spot" - cidr = "10.0.0.0/16" - azs = data.aws_availability_zones.available.names - public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"] - enable_dns_hostnames = true -} - -module "eks" { - source = "../.." 
- cluster_name = local.cluster_name - cluster_version = "1.20" - subnets = module.vpc.public_subnets - vpc_id = module.vpc.vpc_id - - worker_groups_launch_template = [ - { - name = "spot-1" - override_instance_types = ["m5.large", "m5a.large", "m5d.large", "m5ad.large"] - spot_instance_pools = 4 - asg_max_size = 5 - asg_desired_capacity = 5 - kubelet_extra_args = "--node-labels=node.kubernetes.io/lifecycle=spot" - public_ip = true - }, - ] -} diff --git a/examples/spot_instances/variables.tf b/examples/spot_instances/variables.tf deleted file mode 100644 index f69e500..0000000 --- a/examples/spot_instances/variables.tf +++ /dev/null @@ -1,4 +0,0 @@ -variable "region" { - default = "us-west-2" -} - diff --git a/fargate.tf b/fargate.tf index ddba3bd..5526e2e 100644 --- a/fargate.tf +++ b/fargate.tf @@ -1,23 +1,16 @@ module "fargate" { - source = "./modules/fargate" - cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0] + source = "./modules/fargate" + create_eks = var.create_eks create_fargate_pod_execution_role = var.create_fargate_pod_execution_role - fargate_pod_execution_role_name = var.fargate_pod_execution_role_name - fargate_profiles = var.fargate_profiles - permissions_boundary = var.permissions_boundary - iam_path = var.iam_path - iam_policy_arn_prefix = local.policy_arn_prefix - subnets = coalescelist(var.fargate_subnets, var.subnets, [""]) - tags = var.tags - # Hack to ensure ordering of resource creation. - # This is a homemade `depends_on` https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2 - # Do not create node_groups before other resources are ready and removes race conditions - # Ensure these resources are created before "unlocking" the data source. - # Will be removed in Terraform 0.13 - eks_depends_on = [ - aws_eks_cluster.this, - kubernetes_config_map.aws_auth, - ] + cluster_name = local.cluster_name + fargate_pod_execution_role_name = var.fargate_pod_execution_role_name + permissions_boundary = var.permissions_boundary + iam_path = var.iam_path + subnets = coalescelist(var.fargate_subnets, var.subnets, [""]) + + fargate_profiles = var.fargate_profiles + + tags = var.tags } diff --git a/irsa.tf b/irsa.tf index 8c6e34d..5fc3dc8 100644 --- a/irsa.tf +++ b/irsa.tf @@ -8,10 +8,11 @@ # https://github.com/terraform-providers/terraform-provider-aws/issues/10104 resource "aws_iam_openid_connect_provider" "oidc_provider" { - count = var.enable_irsa && var.create_eks ? 1 : 0 + count = var.enable_irsa && var.create_eks ? 1 : 0 + client_id_list = local.client_id_list thumbprint_list = [var.eks_oidc_root_ca_thumbprint] - url = flatten(concat(aws_eks_cluster.this[*].identity[*].oidc.0.issuer, [""]))[0] + url = local.cluster_oidc_issuer_url tags = merge( { diff --git a/kubectl.tf b/kubectl.tf index 21021f9..b5d6947 100644 --- a/kubectl.tf +++ b/kubectl.tf @@ -1,5 +1,6 @@ resource "local_file" "kubeconfig" { - count = var.write_kubeconfig && var.create_eks ? 1 : 0 + count = var.write_kubeconfig && var.create_eks ? 1 : 0 + content = local.kubeconfig filename = substr(var.kubeconfig_output_path, -1, 1) == "/" ? "${var.kubeconfig_output_path}kubeconfig_${var.cluster_name}" : var.kubeconfig_output_path file_permission = var.kubeconfig_file_permission diff --git a/local.tf b/locals.tf similarity index 83% rename from local.tf rename to locals.tf index 6064876..60472a6 100644 --- a/local.tf +++ b/locals.tf @@ -1,53 +1,38 @@ locals { - cluster_security_group_id = var.cluster_create_security_group ? 
join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id - cluster_primary_security_group_id = var.cluster_version >= 1.14 ? element(concat(aws_eks_cluster.this[*].vpc_config[0].cluster_security_group_id, [""]), 0) : null - cluster_iam_role_name = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.name) : var.cluster_iam_role_name - cluster_iam_role_arn = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.arn) : join("", data.aws_iam_role.custom_cluster_iam_role.*.arn) - worker_security_group_id = var.worker_create_security_group ? join("", aws_security_group.workers.*.id) : var.worker_security_group_id + # EKS Cluster + cluster_id = coalescelist(aws_eks_cluster.this[*].id, [""])[0] + cluster_arn = coalescelist(aws_eks_cluster.this[*].arn, [""])[0] + cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0] + cluster_endpoint = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0] + cluster_auth_base64 = coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0] + cluster_oidc_issuer_url = flatten(concat(aws_eks_cluster.this[*].identity[*].oidc[0].issuer, [""]))[0] + cluster_primary_security_group_id = coalescelist(aws_eks_cluster.this[*].vpc_config[0].cluster_security_group_id, [""])[0] + + cluster_security_group_id = var.cluster_create_security_group ? join("", aws_security_group.cluster.*.id) : var.cluster_security_group_id + cluster_iam_role_name = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.name) : var.cluster_iam_role_name + cluster_iam_role_arn = var.manage_cluster_iam_resources ? join("", aws_iam_role.cluster.*.arn) : join("", data.aws_iam_role.custom_cluster_iam_role.*.arn) + + # Worker groups + worker_security_group_id = var.worker_create_security_group ? join("", aws_security_group.workers.*.id) : var.worker_security_group_id - default_platform = "linux" default_iam_role_id = concat(aws_iam_role.workers.*.id, [""])[0] default_ami_id_linux = local.workers_group_defaults.ami_id != "" ? local.workers_group_defaults.ami_id : concat(data.aws_ami.eks_worker.*.id, [""])[0] default_ami_id_windows = local.workers_group_defaults.ami_id_windows != "" ? local.workers_group_defaults.ami_id_windows : concat(data.aws_ami.eks_worker_windows.*.id, [""])[0] - kubeconfig_name = var.kubeconfig_name == "" ? 
"eks_${var.cluster_name}" : var.kubeconfig_name + worker_group_launch_configuration_count = length(var.worker_groups) + worker_group_launch_template_count = length(var.worker_groups_launch_template) - worker_group_count = length(var.worker_groups) - worker_group_launch_template_count = length(var.worker_groups_launch_template) + worker_groups_platforms = [for x in concat(var.worker_groups, var.worker_groups_launch_template) : try(x.platform, var.workers_group_defaults["platform"], var.default_platform)] - worker_has_linux_ami = length([for x in concat(var.worker_groups, var.worker_groups_launch_template) : x if lookup( - x, - "platform", - # Fallback on default `platform` if it's not defined in current worker group - lookup( - merge({ platform = local.default_platform }, var.workers_group_defaults), - "platform", - null - ) - ) == "linux"]) > 0 - worker_has_windows_ami = length([for x in concat(var.worker_groups, var.worker_groups_launch_template) : x if lookup( - x, - "platform", - # Fallback on default `platform` if it's not defined in current worker group - lookup( - merge({ platform = local.default_platform }, var.workers_group_defaults), - "platform", - null - ) - ) == "windows"]) > 0 - - worker_ami_name_filter = var.worker_ami_name_filter != "" ? var.worker_ami_name_filter : "amazon-eks-node-${var.cluster_version}-v*" - # Windows nodes are available from k8s 1.14. If cluster version is less than 1.14, fix ami filter to some constant to not fail on 'terraform plan'. - worker_ami_name_filter_windows = (var.worker_ami_name_filter_windows != "" ? - var.worker_ami_name_filter_windows : "Windows_Server-2019-English-Core-EKS_Optimized-${tonumber(var.cluster_version) >= 1.14 ? var.cluster_version : 1.14}-*" - ) - - ec2_principal = "ec2.${data.aws_partition.current.dns_suffix}" - sts_principal = "sts.${data.aws_partition.current.dns_suffix}" - client_id_list = distinct(compact(concat([local.sts_principal], var.openid_connect_audiences))) + worker_ami_name_filter = coalesce(var.worker_ami_name_filter, "amazon-eks-node-${coalesce(var.cluster_version, "cluster_version")}-v*") + worker_ami_name_filter_windows = coalesce(var.worker_ami_name_filter_windows, "Windows_Server-2019-English-Core-EKS_Optimized-${coalesce(var.cluster_version, "cluster_version")}-*") + ec2_principal = "ec2.${data.aws_partition.current.dns_suffix}" + sts_principal = "sts.${data.aws_partition.current.dns_suffix}" + client_id_list = distinct(compact(concat([local.sts_principal], var.openid_connect_audiences))) policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy" + workers_group_defaults_defaults = { name = "count.index" # Name of the worker group. Literal count.index will never be used but if name is not set, the count.index interpolation will be used. tags = [] # A list of maps defining extra tags to be applied to the worker group autoscaling group and volumes. @@ -92,7 +77,7 @@ locals { placement_group = null # The name of the placement group into which to launch the instances, if any. service_linked_role_arn = "" # Arn of custom service linked role that Auto Scaling group will use. Useful when you have encrypted EBS termination_policies = [] # A list of policies to decide how the instances in the auto scale group should be terminated. - platform = local.default_platform # Platform of workers. Either "linux" or "windows". + platform = var.default_platform # Platform of workers. Either "linux" or "windows". 
additional_ebs_volumes = [] # A list of additional volumes to be attached to the instances on this Auto Scaling group. Each volume should be an object with the following: block_device_name (required), volume_size, volume_type, iops, throughput, encrypted, kms_key_id (only on launch-template), delete_on_termination. Optional values are grabbed from root volume or from defaults additional_instance_store_volumes = [] # A list of additional instance store (local disk) volumes to be attached to the instances on this Auto Scaling group. Each volume should be an object with the following: block_device_name (required), virtual_name. warm_pool = null # If this block is configured, add a Warm Pool to the specified Auto Scaling group. @@ -176,17 +161,17 @@ locals { ] kubeconfig = var.create_eks ? templatefile("${path.module}/templates/kubeconfig.tpl", { - kubeconfig_name = local.kubeconfig_name - endpoint = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0] - cluster_auth_base64 = coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0] + kubeconfig_name = coalesce(var.kubeconfig_name, "eks_${var.cluster_name}") + endpoint = local.cluster_endpoint + cluster_auth_base64 = local.cluster_auth_base64 aws_authenticator_command = var.kubeconfig_aws_authenticator_command - aws_authenticator_command_args = length(var.kubeconfig_aws_authenticator_command_args) > 0 ? var.kubeconfig_aws_authenticator_command_args : ["token", "-i", coalescelist(aws_eks_cluster.this[*].name, [""])[0]] + aws_authenticator_command_args = coalescelist(var.kubeconfig_aws_authenticator_command_args, ["token", "-i", local.cluster_name]) aws_authenticator_additional_args = var.kubeconfig_aws_authenticator_additional_args aws_authenticator_env_variables = var.kubeconfig_aws_authenticator_env_variables }) : "" - userdata_rendered = [ - for index in range(var.create_eks ? local.worker_group_count : 0) : templatefile( + launch_configuration_userdata_rendered = [ + for index in range(var.create_eks ? 
local.worker_group_launch_configuration_count : 0) : templatefile( lookup( var.worker_groups[index], "userdata_template_file", @@ -196,9 +181,9 @@ locals { ), merge({ platform = lookup(var.worker_groups[index], "platform", local.workers_group_defaults["platform"]) - cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0] - endpoint = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0] - cluster_auth_base64 = coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0] + cluster_name = local.cluster_name + endpoint = local.cluster_endpoint + cluster_auth_base64 = local.cluster_auth_base64 pre_userdata = lookup( var.worker_groups[index], "pre_userdata", @@ -240,9 +225,9 @@ locals { ), merge({ platform = lookup(var.worker_groups_launch_template[index], "platform", local.workers_group_defaults["platform"]) - cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0] - endpoint = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0] - cluster_auth_base64 = coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0] + cluster_name = local.cluster_name + endpoint = local.cluster_endpoint + cluster_auth_base64 = local.cluster_auth_base64 pre_userdata = lookup( var.worker_groups_launch_template[index], "pre_userdata", diff --git a/cluster.tf b/main.tf similarity index 79% rename from cluster.tf rename to main.tf index 7d34556..c5cc718 100644 --- a/cluster.tf +++ b/main.tf @@ -1,21 +1,20 @@ resource "aws_cloudwatch_log_group" "this" { - count = length(var.cluster_enabled_log_types) > 0 && var.create_eks ? 1 : 0 + count = length(var.cluster_enabled_log_types) > 0 && var.create_eks ? 1 : 0 + name = "/aws/eks/${var.cluster_name}/cluster" retention_in_days = var.cluster_log_retention_in_days kms_key_id = var.cluster_log_kms_key_id - tags = var.tags + + tags = var.tags } resource "aws_eks_cluster" "this" { - count = var.create_eks ? 1 : 0 + count = var.create_eks ? 1 : 0 + name = var.cluster_name enabled_cluster_log_types = var.cluster_enabled_log_types role_arn = local.cluster_iam_role_arn version = var.cluster_version - tags = merge( - var.tags, - var.cluster_tags, - ) vpc_config { security_group_ids = compact([local.cluster_security_group_id]) @@ -29,11 +28,6 @@ resource "aws_eks_cluster" "this" { service_ipv4_cidr = var.cluster_service_ipv4_cidr } - timeouts { - create = var.cluster_create_timeout - delete = var.cluster_delete_timeout - } - dynamic "encryption_config" { for_each = toset(var.cluster_encryption_config) @@ -45,6 +39,16 @@ resource "aws_eks_cluster" "this" { } } + tags = merge( + var.tags, + var.cluster_tags, + ) + + timeouts { + create = var.cluster_create_timeout + delete = var.cluster_delete_timeout + } + depends_on = [ aws_security_group_rule.cluster_egress_internet, aws_security_group_rule.cluster_https_worker_ingress, @@ -56,10 +60,12 @@ resource "aws_eks_cluster" "this" { } resource "aws_security_group" "cluster" { - count = var.cluster_create_security_group && var.create_eks ? 1 : 0 + count = var.cluster_create_security_group && var.create_eks ? 1 : 0 + name_prefix = var.cluster_name description = "EKS cluster security group." vpc_id = var.vpc_id + tags = merge( var.tags, { @@ -69,7 +75,8 @@ resource "aws_security_group" "cluster" { } resource "aws_security_group_rule" "cluster_egress_internet" { - count = var.cluster_create_security_group && var.create_eks ? 1 : 0 + count = var.cluster_create_security_group && var.create_eks ? 1 : 0 + description = "Allow cluster egress access to the Internet." 
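+  # Protocol "-1" matches every protocol and port, i.e. this rule allows
+  # all egress traffic from the cluster security group.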
protocol = "-1" security_group_id = local.cluster_security_group_id @@ -80,7 +87,8 @@ resource "aws_security_group_rule" "cluster_egress_internet" { } resource "aws_security_group_rule" "cluster_https_worker_ingress" { - count = var.cluster_create_security_group && var.create_eks && var.worker_create_security_group ? 1 : 0 + count = var.cluster_create_security_group && var.create_eks && var.worker_create_security_group ? 1 : 0 + description = "Allow pods to communicate with the EKS cluster API." protocol = "tcp" security_group_id = local.cluster_security_group_id @@ -91,7 +99,8 @@ resource "aws_security_group_rule" "cluster_https_worker_ingress" { } resource "aws_security_group_rule" "cluster_private_access_cidrs_source" { - for_each = var.create_eks && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access && var.cluster_endpoint_private_access_cidrs != null ? toset(var.cluster_endpoint_private_access_cidrs) : [] + for_each = var.create_eks && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access && var.cluster_endpoint_private_access_cidrs != null ? toset(var.cluster_endpoint_private_access_cidrs) : [] + description = "Allow private K8S API ingress from custom CIDR source." type = "ingress" from_port = 443 @@ -103,7 +112,8 @@ resource "aws_security_group_rule" "cluster_private_access_cidrs_source" { } resource "aws_security_group_rule" "cluster_private_access_sg_source" { - count = var.create_eks && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access && var.cluster_endpoint_private_access_sg != null ? length(var.cluster_endpoint_private_access_sg) : 0 + count = var.create_eks && var.cluster_create_endpoint_private_access_sg_rule && var.cluster_endpoint_private_access && var.cluster_endpoint_private_access_sg != null ? length(var.cluster_endpoint_private_access_sg) : 0 + description = "Allow private K8S API ingress from custom Security Groups source." type = "ingress" from_port = 443 @@ -115,30 +125,35 @@ resource "aws_security_group_rule" "cluster_private_access_sg_source" { } resource "aws_iam_role" "cluster" { - count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0 + count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0 + name_prefix = var.cluster_iam_role_name != "" ? null : var.cluster_name name = var.cluster_iam_role_name != "" ? var.cluster_iam_role_name : null assume_role_policy = data.aws_iam_policy_document.cluster_assume_role_policy.json permissions_boundary = var.permissions_boundary path = var.iam_path force_detach_policies = true - tags = var.tags + + tags = var.tags } resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" { - count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0 + count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0 + policy_arn = "${local.policy_arn_prefix}/AmazonEKSClusterPolicy" role = local.cluster_iam_role_name } resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSServicePolicy" { - count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0 + count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0 + policy_arn = "${local.policy_arn_prefix}/AmazonEKSServicePolicy" role = local.cluster_iam_role_name } resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSVPCResourceControllerPolicy" { - count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0 + count = var.manage_cluster_iam_resources && var.create_eks ? 
1 : 0 + policy_arn = "${local.policy_arn_prefix}/AmazonEKSVPCResourceController" role = local.cluster_iam_role_name } @@ -163,16 +178,19 @@ data "aws_iam_policy_document" "cluster_elb_sl_role_creation" { } resource "aws_iam_policy" "cluster_elb_sl_role_creation" { - count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0 + count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0 + name_prefix = "${var.cluster_name}-elb-sl-role-creation" description = "Permissions for EKS to create AWSServiceRoleForElasticLoadBalancing service-linked role" policy = data.aws_iam_policy_document.cluster_elb_sl_role_creation[0].json path = var.iam_path - tags = var.tags + + tags = var.tags } resource "aws_iam_role_policy_attachment" "cluster_elb_sl_role_creation" { - count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0 + count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0 + policy_arn = aws_iam_policy.cluster_elb_sl_role_creation[0].arn role = local.cluster_iam_role_name } diff --git a/modules/fargate/README.md b/modules/fargate/README.md index 858ec96..c4dfaaf 100644 --- a/modules/fargate/README.md +++ b/modules/fargate/README.md @@ -1,13 +1,15 @@ -# eks `fargate` submodule +# EKS `fargate` submodule Helper submodule to create and manage resources related to `aws_eks_fargate_profile`. -## Assumptions -* Designed for use by the parent module and not directly by end users - ## `fargate_profile` keys + `fargate_profile` is a map of maps. Key of first level will be used as unique value for `for_each` resources and in the `aws_eks_fargate_profile` name. Inner map can take the below values. +## Example + +See example code in `examples/fargate`. + | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| | name | Fargate profile name | `string` | Auto generated in the following format `[cluster_name]-fargate-[fargate_profile_map_key]`| no | @@ -42,19 +44,18 @@ No modules. | [aws_iam_role_policy_attachment.eks_fargate_pod](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/iam_role_policy_attachment) | resource | | [aws_iam_policy_document.eks_fargate_pod_assume_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_policy_document) | data source | | [aws_iam_role.custom_fargate_iam_role](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/iam_role) | data source | +| [aws_partition.current](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/partition) | data source | ## Inputs | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster. | `string` | n/a | yes | +| [cluster\_name](#input\_cluster\_name) | Name of the EKS cluster. | `string` | `""` | no | | [create\_eks](#input\_create\_eks) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no | | [create\_fargate\_pod\_execution\_role](#input\_create\_fargate\_pod\_execution\_role) | Controls if the the IAM Role that provides permissions for the EKS Fargate Profile should be created. | `bool` | `true` | no | -| [eks\_depends\_on](#input\_eks\_depends\_on) | List of references to other resources this submodule depends on. | `any` | `null` | no | | [fargate\_pod\_execution\_role\_name](#input\_fargate\_pod\_execution\_role\_name) | The IAM Role that provides permissions for the EKS Fargate Profile. 
| `string` | `null` | no | | [fargate\_profiles](#input\_fargate\_profiles) | Fargate profiles to create. See `fargate_profile` keys section in README.md for more details | `any` | `{}` | no | | [iam\_path](#input\_iam\_path) | IAM roles will be created on this path. | `string` | `"/"` | no | -| [iam\_policy\_arn\_prefix](#input\_iam\_policy\_arn\_prefix) | IAM policy prefix with the correct AWS partition. | `string` | n/a | yes | | [permissions\_boundary](#input\_permissions\_boundary) | If provided, all IAM roles will be created with this permissions boundary attached. | `string` | `null` | no | | [subnets](#input\_subnets) | A list of subnets for the EKS Fargate profiles. | `list(string)` | `[]` | no | | [tags](#input\_tags) | A map of tags to add to all resources. | `map(string)` | `{}` | no | diff --git a/modules/fargate/data.tf b/modules/fargate/data.tf deleted file mode 100644 index ee8e0d8..0000000 --- a/modules/fargate/data.tf +++ /dev/null @@ -1,17 +0,0 @@ -data "aws_iam_policy_document" "eks_fargate_pod_assume_role" { - count = local.create_eks && var.create_fargate_pod_execution_role ? 1 : 0 - statement { - effect = "Allow" - actions = ["sts:AssumeRole"] - - principals { - type = "Service" - identifiers = ["eks-fargate-pods.amazonaws.com"] - } - } -} - -data "aws_iam_role" "custom_fargate_iam_role" { - count = local.create_eks && !var.create_fargate_pod_execution_role ? 1 : 0 - name = var.fargate_pod_execution_role_name -} diff --git a/modules/fargate/fargate.tf b/modules/fargate/fargate.tf deleted file mode 100644 index caa73fa..0000000 --- a/modules/fargate/fargate.tf +++ /dev/null @@ -1,33 +0,0 @@ -resource "aws_iam_role" "eks_fargate_pod" { - count = local.create_eks && var.create_fargate_pod_execution_role ? 1 : 0 - name_prefix = format("%s-fargate", substr(var.cluster_name, 0, 24)) - assume_role_policy = data.aws_iam_policy_document.eks_fargate_pod_assume_role[0].json - permissions_boundary = var.permissions_boundary - tags = var.tags - path = var.iam_path -} - -resource "aws_iam_role_policy_attachment" "eks_fargate_pod" { - count = local.create_eks && var.create_fargate_pod_execution_role ? 1 : 0 - policy_arn = "${var.iam_policy_arn_prefix}/AmazonEKSFargatePodExecutionRolePolicy" - role = aws_iam_role.eks_fargate_pod[0].name -} - -resource "aws_eks_fargate_profile" "this" { - for_each = local.create_eks ? local.fargate_profiles_expanded : {} - cluster_name = var.cluster_name - fargate_profile_name = lookup(each.value, "name", format("%s-fargate-%s", var.cluster_name, replace(each.key, "_", "-"))) - pod_execution_role_arn = local.pod_execution_role_arn - subnet_ids = lookup(each.value, "subnets", var.subnets) - tags = each.value.tags - - dynamic "selector" { - for_each = each.value.selectors - content { - namespace = selector.value["namespace"] - labels = lookup(selector.value, "labels", {}) - } - } - - depends_on = [var.eks_depends_on] -} diff --git a/modules/fargate/locals.tf b/modules/fargate/locals.tf deleted file mode 100644 index 18ba964..0000000 --- a/modules/fargate/locals.tf +++ /dev/null @@ -1,10 +0,0 @@ -locals { - create_eks = var.create_eks && length(var.fargate_profiles) > 0 - pod_execution_role_arn = var.create_fargate_pod_execution_role ? element(concat(aws_iam_role.eks_fargate_pod.*.arn, [""]), 0) : element(concat(data.aws_iam_role.custom_fargate_iam_role.*.arn, [""]), 0) - pod_execution_role_name = var.create_fargate_pod_execution_role ? 
element(concat(aws_iam_role.eks_fargate_pod.*.name, [""]), 0) : element(concat(data.aws_iam_role.custom_fargate_iam_role.*.name, [""]), 0) - - fargate_profiles_expanded = { for k, v in var.fargate_profiles : k => merge( - v, - { tags = merge(var.tags, lookup(v, "tags", {})) }, - ) if var.create_eks } -} diff --git a/modules/fargate/main.tf b/modules/fargate/main.tf new file mode 100644 index 0000000..74e3c9b --- /dev/null +++ b/modules/fargate/main.tf @@ -0,0 +1,67 @@ +locals { + create_eks = var.create_eks && length(var.fargate_profiles) > 0 + + pod_execution_role_arn = coalescelist(aws_iam_role.eks_fargate_pod.*.arn, data.aws_iam_role.custom_fargate_iam_role.*.arn, [""])[0] + pod_execution_role_name = coalescelist(aws_iam_role.eks_fargate_pod.*.name, data.aws_iam_role.custom_fargate_iam_role.*.name, [""])[0] + + fargate_profiles = { for k, v in var.fargate_profiles : k => v if var.create_eks } +} + +data "aws_partition" "current" {} + +data "aws_iam_policy_document" "eks_fargate_pod_assume_role" { + count = local.create_eks && var.create_fargate_pod_execution_role ? 1 : 0 + + statement { + effect = "Allow" + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["eks-fargate-pods.amazonaws.com"] + } + } +} + +data "aws_iam_role" "custom_fargate_iam_role" { + count = local.create_eks && !var.create_fargate_pod_execution_role ? 1 : 0 + + name = var.fargate_pod_execution_role_name +} + +resource "aws_iam_role" "eks_fargate_pod" { + count = local.create_eks && var.create_fargate_pod_execution_role ? 1 : 0 + + name_prefix = format("%s-fargate", substr(var.cluster_name, 0, 24)) + assume_role_policy = data.aws_iam_policy_document.eks_fargate_pod_assume_role[0].json + permissions_boundary = var.permissions_boundary + tags = var.tags + path = var.iam_path +} + +resource "aws_iam_role_policy_attachment" "eks_fargate_pod" { + count = local.create_eks && var.create_fargate_pod_execution_role ? 1 : 0 + + policy_arn = "arn:${data.aws_partition.current.partition}:iam::aws:policy/AmazonEKSFargatePodExecutionRolePolicy" + role = aws_iam_role.eks_fargate_pod[0].name +} + +resource "aws_eks_fargate_profile" "this" { + for_each = local.fargate_profiles + + cluster_name = var.cluster_name + fargate_profile_name = lookup(each.value, "name", format("%s-fargate-%s", var.cluster_name, replace(each.key, "_", "-"))) + pod_execution_role_arn = local.pod_execution_role_arn + subnet_ids = lookup(each.value, "subnets", var.subnets) + + dynamic "selector" { + for_each = each.value.selectors + + content { + namespace = selector.value["namespace"] + labels = lookup(selector.value, "labels", {}) + } + } + + tags = merge(var.tags, lookup(each.value, "tags", {})) +} diff --git a/modules/fargate/variables.tf b/modules/fargate/variables.tf index acfd69b..39e2cc6 100644 --- a/modules/fargate/variables.tf +++ b/modules/fargate/variables.tf @@ -1,31 +1,27 @@ -variable "cluster_name" { - description = "Name of the EKS cluster." - type = string -} - variable "create_eks" { description = "Controls if EKS resources should be created (it affects almost all resources)" type = bool default = true } -variable "iam_path" { - description = "IAM roles will be created on this path." - type = string - default = "/" -} - -variable "iam_policy_arn_prefix" { - description = "IAM policy prefix with the correct AWS partition." - type = string -} - variable "create_fargate_pod_execution_role" { description = "Controls if the the IAM Role that provides permissions for the EKS Fargate Profile should be created." 
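+  # When false, `fargate_pod_execution_role_name` must reference an existing
+  # IAM role, which the submodule looks up through a data source.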
type = bool default = true } +variable "cluster_name" { + description = "Name of the EKS cluster." + type = string + default = "" +} + +variable "iam_path" { + description = "IAM roles will be created on this path." + type = string + default = "/" +} + variable "fargate_pod_execution_role_name" { description = "The IAM Role that provides permissions for the EKS Fargate Profile." type = string @@ -55,11 +51,3 @@ variable "tags" { type = map(string) default = {} } - -# Hack for a homemade `depends_on` https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2 -# Will be removed in Terraform 0.13 with the support of module's `depends_on` https://github.com/hashicorp/terraform/issues/10462 -variable "eks_depends_on" { - description = "List of references to other resources this submodule depends on." - type = any - default = null -} diff --git a/modules/node_groups/README.md b/modules/node_groups/README.md index d80b126..f004fd1 100644 --- a/modules/node_groups/README.md +++ b/modules/node_groups/README.md @@ -1,11 +1,9 @@ -# eks `node_groups` submodule +# EKS `node_groups` submodule Helper submodule to create and manage resources related to `eks_node_groups`. -## Assumptions -* Designed for use by the parent module and not directly by end users - ## Node Groups' IAM Role + The role ARN specified in `var.default_iam_role_arn` will be used by default. In a simple configuration this will be the worker role created by the parent module. `iam_role_arn` must be specified in either `var.node_groups_defaults` or `var.node_groups` if the default parent IAM role is not being created for whatever reason, for example if `manage_worker_iam_resources` is set to false in the parent. @@ -64,13 +62,14 @@ The role ARN specified in `var.default_iam_role_arn` will be used by default. In |------|---------| | [terraform](#requirement\_terraform) | >= 0.13.1 | | [aws](#requirement\_aws) | >= 3.56.0 | +| [cloudinit](#requirement\_cloudinit) | >= 2.0 | ## Providers | Name | Version | |------|---------| | [aws](#provider\_aws) | >= 3.56.0 | -| [cloudinit](#provider\_cloudinit) | n/a | +| [cloudinit](#provider\_cloudinit) | >= 2.0 | ## Modules @@ -88,18 +87,17 @@ No modules. | Name | Description | Type | Default | Required | |------|-------------|------|---------|:--------:| -| [cluster\_name](#input\_cluster\_name) | Name of parent cluster | `string` | n/a | yes | +| [cluster\_name](#input\_cluster\_name) | Name of parent cluster | `string` | `""` | no | | [create\_eks](#input\_create\_eks) | Controls if EKS resources should be created (it affects almost all resources) | `bool` | `true` | no | -| [default\_iam\_role\_arn](#input\_default\_iam\_role\_arn) | ARN of the default IAM worker role to use if one is not specified in `var.node_groups` or `var.node_groups_defaults` | `string` | n/a | yes | +| [default\_iam\_role\_arn](#input\_default\_iam\_role\_arn) | ARN of the default IAM worker role to use if one is not specified in `var.node_groups` or `var.node_groups_defaults` | `string` | `""` | no | | [ebs\_optimized\_not\_supported](#input\_ebs\_optimized\_not\_supported) | List of instance types that do not support EBS optimization | `list(string)` | `[]` | no | | [ng\_depends\_on](#input\_ng\_depends\_on) | List of references to other resources this submodule depends on | `any` | `null` | no | | [node\_groups](#input\_node\_groups) | Map of maps of `eks_node_groups` to create. 
See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | `{}` | no | -| [node\_groups\_defaults](#input\_node\_groups\_defaults) | map of maps of node groups to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | n/a | yes | -| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | n/a | yes | -| [timeouts](#input\_timeouts) | A map of timeouts for create/update/delete operations. | `map(string)` | n/a | yes | +| [node\_groups\_defaults](#input\_node\_groups\_defaults) | map of maps of node groups to create. See "`node_groups` and `node_groups_defaults` keys" section in README.md for more details | `any` | `{}` | no | +| [tags](#input\_tags) | A map of tags to add to all resources | `map(string)` | `{}` | no | | [worker\_additional\_security\_group\_ids](#input\_worker\_additional\_security\_group\_ids) | A list of additional security group ids to attach to worker instances | `list(string)` | `[]` | no | | [worker\_security\_group\_id](#input\_worker\_security\_group\_id) | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster. | `string` | `""` | no | -| [workers\_group\_defaults](#input\_workers\_group\_defaults) | Workers group defaults from parent | `any` | n/a | yes | +| [workers\_group\_defaults](#input\_workers\_group\_defaults) | Workers group defaults from parent | `any` | `{}` | no | ## Outputs diff --git a/modules/node_groups/node_groups.tf b/modules/node_groups/main.tf similarity index 98% rename from modules/node_groups/node_groups.tf rename to modules/node_groups/main.tf index 134d383..7ceafb5 100644 --- a/modules/node_groups/node_groups.tf +++ b/modules/node_groups/main.tf @@ -99,7 +99,7 @@ resource "aws_eks_node_group" "workers" { lifecycle { create_before_destroy = true - ignore_changes = [scaling_config.0.desired_size] + ignore_changes = [scaling_config[0].desired_size] } depends_on = [var.ng_depends_on] diff --git a/modules/node_groups/variables.tf b/modules/node_groups/variables.tf index d881968..d106bb7 100644 --- a/modules/node_groups/variables.tf +++ b/modules/node_groups/variables.tf @@ -7,16 +7,19 @@ variable "create_eks" { variable "cluster_name" { description = "Name of parent cluster" type = string + default = "" } variable "default_iam_role_arn" { description = "ARN of the default IAM worker role to use if one is not specified in `var.node_groups` or `var.node_groups_defaults`" type = string + default = "" } variable "workers_group_defaults" { description = "Workers group defaults from parent" type = any + default = {} } variable "worker_security_group_id" { @@ -34,16 +37,13 @@ variable "worker_additional_security_group_ids" { variable "tags" { description = "A map of tags to add to all resources" type = map(string) -} - -variable "timeouts" { - description = "A map of timeouts for create/update/delete operations." - type = map(string) + default = {} } variable "node_groups_defaults" { description = "map of maps of node groups to create. 
See \"`node_groups` and `node_groups_defaults` keys\" section in README.md for more details" type = any + default = {} } variable "node_groups" { diff --git a/modules/node_groups/versions.tf b/modules/node_groups/versions.tf index ea2a91d..5324b48 100644 --- a/modules/node_groups/versions.tf +++ b/modules/node_groups/versions.tf @@ -2,6 +2,7 @@ terraform { required_version = ">= 0.13.1" required_providers { - aws = ">= 3.56.0" + aws = ">= 3.56.0" + cloudinit = ">= 2.0" } } diff --git a/node_groups.tf b/node_groups.tf index 2a35808..c40f635 100644 --- a/node_groups.tf +++ b/node_groups.tf @@ -1,16 +1,19 @@ module "node_groups" { - source = "./modules/node_groups" - create_eks = var.create_eks - cluster_name = coalescelist(aws_eks_cluster.this[*].name, [""])[0] + source = "./modules/node_groups" + + create_eks = var.create_eks + + cluster_name = local.cluster_name default_iam_role_arn = coalescelist(aws_iam_role.workers[*].arn, [""])[0] + ebs_optimized_not_supported = local.ebs_optimized_not_supported workers_group_defaults = local.workers_group_defaults worker_security_group_id = local.worker_security_group_id worker_additional_security_group_ids = var.worker_additional_security_group_ids - tags = var.tags - timeouts = var.timeouts - node_groups_defaults = var.node_groups_defaults - node_groups = var.node_groups - ebs_optimized_not_supported = local.ebs_optimized_not_supported + + node_groups_defaults = var.node_groups_defaults + node_groups = var.node_groups + + tags = var.tags # Hack to ensure ordering of resource creation. # This is a homemade `depends_on` https://discuss.hashicorp.com/t/tips-howto-implement-module-depends-on-emulation/2305/2 diff --git a/outputs.tf b/outputs.tf index f6c5351..8676285 100644 --- a/outputs.tf +++ b/outputs.tf @@ -1,6 +1,6 @@ output "cluster_id" { description = "The name/id of the EKS cluster. Will block on cluster creation until the cluster is really ready." - value = element(concat(aws_eks_cluster.this.*.id, [""]), 0) + value = local.cluster_id # So that calling plans wait for the cluster to be available before attempting to use it. # There is no need to duplicate this datasource @@ -9,17 +9,17 @@ output "cluster_id" { output "cluster_arn" { description = "The Amazon Resource Name (ARN) of the cluster." - value = element(concat(aws_eks_cluster.this.*.arn, [""]), 0) + value = local.cluster_arn } output "cluster_certificate_authority_data" { description = "Nested attribute containing certificate-authority-data for your cluster. This is the base64 encoded certificate data required to communicate with your cluster." - value = element(concat(aws_eks_cluster.this[*].certificate_authority[0].data, [""]), 0) + value = local.cluster_auth_base64 } output "cluster_endpoint" { description = "The endpoint for your EKS Kubernetes API." 
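# The outputs in this file now read from shared locals (defined in the renamed
# locals.tf) rather than repeating element()/concat() splat expressions in
# every output. A sketch of the assumed shape of those locals, using the same
# coalescelist(...)[0] idiom seen elsewhere in this diff; the exact
# expressions in locals.tf may differ:
locals {
  # First non-empty list wins, so a module with create_eks = false yields ""
  cluster_id       = coalescelist(aws_eks_cluster.this[*].id, [""])[0]
  cluster_arn      = coalescelist(aws_eks_cluster.this[*].arn, [""])[0]
  cluster_endpoint = coalescelist(aws_eks_cluster.this[*].endpoint, [""])[0]
}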
- value = element(concat(aws_eks_cluster.this.*.endpoint, [""]), 0) + value = local.cluster_endpoint } output "cluster_version" { @@ -49,7 +49,7 @@ output "cluster_iam_role_arn" { output "cluster_oidc_issuer_url" { description = "The URL on the EKS cluster OIDC Issuer" - value = flatten(concat(aws_eks_cluster.this[*].identity[*].oidc.0.issuer, [""]))[0] + value = local.cluster_oidc_issuer_url } output "cluster_primary_security_group_id" { @@ -109,7 +109,7 @@ output "workers_asg_names" { output "workers_user_data" { description = "User data of worker groups" value = concat( - local.userdata_rendered, + local.launch_configuration_userdata_rendered, local.launch_template_userdata_rendered, ) } diff --git a/variables.tf b/variables.tf index 9b94c99..6454e6b 100644 --- a/variables.tf +++ b/variables.tf @@ -1,24 +1,25 @@ variable "cluster_enabled_log_types" { - default = [] description = "A list of the desired control plane logging to enable. For more information, see Amazon EKS Control Plane Logging documentation (https://docs.aws.amazon.com/eks/latest/userguide/control-plane-logs.html)" type = list(string) + default = [] } variable "cluster_log_kms_key_id" { - default = "" description = "If a KMS Key ARN is set, this key will be used to encrypt the corresponding log group. Please be sure that the KMS Key has an appropriate key policy (https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/encrypt-log-data-kms.html)" type = string + default = "" } variable "cluster_log_retention_in_days" { - default = 90 description = "Number of days to retain log events. Default retention - 90 days." type = number + default = 90 } variable "cluster_name" { description = "Name of the EKS cluster. Also used as a prefix in names of related resources." type = string + default = "" } variable "cluster_security_group_id" { @@ -30,6 +31,7 @@ variable "cluster_security_group_id" { variable "cluster_version" { description = "Kubernetes version to use for the EKS cluster." type = string + default = null } variable "kubeconfig_output_path" { @@ -50,8 +52,15 @@ variable "write_kubeconfig" { default = true } +variable "default_platform" { + description = "Default platform name. Valid options are `linux` and `windows`." + type = string + default = "linux" +} + variable "manage_aws_auth" { description = "Whether to apply the aws-auth configmap file." + type = bool default = true } @@ -96,6 +105,7 @@ variable "fargate_subnets" { variable "subnets" { description = "A list of subnets to place the EKS cluster and workers within." type = list(string) + default = [] } variable "tags" { @@ -110,15 +120,10 @@ variable "cluster_tags" { default = {} } -variable "timeouts" { - description = "A map of timeouts for create/update/delete operations." - type = map(string) - default = {} -} - variable "vpc_id" { description = "VPC where the cluster and workers will be deployed." type = string + default = null } variable "worker_groups" { diff --git a/versions.tf b/versions.tf index aa74ab7..e9b6bc9 100644 --- a/versions.tf +++ b/versions.tf @@ -5,6 +5,7 @@ terraform { aws = ">= 3.56.0" local = ">= 1.4" kubernetes = ">= 1.11.1" + cloudinit = ">= 2.0" http = { source = "terraform-aws-modules/http" version = ">= 2.4.1" diff --git a/workers.tf b/workers.tf index c73046e..f2cd7cb 100644 --- a/workers.tf +++ b/workers.tf @@ -1,12 +1,13 @@ # Worker Groups using Launch Configurations resource "aws_autoscaling_group" "workers" { - count = var.create_eks ? local.worker_group_count : 0 + count = var.create_eks ? 
local.worker_group_launch_configuration_count : 0 + name_prefix = join( "-", compact( [ - coalescelist(aws_eks_cluster.this[*].name, [""])[0], + local.cluster_name, lookup(var.worker_groups[count.index], "name", count.index) ] ) @@ -131,16 +132,16 @@ resource "aws_autoscaling_group" "workers" { [ { "key" = "Name" - "value" = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup(var.worker_groups[count.index], "name", count.index)}-eks_asg" + "value" = "${local.cluster_name}-${lookup(var.worker_groups[count.index], "name", count.index)}-eks_asg" "propagate_at_launch" = true }, { - "key" = "kubernetes.io/cluster/${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}" + "key" = "kubernetes.io/cluster/${local.cluster_name}" "value" = "owned" "propagate_at_launch" = true }, { - "key" = "k8s.io/cluster/${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}" + "key" = "k8s.io/cluster/${local.cluster_name}" "value" = "owned" "propagate_at_launch" = true }, @@ -201,8 +202,9 @@ resource "aws_autoscaling_group" "workers" { } resource "aws_launch_configuration" "workers" { - count = var.create_eks ? local.worker_group_count : 0 - name_prefix = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup(var.worker_groups[count.index], "name", count.index)}" + count = var.create_eks ? local.worker_group_launch_configuration_count : 0 + + name_prefix = "${local.cluster_name}-${lookup(var.worker_groups[count.index], "name", count.index)}" associate_public_ip_address = lookup( var.worker_groups[count.index], "public_ip", @@ -236,7 +238,7 @@ resource "aws_launch_configuration" "workers" { "key_name", local.workers_group_defaults["key_name"], ) - user_data_base64 = base64encode(local.userdata_rendered[count.index]) + user_data_base64 = base64encode(local.launch_configuration_userdata_rendered[count.index]) ebs_optimized = lookup( var.worker_groups[count.index], "ebs_optimized", @@ -368,7 +370,8 @@ resource "aws_launch_configuration" "workers" { } resource "aws_security_group" "workers" { - count = var.worker_create_security_group && var.create_eks ? 1 : 0 + count = var.worker_create_security_group && var.create_eks ? 1 : 0 + name_prefix = var.cluster_name description = "Security group for all nodes in the cluster." vpc_id = var.vpc_id @@ -382,7 +385,8 @@ resource "aws_security_group" "workers" { } resource "aws_security_group_rule" "workers_egress_internet" { - count = var.worker_create_security_group && var.create_eks ? 1 : 0 + count = var.worker_create_security_group && var.create_eks ? 1 : 0 + description = "Allow nodes all egress to the Internet." protocol = "-1" security_group_id = local.worker_security_group_id @@ -393,7 +397,8 @@ resource "aws_security_group_rule" "workers_egress_internet" { } resource "aws_security_group_rule" "workers_ingress_self" { - count = var.worker_create_security_group && var.create_eks ? 1 : 0 + count = var.worker_create_security_group && var.create_eks ? 1 : 0 + description = "Allow nodes to communicate with each other." protocol = "-1" security_group_id = local.worker_security_group_id @@ -404,7 +409,8 @@ resource "aws_security_group_rule" "workers_ingress_self" { } resource "aws_security_group_rule" "workers_ingress_cluster" { - count = var.worker_create_security_group && var.create_eks ? 1 : 0 + count = var.worker_create_security_group && var.create_eks ? 1 : 0 + description = "Allow worker pods to receive communication from the cluster control plane."
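# Every attribute of the launch configuration above is resolved with
# lookup(var.worker_groups[count.index], "<key>", local.workers_group_defaults["<key>"]),
# so a caller overrides only the keys it cares about. A sketch of one such
# worker group entry (all values are illustrative):
worker_groups = [
  {
    name          = "on-demand-1" # used in the ASG / launch configuration name_prefix
    instance_type = "m5.large"    # overrides the module-wide default
    key_name      = "example-key" # hypothetical EC2 key pair
    # omitted keys (public_ip, ebs_optimized, ...) fall back to
    # local.workers_group_defaults
  },
]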
protocol = "tcp" security_group_id = local.worker_security_group_id @@ -415,7 +421,8 @@ resource "aws_security_group_rule" "workers_ingress_cluster" { } resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" { - count = var.worker_create_security_group && var.create_eks ? var.worker_sg_ingress_from_port > 10250 ? 1 : 0 : 0 + count = var.worker_create_security_group && var.create_eks ? var.worker_sg_ingress_from_port > 10250 ? 1 : 0 : 0 + description = "Allow worker Kubelets to receive communication from the cluster control plane." protocol = "tcp" security_group_id = local.worker_security_group_id @@ -426,7 +433,8 @@ resource "aws_security_group_rule" "workers_ingress_cluster_kubelet" { } resource "aws_security_group_rule" "workers_ingress_cluster_https" { - count = var.worker_create_security_group && var.create_eks ? 1 : 0 + count = var.worker_create_security_group && var.create_eks ? 1 : 0 + description = "Allow pods running extension API servers on port 443 to receive communication from the cluster control plane." protocol = "tcp" security_group_id = local.worker_security_group_id @@ -437,7 +445,8 @@ resource "aws_security_group_rule" "workers_ingress_cluster_https" { } resource "aws_security_group_rule" "workers_ingress_cluster_primary" { - count = var.worker_create_security_group && var.worker_create_cluster_primary_security_group_rules && var.cluster_version >= 1.14 && var.create_eks ? 1 : 0 + count = var.worker_create_security_group && var.worker_create_cluster_primary_security_group_rules && var.create_eks ? 1 : 0 + description = "Allow pods running on workers to receive communication from the cluster primary security group (e.g. Fargate pods)." protocol = "all" security_group_id = local.worker_security_group_id @@ -448,7 +457,8 @@ resource "aws_security_group_rule" "workers_ingress_cluster_primary" { } resource "aws_security_group_rule" "cluster_primary_ingress_workers" { - count = var.worker_create_security_group && var.worker_create_cluster_primary_security_group_rules && var.cluster_version >= 1.14 && var.create_eks ? 1 : 0 + count = var.worker_create_security_group && var.worker_create_cluster_primary_security_group_rules && var.create_eks ? 1 : 0 + description = "Allow pods running on workers to send communication to the cluster primary security group (e.g. Fargate pods)." protocol = "all" security_group_id = local.cluster_primary_security_group_id @@ -459,26 +469,29 @@ resource "aws_security_group_rule" "cluster_primary_ingress_workers" { } resource "aws_iam_role" "workers" { - count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0 - name_prefix = var.workers_role_name != "" ? null : coalescelist(aws_eks_cluster.this[*].name, [""])[0] + count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0 + + name_prefix = var.workers_role_name != "" ? null : local.cluster_name name = var.workers_role_name != "" ? var.workers_role_name : null assume_role_policy = data.aws_iam_policy_document.workers_assume_role_policy.json permissions_boundary = var.permissions_boundary path = var.iam_path force_detach_policies = true - tags = var.tags + + tags = var.tags } resource "aws_iam_instance_profile" "workers" { - count = var.manage_worker_iam_resources && var.create_eks ? local.worker_group_count : 0 - name_prefix = coalescelist(aws_eks_cluster.this[*].name, [""])[0] + count = var.manage_worker_iam_resources && var.create_eks ?
local.worker_group_launch_configuration_count : 0 + + name_prefix = local.cluster_name role = lookup( var.worker_groups[count.index], "iam_role_id", local.default_iam_role_id, ) - path = var.iam_path + tags = var.tags lifecycle { @@ -487,25 +500,29 @@ resource "aws_iam_instance_profile" "workers" { } resource "aws_iam_role_policy_attachment" "workers_AmazonEKSWorkerNodePolicy" { - count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0 + count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0 + policy_arn = "${local.policy_arn_prefix}/AmazonEKSWorkerNodePolicy" role = aws_iam_role.workers[0].name } resource "aws_iam_role_policy_attachment" "workers_AmazonEKS_CNI_Policy" { - count = var.manage_worker_iam_resources && var.attach_worker_cni_policy && var.create_eks ? 1 : 0 + count = var.manage_worker_iam_resources && var.attach_worker_cni_policy && var.create_eks ? 1 : 0 + policy_arn = "${local.policy_arn_prefix}/AmazonEKS_CNI_Policy" role = aws_iam_role.workers[0].name } resource "aws_iam_role_policy_attachment" "workers_AmazonEC2ContainerRegistryReadOnly" { - count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0 + count = var.manage_worker_iam_resources && var.create_eks ? 1 : 0 + policy_arn = "${local.policy_arn_prefix}/AmazonEC2ContainerRegistryReadOnly" role = aws_iam_role.workers[0].name } resource "aws_iam_role_policy_attachment" "workers_additional_policies" { - count = var.manage_worker_iam_resources && var.create_eks ? length(var.workers_additional_policies) : 0 + count = var.manage_worker_iam_resources && var.create_eks ? length(var.workers_additional_policies) : 0 + role = aws_iam_role.workers[0].name policy_arn = var.workers_additional_policies[count.index] } diff --git a/workers_launch_template.tf b/workers_launch_template.tf index 9da5a16..b675304 100644 --- a/workers_launch_template.tf +++ b/workers_launch_template.tf @@ -2,11 +2,12 @@ resource "aws_autoscaling_group" "workers_launch_template" { count = var.create_eks ? local.worker_group_launch_template_count : 0 + name_prefix = join( "-", compact( [ - coalescelist(aws_eks_cluster.this[*].name, [""])[0], + local.cluster_name, lookup(var.worker_groups_launch_template[count.index], "name", count.index) ] ) @@ -219,7 +220,7 @@ resource "aws_autoscaling_group" "workers_launch_template" { [ { "key" = "Name" - "value" = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup( + "value" = "${local.cluster_name}-${lookup( var.worker_groups_launch_template[count.index], "name", count.index, @@ -227,7 +228,7 @@ resource "aws_autoscaling_group" "workers_launch_template" { "propagate_at_launch" = true }, { - "key" = "kubernetes.io/cluster/${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}" + "key" = "kubernetes.io/cluster/${local.cluster_name}" "value" = "owned" "propagate_at_launch" = true }, @@ -289,7 +290,8 @@ resource "aws_autoscaling_group" "workers_launch_template" { resource "aws_launch_template" "workers_launch_template" { count = var.create_eks ? 
(local.worker_group_launch_template_count) : 0 - name_prefix = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup( + + name_prefix = "${local.cluster_name}-${lookup( var.worker_groups_launch_template[count.index], "name", count.index, @@ -540,7 +542,7 @@ resource "aws_launch_template" "workers_launch_template" { tags = merge( { - "Name" = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup( + "Name" = "${local.cluster_name}-${lookup( var.worker_groups_launch_template[count.index], "name", count.index, @@ -560,7 +562,7 @@ resource "aws_launch_template" "workers_launch_template" { tags = merge( { - "Name" = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup( + "Name" = "${local.cluster_name}-${lookup( var.worker_groups_launch_template[count.index], "name", count.index, @@ -578,7 +580,7 @@ resource "aws_launch_template" "workers_launch_template" { tags = merge( { - "Name" = "${coalescelist(aws_eks_cluster.this[*].name, [""])[0]}-${lookup( + "Name" = "${local.cluster_name}-${lookup( var.worker_groups_launch_template[count.index], "name", count.index, @@ -617,14 +619,16 @@ resource "aws_launch_template" "workers_launch_template" { } resource "aws_iam_instance_profile" "workers_launch_template" { - count = var.manage_worker_iam_resources && var.create_eks ? local.worker_group_launch_template_count : 0 - name_prefix = coalescelist(aws_eks_cluster.this[*].name, [""])[0] + count = var.manage_worker_iam_resources && var.create_eks ? local.worker_group_launch_template_count : 0 + + name_prefix = local.cluster_name role = lookup( var.worker_groups_launch_template[count.index], "iam_role_id", local.default_iam_role_id, ) path = var.iam_path + tags = var.tags lifecycle {
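# For context on the interpolations above: each entry in
# var.worker_groups_launch_template drives one launch template plus one ASG,
# with per-group overrides resolved the same lookup()-with-defaults way as the
# launch configuration workers. A sketch of a spot-style entry (keys follow
# the worker group options commonly used with this module family; all values
# are illustrative):
worker_groups_launch_template = [
  {
    name                    = "spot-1"
    override_instance_types = ["m5.large", "m5a.large"] # mixed-instances pool
    spot_instance_pools     = 2
    asg_max_size            = 5
    asg_desired_capacity    = 2
    kubelet_extra_args      = "--node-labels=node.kubernetes.io/lifecycle=spot"
    public_ip               = true
  },
]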