fix: Rebuild examples (#1625)

This commit is contained in:
Dawid Rogaczewski
2021-10-12 15:20:14 +02:00
committed by GitHub
parent 54a5f1e42b
commit 99d289988d
45 changed files with 1281 additions and 708 deletions

View File

@@ -0,0 +1,73 @@
# Managed groups example
This is an EKS example using the managed node groups feature in two different ways:
- Using SPOT instances in a node group
- Using ON_DEMAND instances in a node group
See [the official documentation](https://docs.aws.amazon.com/eks/latest/userguide/managed-node-groups.html) for more details.
## Usage
To run this example you need to execute:
```bash
$ terraform init
$ terraform plan
$ terraform apply
```
Note that this example may create resources which cost money. Run `terraform destroy` when you don't need these resources.
<!-- BEGINNING OF PRE-COMMIT-TERRAFORM DOCS HOOK -->
## Requirements
| Name | Version |
|------|---------|
| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | >= 0.13.1 |
| <a name="requirement_aws"></a> [aws](#requirement\_aws) | >= 3.22.0 |
| <a name="requirement_kubernetes"></a> [kubernetes](#requirement\_kubernetes) | ~> 2.0 |
| <a name="requirement_local"></a> [local](#requirement\_local) | >= 1.4 |
| <a name="requirement_random"></a> [random](#requirement\_random) | >= 2.1 |
## Providers
| Name | Version |
|------|---------|
| <a name="provider_aws"></a> [aws](#provider\_aws) | >= 3.22.0 |
| <a name="provider_random"></a> [random](#provider\_random) | >= 2.1 |
## Modules
| Name | Source | Version |
|------|--------|---------|
| <a name="module_eks"></a> [eks](#module\_eks) | ../.. | |
| <a name="module_vpc"></a> [vpc](#module\_vpc) | terraform-aws-modules/vpc/aws | ~> 3.0 |
## Resources
| Name | Type |
|------|------|
| [random_string.suffix](https://registry.terraform.io/providers/hashicorp/random/latest/docs/resources/string) | resource |
| [aws_availability_zones.available](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/availability_zones) | data source |
| [aws_eks_cluster.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster) | data source |
| [aws_eks_cluster_auth.cluster](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/eks_cluster_auth) | data source |
## Inputs
| Name | Description | Type | Default | Required |
|------|-------------|------|---------|:--------:|
| <a name="input_map_accounts"></a> [map\_accounts](#input\_map\_accounts) | Additional AWS account numbers to add to the aws-auth configmap. | `list(string)` | <pre>[<br> "777777777777",<br> "888888888888"<br>]</pre> | no |
| <a name="input_map_roles"></a> [map\_roles](#input\_map\_roles) | Additional IAM roles to add to the aws-auth configmap. | <pre>list(object({<br> rolearn = string<br> username = string<br> groups = list(string)<br> }))</pre> | <pre>[<br> {<br> "groups": [<br> "system:masters"<br> ],<br> "rolearn": "arn:aws:iam::66666666666:role/role1",<br> "username": "role1"<br> }<br>]</pre> | no |
| <a name="input_map_users"></a> [map\_users](#input\_map\_users) | Additional IAM users to add to the aws-auth configmap. | <pre>list(object({<br> userarn = string<br> username = string<br> groups = list(string)<br> }))</pre> | <pre>[<br> {<br> "groups": [<br> "system:masters"<br> ],<br> "userarn": "arn:aws:iam::66666666666:user/user1",<br> "username": "user1"<br> },<br> {<br> "groups": [<br> "system:masters"<br> ],<br> "userarn": "arn:aws:iam::66666666666:user/user2",<br> "username": "user2"<br> }<br>]</pre> | no |
## Outputs
| Name | Description |
|------|-------------|
| <a name="output_cluster_endpoint"></a> [cluster\_endpoint](#output\_cluster\_endpoint) | Endpoint for EKS control plane. |
| <a name="output_cluster_security_group_id"></a> [cluster\_security\_group\_id](#output\_cluster\_security\_group\_id) | Security group ids attached to the cluster control plane. |
| <a name="output_config_map_aws_auth"></a> [config\_map\_aws\_auth](#output\_config\_map\_aws\_auth) | A kubernetes configuration to authenticate to this EKS cluster. |
| <a name="output_kubectl_config"></a> [kubectl\_config](#output\_kubectl\_config) | kubectl config as generated by the module. |
| <a name="output_node_groups"></a> [node\_groups](#output\_node\_groups) | Outputs from node groups |
<!-- END OF PRE-COMMIT-TERRAFORM DOCS HOOK -->

View File

@@ -1,70 +1,28 @@
# AWS provider pinned to the region this example deploys into.
provider "aws" {
region = "eu-west-1"
}

# Read back the cluster created by the eks module so the Kubernetes
# provider below can authenticate against it.
data "aws_eks_cluster" "cluster" {
name = module.eks.cluster_id
}

# Short-lived authentication token for the cluster above.
data "aws_eks_cluster_auth" "cluster" {
name = module.eks.cluster_id
}

# Kubernetes provider wired to the EKS control plane endpoint, using the
# cluster CA certificate and the token issued above.
provider "kubernetes" {
host = data.aws_eks_cluster.cluster.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
token = data.aws_eks_cluster_auth.cluster.token
}
# All availability zones visible in the provider's configured region.
# NOTE(review): the original block set `region = local.region`, but `region`
# is not a valid argument for this data source in the AWS provider versions
# this example requires (>= 3.22.0) and fails at plan time; the provider's
# own region is used instead — confirm against the pinned provider version.
data "aws_availability_zones" "available" {
}
# Shared values for the example.
locals {
# NOTE(review): both `cluster_name` and `name` are defined here — this span
# interleaves removed and added diff lines; confirm which identifier the
# module blocks below actually reference before relying on either.
cluster_name = "test-eks-${random_string.suffix.result}"
name = "managed_node_groups-${random_string.suffix.result}"
cluster_version = "1.20"
region = "eu-west-1"
}

# Random suffix so repeated applies don't collide on cluster/VPC names.
resource "random_string" "suffix" {
length = 8
special = false
}
# VPC for the example: three private and three public subnets spread across
# the region's availability zones, with a single shared NAT gateway serving
# the private subnets.
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 2.47"
name = "test-vpc"
cidr = "172.16.0.0/16"
azs = data.aws_availability_zones.available.names
private_subnets = ["172.16.1.0/24", "172.16.2.0/24", "172.16.3.0/24"]
public_subnets = ["172.16.4.0/24", "172.16.5.0/24", "172.16.6.0/24"]
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
# Subnet tags Kubernetes/EKS uses to discover subnets for load balancers
# (public for internet-facing ELBs, private for internal ELBs).
public_subnet_tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared"
"kubernetes.io/role/elb" = "1"
}
private_subnet_tags = {
"kubernetes.io/cluster/${local.cluster_name}" = "shared"
"kubernetes.io/role/internal-elb" = "1"
}
}
################################################################################
# EKS Module
################################################################################
module "eks" {
source = "../.."
cluster_name = local.cluster_name
cluster_version = "1.20"
subnets = module.vpc.private_subnets
source = "../.."
tags = {
Environment = "test"
GithubRepo = "terraform-aws-eks"
GithubOrg = "terraform-aws-modules"
}
cluster_name = local.name
cluster_version = local.cluster_version
vpc_id = module.vpc.vpc_id
vpc_id = module.vpc.vpc_id
subnets = module.vpc.private_subnets
cluster_endpoint_private_access = true
cluster_endpoint_public_access = true
node_groups_defaults = {
ami_type = "AL2_x86_64"
@@ -73,23 +31,16 @@ module "eks" {
node_groups = {
example = {
create_launch_template = true
desired_capacity = 1
max_capacity = 10
min_capacity = 1
disk_size = 50
disk_type = "gp3"
disk_throughput = 150
disk_iops = 3000
instance_types = ["t3.large"]
capacity_type = "SPOT"
k8s_labels = {
Environment = "test"
GithubRepo = "terraform-aws-eks"
GithubOrg = "terraform-aws-modules"
Example = "managed_node_groups"
GithubRepo = "terraform-aws-eks"
GithubOrg = "terraform-aws-modules"
}
additional_tags = {
ExtraTag = "example"
@@ -105,24 +56,93 @@ module "eks" {
max_unavailable_percentage = 50 # or set `max_unavailable`
}
}
example2 = {
desired_capacity = 1
max_capacity = 10
min_capacity = 1
instance_types = ["t3.medium"]
k8s_labels = {
Example = "managed_node_groups"
GithubRepo = "terraform-aws-eks"
GithubOrg = "terraform-aws-modules"
}
additional_tags = {
ExtraTag = "example2"
}
update_config = {
max_unavailable_percentage = 50 # or set `max_unavailable`
}
}
}
# Create security group rules to allow communication between pods on workers and pods in managed node groups.
# Set this to true if you have AWS-Managed node groups and Self-Managed worker groups.
# See https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1089
# worker_create_cluster_primary_security_group_rules = true
# worker_groups_launch_template = [
# {
# name = "worker-group-1"
# instance_type = "t3.small"
# asg_desired_capacity = 2
# public_ip = true
# }
# ]
map_roles = var.map_roles
map_users = var.map_users
map_accounts = var.map_accounts
tags = {
Example = local.name
GithubRepo = "terraform-aws-eks"
GithubOrg = "terraform-aws-modules"
}
}
################################################################################
# Kubernetes provider configuration
################################################################################
# Look up the cluster created by the eks module so the Kubernetes provider
# can authenticate with a freshly issued token.
data "aws_eks_cluster" "cluster" {
name = module.eks.cluster_id
}

data "aws_eks_cluster_auth" "cluster" {
name = module.eks.cluster_id
}

# Kubernetes provider pointed at the EKS control plane endpoint with the
# cluster CA certificate and the token from aws_eks_cluster_auth.
provider "kubernetes" {
host = data.aws_eks_cluster.cluster.endpoint
cluster_ca_certificate = base64decode(data.aws_eks_cluster.cluster.certificate_authority[0].data)
token = data.aws_eks_cluster_auth.cluster.token
}
################################################################################
# Supporting Resources
################################################################################
# All availability zones available in the provider's configured region.
data "aws_availability_zones" "available" {
}

# Random suffix interpolated into `local.name` so repeated applies of this
# example don't collide on resource names.
resource "random_string" "suffix" {
length = 8
special = false
}
# VPC for the example: three private and three public subnets across the
# region's availability zones, with a single shared NAT gateway for the
# private subnets.
module "vpc" {
source = "terraform-aws-modules/vpc/aws"
version = "~> 3.0"
name = local.name
cidr = "10.0.0.0/16"
azs = data.aws_availability_zones.available.names
private_subnets = ["10.0.1.0/24", "10.0.2.0/24", "10.0.3.0/24"]
public_subnets = ["10.0.4.0/24", "10.0.5.0/24", "10.0.6.0/24"]
enable_nat_gateway = true
single_nat_gateway = true
enable_dns_hostnames = true
# Subnet tags Kubernetes/EKS uses to discover subnets for load balancers
# (public for internet-facing ELBs, private for internal ELBs).
public_subnet_tags = {
"kubernetes.io/cluster/${local.name}" = "shared"
"kubernetes.io/role/elb" = "1"
}
private_subnet_tags = {
"kubernetes.io/cluster/${local.name}" = "shared"
"kubernetes.io/role/internal-elb" = "1"
}
tags = {
Example = local.name
GithubRepo = "terraform-aws-eks"
GithubOrg = "terraform-aws-modules"
}
}

View File

@@ -2,9 +2,9 @@ terraform {
required_version = ">= 0.13.1"

required_providers {
# NOTE(review): the diff residue carried both the removed (aws >= 3.56.0,
# kubernetes ~> 1.11) and added (aws >= 3.22.0, kubernetes ~> 2.0) lines,
# which is invalid HCL (duplicate attributes). Kept the added-side values,
# which match the constraints listed in the example README.
aws = ">= 3.22.0"
local = ">= 1.4"
random = ">= 2.1"
kubernetes = "~> 2.0"
}
}