Merge pull request #13 from terraform-aws-modules/NextDeveloperTeam-add-worker-groups

Flexible number of worker autoscaling groups now able to be created by module consumers.
This commit is contained in:
Brandon J. O'Connor
2018-06-11 15:57:48 -07:00
committed by GitHub
18 changed files with 347 additions and 355 deletions

2
.gitignore vendored
View File

@@ -9,3 +9,5 @@ Gemfile.lock
terraform.tfstate.d/ terraform.tfstate.d/
kubeconfig kubeconfig
config-map-aws-auth.yaml config-map-aws-auth.yaml
eks-admin-cluster-role-binding.yaml
eks-admin-service-account.yaml

View File

@@ -1,16 +1,21 @@
language: ruby language: ruby
sudo: required sudo: required
dist: trusty dist: trusty
services: services:
- docker - docker
rvm: rvm:
- 2.4.2 - 2.4.2
before_install: before_install:
- echo "before_install" - echo "before_install"
install: install:
- echo "install" - echo "install"
- gem install bundler --no-rdoc --no-ri - gem install bundler --no-rdoc --no-ri
- bundle install - bundle install
before_script: before_script:
- echo 'before_script' - echo 'before_script'
- export AWS_REGION='us-east-1' - export AWS_REGION='us-east-1'
@@ -22,12 +27,13 @@ before_script:
- unzip terraform.zip ; rm -f terraform.zip; chmod +x terraform - unzip terraform.zip ; rm -f terraform.zip; chmod +x terraform
- mkdir -p ${HOME}/bin ; export PATH=${PATH}:${HOME}/bin; mv terraform ${HOME}/bin/ - mkdir -p ${HOME}/bin ; export PATH=${PATH}:${HOME}/bin; mv terraform ${HOME}/bin/
- terraform -v - terraform -v
script: script:
- echo 'script' - echo 'script'
- terraform init - terraform init
- terraform fmt -check=true - terraform fmt -check=true
- terraform validate -var "region=${AWS_REGION}" -var "vpc_id=vpc-123456" -var "subnets=[\"subnet-12345a\"]" -var "workers_ami_id=ami-123456" -var "cluster_ingress_cidrs=[]" -var "cluster_name=test_cluster" - terraform validate -var "region=${AWS_REGION}" -var "vpc_id=vpc-123456" -var "subnets=[\"subnet-12345a\"]" -var "workers_ami_id=ami-123456" -var "cluster_ingress_cidrs=[]" -var "cluster_name=test_cluster"
- docker run --rm -v $(pwd):/app/ --workdir=/app/ -t wata727/tflint --error-with-issues # - docker run --rm -v $(pwd):/app/ --workdir=/app/ -t wata727/tflint --error-with-issues
- cd examples/eks_test_fixture - cd examples/eks_test_fixture
- terraform init - terraform init
- terraform fmt -check=true - terraform fmt -check=true
@@ -40,6 +46,7 @@ script:
# script: ci/deploy.sh # script: ci/deploy.sh
# on: # on:
# branch: master # branch: master
notifications: notifications:
email: email:
recipients: recipients:

View File

@@ -5,22 +5,36 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/) and this The format is based on [Keep a Changelog](http://keepachangelog.com/) and this
project adheres to [Semantic Versioning](http://semver.org/). project adheres to [Semantic Versioning](http://semver.org/).
## [[v1.0.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v0.2.0...v1.0.0)] - 2018-06-11]
### Added
- security group id can be provided for either/both of the cluster and the workers. If not provided, security groups will be created with sufficient rules to allow cluster-worker communication. - kudos to @tanmng on the idea ⭐
- outputs of security group ids and worker ASG arns added for working with these resources outside the module.
### Changed
- Worker build out refactored to allow multiple autoscaling groups each having differing specs. If none are given, a single ASG is created with a set of sane defaults - big thanks to @kppullin 🥨
## [[v0.2.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v0.1.1...v0.2.0)] - 2018-06-08] ## [[v0.2.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v0.1.1...v0.2.0)] - 2018-06-08]
### Added
- ability to specify extra userdata code to execute following kubelet services start.
- EBS optimization used whenever possible for the given instance type.
- When `configure_kubectl_session` is set to true the current shell will be configured to talk to the kubernetes cluster using config files output from the module.
### Changed ### Changed
- files rendered from dedicated templates to separate out raw code and config from `hcl` - files rendered from dedicated templates to separate out raw code and config from `hcl`
- `workers_ami_id` is now made optional. If not specified, the module will source the latest AWS supported EKS AMI instead. - `workers_ami_id` is now made optional. If not specified, the module will source the latest AWS supported EKS AMI instead.
- added ability to specify extra userdata code to execute after the second to configure and start kube services.
- When `configure_kubectl_session` is set to true the current shell will be configured to talk to the kubernetes cluster using config files output from the module.
- EBS optimization used whenever possible for the given instance type.
## [[v0.1.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v0.1.0...v0.1.1)] - 2018-06-07] ## [[v0.1.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v0.1.0...v0.1.1)] - 2018-06-07]
### Changed ### Changed
- pre-commit hooks fixed and working. - Pre-commit hooks fixed and working.
- made progress on CI, advancing the build to the final `kitchen test` stage before failing. - Made progress on CI, advancing the build to the final `kitchen test` stage before failing.
## [v0.1.0] - 2018-06-07 ## [v0.1.0] - 2018-06-07

View File

@@ -4,8 +4,7 @@ A terraform module to create a managed Kubernetes cluster on AWS EKS. Available
through the [Terraform registry](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws). through the [Terraform registry](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws).
Inspired by and adapted from [this doc](https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html) Inspired by and adapted from [this doc](https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html)
and its [source code](https://github.com/terraform-providers/terraform-provider-aws/tree/master/examples/eks-getting-started). and its [source code](https://github.com/terraform-providers/terraform-provider-aws/tree/master/examples/eks-getting-started).
Instructions on [this post](https://aws.amazon.com/blogs/aws/amazon-eks-now-generally-available/) Read the [AWS docs on EKS to get connected to the k8s dashboard](https://docs.aws.amazon.com/eks/latest/userguide/dashboard-tutorial.html).
can help guide you through connecting to the cluster via `kubectl`.
| Branch | Build status | | Branch | Build status |
| ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | | ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -13,8 +12,9 @@ can help guide you through connecting to the cluster via `kubectl`.
## Assumptions ## Assumptions
* You want to create a set of resources around an EKS cluster: namely an autoscaling group of workers and a security group for them. * You want to create an EKS cluster and an autoscaling group of workers for the cluster.
* You've created a Virtual Private Cloud (VPC) and subnets where you intend to put this EKS. * You want these resources to exist within security groups that allow communication and coordination. These can be user provided or created within the module.
* You've created a Virtual Private Cloud (VPC) and subnets where you intend to put the EKS resources.
## Usage example ## Usage example
@@ -28,7 +28,6 @@ module "eks" {
subnets = ["subnet-abcde012", "subnet-bcde012a"] subnets = ["subnet-abcde012", "subnet-bcde012a"]
tags = "${map("Environment", "test")}" tags = "${map("Environment", "test")}"
vpc_id = "vpc-abcde012" vpc_id = "vpc-abcde012"
cluster_ingress_cidrs = ["24.18.23.91/32"]
} }
``` ```
@@ -52,8 +51,10 @@ This module has been packaged with [awspec](https://github.com/k1LoW/awspec) tes
3. Ensure your AWS environment is configured (i.e. credentials and region) for test. 3. Ensure your AWS environment is configured (i.e. credentials and region) for test.
4. Test using `bundle exec kitchen test` from the root of the repo. 4. Test using `bundle exec kitchen test` from the root of the repo.
For now, connectivity to the kubernetes cluster is not tested but will be in the future. For now, connectivity to the kubernetes cluster is not tested but will be in the
To test your kubectl connection manually, see the [eks_test_fixture README](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_test_fixture/README.md). future. If `configure_kubectl_session` is set `true`, once the test fixture has
converged, you can query the test cluster from that terminal session with
`kubectl get nodes --watch --kubeconfig kubeconfig`.
## Doc generation ## Doc generation
@@ -93,30 +94,28 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a
| Name | Description | Type | Default | Required | | Name | Description | Type | Default | Required |
|------|-------------|:----:|:-----:|:-----:| |------|-------------|:----:|:-----:|:-----:|
| additional_userdata | Extra lines of userdata (bash) which are appended to the default userdata code. | string | `` | no | | cluster_name | Name of the EKS cluster. Also used as a prefix in names of related resources. | string | - | yes |
| cluster_ingress_cidrs | The CIDRs from which we can execute kubectl commands. | list | - | yes | | cluster_security_group_id | If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingres/egress to work with the workers and provide API access to your current IP/32. | string | `` | no |
| cluster_name | Name of the EKS cluster which is also used as a prefix in names of related resources. | string | - | yes | | cluster_version | Kubernetes version to use for the EKS cluster. | string | `1.10` | no |
| cluster_version | Kubernetes version to use for the cluster. | string | `1.10` | no |
| config_output_path | Determines where config files are placed if using configure_kubectl_session and you want config files to land outside the current working directory. | string | `./` | no | | config_output_path | Determines where config files are placed if using configure_kubectl_session and you want config files to land outside the current working directory. | string | `./` | no |
| configure_kubectl_session | Configure the current session's kubectl to use the instantiated cluster. | string | `false` | no | | configure_kubectl_session | Configure the current session's kubectl to use the instantiated EKS cluster. | string | `true` | no |
| ebs_optimized_workers | If left at default of true, will use ebs optimization if available on the given instance type. | string | `true` | no | | subnets | A list of subnets to place the EKS cluster and workers within. | list | - | yes |
| subnets | A list of subnets to associate with the cluster's underlying instances. | list | - | yes |
| tags | A map of tags to add to all resources. | string | `<map>` | no | | tags | A map of tags to add to all resources. | string | `<map>` | no |
| vpc_id | VPC id where the cluster and other resources will be deployed. | string | - | yes | | vpc_id | VPC where the cluster and workers will be deployed. | string | - | yes |
| workers_ami_id | AMI ID for the eks workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI. | string | `` | no | | worker_groups | A list of maps defining worker group configurations. See workers_group_defaults for valid keys. | list | `<list>` | no |
| workers_asg_desired_capacity | Desired worker capacity in the autoscaling group. | string | `1` | no | | worker_security_group_id | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingres/egress to work with the EKS cluster. | string | `` | no |
| workers_asg_max_size | Maximum worker capacity in the autoscaling group. | string | `3` | no | | workers_group_defaults | Default values for target groups as defined by the list of maps. | map | `<map>` | no |
| workers_asg_min_size | Minimum worker capacity in the autoscaling group. | string | `1` | no |
| workers_instance_type | Size of the workers instances. | string | `m4.large` | no |
## Outputs ## Outputs
| Name | Description | | Name | Description |
|------|-------------| |------|-------------|
| cluster_certificate_authority_data | Nested attribute containing certificate-authority-data for your cluster. Tis is the base64 encoded certificate data required to communicate with your cluster. | | cluster_certificate_authority_data | Nested attribute containing certificate-authority-data for your cluster. This is the base64 encoded certificate data required to communicate with your cluster. |
| cluster_endpoint | The endpoint for your Kubernetes API server. | | cluster_endpoint | The endpoint for your EKS Kubernetes API. |
| cluster_id | The name/id of the cluster. | | cluster_id | The name/id of the EKS cluster. |
| cluster_security_group_ids | description | | cluster_security_group_id | Security group ID attached to the EKS cluster. |
| cluster_version | The Kubernetes server version for the cluster. | | cluster_version | The Kubernetes server version for the EKS cluster. |
| config_map_aws_auth | A kubernetes configuration to authenticate to this cluster. | | config_map_aws_auth | A kubernetes configuration to authenticate to this EKS cluster. |
| kubeconfig | kubectl config file contents for this cluster. | | kubeconfig | kubectl config file contents for this EKS cluster. |
| worker_security_group_id | Security group ID attached to the EKS workers. |
| workers_asg_arns | IDs of the autoscaling groups containing workers. |

View File

@@ -4,7 +4,7 @@ resource "aws_eks_cluster" "this" {
version = "${var.cluster_version}" version = "${var.cluster_version}"
vpc_config { vpc_config {
security_group_ids = ["${aws_security_group.cluster.id}"] security_group_ids = ["${local.cluster_security_group_id}"]
subnet_ids = ["${var.subnets}"] subnet_ids = ["${var.subnets}"]
} }
@@ -16,39 +16,43 @@ resource "aws_eks_cluster" "this" {
resource "aws_security_group" "cluster" { resource "aws_security_group" "cluster" {
name_prefix = "${var.cluster_name}" name_prefix = "${var.cluster_name}"
description = "Cluster communication with workers nodes" description = "EKS cluster security group."
vpc_id = "${var.vpc_id}" vpc_id = "${var.vpc_id}"
tags = "${merge(var.tags, map("Name", "${var.cluster_name}-eks_cluster_sg"))}" tags = "${merge(var.tags, map("Name", "${var.cluster_name}-eks_cluster_sg"))}"
count = "${var.cluster_security_group_id == "" ? 1 : 0}"
} }
resource "aws_security_group_rule" "cluster_egress_internet" { resource "aws_security_group_rule" "cluster_egress_internet" {
description = "Allow cluster egress to the Internet." description = "Allow cluster egress access to the Internet."
protocol = "-1" protocol = "-1"
security_group_id = "${aws_security_group.cluster.id}" security_group_id = "${aws_security_group.cluster.id}"
cidr_blocks = ["0.0.0.0/0"] cidr_blocks = ["0.0.0.0/0"]
from_port = 0 from_port = 0
to_port = 0 to_port = 0
type = "egress" type = "egress"
count = "${var.cluster_security_group_id == "" ? 1 : 0}"
} }
resource "aws_security_group_rule" "cluster_https_worker_ingress" { resource "aws_security_group_rule" "cluster_https_worker_ingress" {
description = "Allow pods to communicate with the cluster API Server." description = "Allow pods to communicate with the EKS cluster API."
protocol = "tcp" protocol = "tcp"
security_group_id = "${aws_security_group.cluster.id}" security_group_id = "${aws_security_group.cluster.id}"
source_security_group_id = "${aws_security_group.workers.id}" source_security_group_id = "${local.worker_security_group_id}"
from_port = 443 from_port = 443
to_port = 443 to_port = 443
type = "ingress" type = "ingress"
count = "${var.cluster_security_group_id == "" ? 1 : 0}"
} }
resource "aws_security_group_rule" "cluster_https_cidr_ingress" { resource "aws_security_group_rule" "cluster_https_cidr_ingress" {
cidr_blocks = ["${var.cluster_ingress_cidrs}"] cidr_blocks = ["${local.workstation_external_cidr}"]
description = "Allow communication with the cluster API Server." description = "Allow kubectl communication with the EKS cluster API."
protocol = "tcp" protocol = "tcp"
security_group_id = "${aws_security_group.cluster.id}" security_group_id = "${aws_security_group.cluster.id}"
from_port = 443 from_port = 443
to_port = 443 to_port = 443
type = "ingress" type = "ingress"
count = "${var.cluster_security_group_id == "" ? 1 : 0}"
} }
resource "aws_iam_role" "cluster" { resource "aws_iam_role" "cluster" {

48
data.tf
View File

@@ -1,13 +1,7 @@
data "aws_region" "current" {} data "aws_region" "current" {}
data "aws_ami" "eks_worker" { data "http" "workstation_external_ip" {
filter { url = "http://icanhazip.com"
name = "name"
values = ["eks-worker-*"]
}
most_recent = true
owners = ["602401143452"] # Amazon
} }
data "aws_iam_policy_document" "workers_assume_role_policy" { data "aws_iam_policy_document" "workers_assume_role_policy" {
@@ -25,6 +19,16 @@ data "aws_iam_policy_document" "workers_assume_role_policy" {
} }
} }
data "aws_ami" "eks_worker" {
filter {
name = "name"
values = ["eks-worker-*"]
}
most_recent = true
owners = ["602401143452"] # Amazon
}
data "aws_iam_policy_document" "cluster_assume_role_policy" { data "aws_iam_policy_document" "cluster_assume_role_policy" {
statement { statement {
sid = "EKSClusterAssumeRole" sid = "EKSClusterAssumeRole"
@@ -40,19 +44,6 @@ data "aws_iam_policy_document" "cluster_assume_role_policy" {
} }
} }
data template_file userdata {
template = "${file("${path.module}/templates/userdata.sh.tpl")}"
vars {
region = "${data.aws_region.current.name}"
max_pod_count = "${lookup(local.max_pod_per_node, var.workers_instance_type)}"
cluster_name = "${var.cluster_name}"
endpoint = "${aws_eks_cluster.this.endpoint}"
cluster_auth_base64 = "${aws_eks_cluster.this.certificate_authority.0.data}"
additional_userdata = "${var.additional_userdata}"
}
}
data template_file kubeconfig { data template_file kubeconfig {
template = "${file("${path.module}/templates/kubeconfig.tpl")}" template = "${file("${path.module}/templates/kubeconfig.tpl")}"
@@ -72,7 +63,16 @@ data template_file config_map_aws_auth {
} }
} }
module "ebs_optimized" { data template_file userdata {
source = "./modules/tf_util_ebs_optimized" template = "${file("${path.module}/templates/userdata.sh.tpl")}"
instance_type = "${var.workers_instance_type}" count = "${length(var.worker_groups)}"
vars {
region = "${data.aws_region.current.name}"
cluster_name = "${var.cluster_name}"
endpoint = "${aws_eks_cluster.this.endpoint}"
cluster_auth_base64 = "${aws_eks_cluster.this.certificate_authority.0.data}"
max_pod_count = "${lookup(local.max_pod_per_node, lookup(var.worker_groups[count.index], "instance_type", lookup(var.workers_group_defaults, "instance_type")))}"
additional_userdata = "${lookup(var.worker_groups[count.index], "additional_userdata",lookup(var.workers_group_defaults, "additional_userdata"))}"
}
} }

View File

@@ -11,18 +11,16 @@ provider "random" {
version = "= 1.3.1" version = "= 1.3.1"
} }
provider "http" {}
provider "local" {}
data "aws_availability_zones" "available" {} data "aws_availability_zones" "available" {}
data "http" "workstation_external_ip" {
url = "http://icanhazip.com"
}
locals { locals {
workstation_external_cidr = "${chomp(data.http.workstation_external_ip.body)}/32" cluster_name = "test-eks-${random_string.suffix.result}"
cluster_name = "test-eks-${random_string.suffix.result}"
worker_groups = "${list(
map("instance_type","t2.small",
"additional_userdata","echo foo bar"
),
)}"
tags = "${map("Environment", "test", tags = "${map("Environment", "test",
"GithubRepo", "terraform-aws-eks", "GithubRepo", "terraform-aws-eks",
@@ -50,13 +48,10 @@ module "vpc" {
} }
module "eks" { module "eks" {
source = "../.." source = "../.."
cluster_name = "${local.cluster_name}" cluster_name = "${local.cluster_name}"
subnets = "${module.vpc.public_subnets}" subnets = "${module.vpc.public_subnets}"
tags = "${local.tags}" tags = "${local.tags}"
vpc_id = "${module.vpc.vpc_id}" vpc_id = "${module.vpc.vpc_id}"
cluster_ingress_cidrs = ["${local.workstation_external_cidr}"] worker_groups = "${local.worker_groups}"
workers_instance_type = "t2.small"
additional_userdata = "echo hello world"
configure_kubectl_session = true
} }

View File

@@ -3,9 +3,9 @@ output "cluster_endpoint" {
value = "${module.eks.cluster_endpoint}" value = "${module.eks.cluster_endpoint}"
} }
output "cluster_security_group_ids" { output "cluster_security_group_id" {
description = "Security group ids attached to the cluster control plane." description = "Security group ids attached to the cluster control plane."
value = "${module.eks.cluster_security_group_ids}" value = "${module.eks.cluster_security_group_id}"
} }
output "kubectl_config" { output "kubectl_config" {

24
kubectl.tf Normal file
View File

@@ -0,0 +1,24 @@
resource "local_file" "kubeconfig" {
content = "${data.template_file.kubeconfig.rendered}"
filename = "${var.config_output_path}/kubeconfig"
count = "${var.configure_kubectl_session ? 1 : 0}"
}
resource "local_file" "config_map_aws_auth" {
content = "${data.template_file.config_map_aws_auth.rendered}"
filename = "${var.config_output_path}/config-map-aws-auth.yaml"
count = "${var.configure_kubectl_session ? 1 : 0}"
}
resource "null_resource" "configure_kubectl" {
provisioner "local-exec" {
command = "kubectl apply -f ${var.config_output_path}/config-map-aws-auth.yaml --kubeconfig ${var.config_output_path}/kubeconfig"
}
triggers {
config_map_rendered = "${data.template_file.config_map_aws_auth.rendered}"
kubeconfig_rendered = "${data.template_file.kubeconfig.rendered}"
}
count = "${var.configure_kubectl_session ? 1 : 0}"
}

168
local.tf
View File

@@ -1,4 +1,9 @@
locals { locals {
asg_tags = ["${null_resource.tags_as_list_of_maps.*.triggers}"]
cluster_security_group_id = "${var.cluster_security_group_id == "" ? aws_security_group.cluster.id : var.cluster_security_group_id}"
worker_security_group_id = "${var.worker_security_group_id == "" ? aws_security_group.workers.id : var.worker_security_group_id}"
workstation_external_cidr = "${chomp(data.http.workstation_external_ip.body)}/32"
# Mapping from the node type that we selected and the max number of pods that it can run # Mapping from the node type that we selected and the max number of pods that it can run
# Taken from https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml # Taken from https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml
max_pod_per_node = { max_pod_per_node = {
@@ -59,48 +64,123 @@ locals {
x1.32xlarge = 234 x1.32xlarge = 234
} }
asg_tags = ["${null_resource.tags_as_list_of_maps.*.triggers}"] ebs_optimized = {
"c1.medium" = false
# More information: https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml "c1.xlarge" = true
config_map_aws_auth = <<CONFIGMAPAWSAUTH "c3.2xlarge" = true
apiVersion: v1 "c3.4xlarge" = true
kind: ConfigMap "c3.8xlarge" = false
metadata: "c3.large" = false
name: aws-auth "c3.xlarge" = false
namespace: kube-system "c4.2xlarge" = true
data: "c4.4xlarge" = true
mapRoles: | "c4.8xlarge" = true
- rolearn: ${aws_iam_role.workers.arn} "c4.large" = true
username: system:node:{{EC2PrivateDNSName}} "c4.xlarge" = true
groups: "c5.18xlarge" = true
- system:bootstrappers "c5.2xlarge" = true
- system:nodes "c5.4xlarge" = true
CONFIGMAPAWSAUTH "c5.9xlarge" = true
"c5.large" = true
kubeconfig = <<KUBECONFIG "c5.xlarge" = true
apiVersion: v1 "c5d.18xlarge" = true
clusters: "c5d.2xlarge" = true
- cluster: "c5d.4xlarge" = true
server: ${aws_eks_cluster.this.endpoint} "c5d.9xlarge" = true
certificate-authority-data: ${aws_eks_cluster.this.certificate_authority.0.data} "c5d.large" = true
name: kubernetes "c5d.xlarge" = true
contexts: "cc2.8xlarge" = false
- context: "cr1.8xlarge" = false
cluster: kubernetes "d2.2xlarge" = true
user: aws "d2.4xlarge" = true
name: aws "d2.8xlarge" = true
current-context: aws "d2.xlarge" = true
kind: Config "f1.16xlarge" = true
preferences: {} "f1.2xlarge" = true
users: "g2.2xlarge" = true
- name: aws "g2.8xlarge" = false
user: "g3.16xlarge" = true
exec: "g3.4xlarge" = true
apiVersion: client.authentication.k8s.io/v1alpha1 "g3.8xlarge" = true
command: heptio-authenticator-aws "h1.16xlarge" = true
args: "h1.2xlarge" = true
- "token" "h1.4xlarge" = true
- "-i" "h1.8xlarge" = true
- "${var.cluster_name}" "hs1.8xlarge" = false
KUBECONFIG "i2.2xlarge" = true
"i2.4xlarge" = true
"i2.8xlarge" = false
"i2.xlarge" = true
"i3.16xlarge" = true
"i3.2xlarge" = true
"i3.4xlarge" = true
"i3.8xlarge" = true
"i3.large" = true
"i3.metal" = true
"i3.xlarge" = true
"m1.large" = true
"m1.medium" = false
"m1.small" = false
"m1.xlarge" = true
"m2.2large" = false
"m2.2xlarge" = true
"m2.4xlarge" = true
"m2.xlarge" = false
"m3.2xlarge" = true
"m3.large" = false
"m3.medium" = false
"m3.xlarge" = true
"m4.10xlarge" = true
"m4.16xlarge" = true
"m4.2xlarge" = true
"m4.4xlarge" = true
"m4.large" = true
"m4.xlarge" = true
"m5.12xlarge" = true
"m5.24xlarge" = true
"m5.2xlarge" = true
"m5.4xlarge" = true
"m5.large" = true
"m5.xlarge" = true
"m5d.12xlarge" = true
"m5d.24xlarge" = true
"m5d.2xlarge" = true
"m5d.4xlarge" = true
"m5d.large" = true
"m5d.xlarge" = true
"p2.16xlarge" = true
"p2.8xlarge" = true
"p2.xlarge" = true
"p3.16xlarge" = true
"p3.2xlarge" = true
"p3.8xlarge" = true
"r3.2xlarge" = false
"r3.2xlarge" = true
"r3.4xlarge" = true
"r3.8xlarge" = false
"r3.large" = false
"r3.xlarge" = true
"r4.16xlarge" = true
"r4.2xlarge" = true
"r4.4xlarge" = true
"r4.8xlarge" = true
"r4.large" = true
"r4.xlarge" = true
"t1.micro" = false
"t2.2xlarge" = false
"t2.large" = false
"t2.medium" = false
"t2.micro" = false
"t2.nano" = false
"t2.small" = false
"t2.xlarge" = false
"x1.16xlarge" = true
"x1.32xlarge" = true
"x1e.16xlarge" = true
"x1e.2xlarge" = true
"x1e.32xlarge" = true
"x1e.4xlarge" = true
"x1e.8xlarge" = true
"x1e.xlarge" = true
}
} }

41
main.tf
View File

@@ -5,8 +5,7 @@
* through the [Terraform registry](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws). * through the [Terraform registry](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws).
* Inspired by and adapted from [this doc](https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html) * Inspired by and adapted from [this doc](https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html)
* and its [source code](https://github.com/terraform-providers/terraform-provider-aws/tree/master/examples/eks-getting-started). * and its [source code](https://github.com/terraform-providers/terraform-provider-aws/tree/master/examples/eks-getting-started).
* Instructions on [this post](https://aws.amazon.com/blogs/aws/amazon-eks-now-generally-available/) * Read the [AWS docs on EKS to get connected to the k8s dashboard](https://docs.aws.amazon.com/eks/latest/userguide/dashboard-tutorial.html).
* can help guide you through connecting to the cluster via `kubectl`.
* | Branch | Build status | * | Branch | Build status |
* | ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | * | ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- |
@@ -14,8 +13,9 @@
* ## Assumptions * ## Assumptions
** You want to create a set of resources around an EKS cluster: namely an autoscaling group of workers and a security group for them. ** You want to create an EKS cluster and an autoscaling group of workers for the cluster.
** You've created a Virtual Private Cloud (VPC) and subnets where you intend to put this EKS. ** You want these resources to exist within security groups that allow communication and coordination. These can be user provided or created within the module.
** You've created a Virtual Private Cloud (VPC) and subnets where you intend to put the EKS resources.
* ## Usage example * ## Usage example
@@ -29,7 +29,6 @@
* subnets = ["subnet-abcde012", "subnet-bcde012a"] * subnets = ["subnet-abcde012", "subnet-bcde012a"]
* tags = "${map("Environment", "test")}" * tags = "${map("Environment", "test")}"
* vpc_id = "vpc-abcde012" * vpc_id = "vpc-abcde012"
* cluster_ingress_cidrs = ["24.18.23.91/32"]
* } * }
* ``` * ```
@@ -53,8 +52,10 @@ are installed and on your shell's PATH.
* 3. Ensure your AWS environment is configured (i.e. credentials and region) for test. * 3. Ensure your AWS environment is configured (i.e. credentials and region) for test.
* 4. Test using `bundle exec kitchen test` from the root of the repo. * 4. Test using `bundle exec kitchen test` from the root of the repo.
For now, connectivity to the kubernetes cluster is not tested but will be in the future. * For now, connectivity to the kubernetes cluster is not tested but will be in the
To test your kubectl connection manually, see the [eks_test_fixture README](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_test_fixture/README.md). * future. If `configure_kubectl_session` is set `true`, once the test fixture has
* converged, you can query the test cluster from that terminal session with
* `kubectl get nodes --watch --kubeconfig kubeconfig`.
* ## Doc generation * ## Doc generation
@@ -93,28 +94,4 @@ To test your kubectl connection manually, see the [eks_test_fixture README](http
provider "null" {} provider "null" {}
provider "template" {} provider "template" {}
provider "http" {}
resource "local_file" "kubeconfig" {
content = "${data.template_file.kubeconfig.rendered}"
filename = "${var.config_output_path}/kubeconfig"
count = "${var.configure_kubectl_session ? 1 : 0}"
}
resource "local_file" "config_map_aws_auth" {
content = "${data.template_file.config_map_aws_auth.rendered}"
filename = "${var.config_output_path}/config-map-aws-auth.yaml"
count = "${var.configure_kubectl_session ? 1 : 0}"
}
resource "null_resource" "configure_kubectl" {
provisioner "local-exec" {
command = "kubectl apply -f ${var.config_output_path}/config-map-aws-auth.yaml --kubeconfig ${var.config_output_path}/kubeconfig"
}
triggers {
config_map_rendered = "${data.template_file.config_map_aws_auth.rendered}"
kubeconfig_rendered = "${data.template_file.kubeconfig.rendered}"
}
count = "${var.configure_kubectl_session ? 1 : 0}"
}

View File

@@ -1,114 +0,0 @@
/**
# terraform_util_ebs_optimized
A terraform module to return true or false based on whether an instance type supports the EBS optimized flag.
*/
locals {
  # Instance types on which the EBS-optimized flag can be enabled.
  # Consumers look a type up with a default of false (see output "answer"),
  # so any type absent from this map is treated as not EBS-optimizable.
  #
  # The original table contained twelve duplicate keys (c5.*, c5d.*, m5.*,
  # m5d.*); HCL maps silently keep the last duplicate, which hid the
  # redundancy. Duplicates have been removed here.
  ebs_optimized_types = {
    # Current-generation instance types.
    "c4.large"     = true
    "c4.xlarge"    = true
    "c4.2xlarge"   = true
    "c4.4xlarge"   = true
    "c4.8xlarge"   = true
    "c5.large"     = true
    "c5.xlarge"    = true
    "c5.2xlarge"   = true
    "c5.4xlarge"   = true
    "c5.9xlarge"   = true
    "c5.18xlarge"  = true
    "c5d.large"    = true
    "c5d.xlarge"   = true
    "c5d.2xlarge"  = true
    "c5d.4xlarge"  = true
    "c5d.9xlarge"  = true
    "c5d.18xlarge" = true
    "d2.xlarge"    = true
    "d2.2xlarge"   = true
    "d2.4xlarge"   = true
    "d2.8xlarge"   = true
    "f1.2xlarge"   = true
    "f1.16xlarge"  = true
    "g3.4xlarge"   = true
    "g3.8xlarge"   = true
    "g3.16xlarge"  = true
    "h1.2xlarge"   = true
    "h1.4xlarge"   = true
    "h1.8xlarge"   = true
    "h1.16xlarge"  = true
    "i3.large"     = true
    "i3.xlarge"    = true
    "i3.2xlarge"   = true
    "i3.4xlarge"   = true
    "i3.8xlarge"   = true
    "i3.16xlarge"  = true
    "i3.metal"     = true
    "m4.large"     = true
    "m4.xlarge"    = true
    "m4.2xlarge"   = true
    "m4.4xlarge"   = true
    "m4.10xlarge"  = true
    "m4.16xlarge"  = true
    "m5.large"     = true
    "m5.xlarge"    = true
    "m5.2xlarge"   = true
    "m5.4xlarge"   = true
    "m5.12xlarge"  = true
    "m5.24xlarge"  = true
    "m5d.large"    = true
    "m5d.xlarge"   = true
    "m5d.2xlarge"  = true
    "m5d.4xlarge"  = true
    "m5d.12xlarge" = true
    "m5d.24xlarge" = true
    "p2.xlarge"    = true
    "p2.8xlarge"   = true
    "p2.16xlarge"  = true
    "p3.2xlarge"   = true
    "p3.8xlarge"   = true
    "p3.16xlarge"  = true
    "r4.large"     = true
    "r4.xlarge"    = true
    "r4.2xlarge"   = true
    "r4.4xlarge"   = true
    "r4.8xlarge"   = true
    "r4.16xlarge"  = true
    "x1.16xlarge"  = true
    "x1.32xlarge"  = true
    "x1e.xlarge"   = true
    "x1e.2xlarge"  = true
    "x1e.4xlarge"  = true
    "x1e.8xlarge"  = true
    "x1e.16xlarge" = true
    "x1e.32xlarge" = true

    # Previous-generation instance types.
    "c1.xlarge"  = true
    "c3.xlarge"  = true
    "c3.2xlarge" = true
    "c3.4xlarge" = true
    "g2.2xlarge" = true
    "i2.xlarge"  = true
    "i2.2xlarge" = true
    "i2.4xlarge" = true
    "m1.large"   = true
    "m1.xlarge"  = true
    "m2.2xlarge" = true
    "m2.4xlarge" = true
    "m3.xlarge"  = true
    "m3.2xlarge" = true
    "r3.xlarge"  = true
    "r3.2xlarge" = true
    "r3.4xlarge" = true
  }
}

View File

@@ -1,4 +0,0 @@
# Looks var.instance_type up in the local table, defaulting to false for
# types that are absent from it.
output "answer" {
  value       = "${lookup(local.ebs_optimized_types, var.instance_type, false)}"
  description = "Returns true or false depending on if the instance type is able to be EBS optimized."
}

View File

@@ -1,3 +0,0 @@
# Required input: no default, so callers must always supply a type.
variable "instance_type" {
  description = "Instance type to evaluate if EBS optimized is an option."
}

View File

@@ -1,15 +1,5 @@
# Exposes the rendered aws-auth ConfigMap template to module consumers.
output "config_map_aws_auth" {
  value       = "${data.template_file.config_map_aws_auth.rendered}"
  description = "A kubernetes configuration to authenticate to this cluster."
}
# Exposes the rendered kubeconfig template to module consumers.
output "kubeconfig" {
  value       = "${data.template_file.kubeconfig.rendered}"
  description = "kubectl config file contents for this cluster."
}
output "cluster_id" { output "cluster_id" {
description = "The name/id of the cluster." description = "The name/id of the EKS cluster."
value = "${aws_eks_cluster.this.id}" value = "${aws_eks_cluster.this.id}"
} }
@@ -20,21 +10,41 @@ output "cluster_id" {
# } # }
output "cluster_certificate_authority_data" { output "cluster_certificate_authority_data" {
description = "Nested attribute containing certificate-authority-data for your cluster. Tis is the base64 encoded certificate data required to communicate with your cluster." description = "Nested attribute containing certificate-authority-data for your cluster. This is the base64 encoded certificate data required to communicate with your cluster."
value = "${aws_eks_cluster.this.certificate_authority.0.data}" value = "${aws_eks_cluster.this.certificate_authority.0.data}"
} }
output "cluster_endpoint" { output "cluster_endpoint" {
description = "The endpoint for your Kubernetes API server." description = "The endpoint for your EKS Kubernetes API."
value = "${aws_eks_cluster.this.endpoint}" value = "${aws_eks_cluster.this.endpoint}"
} }
output "cluster_version" { output "cluster_version" {
description = "The Kubernetes server version for the cluster." description = "The Kubernetes server version for the EKS cluster."
value = "${aws_eks_cluster.this.version}" value = "${aws_eks_cluster.this.version}"
} }
output "cluster_security_group_ids" { output "cluster_security_group_id" {
description = "description" description = "Security group ID attached to the EKS cluster."
value = "${aws_eks_cluster.this.vpc_config.0.security_group_ids}" value = "${local.cluster_security_group_id}"
}
output "config_map_aws_auth" {
description = "A kubernetes configuration to authenticate to this EKS cluster."
value = "${data.template_file.config_map_aws_auth.rendered}"
}
output "kubeconfig" {
description = "kubectl config file contents for this EKS cluster."
value = "${data.template_file.kubeconfig.rendered}"
}
output "workers_asg_arns" {
description = "IDs of the autoscaling groups containing workers."
value = "${aws_autoscaling_group.workers.*.arn}"
}
output "worker_security_group_id" {
description = "Security group ID attached to the EKS workers."
value = "${local.worker_security_group_id}"
} }

View File

@@ -1,19 +1,14 @@
variable "additional_userdata" { variable "cluster_name" {
description = "Extra lines of userdata (bash) which are appended to the default userdata code." description = "Name of the EKS cluster. Also used as a prefix in names of related resources."
}
variable "cluster_security_group_id" {
description = "If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingres/egress to work with the workers and provide API access to your current IP/32."
default = "" default = ""
} }
variable "cluster_ingress_cidrs" {
description = "The CIDRs from which we can execute kubectl commands."
type = "list"
}
variable "cluster_name" {
description = "Name of the EKS cluster which is also used as a prefix in names of related resources."
}
variable "cluster_version" { variable "cluster_version" {
description = "Kubernetes version to use for the cluster." description = "Kubernetes version to use for the EKS cluster."
default = "1.10" default = "1.10"
} }
@@ -23,17 +18,12 @@ variable "config_output_path" {
} }
variable "configure_kubectl_session" { variable "configure_kubectl_session" {
description = "Configure the current session's kubectl to use the instantiated cluster." description = "Configure the current session's kubectl to use the instantiated EKS cluster."
default = false
}
variable "ebs_optimized_workers" {
description = "If left at default of true, will use ebs optimization if available on the given instance type."
default = true default = true
} }
variable "subnets" { variable "subnets" {
description = "A list of subnets to associate with the cluster's underlying instances." description = "A list of subnets to place the EKS cluster and workers within."
type = "list" type = "list"
} }
@@ -43,30 +33,35 @@ variable "tags" {
} }
variable "vpc_id" { variable "vpc_id" {
description = "VPC id where the cluster and other resources will be deployed." description = "VPC where the cluster and workers will be deployed."
} }
variable "workers_ami_id" { variable "worker_groups" {
description = "AMI ID for the eks workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI." description = "A list of maps defining worker group configurations. See workers_group_defaults for valid keys."
type = "list"
default = [{
"name" = "default"
}]
}
variable "workers_group_defaults" {
description = "Default values for target groups as defined by the list of maps."
type = "map"
default = {
name = "count.index" # Name of the worker group. Literal count.index will never be used but if name is not set, the count.index interpolation will be used.
ami_id = "" # AMI ID for the eks workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI.
asg_desired_capacity = "1" # Desired worker capacity in the autoscaling group.
asg_max_size = "3" # Maximum worker capacity in the autoscaling group.
asg_min_size = "1" # Minimum worker capacity in the autoscaling group.
instance_type = "m4.large" # Size of the workers instances.
additional_userdata = "" # userdata to append to the default userdata.
ebs_optimized = true # sets whether to use ebs optimization on supported types.
}
}
variable "worker_security_group_id" {
description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingres/egress to work with the EKS cluster."
default = "" default = ""
} }
# Steady-state worker count for the autoscaling group.
variable "workers_asg_desired_capacity" {
  default     = "1"
  description = "Desired worker capacity in the autoscaling group."
}
# Upper bound on the number of workers the autoscaling group may launch.
variable "workers_asg_max_size" {
  default     = "3"
  description = "Maximum worker capacity in the autoscaling group."
}
# Lower bound on the number of workers the autoscaling group will keep.
variable "workers_asg_min_size" {
  default     = "1"
  description = "Minimum worker capacity in the autoscaling group."
}
# EC2 instance type used for every worker node.
variable "workers_instance_type" {
  default     = "m4.large"
  description = "Size of the workers instances."
}

View File

@@ -1 +1 @@
v0.2.0 v1.0.0

View File

@@ -1,14 +1,15 @@
resource "aws_autoscaling_group" "workers" { resource "aws_autoscaling_group" "workers" {
name_prefix = "${var.cluster_name}" name_prefix = "${var.cluster_name}-${lookup(var.worker_groups[count.index], "name", count.index)}"
launch_configuration = "${aws_launch_configuration.workers.id}" desired_capacity = "${lookup(var.worker_groups[count.index], "asg_desired_capacity", lookup(var.workers_group_defaults, "asg_desired_capacity"))}"
desired_capacity = "${var.workers_asg_desired_capacity}" max_size = "${lookup(var.worker_groups[count.index], "asg_max_size",lookup(var.workers_group_defaults, "asg_max_size"))}"
max_size = "${var.workers_asg_max_size}" min_size = "${lookup(var.worker_groups[count.index], "asg_min_size",lookup(var.workers_group_defaults, "asg_min_size"))}"
min_size = "${var.workers_asg_min_size}" launch_configuration = "${element(aws_launch_configuration.workers.*.id, count.index)}"
vpc_zone_identifier = ["${var.subnets}"] vpc_zone_identifier = ["${var.subnets}"]
count = "${length(var.worker_groups)}"
tags = ["${concat( tags = ["${concat(
list( list(
map("key", "Name", "value", "${var.cluster_name}-eks_asg", "propagate_at_launch", true), map("key", "Name", "value", "${var.cluster_name}-${lookup(var.worker_groups[count.index], "name", count.index)}-eks_asg", "propagate_at_launch", true),
map("key", "kubernetes.io/cluster/${var.cluster_name}", "value", "owned", "propagate_at_launch", true), map("key", "kubernetes.io/cluster/${var.cluster_name}", "value", "owned", "propagate_at_launch", true),
), ),
local.asg_tags) local.asg_tags)
@@ -16,14 +17,15 @@ resource "aws_autoscaling_group" "workers" {
} }
resource "aws_launch_configuration" "workers" { resource "aws_launch_configuration" "workers" {
name_prefix = "${var.cluster_name}" name = "${var.cluster_name}-${lookup(var.worker_groups[count.index], "name", count.index)}"
associate_public_ip_address = true associate_public_ip_address = true
iam_instance_profile = "${aws_iam_instance_profile.workers.name}" security_groups = ["${local.worker_security_group_id}"]
image_id = "${var.workers_ami_id == "" ? data.aws_ami.eks_worker.id : var.workers_ami_id}" iam_instance_profile = "${aws_iam_instance_profile.workers.id}"
instance_type = "${var.workers_instance_type}" image_id = "${lookup(var.worker_groups[count.index], "ami_id", data.aws_ami.eks_worker.id)}"
security_groups = ["${aws_security_group.workers.id}"] instance_type = "${lookup(var.worker_groups[count.index], "instance_type", lookup(var.workers_group_defaults, "instance_type"))}"
user_data_base64 = "${base64encode(data.template_file.userdata.rendered)}" user_data_base64 = "${base64encode(element(data.template_file.userdata.*.rendered, count.index))}"
ebs_optimized = "${var.ebs_optimized_workers ? module.ebs_optimized.answer : false}" ebs_optimized = "${lookup(var.worker_groups[count.index], "ebs_optimized", lookup(local.ebs_optimized, lookup(var.worker_groups[count.index], "instance_type", lookup(var.workers_group_defaults, "instance_type")), false))}"
count = "${length(var.worker_groups)}"
lifecycle { lifecycle {
create_before_destroy = true create_before_destroy = true
@@ -38,6 +40,7 @@ resource "aws_security_group" "workers" {
name_prefix = "${var.cluster_name}" name_prefix = "${var.cluster_name}"
description = "Security group for all nodes in the cluster." description = "Security group for all nodes in the cluster."
vpc_id = "${var.vpc_id}" vpc_id = "${var.vpc_id}"
count = "${var.worker_security_group_id == "" ? 1 : 0}"
tags = "${merge(var.tags, map("Name", "${var.cluster_name}-eks_worker_sg", "kubernetes.io/cluster/${var.cluster_name}", "owned" tags = "${merge(var.tags, map("Name", "${var.cluster_name}-eks_worker_sg", "kubernetes.io/cluster/${var.cluster_name}", "owned"
))}" ))}"
} }
@@ -50,6 +53,7 @@ resource "aws_security_group_rule" "workers_egress_internet" {
from_port = 0 from_port = 0
to_port = 0 to_port = 0
type = "egress" type = "egress"
count = "${var.worker_security_group_id == "" ? 1 : 0}"
} }
resource "aws_security_group_rule" "workers_ingress_self" { resource "aws_security_group_rule" "workers_ingress_self" {
@@ -60,16 +64,18 @@ resource "aws_security_group_rule" "workers_ingress_self" {
from_port = 0 from_port = 0
to_port = 65535 to_port = 65535
type = "ingress" type = "ingress"
count = "${var.worker_security_group_id == "" ? 1 : 0}"
} }
resource "aws_security_group_rule" "workers_ingress_cluster" { resource "aws_security_group_rule" "workers_ingress_cluster" {
description = "Allow workers Kubelets and pods to receive communication from the cluster control plane." description = "Allow workers Kubelets and pods to receive communication from the cluster control plane."
protocol = "tcp" protocol = "tcp"
security_group_id = "${aws_security_group.workers.id}" security_group_id = "${aws_security_group.workers.id}"
source_security_group_id = "${aws_security_group.cluster.id}" source_security_group_id = "${local.cluster_security_group_id}"
from_port = 1025 from_port = 1025
to_port = 65535 to_port = 65535
type = "ingress" type = "ingress"
count = "${var.worker_security_group_id == "" ? 1 : 0}"
} }
resource "aws_iam_role" "workers" { resource "aws_iam_role" "workers" {