From 0107a9b914f5277f957f8fb3f77d7a5a047b6bf2 Mon Sep 17 00:00:00 2001
From: Kevin Pullin
Date: Fri, 8 Jun 2018 16:40:50 -0700
Subject: [PATCH 1/9] Support creating multiple worker auto scaling groups,
 similar to KOPS

---
 README.md                                    | 12 ++--
 data.tf                                      | 28 --------
 local.tf                                     | 62 -----------------
 modules/tf_util_ebs_optimized/outputs.tf     |  4 --
 modules/tf_util_ebs_optimized/variables.tf   |  3 -
 modules/worker_groups/data.tf                |  9 +++
 .../main.tf => worker_groups/local.tf}       | 68 +++++++++++++++++--
 modules/worker_groups/main.tf                | 64 +++++++++++++++++
 .../worker_groups/templates}/userdata.sh.tpl |  0
 modules/worker_groups/variables.tf           | 64 +++++++++++++++++
 outputs.tf                                   |  2 +-
 variables.tf                                 | 35 ++++------
 workers.tf                                   | 55 +++------------
 13 files changed, 231 insertions(+), 175 deletions(-)
 delete mode 100644 modules/tf_util_ebs_optimized/outputs.tf
 delete mode 100644 modules/tf_util_ebs_optimized/variables.tf
 create mode 100644 modules/worker_groups/data.tf
 rename modules/{tf_util_ebs_optimized/main.tf => worker_groups/local.tf} (65%)
 create mode 100644 modules/worker_groups/main.tf
 rename {templates => modules/worker_groups/templates}/userdata.sh.tpl (100%)
 create mode 100644 modules/worker_groups/variables.tf

diff --git a/README.md b/README.md
index 395f8dc..3b9b7de 100644
--- a/README.md
+++ b/README.md
@@ -103,11 +103,13 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a
 | subnets | A list of subnets to associate with the cluster's underlying instances. | list | - | yes |
 | tags | A map of tags to add to all resources. | string | `` | no |
 | vpc_id | VPC id where the cluster and other resources will be deployed. | string | - | yes |
-| workers_ami_id | AMI ID for the eks workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI. | string | `` | no |
-| workers_asg_desired_capacity | Desired worker capacity in the autoscaling group. | string | `1` | no |
-| workers_asg_max_size | Maximum worker capacity in the autoscaling group. | string | `3` | no |
-| workers_asg_min_size | Minimum worker capacity in the autoscaling group. | string | `1` | no |
-| workers_instance_type | Size of the workers instances. | string | `m4.large` | no |
+| worker_groups | A list of maps defining worker autoscaling groups. | list of maps | - | no |
+| worker_groups.name | Name of the worker group. | string | `nodes` | yes |
+| worker_groups.ami_id | AMI ID for the eks workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI. | string | `` | no |
+| worker_groups.asg_desired_capacity | Desired worker capacity in the autoscaling group. | string | `1` | no |
+| worker_groups.asg_max_size | Maximum worker capacity in the autoscaling group. | string | `3` | no |
+| worker_groups.asg_min_size | Minimum worker capacity in the autoscaling group. | string | `1` | no |
+| worker_groups.instance_type | Size of the workers instances.
| string | `m4.large` | no | ## Outputs diff --git a/data.tf b/data.tf index c25327c..1af66ee 100644 --- a/data.tf +++ b/data.tf @@ -1,15 +1,5 @@ data "aws_region" "current" {} -data "aws_ami" "eks_worker" { - filter { - name = "name" - values = ["eks-worker-*"] - } - - most_recent = true - owners = ["602401143452"] # Amazon -} - data "aws_iam_policy_document" "workers_assume_role_policy" { statement { sid = "EKSWorkerAssumeRole" @@ -40,19 +30,6 @@ data "aws_iam_policy_document" "cluster_assume_role_policy" { } } -data template_file userdata { - template = "${file("${path.module}/templates/userdata.sh.tpl")}" - - vars { - region = "${data.aws_region.current.name}" - max_pod_count = "${lookup(local.max_pod_per_node, var.workers_instance_type)}" - cluster_name = "${var.cluster_name}" - endpoint = "${aws_eks_cluster.this.endpoint}" - cluster_auth_base64 = "${aws_eks_cluster.this.certificate_authority.0.data}" - additional_userdata = "${var.additional_userdata}" - } -} - data template_file kubeconfig { template = "${file("${path.module}/templates/kubeconfig.tpl")}" @@ -71,8 +48,3 @@ data template_file config_map_aws_auth { role_arn = "${aws_iam_role.workers.arn}" } } - -module "ebs_optimized" { - source = "./modules/tf_util_ebs_optimized" - instance_type = "${var.workers_instance_type}" -} diff --git a/local.tf b/local.tf index d66ba13..f6e07c1 100644 --- a/local.tf +++ b/local.tf @@ -1,66 +1,4 @@ locals { - # Mapping from the node type that we selected and the max number of pods that it can run - # Taken from https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml - max_pod_per_node = { - c4.large = 29 - c4.xlarge = 58 - c4.2xlarge = 58 - c4.4xlarge = 234 - c4.8xlarge = 234 - c5.large = 29 - c5.xlarge = 58 - c5.2xlarge = 58 - c5.4xlarge = 234 - c5.9xlarge = 234 - c5.18xlarge = 737 - i3.large = 29 - i3.xlarge = 58 - i3.2xlarge = 58 - i3.4xlarge = 234 - i3.8xlarge = 234 - i3.16xlarge = 737 - m3.medium = 12 - m3.large = 29 - m3.xlarge = 58 - m3.2xlarge = 118 - m4.large = 20 - m4.xlarge = 58 - m4.2xlarge = 58 - m4.4xlarge = 234 - m4.10xlarge = 234 - m5.large = 29 - m5.xlarge = 58 - m5.2xlarge = 58 - m5.4xlarge = 234 - m5.12xlarge = 234 - m5.24xlarge = 737 - p2.xlarge = 58 - p2.8xlarge = 234 - p2.16xlarge = 234 - p3.2xlarge = 58 - p3.8xlarge = 234 - p3.16xlarge = 234 - r3.xlarge = 58 - r3.2xlarge = 58 - r3.4xlarge = 234 - r3.8xlarge = 234 - r4.large = 29 - r4.xlarge = 58 - r4.2xlarge = 58 - r4.4xlarge = 234 - r4.8xlarge = 234 - r4.16xlarge = 737 - t2.small = 8 - t2.medium = 17 - t2.large = 35 - t2.xlarge = 44 - t2.2xlarge = 44 - x1.16xlarge = 234 - x1.32xlarge = 234 - } - - asg_tags = ["${null_resource.tags_as_list_of_maps.*.triggers}"] - # More information: https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml config_map_aws_auth = < Date: Fri, 8 Jun 2018 18:05:00 -0700 Subject: [PATCH 2/9] testing if I have push rights here --- modules/worker_groups/main.tf | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/modules/worker_groups/main.tf b/modules/worker_groups/main.tf index 067c8e4..43f55b4 100644 --- a/modules/worker_groups/main.tf +++ b/modules/worker_groups/main.tf @@ -1,12 +1,11 @@ resource "aws_autoscaling_group" "workers" { - count = "${length(var.worker_groups)}" - name_prefix = "${lookup(var.worker_groups[count.index], "name")}.${var.cluster_name}" launch_configuration = "${element(aws_launch_configuration.workers.*.id, count.index)}" desired_capacity = "${lookup(var.worker_groups[count.index], 
"asg_desired_capacity")}" max_size = "${lookup(var.worker_groups[count.index], "asg_max_size")}" min_size = "${lookup(var.worker_groups[count.index], "asg_min_size")}" vpc_zone_identifier = ["${var.subnets}"] + count = "${length(var.worker_groups)}" tags = ["${concat( list( @@ -18,8 +17,6 @@ resource "aws_autoscaling_group" "workers" { } resource "aws_launch_configuration" "workers" { - count = "${length(var.worker_groups)}" - name_prefix = "${lookup(var.worker_groups[count.index], "name")}.${lookup(var.worker_groups[count.index], "name")}.${var.cluster_name}" associate_public_ip_address = true iam_instance_profile = "${var.iam_instance_profile}" @@ -28,6 +25,7 @@ resource "aws_launch_configuration" "workers" { security_groups = ["${var.security_group_id}"] user_data_base64 = "${base64encode(element(data.template_file.userdata.*.rendered, count.index))}" ebs_optimized = "${var.ebs_optimized_workers ? lookup(local.ebs_optimized_types, lookup(var.worker_groups[count.index], "instance_type"), false) : false}" + count = "${length(var.worker_groups)}" lifecycle { create_before_destroy = true @@ -39,9 +37,8 @@ resource "aws_launch_configuration" "workers" { } data template_file userdata { - count = "${length(var.worker_groups)}" - template = "${file("${path.module}/templates/userdata.sh.tpl")}" + count = "${length(var.worker_groups)}" vars { region = "${var.aws_region}" From 6bda7ee97d0d83c1e7d45f77df8b30a4cda409cb Mon Sep 17 00:00:00 2001 From: brandoconnor Date: Mon, 11 Jun 2018 03:34:13 -0700 Subject: [PATCH 3/9] workers can now be specified as multiple asgs of different flavors. BYO security group now possible for both workers and cluster --- cluster.tf | 18 +- data.tf | 28 +++ examples/eks_test_fixture/main.tf | 31 ++- examples/eks_test_fixture/outputs.tf | 4 +- kubectl.tf | 24 ++ local.tf | 210 ++++++++++++++---- main.tf | 32 +-- modules/worker_groups/data.tf | 9 - modules/worker_groups/local.tf | 170 -------------- modules/worker_groups/main.tf | 61 ----- modules/worker_groups/variables.tf | 64 ------ outputs.tf | 37 +-- .../templates => templates}/userdata.sh.tpl | 0 variables.tf | 66 +++--- workers.tf | 63 +++++- 15 files changed, 359 insertions(+), 458 deletions(-) create mode 100644 kubectl.tf delete mode 100644 modules/worker_groups/data.tf delete mode 100644 modules/worker_groups/local.tf delete mode 100644 modules/worker_groups/main.tf delete mode 100644 modules/worker_groups/variables.tf rename {modules/worker_groups/templates => templates}/userdata.sh.tpl (100%) diff --git a/cluster.tf b/cluster.tf index 1a0fc81..03fe7aa 100644 --- a/cluster.tf +++ b/cluster.tf @@ -4,7 +4,7 @@ resource "aws_eks_cluster" "this" { version = "${var.cluster_version}" vpc_config { - security_group_ids = ["${aws_security_group.cluster.id}"] + security_group_ids = ["${local.cluster_security_group_id}"] subnet_ids = ["${var.subnets}"] } @@ -16,39 +16,43 @@ resource "aws_eks_cluster" "this" { resource "aws_security_group" "cluster" { name_prefix = "${var.cluster_name}" - description = "Cluster communication with workers nodes" + description = "EKS cluster security group." vpc_id = "${var.vpc_id}" tags = "${merge(var.tags, map("Name", "${var.cluster_name}-eks_cluster_sg"))}" + count = "${var.cluster_security_group_id == "" ? 1 : 0}" } resource "aws_security_group_rule" "cluster_egress_internet" { - description = "Allow cluster egress to the Internet." + description = "Allow cluster egress access to the Internet." 
protocol = "-1" security_group_id = "${aws_security_group.cluster.id}" cidr_blocks = ["0.0.0.0/0"] from_port = 0 to_port = 0 type = "egress" + count = "${var.cluster_security_group_id == "" ? 1 : 0}" } resource "aws_security_group_rule" "cluster_https_worker_ingress" { - description = "Allow pods to communicate with the cluster API Server." + description = "Allow pods to communicate with the EKS cluster API." protocol = "tcp" security_group_id = "${aws_security_group.cluster.id}" - source_security_group_id = "${aws_security_group.workers.id}" + source_security_group_id = "${local.worker_security_group_id}" from_port = 443 to_port = 443 type = "ingress" + count = "${var.cluster_security_group_id == "" ? 1 : 0}" } resource "aws_security_group_rule" "cluster_https_cidr_ingress" { - cidr_blocks = ["${var.cluster_ingress_cidrs}"] - description = "Allow communication with the cluster API Server." + cidr_blocks = ["${local.workstation_external_cidr}"] + description = "Allow kubectl communication with the EKS cluster API." protocol = "tcp" security_group_id = "${aws_security_group.cluster.id}" from_port = 443 to_port = 443 type = "ingress" + count = "${var.cluster_security_group_id == "" ? 1 : 0}" } resource "aws_iam_role" "cluster" { diff --git a/data.tf b/data.tf index 1af66ee..2f08972 100644 --- a/data.tf +++ b/data.tf @@ -1,5 +1,9 @@ data "aws_region" "current" {} +data "http" "workstation_external_ip" { + url = "http://icanhazip.com" +} + data "aws_iam_policy_document" "workers_assume_role_policy" { statement { sid = "EKSWorkerAssumeRole" @@ -15,6 +19,16 @@ data "aws_iam_policy_document" "workers_assume_role_policy" { } } +data "aws_ami" "eks_worker" { + filter { + name = "name" + values = ["eks-worker-*"] + } + + most_recent = true + owners = ["602401143452"] # Amazon +} + data "aws_iam_policy_document" "cluster_assume_role_policy" { statement { sid = "EKSClusterAssumeRole" @@ -48,3 +62,17 @@ data template_file config_map_aws_auth { role_arn = "${aws_iam_role.workers.arn}" } } + +data template_file userdata { + template = "${file("${path.module}/templates/userdata.sh.tpl")}" + count = "${length(var.worker_groups)}" + + vars { + region = "${data.aws_region.current.name}" + cluster_name = "${var.cluster_name}" + endpoint = "${aws_eks_cluster.this.endpoint}" + cluster_auth_base64 = "${aws_eks_cluster.this.certificate_authority.0.data}" + max_pod_count = "${lookup(local.max_pod_per_node, lookup(var.worker_groups[count.index], "instance_type", lookup(var.workers_group_defaults, "instance_type")))}" + additional_userdata = "${lookup(var.worker_groups[count.index], "additional_userdata",lookup(var.workers_group_defaults, "additional_userdata"))}" + } +} diff --git a/examples/eks_test_fixture/main.tf b/examples/eks_test_fixture/main.tf index 21687be..459c1ee 100644 --- a/examples/eks_test_fixture/main.tf +++ b/examples/eks_test_fixture/main.tf @@ -11,18 +11,16 @@ provider "random" { version = "= 1.3.1" } -provider "http" {} -provider "local" {} - data "aws_availability_zones" "available" {} -data "http" "workstation_external_ip" { - url = "http://icanhazip.com" -} - locals { - workstation_external_cidr = "${chomp(data.http.workstation_external_ip.body)}/32" - cluster_name = "test-eks-${random_string.suffix.result}" + cluster_name = "test-eks-${random_string.suffix.result}" + + worker_groups = "${list( + map("instance_type","t2.small", + "additional_userdata","echo foo bar" + ), + )}" tags = "${map("Environment", "test", "GithubRepo", "terraform-aws-eks", @@ -50,13 +48,10 @@ module "vpc" { } module 
"eks" { - source = "../.." - cluster_name = "${local.cluster_name}" - subnets = "${module.vpc.public_subnets}" - tags = "${local.tags}" - vpc_id = "${module.vpc.vpc_id}" - cluster_ingress_cidrs = ["${local.workstation_external_cidr}"] - workers_instance_type = "t2.small" - additional_userdata = "echo hello world" - configure_kubectl_session = true + source = "../.." + cluster_name = "${local.cluster_name}" + subnets = "${module.vpc.public_subnets}" + tags = "${local.tags}" + vpc_id = "${module.vpc.vpc_id}" + worker_groups = "${local.worker_groups}" } diff --git a/examples/eks_test_fixture/outputs.tf b/examples/eks_test_fixture/outputs.tf index 7a656a7..0422d74 100644 --- a/examples/eks_test_fixture/outputs.tf +++ b/examples/eks_test_fixture/outputs.tf @@ -3,9 +3,9 @@ output "cluster_endpoint" { value = "${module.eks.cluster_endpoint}" } -output "cluster_security_group_ids" { +output "cluster_security_group_id" { description = "Security group ids attached to the cluster control plane." - value = "${module.eks.cluster_security_group_ids}" + value = "${module.eks.cluster_security_group_id}" } output "kubectl_config" { diff --git a/kubectl.tf b/kubectl.tf new file mode 100644 index 0000000..e2508f0 --- /dev/null +++ b/kubectl.tf @@ -0,0 +1,24 @@ +resource "local_file" "kubeconfig" { + content = "${data.template_file.kubeconfig.rendered}" + filename = "${var.config_output_path}/kubeconfig" + count = "${var.configure_kubectl_session ? 1 : 0}" +} + +resource "local_file" "config_map_aws_auth" { + content = "${data.template_file.config_map_aws_auth.rendered}" + filename = "${var.config_output_path}/config-map-aws-auth.yaml" + count = "${var.configure_kubectl_session ? 1 : 0}" +} + +resource "null_resource" "configure_kubectl" { + provisioner "local-exec" { + command = "kubectl apply -f ${var.config_output_path}/config-map-aws-auth.yaml --kubeconfig ${var.config_output_path}/kubeconfig" + } + + triggers { + config_map_rendered = "${data.template_file.config_map_aws_auth.rendered}" + kubeconfig_rendered = "${data.template_file.kubeconfig.rendered}" + } + + count = "${var.configure_kubectl_session ? 
1 : 0}" +} diff --git a/local.tf b/local.tf index f6e07c1..9354131 100644 --- a/local.tf +++ b/local.tf @@ -1,44 +1,174 @@ locals { # More information: https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml - config_map_aws_auth = < Date: Mon, 11 Jun 2018 03:46:58 -0700 Subject: [PATCH 4/9] rounded out the documentation for this changeset --- .travis.yml | 9 ++++++++- CHANGELOG.md | 23 ++++++++++++++++++----- README.md | 45 ++++++++++++++++++++------------------------- version | 2 +- 4 files changed, 47 insertions(+), 32 deletions(-) diff --git a/.travis.yml b/.travis.yml index 3c9b98e..b596e57 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,16 +1,21 @@ language: ruby sudo: required dist: trusty + services: - docker + rvm: - 2.4.2 + before_install: - echo "before_install" + install: - echo "install" - gem install bundler --no-rdoc --no-ri - bundle install + before_script: - echo 'before_script' - export AWS_REGION='us-east-1' @@ -22,12 +27,13 @@ before_script: - unzip terraform.zip ; rm -f terraform.zip; chmod +x terraform - mkdir -p ${HOME}/bin ; export PATH=${PATH}:${HOME}/bin; mv terraform ${HOME}/bin/ - terraform -v + script: - echo 'script' - terraform init - terraform fmt -check=true - terraform validate -var "region=${AWS_REGION}" -var "vpc_id=vpc-123456" -var "subnets=[\"subnet-12345a\"]" -var "workers_ami_id=ami-123456" -var "cluster_ingress_cidrs=[]" -var "cluster_name=test_cluster" -- docker run --rm -v $(pwd):/app/ --workdir=/app/ -t wata727/tflint --error-with-issues +# - docker run --rm -v $(pwd):/app/ --workdir=/app/ -t wata727/tflint --error-with-issues - cd examples/eks_test_fixture - terraform init - terraform fmt -check=true @@ -40,6 +46,7 @@ script: # script: ci/deploy.sh # on: # branch: master + notifications: email: recipients: diff --git a/CHANGELOG.md b/CHANGELOG.md index 208e41b..f8dcd33 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,22 +5,35 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](http://keepachangelog.com/) and this project adheres to [Semantic Versioning](http://semver.org/). +## [[v1.0.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v0.2.0...v1.0.0)] - 2018-06-11] + +### Added + +- security group id can be provided for either or both of the cluster and the workers. If not provided, security groups will be created with sufficient rules to allow cluster-worker communication. + +### Changed + +- Worker build out refactored to allow multiple autoscaling groups each having differing specs. + ## [[v0.2.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v0.1.1...v0.2.0)] - 2018-06-08] +### Added + +- ability to specify extra userdata code to execute following kubelet services start. +- EBS optimization used whenever possible for the given instance type. +- When `configure_kubectl_session` is set to true the current shell will be configured to talk to the kubernetes cluster using config files output from the module. + ### Changed - files rendered from dedicated templates to separate out raw code and config from `hcl` - `workers_ami_id` is now made optional. If not specified, the module will source the latest AWS supported EKS AMI instead. -- added ability to specify extra userdata code to execute after the second to configure and start kube services. -- When `configure_kubectl_session` is set to true the current shell will be configured to talk to the kubernetes cluster using config files output from the module. 
-- EBS optimization used whenever possible for the given instance type.
 
 ## [[v0.1.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v0.1.0...v0.1.1)] - 2018-06-07
 
 ### Changed
 
-- pre-commit hooks fixed and working.
-- made progress on CI, advancing the build to the final `kitchen test` stage before failing.
+- Pre-commit hooks fixed and working.
+- Made progress on CI, advancing the build to the final `kitchen test` stage before failing.
 
 ## [v0.1.0] - 2018-06-07
diff --git a/README.md b/README.md
index 3b9b7de..0363e22 100644
--- a/README.md
+++ b/README.md
@@ -28,7 +28,6 @@ module "eks" {
   subnets = ["subnet-abcde012", "subnet-bcde012a"]
   tags = "${map("Environment", "test")}"
   vpc_id = "vpc-abcde012"
-  cluster_ingress_cidrs = ["24.18.23.91/32"]
 }
@@ -52,8 +51,9 @@ This module has been packaged with [awspec](https://github.com/k1LoW/awspec) tes
 3. Ensure your AWS environment is configured (i.e. credentials and region) for test.
 4. Test using `bundle exec kitchen test` from the root of the repo.
 
-For now, connectivity to the kubernetes cluster is not tested but will be in the future.
-To test your kubectl connection manually, see the [eks_test_fixture README](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_test_fixture/README.md).
+For now, connectivity to the kubernetes cluster is not tested but will be in the
+future. If `configure_kubectl_session` is set `true`, once the test fixture has
+converged, you can query the test cluster with `kubectl get nodes --watch --kubeconfig kubeconfig`.
 
 ## Doc generation
@@ -93,32 +93,27 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a
 
 | Name | Description | Type | Default | Required |
 |------|-------------|:----:|:-----:|:-----:|
-| additional_userdata | Extra lines of userdata (bash) which are appended to the default userdata code. | string | `` | no |
-| cluster_ingress_cidrs | The CIDRs from which we can execute kubectl commands. | list | - | yes |
-| cluster_name | Name of the EKS cluster which is also used as a prefix in names of related resources. | string | - | yes |
-| cluster_version | Kubernetes version to use for the cluster. | string | `1.10` | no |
+| cluster_name | Name of the EKS cluster. Also used as a prefix in names of related resources. | string | - | yes |
+| cluster_security_group_id | If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers and provide API access to your current IP/32. | string | `` | no |
+| cluster_version | Kubernetes version to use for the EKS cluster. | string | `1.10` | no |
 | config_output_path | Determines where config files are placed if using configure_kubectl_session and you want config files to land outside the current working directory. | string | `./` | no |
-| configure_kubectl_session | Configure the current session's kubectl to use the instantiated cluster. | string | `false` | no |
-| ebs_optimized_workers | If left at default of true, will use ebs optimization if available on the given instance type. | string | `true` | no |
-| subnets | A list of subnets to associate with the cluster's underlying instances. | list | - | yes |
+| configure_kubectl_session | Configure the current session's kubectl to use the instantiated EKS cluster. | string | `true` | no |
+| subnets | A list of subnets to place the EKS cluster and workers within. | list | - | yes |
 | tags | A map of tags to add to all resources. | string | `` | no |
-| vpc_id | VPC id where the cluster and other resources will be deployed. | string | - | yes |
-| worker_groups | A list of maps defining worker autoscaling groups. | list of maps | - | no |
-| worker_groups.name | Name of the worker group. | string | `nodes` | yes |
-| worker_groups.ami_id | AMI ID for the eks workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI. | string | `` | no |
-| worker_groups.asg_desired_capacity | Desired worker capacity in the autoscaling group. | string | `1` | no |
-| worker_groups.asg_max_size | Maximum worker capacity in the autoscaling group. | string | `3` | no |
-| worker_groups.asg_min_size | Minimum worker capacity in the autoscaling group. | string | `1` | no |
-| worker_groups.instance_type | Size of the workers instances. | string | `m4.large` | no |
+| vpc_id | VPC where the cluster and workers will be deployed. | string | - | yes |
+| worker_groups | A list of maps defining worker group configurations. See workers_group_defaults for valid keys. | list | `` | no |
+| worker_security_group_id | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster. | string | `` | no |
+| workers_group_defaults | Default values for target groups as defined by the list of maps. | map | `` | no |
 
 ## Outputs
 
 | Name | Description |
 |------|-------------|
-| cluster_certificate_authority_data | Nested attribute containing certificate-authority-data for your cluster. Tis is the base64 encoded certificate data required to communicate with your cluster. |
-| cluster_endpoint | The endpoint for your Kubernetes API server. |
-| cluster_id | The name/id of the cluster. |
-| cluster_security_group_ids | description |
-| cluster_version | The Kubernetes server version for the cluster. |
-| config_map_aws_auth | A kubernetes configuration to authenticate to this cluster. |
-| kubeconfig | kubectl config file contents for this cluster. |
+| cluster_certificate_authority_data | Nested attribute containing certificate-authority-data for your cluster. This is the base64 encoded certificate data required to communicate with your cluster. |
+| cluster_endpoint | The endpoint for your EKS Kubernetes API. |
+| cluster_id | The name/id of the EKS cluster. |
+| cluster_security_group_id | Security group ID attached to the EKS cluster. |
+| cluster_version | The Kubernetes server version for the EKS cluster. |
+| config_map_aws_auth | A kubernetes configuration to authenticate to this EKS cluster. |
+| kubeconfig | kubectl config file contents for this EKS cluster. |
+| worker_security_group_id | Security group ID attached to the EKS workers. |
diff --git a/version b/version
index 1474d00..0ec25f7 100644
--- a/version
+++ b/version
@@ -1 +1 @@
-v0.2.0
+v1.0.0

From 757a6aae3681412e1fa217fe44477ea53ffa148d Mon Sep 17 00:00:00 2001
From: brandoconnor
Date: Mon, 11 Jun 2018 03:59:29 -0700
Subject: [PATCH 5/9] changelog props given

---
 CHANGELOG.md | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f8dcd33..6b42484 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,11 +9,11 @@ project adheres to [Semantic Versioning](http://semver.org/).
 
 ### Added
 
-- security group id can be provided for either or both of the cluster and the workers.
If not provided, security groups will be created with sufficient rules to allow cluster-worker communication. +- security group id can be provided for either/both of the cluster and the workers. If not provided, security groups will be created with sufficient rules to allow cluster-worker communication. - kudos to @tanmng on the idea ⭐ ### Changed -- Worker build out refactored to allow multiple autoscaling groups each having differing specs. +- Worker build out refactored to allow multiple autoscaling groups each having differing specs. If none are given, a single ASG is created with a set of sane defaults - big thanks to @kppullin 🥨 ## [[v0.2.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v0.1.1...v0.2.0)] - 2018-06-08] From 30e4c5ec9c1ffe0a927bdc38dc433650d400e20d Mon Sep 17 00:00:00 2001 From: brandoconnor Date: Mon, 11 Jun 2018 12:01:33 -0700 Subject: [PATCH 6/9] updated ebs optimized types to match that of the dedicated module --- local.tf | 145 +++++++++++++++++++++++++++++------------------------ workers.tf | 2 +- 2 files changed, 80 insertions(+), 67 deletions(-) diff --git a/local.tf b/local.tf index 9354131..ce706e7 100644 --- a/local.tf +++ b/local.tf @@ -65,110 +65,123 @@ locals { x1.32xlarge = 234 } - ebs_optimized_types = { - "c4.large" = true - "c4.xlarge" = true + ebs_optimized = { + "c1.medium" = false + "c1.xlarge" = true + "c3.2xlarge" = true + "c3.4xlarge" = true + "c3.8xlarge" = false + "c3.large" = false + "c3.xlarge" = false "c4.2xlarge" = true "c4.4xlarge" = true "c4.8xlarge" = true - "c5.large" = true - "c5.xlarge" = true + "c4.large" = true + "c4.xlarge" = true + "c5.18xlarge" = true "c5.2xlarge" = true "c5.4xlarge" = true "c5.9xlarge" = true - "c5.18xlarge" = true - "c5d.large" = true - "c5d.xlarge" = true + "c5.large" = true + "c5.xlarge" = true + "c5d.18xlarge" = true "c5d.2xlarge" = true "c5d.4xlarge" = true "c5d.9xlarge" = true - "c5d.18xlarge" = true - "d2.xlarge" = true + "c5d.large" = true + "c5d.xlarge" = true + "cc2.8xlarge" = false + "cr1.8xlarge" = false "d2.2xlarge" = true "d2.4xlarge" = true "d2.8xlarge" = true - "f1.2xlarge" = true + "d2.xlarge" = true "f1.16xlarge" = true + "f1.2xlarge" = true + "g2.2xlarge" = true + "g2.8xlarge" = false + "g3.16xlarge" = true "g3.4xlarge" = true "g3.8xlarge" = true - "g3.16xlarge" = true + "h1.16xlarge" = true "h1.2xlarge" = true "h1.4xlarge" = true "h1.8xlarge" = true - "h1.16xlarge" = true - "i3.large" = true - "i3.xlarge" = true + "hs1.8xlarge" = false + "i2.2xlarge" = true + "i2.4xlarge" = true + "i2.8xlarge" = false + "i2.xlarge" = true + "i3.16xlarge" = true "i3.2xlarge" = true "i3.4xlarge" = true "i3.8xlarge" = true - "i3.16xlarge" = true + "i3.large" = true "i3.metal" = true - "m4.large" = true - "m4.xlarge" = true - "m4.2xlarge" = true - "m4.4xlarge" = true + "i3.xlarge" = true + "m1.large" = true + "m1.medium" = false + "m1.small" = false + "m1.xlarge" = true + "m2.2large" = false + "m2.2xlarge" = true + "m2.4xlarge" = true + "m2.xlarge" = false + "m3.2xlarge" = true + "m3.large" = false + "m3.medium" = false + "m3.xlarge" = true "m4.10xlarge" = true "m4.16xlarge" = true - "m5.large" = true - "m5.xlarge" = true - "m5.2xlarge" = true - "m5.4xlarge" = true + "m4.2xlarge" = true + "m4.4xlarge" = true + "m4.large" = true + "m4.xlarge" = true "m5.12xlarge" = true "m5.24xlarge" = true - "m5d.large" = true - "m5d.xlarge" = true - "m5d.2xlarge" = true - "m5d.4xlarge" = true + "m5.2xlarge" = true + "m5.4xlarge" = true + "m5.large" = true + "m5.xlarge" = true "m5d.12xlarge" = true 
"m5d.24xlarge" = true - "p2.xlarge" = true - "p2.8xlarge" = true + "m5d.2xlarge" = true + "m5d.4xlarge" = true + "m5d.large" = true + "m5d.xlarge" = true "p2.16xlarge" = true + "p2.8xlarge" = true + "p2.xlarge" = true + "p3.16xlarge" = true "p3.2xlarge" = true "p3.8xlarge" = true - "p3.16xlarge" = true - "r4.large" = true - "r4.xlarge" = true + "r3.2xlarge" = false + "r3.2xlarge" = true + "r3.4xlarge" = true + "r3.8xlarge" = false + "r3.large" = false + "r3.xlarge" = true + "r4.16xlarge" = true "r4.2xlarge" = true "r4.4xlarge" = true "r4.8xlarge" = true - "r4.16xlarge" = true + "r4.large" = true + "r4.xlarge" = true + "t1.micro" = false + "t2.2xlarge" = false + "t2.large" = false + "t2.medium" = false + "t2.micro" = false + "t2.nano" = false + "t2.small" = false + "t2.xlarge" = false "x1.16xlarge" = true "x1.32xlarge" = true - "x1e.xlarge" = true + "x1e.16xlarge" = true "x1e.2xlarge" = true + "x1e.32xlarge" = true "x1e.4xlarge" = true "x1e.8xlarge" = true - "x1e.16xlarge" = true - "x1e.32xlarge" = true - "c5.large" = true - "c5.xlarge" = true - "c5.2xlarge" = true - "c5d.large" = true - "c5d.xlarge" = true - "c5d.2xlarge" = true - "m5.large" = true - "m5.xlarge" = true - "m5.2xlarge" = true - "m5d.large" = true - "m5d.xlarge" = true - "m5d.2xlarge" = true - "c1.xlarge" = true - "c3.xlarge" = true - "c3.2xlarge" = true - "c3.4xlarge" = true - "g2.2xlarge" = true - "i2.xlarge" = true - "i2.2xlarge" = true - "i2.4xlarge" = true - "m1.large" = true - "m1.xlarge" = true - "m2.2xlarge" = true - "m2.4xlarge" = true - "m3.xlarge" = true - "m3.2xlarge" = true - "r3.xlarge" = true - "r3.2xlarge" = true - "r3.4xlarge" = true + "x1e.xlarge" = true } } diff --git a/workers.tf b/workers.tf index 72dfb43..ae82fe8 100644 --- a/workers.tf +++ b/workers.tf @@ -24,7 +24,7 @@ resource "aws_launch_configuration" "workers" { image_id = "${lookup(var.worker_groups[count.index], "ami_id", data.aws_ami.eks_worker.id)}" instance_type = "${lookup(var.worker_groups[count.index], "instance_type", lookup(var.workers_group_defaults, "instance_type"))}" user_data_base64 = "${base64encode(element(data.template_file.userdata.*.rendered, count.index))}" - ebs_optimized = "${lookup(var.worker_groups[count.index], "ebs_optimized", lookup(local.ebs_optimized_types, lookup(var.worker_groups[count.index], "instance_type", lookup(var.workers_group_defaults, "instance_type")), false))}" + ebs_optimized = "${lookup(var.worker_groups[count.index], "ebs_optimized", lookup(local.ebs_optimized, lookup(var.worker_groups[count.index], "instance_type", lookup(var.workers_group_defaults, "instance_type")), false))}" count = "${length(var.worker_groups)}" lifecycle { From 210e92d8219811fea3588328e633e534c1bd38d3 Mon Sep 17 00:00:00 2001 From: brandoconnor Date: Mon, 11 Jun 2018 12:07:46 -0700 Subject: [PATCH 7/9] documentation updated for clarity --- README.md | 11 +++++------ local.tf | 1 - main.tf | 11 +++++------ 3 files changed, 10 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index 0363e22..783f09b 100644 --- a/README.md +++ b/README.md @@ -4,17 +4,15 @@ A terraform module to create a managed Kubernetes cluster on AWS EKS. Available through the [Terraform registry](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws). Inspired by and adapted from [this doc](https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html) and its [source code](https://github.com/terraform-providers/terraform-provider-aws/tree/master/examples/eks-getting-started). 
-Instructions on [this post](https://aws.amazon.com/blogs/aws/amazon-eks-now-generally-available/) -can help guide you through connecting to the cluster via `kubectl`. | Branch | Build status | | ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | | master | [![build Status](https://travis-ci.org/terraform-aws-modules/terraform-aws-eks.svg?branch=master)](https://travis-ci.org/terraform-aws-modules/terraform-aws-eks) | ## Assumptions - -* You want to create a set of resources around an EKS cluster: namely an autoscaling group of workers and a security group for them. -* You've created a Virtual Private Cloud (VPC) and subnets where you intend to put this EKS. +* You want to create an EKS cluster and an autoscaling group of workers for the cluster. +* You want these resources to exist within security groups that allow communication and coordination. These can be user provided or created within the module. +* You've created a Virtual Private Cloud (VPC) and subnets where you intend to put the EKS resources. ## Usage example @@ -53,7 +51,8 @@ This module has been packaged with [awspec](https://github.com/k1LoW/awspec) tes For now, connectivity to the kubernetes cluster is not tested but will be in the future. If `configure_kubectl_session` is set `true`, once the test fixture has -converged, you can query the test cluster with `kubectl get nodes --watch --kubeconfig kubeconfig`. +converged, you can query the test cluster from that terminal session with +`kubectl get nodes --watch --kubeconfig kubeconfig`. ## Doc generation diff --git a/local.tf b/local.tf index ce706e7..6c3249d 100644 --- a/local.tf +++ b/local.tf @@ -1,5 +1,4 @@ locals { - # More information: https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml asg_tags = ["${null_resource.tags_as_list_of_maps.*.triggers}"] cluster_security_group_id = "${var.cluster_security_group_id == "" ? aws_security_group.cluster.id : var.cluster_security_group_id}" worker_security_group_id = "${var.worker_security_group_id == "" ? aws_security_group.workers.id : var.worker_security_group_id}" diff --git a/main.tf b/main.tf index a51c336..fd33776 100644 --- a/main.tf +++ b/main.tf @@ -5,17 +5,15 @@ * through the [Terraform registry](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws). * Inspired by and adapted from [this doc](https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html) * and its [source code](https://github.com/terraform-providers/terraform-provider-aws/tree/master/examples/eks-getting-started). -* Instructions on [this post](https://aws.amazon.com/blogs/aws/amazon-eks-now-generally-available/) -* can help guide you through connecting to the cluster via `kubectl`. * | Branch | Build status | * | ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | * | master | [![build Status](https://travis-ci.org/terraform-aws-modules/terraform-aws-eks.svg?branch=master)](https://travis-ci.org/terraform-aws-modules/terraform-aws-eks) | * ## Assumptions - -** You want to create a set of resources around an EKS cluster: namely an autoscaling group of workers and a security group for them. -** You've created a Virtual Private Cloud (VPC) and subnets where you intend to put this EKS. 
+** You want to create an EKS cluster and an autoscaling group of workers for the cluster.
+** You want these resources to exist within security groups that allow communication and coordination. These can be user provided or created within the module.
+** You've created a Virtual Private Cloud (VPC) and subnets where you intend to put the EKS resources.
 * ## Usage example
@@ -54,7 +52,8 @@ are installed and on your shell's PATH.
 * For now, connectivity to the kubernetes cluster is not tested but will be in the
 * future. If `configure_kubectl_session` is set `true`, once the test fixture has
-* converged, you can query the test cluster with `kubectl get nodes --watch --kubeconfig kubeconfig`.
+* converged, you can query the test cluster from that terminal session with
+* `kubectl get nodes --watch --kubeconfig kubeconfig`.
 *
 * ## Doc generation

From 60e2259e92410ff652868f8d4488d766e0060fec Mon Sep 17 00:00:00 2001
From: brandoconnor
Date: Mon, 11 Jun 2018 13:08:03 -0700
Subject: [PATCH 8/9] added output for asg

---
 CHANGELOG.md | 1 +
 README.md    | 1 +
 outputs.tf   | 5 +++++
 workers.tf   | 2 +-
 4 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 6b42484..259c420 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -10,6 +10,7 @@ project adheres to [Semantic Versioning](http://semver.org/).
 ### Added
 
 - security group id can be provided for either/both of the cluster and the workers. If not provided, security groups will be created with sufficient rules to allow cluster-worker communication. - kudos to @tanmng on the idea ⭐
+- outputs of security group ids and worker ASG ARNs added for working with these resources outside the module.
 
 ### Changed
 
diff --git a/README.md b/README.md
index 783f09b..83e4035 100644
--- a/README.md
+++ b/README.md
@@ -116,3 +116,4 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a
 | config_map_aws_auth | A kubernetes configuration to authenticate to this EKS cluster. |
 | kubeconfig | kubectl config file contents for this EKS cluster. |
 | worker_security_group_id | Security group ID attached to the EKS workers. |
+| workers_asg_arns | ARNs of the autoscaling groups containing workers. |
diff --git a/outputs.tf b/outputs.tf
index c6753e1..37145f4 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -39,6 +39,11 @@ output "kubeconfig" {
   value = "${data.template_file.kubeconfig.rendered}"
 }
 
+output "workers_asg_arns" {
+  description = "ARNs of the autoscaling groups containing workers."
+  value       = "${aws_autoscaling_group.workers.*.arn}"
+}
+
 output "worker_security_group_id" {
   description = "Security group ID attached to the EKS workers."
value = "${local.worker_security_group_id}" diff --git a/workers.tf b/workers.tf index ae82fe8..1056712 100644 --- a/workers.tf +++ b/workers.tf @@ -17,7 +17,7 @@ resource "aws_autoscaling_group" "workers" { } resource "aws_launch_configuration" "workers" { - name_prefix = "${var.cluster_name}-${lookup(var.worker_groups[count.index], "name", count.index)}-" + name = "${var.cluster_name}-${lookup(var.worker_groups[count.index], "name", count.index)}" associate_public_ip_address = true security_groups = ["${local.worker_security_group_id}"] iam_instance_profile = "${aws_iam_instance_profile.workers.id}" From c8997a5cf6f2707462ef8206360f8004f99724bd Mon Sep 17 00:00:00 2001 From: brandoconnor Date: Mon, 11 Jun 2018 15:54:19 -0700 Subject: [PATCH 9/9] this is ready to ship --- .gitignore | 2 ++ README.md | 2 ++ main.tf | 2 ++ 3 files changed, 6 insertions(+) diff --git a/.gitignore b/.gitignore index b6d358f..e787fbf 100644 --- a/.gitignore +++ b/.gitignore @@ -9,3 +9,5 @@ Gemfile.lock terraform.tfstate.d/ kubeconfig config-map-aws-auth.yaml +eks-admin-cluster-role-binding.yaml +eks-admin-service-account.yaml diff --git a/README.md b/README.md index 83e4035..9471e63 100644 --- a/README.md +++ b/README.md @@ -4,12 +4,14 @@ A terraform module to create a managed Kubernetes cluster on AWS EKS. Available through the [Terraform registry](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws). Inspired by and adapted from [this doc](https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html) and its [source code](https://github.com/terraform-providers/terraform-provider-aws/tree/master/examples/eks-getting-started). +Read the [AWS docs on EKS to get connected to the k8s dashboard](https://docs.aws.amazon.com/eks/latest/userguide/dashboard-tutorial.html). | Branch | Build status | | ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | | master | [![build Status](https://travis-ci.org/terraform-aws-modules/terraform-aws-eks.svg?branch=master)](https://travis-ci.org/terraform-aws-modules/terraform-aws-eks) | ## Assumptions + * You want to create an EKS cluster and an autoscaling group of workers for the cluster. * You want these resources to exist within security groups that allow communication and coordination. These can be user provided or created within the module. * You've created a Virtual Private Cloud (VPC) and subnets where you intend to put the EKS resources. diff --git a/main.tf b/main.tf index fd33776..8999fae 100644 --- a/main.tf +++ b/main.tf @@ -5,12 +5,14 @@ * through the [Terraform registry](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws). * Inspired by and adapted from [this doc](https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html) * and its [source code](https://github.com/terraform-providers/terraform-provider-aws/tree/master/examples/eks-getting-started). +* Read the [AWS docs on EKS to get connected to the k8s dashboard](https://docs.aws.amazon.com/eks/latest/userguide/dashboard-tutorial.html). 
* | Branch | Build status | * | ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | * | master | [![build Status](https://travis-ci.org/terraform-aws-modules/terraform-aws-eks.svg?branch=master)](https://travis-ci.org/terraform-aws-modules/terraform-aws-eks) | * ## Assumptions + ** You want to create an EKS cluster and an autoscaling group of workers for the cluster. ** You want these resources to exist within security groups that allow communication and coordination. These can be user provided or created within the module. ** You've created a Virtual Private Cloud (VPC) and subnets where you intend to put the EKS resources.
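
Taken together, the patches above replace the flat `workers_*` variables with a `worker_groups` list of maps, where any key omitted from a group falls back to `workers_group_defaults` via nested `lookup()` calls. Below is a minimal sketch of a two-group module call in the Terraform 0.11 syntax these patches use; the group names, sizes, and network IDs are illustrative placeholders rather than values taken from the series.

```hcl
module "eks" {
  source       = "terraform-aws-modules/eks/aws"
  cluster_name = "example-eks"
  subnets      = ["subnet-abcde012", "subnet-bcde012a"]
  vpc_id       = "vpc-abcde012"
  tags         = "${map("Environment", "test")}"

  # Keys mirror workers_group_defaults; anything omitted here is resolved
  # inside the module by lookup(var.worker_groups[count.index], key,
  # lookup(var.workers_group_defaults, key)).
  worker_groups = "${list(
    map("name", "general",
      "instance_type", "m4.large",
      "asg_desired_capacity", "2"
    ),
    map("name", "compute",
      "instance_type", "c5.xlarge",
      "asg_max_size", "10",
      "additional_userdata", "echo foo bar"
    ),
  )}"
}
```

Because both `aws_launch_configuration.workers` and `aws_autoscaling_group.workers` use `count = "${length(var.worker_groups)}"`, each list element yields its own launch configuration and autoscaling group.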
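
Patch 3 also makes both security groups optional: the `aws_security_group` resources and their rules carry `count = "${var.cluster_security_group_id == "" ? 1 : 0}"` (or the worker equivalent), and locals select between the created group and the caller's ID. A sketch of the bring-your-own-group convention follows; the `sg-...` values are placeholders, and the supplied groups are assumed to already allow the cluster-worker traffic the module would otherwise configure.

```hcl
module "eks" {
  source       = "terraform-aws-modules/eks/aws"
  cluster_name = "example-eks"
  subnets      = ["subnet-abcde012", "subnet-bcde012a"]
  vpc_id       = "vpc-abcde012"

  # With these set, the module creates no security groups of its own;
  # the referenced groups must already permit worker-to-cluster API
  # access on 443 plus the remaining cluster-worker communication.
  cluster_security_group_id = "sg-0123456789abcdef0"
  worker_security_group_id  = "sg-0fedcba9876543210"
}
```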
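
Patch 6 folds the old `tf_util_ebs_optimized` helper module into a `local.ebs_optimized` capability map keyed by instance type. The triple-nested `lookup()` in `workers.tf` is easier to follow when decomposed; in the sketch below, the `example_*` locals are introduced purely for readability and do not exist anywhere in the patches.

```hcl
locals {
  # First resolve the group's instance type, falling back to the
  # module-wide defaults map when the group does not set one.
  example_instance_type = "${lookup(var.worker_groups[0], "instance_type",
    lookup(var.workers_group_defaults, "instance_type"))}"

  # An explicit per-group "ebs_optimized" override wins; otherwise the
  # capability map decides, with false for unknown instance types.
  example_ebs_optimized = "${lookup(var.worker_groups[0], "ebs_optimized",
    lookup(local.ebs_optimized, local.example_instance_type, false))}"
}
```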
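
Patch 8's `workers_asg_arns` output makes the worker autoscaling groups addressable from outside the module. One plausible consumer, shown here as a hypothetical sketch that appears nowhere in the series, is an IAM policy document (for example, for a cluster autoscaler) scoped to exactly those groups.

```hcl
# Hypothetical follow-on configuration: the data source and actions are
# standard AWS provider features, but this policy is not part of the patches.
data "aws_iam_policy_document" "cluster_autoscaler" {
  statement {
    actions = [
      "autoscaling:SetDesiredCapacity",
      "autoscaling:TerminateInstanceInAutoScalingGroup",
    ]

    # A list output interpolates to one resource entry per worker ASG.
    resources = ["${module.eks.workers_asg_arns}"]
  }
}
```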