merge upstream with release v1.3.0

This commit is contained in:
Bill Wang
2018-07-13 15:05:19 +10:00
18 changed files with 210 additions and 71 deletions

5
.gitignore vendored
View File

@@ -7,7 +7,8 @@
.kitchen.local.yml
Gemfile.lock
terraform.tfstate.d/
kubeconfig
config-map-aws-auth.yaml
eks-admin-cluster-role-binding.yaml
eks-admin-service-account.yaml
.idea/
config-map-aws-auth*.yaml
kubeconfig_*

View File

@@ -5,15 +5,27 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](http://keepachangelog.com/) and this
project adheres to [Semantic Versioning](http://semver.org/).
## [[v1.3.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v1.2.0...v1.3.0)] - 2018-07-??]
## [[v1.4.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v1.3.0...HEAD)] - 2018-07-??]
### Added
- kubelet_node_labels worker group option allows setting --node-labels= in kubelet. (Hat-tip, @bshelton229 👒)
- A tiny but mighty feature. (you're on fire, @me 🔥)
### Changed
- your excellent change. (Boomshakalaka, @self 🏀)
- A subtle but thoughtful change. (Boomshakalaka, @self 🏀)
## [[v1.3.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v1.2.0...v1.3.0)] - 2018-07-11]
### Added
- New variables `map_accounts`, `map_roles` and `map_users` in order to manage additional entries in the `aws-auth` configmap. (by @max-rocket-internet)
- kubelet_node_labels worker group option allows setting --node-labels= in kubelet. (Hat-tip, @bshelton229 👒)
- `worker_iam_role_arn` added to outputs. Sweet, @hatemosphere 🔥
### Changed
- Worker subnets able to be specified as a dedicated list per autoscaling group. (up top, @bshelton229 🙏)
## [[v1.2.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v1.1.0...v1.2.0)] - 2018-07-01]

View File

@@ -15,7 +15,7 @@ Read the [AWS docs on EKS to get connected to the k8s dashboard](https://docs.aw
* You want to create an EKS cluster and an autoscaling group of workers for the cluster.
* You want these resources to exist within security groups that allow communication and coordination. These can be user provided or created within the module.
* You've created a Virtual Private Cloud (VPC) and subnets where you intend to put the EKS resources.
* If using the default variable value (`true`) for `configure_kubectl_session`, it's required that both [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) (>=1.10) and [`heptio-authenticator-aws`](https://github.com/heptio/authenticator#4-set-up-kubectl-to-use-heptio-authenticator-for-aws-tokens) are installed and on your shell's PATH.
* If using the default variable value (`true`) for `configure_kubectl_session`, it's required that both [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) (>=1.10) and [`aws-iam-authenticator`](https://github.com/kubernetes-sigs/aws-iam-authenticator#4-set-up-kubectl-to-use-authentication-tokens-provided-by-aws-iam-authenticator-for-kubernetes) are installed and on your shell's PATH.
## Usage example
@@ -63,7 +63,7 @@ Generate them like so:
```bash
go get github.com/segmentio/terraform-docs
terraform-docs md ./ | cat -s | ghead -n -1 > README.md
terraform-docs md ./ | cat -s | tail -r | tail -n +2 | tail -r > README.md
```
## Contributing
@@ -98,23 +98,27 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a
| cluster_name | Name of the EKS cluster. Also used as a prefix in names of related resources. | string | - | yes |
| cluster_security_group_id | If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the workers and provide API access to your current IP/32. | string | `` | no |
| cluster_version | Kubernetes version to use for the EKS cluster. | string | `1.10` | no |
| config_output_path | Determines where config files are placed if using configure_kubectl_session and you want config files to land outside the current working directory. | string | `./` | no |
| configure_kubectl_session | Configure the current session's kubectl to use the instantiated EKS cluster. | string | `true` | no |
| kubeconfig_aws_authenticator_additional_args | Any additional arguments to pass to the authenticator such as the role to assume ["-r", "MyEksRole"] | string | `<list>` | no |
| kubeconfig_aws_authenticator_command | Command to use to fetch AWS EKS credentials | string | `heptio-authenticator-aws` | no |
| kubeconfig_aws_authenticator_env_variables | Environment variables that should be used when executing the authenticator i.e. { AWS_PROFILE = "eks"} | string | `<map>` | no |
| kubeconfig_name | Override the default name used for items kubeconfig | string | `` | no |
| root_iops | The amount of provisioned IOPS. This must be set with a volume_type of 'io1'. | string | `` | no |
| config_output_path | Determines where config files are placed if using configure_kubectl_session and you want config files to land outside the current working directory. Should end in a forward slash / . | string | `./` | no |
| kubeconfig_aws_authenticator_additional_args | Any additional arguments to pass to the authenticator such as the role to assume. e.g. ["-r", "MyEksRole"]. | list | `<list>` | no |
| kubeconfig_aws_authenticator_command | Command to use to fetch AWS EKS credentials. | string | `aws-iam-authenticator` | no |
| kubeconfig_aws_authenticator_env_variables | Environment variables that should be used when executing the authenticator. e.g. { AWS_PROFILE = "eks"}. | map | `<map>` | no |
| kubeconfig_name | Override the default name used for items kubeconfig. | string | `` | no |
| manage_aws_auth | Whether to write and apply the aws-auth configmap file. | string | `true` | no |
| map_accounts | Additional AWS account numbers to add to the aws-auth configmap. See examples/eks_test_fixture/variables.tf for example format. | list | `<list>` | no |
| map_roles | Additional IAM roles to add to the aws-auth configmap. See examples/eks_test_fixture/variables.tf for example format. | list | `<list>` | no |
| map_users | Additional IAM users to add to the aws-auth configmap. See examples/eks_test_fixture/variables.tf for example format. | list | `<list>` | no |
| root_iops | The amount of provisioned IOPS. This must be set with a volume_type of 'io1'. | string | `0` | no |
| root_volume_size | The root size of the volume in gigabytes. | string | `20` | no |
| root_volume_type | The type of root volume. Can be 'standard', 'gp2', or 'io1' | string | `gp2` | no |
| subnets | A list of subnets to place the EKS cluster and workers within. | list | - | yes |
| tags | A map of tags to add to all resources. | string | `<map>` | no |
| tags | A map of tags to add to all resources. | map | `<map>` | no |
| vpc_id | VPC where the cluster and workers will be deployed. | string | - | yes |
| worker_groups | A list of maps defining worker group configurations. See workers_group_defaults for valid keys. | list | `<list>` | no |
| worker_security_group_id | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster. | string | `` | no |
| worker_sg_ingress_from_port | Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443). | string | `1025` | no |
| workers_group_defaults | Default values for target groups as defined by the list of maps. | map | `<map>` | no |
| workstation_cidr | Override the default ingress rule that allows communication with the EKS cluster API. If not given, will use current IP/32. | string | `` | no |
| write_kubeconfig | Whether to write a kubeconfig file containing the cluster configuration. | string | `true` | no |
## Outputs
@@ -127,6 +131,7 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a
| cluster_version | The Kubernetes server version for the EKS cluster. |
| config_map_aws_auth | A kubernetes configuration to authenticate to this EKS cluster. |
| kubeconfig | kubectl config file contents for this EKS cluster. |
| worker_iam_role_arn | IAM role ID attached to EKS workers |
| worker_iam_role_name | IAM role name attached to EKS workers |
| worker_security_group_id | Security group ID attached to the EKS workers. |
| workers_asg_arns | IDs of the autoscaling groups containing workers. |

59
aws_auth.tf Normal file
View File

@@ -0,0 +1,59 @@
resource "local_file" "config_map_aws_auth" {
content = "${data.template_file.config_map_aws_auth.rendered}"
filename = "${var.config_output_path}config-map-aws-auth_${var.cluster_name}.yaml"
count = "${var.manage_aws_auth ? 1 : 0}"
}
resource "null_resource" "update_config_map_aws_auth" {
provisioner "local-exec" {
command = "kubectl apply -f ${var.config_output_path}config-map-aws-auth_${var.cluster_name}.yaml --kubeconfig ${var.config_output_path}kubeconfig_${var.cluster_name}"
}
triggers {
config_map_rendered = "${data.template_file.config_map_aws_auth.rendered}"
}
count = "${var.manage_aws_auth ? 1 : 0}"
}
data "template_file" "config_map_aws_auth" {
template = "${file("${path.module}/templates/config-map-aws-auth.yaml.tpl")}"
vars {
worker_role_arn = "${aws_iam_role.workers.arn}"
map_users = "${join("", data.template_file.map_users.*.rendered)}"
map_roles = "${join("", data.template_file.map_roles.*.rendered)}"
map_accounts = "${join("", data.template_file.map_accounts.*.rendered)}"
}
}
data "template_file" "map_users" {
count = "${length(var.map_users)}"
template = "${file("${path.module}/templates/config-map-aws-auth-map_users.yaml.tpl")}"
vars {
user_arn = "${lookup(var.map_users[count.index], "user_arn")}"
username = "${lookup(var.map_users[count.index], "username")}"
group = "${lookup(var.map_users[count.index], "group")}"
}
}
data "template_file" "map_roles" {
count = "${length(var.map_roles)}"
template = "${file("${path.module}/templates/config-map-aws-auth-map_roles.yaml.tpl")}"
vars {
role_arn = "${lookup(var.map_roles[count.index], "role_arn")}"
username = "${lookup(var.map_roles[count.index], "username")}"
group = "${lookup(var.map_roles[count.index], "group")}"
}
}
data "template_file" "map_accounts" {
count = "${length(var.map_accounts)}"
template = "${file("${path.module}/templates/config-map-aws-auth-map_accounts.yaml.tpl")}"
vars {
account_number = "${element(var.map_accounts, count.index)}"
}
}

14
data.tf
View File

@@ -48,7 +48,7 @@ data "template_file" "kubeconfig" {
template = "${file("${path.module}/templates/kubeconfig.tpl")}"
vars {
cluster_name = "${var.cluster_name}"
cluster_name = "${aws_eks_cluster.this.name}"
kubeconfig_name = "${local.kubeconfig_name}"
endpoint = "${aws_eks_cluster.this.endpoint}"
region = "${data.aws_region.current.name}"
@@ -73,21 +73,13 @@ EOF
}
}
data template_file config_map_aws_auth {
template = "${file("${path.module}/templates/config-map-aws-auth.yaml.tpl")}"
vars {
role_arn = "${aws_iam_role.workers.arn}"
}
}
data template_file userdata {
data "template_file" "userdata" {
template = "${file("${path.module}/templates/userdata.sh.tpl")}"
count = "${length(var.worker_groups)}"
vars {
region = "${data.aws_region.current.name}"
cluster_name = "${var.cluster_name}"
cluster_name = "${aws_eks_cluster.this.name}"
endpoint = "${aws_eks_cluster.this.endpoint}"
cluster_auth_base64 = "${aws_eks_cluster.this.certificate_authority.0.data}"
max_pod_count = "${lookup(local.max_pod_per_node, lookup(var.worker_groups[count.index], "instance_type", lookup(var.workers_group_defaults, "instance_type")))}"

View File

@@ -70,4 +70,7 @@ module "eks" {
tags = "${local.tags}"
vpc_id = "${module.vpc.vpc_id}"
worker_groups = "${local.worker_groups}"
map_roles = "${var.map_roles}"
map_users = "${var.map_users}"
map_accounts = "${var.map_accounts}"
}

View File

@@ -1,3 +1,44 @@
variable "region" {
default = "us-west-2"
}
variable "map_accounts" {
description = "Additional AWS account numbers to add to the aws-auth configmap."
type = "list"
default = [
"777777777777",
"888888888888",
]
}
variable "map_roles" {
description = "Additional IAM roles to add to the aws-auth configmap."
type = "list"
default = [
{
role_arn = "arn:aws:iam::66666666666:role/role1"
username = "role1"
group = "system:masters"
},
]
}
variable "map_users" {
description = "Additional IAM users to add to the aws-auth configmap."
type = "list"
default = [
{
user_arn = "arn:aws:iam::66666666666:user/user1"
username = "user1"
group = "system:masters"
},
{
user_arn = "arn:aws:iam::66666666666:user/user2"
username = "user2"
group = "system:masters"
},
]
}

View File

@@ -1,24 +1,5 @@
resource "local_file" "kubeconfig" {
content = "${data.template_file.kubeconfig.rendered}"
filename = "${var.config_output_path}/kubeconfig_${var.cluster_name}"
count = "${var.configure_kubectl_session ? 1 : 0}"
}
resource "local_file" "config_map_aws_auth" {
content = "${data.template_file.config_map_aws_auth.rendered}"
filename = "${var.config_output_path}/config-map-aws-auth_${var.cluster_name}.yaml"
count = "${var.configure_kubectl_session ? 1 : 0}"
}
resource "null_resource" "configure_kubectl" {
provisioner "local-exec" {
command = "kubectl apply -f ${var.config_output_path}/config-map-aws-auth_${var.cluster_name}.yaml --kubeconfig ${var.config_output_path}/kubeconfig_${var.cluster_name}"
}
triggers {
config_map_rendered = "${data.template_file.config_map_aws_auth.rendered}"
kubeconfig_rendered = "${data.template_file.kubeconfig.rendered}"
}
count = "${var.configure_kubectl_session ? 1 : 0}"
filename = "${var.config_output_path}kubeconfig_${var.cluster_name}"
count = "${var.write_kubeconfig ? 1 : 0}"
}

View File

@@ -8,8 +8,7 @@ locals {
worker_security_group_id = "${coalesce(join("", aws_security_group.workers.*.id), var.worker_security_group_id)}"
workstation_external_cidr = "${chomp(data.http.workstation_external_ip.body)}/32"
workstation_cidr = "${coalesce(var.workstation_cidr, local.workstation_external_cidr)}"
kubeconfig_name = "${var.kubeconfig_name == "" ? "eks_${var.cluster_name}" : var.kubeconfig_name}"
kubeconfig_name = "${var.kubeconfig_name == "" ? "eks_${var.cluster_name}" : var.kubeconfig_name}"
# Mapping from the node type that we selected and the max number of pods that it can run
# Taken from https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml

View File

@@ -16,7 +16,7 @@
** You want to create an EKS cluster and an autoscaling group of workers for the cluster.
** You want these resources to exist within security groups that allow communication and coordination. These can be user provided or created within the module.
** You've created a Virtual Private Cloud (VPC) and subnets where you intend to put the EKS resources.
** If using the default variable value (`true`) for `configure_kubectl_session`, it's required that both [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) (>=1.10) and [`heptio-authenticator-aws`](https://github.com/heptio/authenticator#4-set-up-kubectl-to-use-heptio-authenticator-for-aws-tokens) are installed and on your shell's PATH.
** If using the default variable value (`true`) for `configure_kubectl_session`, it's required that both [`kubectl`](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl) (>=1.10) and [`aws-iam-authenticator`](https://github.com/kubernetes-sigs/aws-iam-authenticator#4-set-up-kubectl-to-use-authentication-tokens-provided-by-aws-iam-authenticator-for-kubernetes) are installed and on your shell's PATH.
* ## Usage example
@@ -64,7 +64,7 @@
* ```bash
* go get github.com/segmentio/terraform-docs
* terraform-docs md ./ | cat -s | ghead -n -1 > README.md
* terraform-docs md ./ | cat -s | tail -r | tail -n +2 | tail -r > README.md
* ```
* ## Contributing

View File

@@ -53,3 +53,8 @@ output "worker_iam_role_name" {
description = "IAM role name attached to EKS workers"
value = "${aws_iam_role.workers.name}"
}
output "worker_iam_role_arn" {
description = "IAM role ID attached to EKS workers"
value = "${aws_iam_role.workers.arn}"
}

View File

@@ -0,0 +1 @@
- "${account_number}"

View File

@@ -0,0 +1,4 @@
- rolearn: ${role_arn}
username: ${username}
groups:
- ${group}

View File

@@ -0,0 +1,4 @@
- userarn: ${user_arn}
username: ${username}
groups:
- ${group}

View File

@@ -5,8 +5,13 @@ metadata:
namespace: kube-system
data:
mapRoles: |
- rolearn: ${role_arn}
- rolearn: ${worker_role_arn}
username: system:node:{{EC2PrivateDNSName}}
groups:
- system:bootstrappers
- system:nodes
${map_roles}
mapUsers: |
${map_users}
mapAccounts: |
${map_accounts}

View File

@@ -8,7 +8,7 @@ variable "cluster_security_group_id" {
}
variable "workstation_cidr" {
description = "Override the default ingress rule that allows communication with the EKS cluster API. If not given, will use current IP/32. "
description = "Override the default ingress rule that allows communication with the EKS cluster API. If not given, will use current IP/32. "
default = ""
}
@@ -18,15 +18,38 @@ variable "cluster_version" {
}
variable "config_output_path" {
description = "Determines where config files are placed if using configure_kubectl_session and you want config files to land outside the current working directory."
description = "Determines where config files are placed if using configure_kubectl_session and you want config files to land outside the current working directory. Should end in a forward slash / ."
default = "./"
}
variable "configure_kubectl_session" {
description = "Configure the current session's kubectl to use the instantiated EKS cluster."
variable "write_kubeconfig" {
description = "Whether to write a kubeconfig file containing the cluster configuration."
default = true
}
variable "manage_aws_auth" {
description = "Whether to write and apply the aws-auth configmap file."
default = true
}
variable "map_accounts" {
description = "Additional AWS account numbers to add to the aws-auth configmap. See examples/eks_test_fixture/variables.tf for example format."
type = "list"
default = []
}
variable "map_roles" {
description = "Additional IAM roles to add to the aws-auth configmap. See examples/eks_test_fixture/variables.tf for example format."
type = "list"
default = []
}
variable "map_users" {
description = "Additional IAM users to add to the aws-auth configmap. See examples/eks_test_fixture/variables.tf for example format."
type = "list"
default = []
}
variable "subnets" {
description = "A list of subnets to place the EKS cluster and workers within."
type = "list"
@@ -34,6 +57,7 @@ variable "subnets" {
variable "tags" {
description = "A map of tags to add to all resources."
type = "map"
default = {}
}
@@ -82,6 +106,7 @@ variable "workers_group_defaults" {
ebs_optimized = true # sets whether to use ebs optimization on supported types.
public_ip = false # Associate a public ip address with a worker
kubelet_node_labels = "" # This string is passed directly to kubelet via --node-labels= if set. It should be comma delimited with no spaces. If left empty no --node-labels switch is added.
subnets = "" # A comma delimited string of subnets to place the worker nodes in. i.e. subnet-123,subnet-456,subnet-789
}
}
@@ -96,21 +121,23 @@ variable "worker_sg_ingress_from_port" {
}
variable "kubeconfig_aws_authenticator_command" {
description = "Command to use to fetch AWS EKS credentials"
default = "heptio-authenticator-aws"
description = "Command to use to fetch AWS EKS credentials."
default = "aws-iam-authenticator"
}
variable "kubeconfig_aws_authenticator_additional_args" {
description = "Any additional arguments to pass to the authenticator such as the role to assume [\"-r\", \"MyEksRole\"]"
description = "Any additional arguments to pass to the authenticator such as the role to assume. e.g. [\"-r\", \"MyEksRole\"]."
type = "list"
default = []
}
variable "kubeconfig_aws_authenticator_env_variables" {
description = "Environment variables that should be used when executing the authenticator i.e. { AWS_PROFILE = \"eks\"}"
description = "Environment variables that should be used when executing the authenticator. e.g. { AWS_PROFILE = \"eks\"}."
type = "map"
default = {}
}
variable "kubeconfig_name" {
description = "Override the default name used for items kubeconfig"
description = "Override the default name used for items kubeconfig."
default = ""
}

View File

@@ -1 +1 @@
v1.2.0
v1.3.0

View File

@@ -1,16 +1,16 @@
resource "aws_autoscaling_group" "workers" {
name_prefix = "${var.cluster_name}-${lookup(var.worker_groups[count.index], "name", count.index)}"
name_prefix = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}"
desired_capacity = "${lookup(var.worker_groups[count.index], "asg_desired_capacity", lookup(var.workers_group_defaults, "asg_desired_capacity"))}"
max_size = "${lookup(var.worker_groups[count.index], "asg_max_size",lookup(var.workers_group_defaults, "asg_max_size"))}"
min_size = "${lookup(var.worker_groups[count.index], "asg_min_size",lookup(var.workers_group_defaults, "asg_min_size"))}"
launch_configuration = "${element(aws_launch_configuration.workers.*.id, count.index)}"
vpc_zone_identifier = ["${var.subnets}"]
vpc_zone_identifier = ["${split(",", coalesce(lookup(var.worker_groups[count.index], "subnets", ""), join(",", var.subnets)))}"]
count = "${length(var.worker_groups)}"
tags = ["${concat(
list(
map("key", "Name", "value", "${var.cluster_name}-${lookup(var.worker_groups[count.index], "name", count.index)}-eks_asg", "propagate_at_launch", true),
map("key", "kubernetes.io/cluster/${var.cluster_name}", "value", "owned", "propagate_at_launch", true),
map("key", "Name", "value", "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}-eks_asg", "propagate_at_launch", true),
map("key", "kubernetes.io/cluster/${aws_eks_cluster.this.name}", "value", "owned", "propagate_at_launch", true),
),
local.asg_tags)
}"]
@@ -21,7 +21,7 @@ resource "aws_autoscaling_group" "workers" {
}
resource "aws_launch_configuration" "workers" {
name_prefix = "${var.cluster_name}-${lookup(var.worker_groups[count.index], "name", count.index)}"
name_prefix = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}"
associate_public_ip_address = "${lookup(var.worker_groups[count.index], "public_ip", lookup(var.workers_group_defaults, "public_ip"))}"
security_groups = ["${local.worker_security_group_id}"]
iam_instance_profile = "${aws_iam_instance_profile.workers.id}"
@@ -45,11 +45,11 @@ resource "aws_launch_configuration" "workers" {
}
resource "aws_security_group" "workers" {
name_prefix = "${var.cluster_name}"
name_prefix = "${aws_eks_cluster.this.name}"
description = "Security group for all nodes in the cluster."
vpc_id = "${var.vpc_id}"
count = "${var.worker_security_group_id == "" ? 1 : 0}"
tags = "${merge(var.tags, map("Name", "${var.cluster_name}-eks_worker_sg", "kubernetes.io/cluster/${var.cluster_name}", "owned"
tags = "${merge(var.tags, map("Name", "${aws_eks_cluster.this.name}-eks_worker_sg", "kubernetes.io/cluster/${aws_eks_cluster.this.name}", "owned"
))}"
}
@@ -87,12 +87,12 @@ resource "aws_security_group_rule" "workers_ingress_cluster" {
}
resource "aws_iam_role" "workers" {
name_prefix = "${var.cluster_name}"
name_prefix = "${aws_eks_cluster.this.name}"
assume_role_policy = "${data.aws_iam_policy_document.workers_assume_role_policy.json}"
}
resource "aws_iam_instance_profile" "workers" {
name_prefix = "${var.cluster_name}"
name_prefix = "${aws_eks_cluster.this.name}"
role = "${aws_iam_role.workers.name}"
}