Merge branch 'master' into update-heptio

Brandon J. O'Connor, 2018-07-11 01:24:09 -07:00 (committed by GitHub)
6 changed files with 46 additions and 38 deletions


@@ -99,18 +99,20 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a
| cluster_version | Kubernetes version to use for the EKS cluster. | string | `1.10` | no |
| config_output_path | Determines where config files are placed if using configure_kubectl_session and you want config files to land outside the current working directory. | string | `./` | no |
| configure_kubectl_session | Configure the current session's kubectl to use the instantiated EKS cluster. | string | `true` | no |
| kubeconfig_aws_authenticator_additional_args | Any additional arguments to pass to the authenticator such as the role to assume ["-r", "MyEksRole"] | string | `<list>` | no |
| kubeconfig_aws_authenticator_command | Command to use to fetch AWS EKS credentials | string | `aws-iam-authenticator` | no |
| kubeconfig_aws_authenticator_env_variables | Environment variables that should be used when executing the authenticator i.e. { AWS_PROFILE = "eks"} | string | `<map>` | no |
| kubeconfig_aws_authenticator_additional_args | Any additional arguments to pass to the authenticator such as the role to assume. ["-r", "MyEksRole"] | list | `<list>` | no |
| kubeconfig_aws_authenticator_env_variables | Environment variables that should be used when executing the authenticator. e.g. { AWS_PROFILE = "eks"} | map | `<map>` | no |
| kubeconfig_name | Override the default name used for items in the kubeconfig. | string | `` | no |
| manage_aws_auth | Whether to write and apply the aws-auth configmap file. | string | `true` | no |
| subnets | A list of subnets to place the EKS cluster and workers within. | list | - | yes |
| tags | A map of tags to add to all resources. | string | `<map>` | no |
| tags | A map of tags to add to all resources. | map | `<map>` | no |
| vpc_id | VPC where the cluster and workers will be deployed. | string | - | yes |
| worker_groups | A list of maps defining worker group configurations. See workers_group_defaults for valid keys. | list | `<list>` | no |
| worker_security_group_id | If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingress/egress to work with the EKS cluster. | string | `` | no |
| worker_sg_ingress_from_port | Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443). | string | `1025` | no |
| workers_group_defaults | Default values for target groups as defined by the list of maps. | map | `<map>` | no |
| workstation_cidr | Override the default ingress rule that allows communication with the EKS cluster API. If not given, will use current IP/32. | string | `` | no |
| write_kubeconfig | Whether to write a kubeconfig file containing the cluster configuration. | string | `true` | no |
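For context, a minimal invocation wiring up the renamed toggles might look like the following sketch (module source, names, and IDs are placeholders, not part of this diff):

```hcl
module "eks" {
  source = "terraform-aws-modules/eks/aws" # placeholder source reference

  cluster_name = "example-cluster"                      # hypothetical name
  subnets      = ["subnet-0aaa1111", "subnet-0bbb2222"] # placeholder subnet IDs
  vpc_id       = "vpc-0ccc3333"                         # placeholder VPC ID

  # The two independent toggles introduced by this change:
  write_kubeconfig = true # render kubeconfig under config_output_path
  manage_aws_auth  = true # render and kubectl-apply the aws-auth config map

  tags = {
    Environment = "test"
  }
}
```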
## Outputs

aws_auth.tf (new file, 17 lines)

@@ -0,0 +1,17 @@
resource "local_file" "config_map_aws_auth" {
content = "${data.template_file.config_map_aws_auth.rendered}"
filename = "${var.config_output_path}/config-map-aws-auth.yaml"
count = "${var.manage_aws_auth ? 1 : 0}"
}
resource "null_resource" "update_config_map_aws_auth" {
provisioner "local-exec" {
command = "kubectl apply -f ${var.config_output_path}/config-map-aws-auth.yaml --kubeconfig ${var.config_output_path}/kubeconfig"
}
triggers {
config_map_rendered = "${data.template_file.config_map_aws_auth.rendered}"
}
count = "${var.manage_aws_auth ? 1 : 0}"
}
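Since Terraform 0.11 has no first-class enable/disable flag, both resources are gated on `manage_aws_auth` via `count`, and the `triggers` map forces the `local-exec` provisioner to re-run whenever the rendered config map changes. A generic sketch of the same pattern (names and content illustrative):

```hcl
variable "enabled" {
  default = true
}

resource "null_resource" "apply_on_change" {
  # 0 instances effectively disables the resource
  count = "${var.enabled ? 1 : 0}"

  # Any change to a trigger value recreates the resource,
  # which re-runs the provisioner below.
  triggers {
    content_hash = "${md5("rendered content goes here")}"
  }

  provisioner "local-exec" {
    command = "echo re-applying"
  }
}
```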


@@ -48,7 +48,7 @@ data "template_file" "kubeconfig" {
template = "${file("${path.module}/templates/kubeconfig.tpl")}"
vars {
cluster_name = "${var.cluster_name}"
cluster_name = "${aws_eks_cluster.this.name}"
kubeconfig_name = "${local.kubeconfig_name}"
endpoint = "${aws_eks_cluster.this.endpoint}"
region = "${data.aws_region.current.name}"
@@ -73,7 +73,7 @@ EOF
}
}
data template_file config_map_aws_auth {
data "template_file" "config_map_aws_auth" {
template = "${file("${path.module}/templates/config-map-aws-auth.yaml.tpl")}"
vars {
@@ -81,13 +81,13 @@ data template_file config_map_aws_auth {
}
}
data template_file userdata {
data "template_file" "userdata" {
template = "${file("${path.module}/templates/userdata.sh.tpl")}"
count = "${length(var.worker_groups)}"
vars {
region = "${data.aws_region.current.name}"
cluster_name = "${var.cluster_name}"
cluster_name = "${aws_eks_cluster.this.name}"
endpoint = "${aws_eks_cluster.this.endpoint}"
cluster_auth_base64 = "${aws_eks_cluster.this.certificate_authority.0.data}"
max_pod_count = "${lookup(local.max_pod_per_node, lookup(var.worker_groups[count.index], "instance_type", lookup(var.workers_group_defaults, "instance_type")))}"
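The recurring `var.cluster_name` → `aws_eks_cluster.this.name` substitution is not cosmetic: interpolating a resource attribute gives Terraform an implicit dependency edge, so these data sources (and everything that consumes them) wait for the cluster to exist, whereas a plain variable is known before anything is created. A reduced sketch of the idea (data source name illustrative):

```hcl
data "template_file" "example" {
  # $${name} is an escaped placeholder, filled from vars below at render time
  template = "cluster is $${name}"

  vars {
    # Referencing the resource attribute (rather than var.cluster_name)
    # makes this data source depend on aws_eks_cluster.this.
    name = "${aws_eks_cluster.this.name}"
  }
}
```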


@@ -1,24 +1,5 @@
resource "local_file" "kubeconfig" {
content = "${data.template_file.kubeconfig.rendered}"
filename = "${var.config_output_path}/kubeconfig_${var.cluster_name}"
count = "${var.configure_kubectl_session ? 1 : 0}"
}
resource "local_file" "config_map_aws_auth" {
content = "${data.template_file.config_map_aws_auth.rendered}"
filename = "${var.config_output_path}/config-map-aws-auth_${var.cluster_name}.yaml"
count = "${var.configure_kubectl_session ? 1 : 0}"
}
resource "null_resource" "configure_kubectl" {
provisioner "local-exec" {
command = "kubectl apply -f ${var.config_output_path}/config-map-aws-auth_${var.cluster_name}.yaml --kubeconfig ${var.config_output_path}/kubeconfig_${var.cluster_name}"
}
triggers {
config_map_rendered = "${data.template_file.config_map_aws_auth.rendered}"
kubeconfig_rendered = "${data.template_file.kubeconfig.rendered}"
}
count = "${var.configure_kubectl_session ? 1 : 0}"
count = "${var.write_kubeconfig ? 1 : 0}"
}


@@ -22,8 +22,13 @@ variable "config_output_path" {
  default = "./"
}

variable "configure_kubectl_session" {
  description = "Configure the current session's kubectl to use the instantiated EKS cluster."
variable "write_kubeconfig" {
  description = "Whether to write a kubeconfig file containing the cluster configuration."
  default     = true
}

variable "manage_aws_auth" {
  description = "Whether to write and apply the aws-auth configmap file."
  default     = true
}
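For callers, `configure_kubectl_session` is gone; upgrading means splitting it into the two new flags. A hypothetical before/after inside a caller's module block (other arguments elided):

```hcl
module "eks" {
  # source and other arguments elided for brevity

  # Before this change (no longer accepted):
  # configure_kubectl_session = true

  # After: the old flag splits into two independent toggles.
  write_kubeconfig = true # write the kubeconfig file to config_output_path
  manage_aws_auth  = true # write and kubectl-apply the aws-auth config map
}
```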
@@ -34,6 +39,7 @@ variable "subnets" {
variable "tags" {
  description = "A map of tags to add to all resources."
  type        = "map"
  default     = {}
}

@@ -87,15 +93,17 @@ variable "kubeconfig_aws_authenticator_command" {
variable "kubeconfig_aws_authenticator_additional_args" {
  description = "Any additional arguments to pass to the authenticator such as the role to assume. [\"-r\", \"MyEksRole\"]"
  type        = "list"
  default     = []
}

variable "kubeconfig_aws_authenticator_env_variables" {
  description = "Environment variables that should be used when executing the authenticator. e.g. { AWS_PROFILE = \"eks\"}"
  type        = "map"
  default     = {}
}

variable "kubeconfig_name" {
  description = "Override the default name used for items kubeconfig"
  description = "Override the default name used for items in the kubeconfig."
  default     = ""
}
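As a usage note, the two authenticator inputs above typically travel together when assuming a role; a hypothetical caller might pass (role ARN and profile name are placeholders):

```hcl
kubeconfig_aws_authenticator_additional_args = ["-r", "arn:aws:iam::111122223333:role/EksAdmin"] # placeholder ARN

kubeconfig_aws_authenticator_env_variables = {
  AWS_PROFILE = "eks" # placeholder profile name
}
```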


@@ -1,5 +1,5 @@
resource "aws_autoscaling_group" "workers" {
name_prefix = "${var.cluster_name}-${lookup(var.worker_groups[count.index], "name", count.index)}"
name_prefix = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}"
desired_capacity = "${lookup(var.worker_groups[count.index], "asg_desired_capacity", lookup(var.workers_group_defaults, "asg_desired_capacity"))}"
max_size = "${lookup(var.worker_groups[count.index], "asg_max_size",lookup(var.workers_group_defaults, "asg_max_size"))}"
min_size = "${lookup(var.worker_groups[count.index], "asg_min_size",lookup(var.workers_group_defaults, "asg_min_size"))}"
@@ -9,8 +9,8 @@ resource "aws_autoscaling_group" "workers" {
tags = ["${concat(
list(
map("key", "Name", "value", "${var.cluster_name}-${lookup(var.worker_groups[count.index], "name", count.index)}-eks_asg", "propagate_at_launch", true),
map("key", "kubernetes.io/cluster/${var.cluster_name}", "value", "owned", "propagate_at_launch", true),
map("key", "Name", "value", "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}-eks_asg", "propagate_at_launch", true),
map("key", "kubernetes.io/cluster/${aws_eks_cluster.this.name}", "value", "owned", "propagate_at_launch", true),
),
local.asg_tags)
}"]
@@ -21,7 +21,7 @@ resource "aws_autoscaling_group" "workers" {
}
resource "aws_launch_configuration" "workers" {
name_prefix = "${var.cluster_name}-${lookup(var.worker_groups[count.index], "name", count.index)}"
name_prefix = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}"
associate_public_ip_address = "${lookup(var.worker_groups[count.index], "public_ip", lookup(var.workers_group_defaults, "public_ip"))}"
security_groups = ["${local.worker_security_group_id}"]
iam_instance_profile = "${aws_iam_instance_profile.workers.id}"
@@ -42,11 +42,11 @@ resource "aws_launch_configuration" "workers" {
}
resource "aws_security_group" "workers" {
name_prefix = "${var.cluster_name}"
name_prefix = "${aws_eks_cluster.this.name}"
description = "Security group for all nodes in the cluster."
vpc_id = "${var.vpc_id}"
count = "${var.worker_security_group_id == "" ? 1 : 0}"
tags = "${merge(var.tags, map("Name", "${var.cluster_name}-eks_worker_sg", "kubernetes.io/cluster/${var.cluster_name}", "owned"
tags = "${merge(var.tags, map("Name", "${aws_eks_cluster.this.name}-eks_worker_sg", "kubernetes.io/cluster/${aws_eks_cluster.this.name}", "owned"
))}"
}
@@ -84,12 +84,12 @@ resource "aws_security_group_rule" "workers_ingress_cluster" {
}
resource "aws_iam_role" "workers" {
name_prefix = "${var.cluster_name}"
name_prefix = "${aws_eks_cluster.this.name}"
assume_role_policy = "${data.aws_iam_policy_document.workers_assume_role_policy.json}"
}
resource "aws_iam_instance_profile" "workers" {
name_prefix = "${var.cluster_name}"
name_prefix = "${aws_eks_cluster.this.name}"
role = "${aws_iam_role.workers.name}"
}