diff --git a/README.md b/README.md
index 0ed4f32..900e048 100644
--- a/README.md
+++ b/README.md
@@ -99,18 +99,20 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a
 | cluster_version | Kubernetes version to use for the EKS cluster. | string | `1.10` | no |
 | config_output_path | Determines where config files are placed if using configure_kubectl_session and you want config files to land outside the current working directory. | string | `./` | no |
 | configure_kubectl_session | Configure the current session's kubectl to use the instantiated EKS cluster. | string | `true` | no |
-| kubeconfig_aws_authenticator_additional_args | Any additional arguments to pass to the authenticator such as the role to assume ["-r", "MyEksRole"] | string | `` | no |
 | kubeconfig_aws_authenticator_command | Command to use to fetch AWS EKS credentials. | string | `aws-iam-authenticator` | no |
-| kubeconfig_aws_authenticator_env_variables | Environment variables that should be used when executing the authenticator i.e. { AWS_PROFILE = "eks"} | string | `` | no |
+| kubeconfig_aws_authenticator_additional_args | Any additional arguments to pass to the authenticator, such as the role to assume, e.g. ["-r", "MyEksRole"]. | list | `` | no |
+| kubeconfig_aws_authenticator_env_variables | Environment variables that should be used when executing the authenticator, e.g. { AWS_PROFILE = "eks" }. | map | `` | no |
 | kubeconfig_name | Override the default name used for items in the kubeconfig. | string | `` | no |
+| manage_aws_auth | Whether to write and apply the aws-auth configmap file. | string | `true` | no |
 | subnets | A list of subnets to place the EKS cluster and workers within. | list | - | yes |
-| tags | A map of tags to add to all resources. | string | `` | no |
+| tags | A map of tags to add to all resources. | map | `` | no |
 | vpc_id | VPC where the cluster and workers will be deployed. | string | - | yes |
 | worker_groups | A list of maps defining worker group configurations. See workers_group_defaults for valid keys. | list | `` | no |
 | worker_security_group_id | If provided, all workers will be attached to this security group. If not given, a security group will be created with the necessary ingress/egress rules to work with the EKS cluster. | string | `` | no |
 | worker_sg_ingress_from_port | Minimum port number from which pods will accept communication. Must be changed to a lower value if some pods in your cluster will expose a port lower than 1025 (e.g. 22, 80, or 443). | string | `1025` | no |
 | workers_group_defaults | Default values for worker groups as defined by the list of maps. | map | `` | no |
 | workstation_cidr | Override the default ingress rule that allows communication with the EKS cluster API. If not given, the current IP/32 is used. | string | `` | no |
+| write_kubeconfig | Whether to write a kubeconfig file containing the cluster configuration. | string | `true` | no |
 
 ## Outputs
diff --git a/aws_auth.tf b/aws_auth.tf
new file mode 100644
index 0000000..6038dc5
--- /dev/null
+++ b/aws_auth.tf
@@ -0,0 +1,17 @@
+resource "local_file" "config_map_aws_auth" {
+  content  = "${data.template_file.config_map_aws_auth.rendered}"
+  filename = "${var.config_output_path}/config-map-aws-auth.yaml"
+  count    = "${var.manage_aws_auth ? 1 : 0}"
+}
+
+resource "null_resource" "update_config_map_aws_auth" {
+  provisioner "local-exec" {
+    command = "kubectl apply -f ${var.config_output_path}/config-map-aws-auth.yaml --kubeconfig ${var.config_output_path}/kubeconfig"
+  }
+
+  triggers {
+    config_map_rendered = "${data.template_file.config_map_aws_auth.rendered}"
+  }
+
+  count = "${var.manage_aws_auth ? 1 : 0}"
+}
diff --git a/data.tf b/data.tf
index 9d54f87..76a7eb6 100644
--- a/data.tf
+++ b/data.tf
@@ -48,7 +48,7 @@ data "template_file" "kubeconfig" {
   template = "${file("${path.module}/templates/kubeconfig.tpl")}"
 
   vars {
-    cluster_name    = "${var.cluster_name}"
+    cluster_name    = "${aws_eks_cluster.this.name}"
     kubeconfig_name = "${local.kubeconfig_name}"
     endpoint        = "${aws_eks_cluster.this.endpoint}"
     region          = "${data.aws_region.current.name}"
@@ -73,7 +73,7 @@ EOF
   }
 }
 
-data template_file config_map_aws_auth {
+data "template_file" "config_map_aws_auth" {
   template = "${file("${path.module}/templates/config-map-aws-auth.yaml.tpl")}"
 
   vars {
@@ -81,13 +81,13 @@ data template_file config_map_aws_auth {
   }
 }
 
-data template_file userdata {
+data "template_file" "userdata" {
   template = "${file("${path.module}/templates/userdata.sh.tpl")}"
   count    = "${length(var.worker_groups)}"
 
   vars {
     region              = "${data.aws_region.current.name}"
-    cluster_name        = "${var.cluster_name}"
+    cluster_name        = "${aws_eks_cluster.this.name}"
     endpoint            = "${aws_eks_cluster.this.endpoint}"
     cluster_auth_base64 = "${aws_eks_cluster.this.certificate_authority.0.data}"
     max_pod_count       = "${lookup(local.max_pod_per_node, lookup(var.worker_groups[count.index], "instance_type", lookup(var.workers_group_defaults, "instance_type")))}"
diff --git a/kubectl.tf b/kubectl.tf
index 4c56a62..ec39963 100644
--- a/kubectl.tf
+++ b/kubectl.tf
@@ -1,24 +1,5 @@
 resource "local_file" "kubeconfig" {
   content  = "${data.template_file.kubeconfig.rendered}"
   filename = "${var.config_output_path}/kubeconfig_${var.cluster_name}"
-  count    = "${var.configure_kubectl_session ? 1 : 0}"
-}
-
-resource "local_file" "config_map_aws_auth" {
-  content  = "${data.template_file.config_map_aws_auth.rendered}"
-  filename = "${var.config_output_path}/config-map-aws-auth_${var.cluster_name}.yaml"
-  count    = "${var.configure_kubectl_session ? 1 : 0}"
-}
-
-resource "null_resource" "configure_kubectl" {
-  provisioner "local-exec" {
-    command = "kubectl apply -f ${var.config_output_path}/config-map-aws-auth_${var.cluster_name}.yaml --kubeconfig ${var.config_output_path}/kubeconfig_${var.cluster_name}"
-  }
-
-  triggers {
-    config_map_rendered = "${data.template_file.config_map_aws_auth.rendered}"
-    kubeconfig_rendered = "${data.template_file.kubeconfig.rendered}"
-  }
-
-  count = "${var.configure_kubectl_session ? 1 : 0}"
+  count    = "${var.write_kubeconfig ? 1 : 0}"
 }
diff --git a/variables.tf b/variables.tf
index 41b04e7..51d9385 100644
--- a/variables.tf
+++ b/variables.tf
@@ -22,8 +22,13 @@ variable "config_output_path" {
   default     = "./"
 }
 
-variable "configure_kubectl_session" {
-  description = "Configure the current session's kubectl to use the instantiated EKS cluster."
+variable "write_kubeconfig" {
+  description = "Whether to write a kubeconfig file containing the cluster configuration."
+  default     = true
+}
+
+variable "manage_aws_auth" {
+  description = "Whether to write and apply the aws-auth configmap file."
   default     = true
 }
 
@@ -34,6 +39,7 @@ variable "subnets" {
 
 variable "tags" {
   description = "A map of tags to add to all resources."
+ type = "map" default = {} } @@ -87,15 +93,17 @@ variable "kubeconfig_aws_authenticator_command" { variable "kubeconfig_aws_authenticator_additional_args" { description = "Any additional arguments to pass to the authenticator such as the role to assume [\"-r\", \"MyEksRole\"]" + type = "list" default = [] } variable "kubeconfig_aws_authenticator_env_variables" { description = "Environment variables that should be used when executing the authenticator i.e. { AWS_PROFILE = \"eks\"}" + type = "map" default = {} } variable "kubeconfig_name" { - description = "Override the default name used for items kubeconfig" + description = "Override the default name used for items kubeconfig." default = "" } diff --git a/workers.tf b/workers.tf index e83412f..ac0b157 100644 --- a/workers.tf +++ b/workers.tf @@ -1,5 +1,5 @@ resource "aws_autoscaling_group" "workers" { - name_prefix = "${var.cluster_name}-${lookup(var.worker_groups[count.index], "name", count.index)}" + name_prefix = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}" desired_capacity = "${lookup(var.worker_groups[count.index], "asg_desired_capacity", lookup(var.workers_group_defaults, "asg_desired_capacity"))}" max_size = "${lookup(var.worker_groups[count.index], "asg_max_size",lookup(var.workers_group_defaults, "asg_max_size"))}" min_size = "${lookup(var.worker_groups[count.index], "asg_min_size",lookup(var.workers_group_defaults, "asg_min_size"))}" @@ -9,8 +9,8 @@ resource "aws_autoscaling_group" "workers" { tags = ["${concat( list( - map("key", "Name", "value", "${var.cluster_name}-${lookup(var.worker_groups[count.index], "name", count.index)}-eks_asg", "propagate_at_launch", true), - map("key", "kubernetes.io/cluster/${var.cluster_name}", "value", "owned", "propagate_at_launch", true), + map("key", "Name", "value", "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}-eks_asg", "propagate_at_launch", true), + map("key", "kubernetes.io/cluster/${aws_eks_cluster.this.name}", "value", "owned", "propagate_at_launch", true), ), local.asg_tags) }"] @@ -21,7 +21,7 @@ resource "aws_autoscaling_group" "workers" { } resource "aws_launch_configuration" "workers" { - name_prefix = "${var.cluster_name}-${lookup(var.worker_groups[count.index], "name", count.index)}" + name_prefix = "${aws_eks_cluster.this.name}-${lookup(var.worker_groups[count.index], "name", count.index)}" associate_public_ip_address = "${lookup(var.worker_groups[count.index], "public_ip", lookup(var.workers_group_defaults, "public_ip"))}" security_groups = ["${local.worker_security_group_id}"] iam_instance_profile = "${aws_iam_instance_profile.workers.id}" @@ -42,11 +42,11 @@ resource "aws_launch_configuration" "workers" { } resource "aws_security_group" "workers" { - name_prefix = "${var.cluster_name}" + name_prefix = "${aws_eks_cluster.this.name}" description = "Security group for all nodes in the cluster." vpc_id = "${var.vpc_id}" count = "${var.worker_security_group_id == "" ? 
1 : 0}" - tags = "${merge(var.tags, map("Name", "${var.cluster_name}-eks_worker_sg", "kubernetes.io/cluster/${var.cluster_name}", "owned" + tags = "${merge(var.tags, map("Name", "${aws_eks_cluster.this.name}-eks_worker_sg", "kubernetes.io/cluster/${aws_eks_cluster.this.name}", "owned" ))}" } @@ -84,12 +84,12 @@ resource "aws_security_group_rule" "workers_ingress_cluster" { } resource "aws_iam_role" "workers" { - name_prefix = "${var.cluster_name}" + name_prefix = "${aws_eks_cluster.this.name}" assume_role_policy = "${data.aws_iam_policy_document.workers_assume_role_policy.json}" } resource "aws_iam_instance_profile" "workers" { - name_prefix = "${var.cluster_name}" + name_prefix = "${aws_eks_cluster.this.name}" role = "${aws_iam_role.workers.name}" }