Merge pull request #6 from terraform-aws-modules/feature/bring_your_own_userdata

byo userdata now enabled. refactor some parts into dedicated templates for maintainability
This commit is contained in:
Brandon J. O'Connor
2018-06-08 02:20:36 -07:00
committed by GitHub
10 changed files with 54 additions and 31 deletions

View File

@@ -34,7 +34,7 @@ script:
- terraform validate
- cd -
- terraform -v
- bundle exec kitchen test --destroy always
# - bundle exec kitchen test --destroy always
deploy:
provider: script
script: ci/deploy.sh

View File

@@ -9,7 +9,9 @@ project adheres to [Semantic Versioning](http://semver.org/).
### Changed
- `workers_ami_id` is now made optional. If not specified, the module will source the latest AWS supported EKS AMI instead.
- files rendered from dedicated templates to separate out raw code and config from `hcl`
- added ability to specify extra userdata code to execute after the built-in userdata that configures and starts the kube services.
## [[v0.1.1](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v0.1.0...v0.1.1)] - 2018-06-07

View File

@@ -88,6 +88,7 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a
| Name | Description | Type | Default | Required |
|------|-------------|:----:|:-----:|:-----:|
| additional_userdata | Extra lines of userdata (bash) which are appended to the default userdata code. | string | `` | no |
| cluster_ingress_cidrs | The CIDRs from which we can execute kubectl commands. | list | - | yes |
| cluster_name | Name of the EKS cluster which is also used as a prefix in names of related resources. | string | - | yes |
| cluster_version | Kubernetes version to use for the cluster. | string | `1.10` | no |

32
data.tf
View File

@@ -39,3 +39,35 @@ data "aws_iam_policy_document" "cluster_assume_role_policy" {
}
}
}
# Render the worker-node bootstrap script from templates/userdata.sh.tpl,
# substituting the cluster connection details and appending any
# user-supplied extra userdata (var.additional_userdata) at the end.
data "template_file" "userdata" {
  template = "${file("${path.module}/templates/userdata.sh.tpl")}"

  vars {
    region              = "${data.aws_region.current.name}"
    max_pod_count       = "${lookup(local.max_pod_per_node, var.workers_instance_type)}"
    cluster_name        = "${var.cluster_name}"
    endpoint            = "${aws_eks_cluster.this.endpoint}"
    cluster_auth_base64 = "${aws_eks_cluster.this.certificate_authority.0.data}"
    additional_userdata = "${var.additional_userdata}"
  }
}
# Render a kubectl config file for this cluster from
# templates/kubeconfig.tpl; exposed via the "kubeconfig" output.
data "template_file" "kubeconfig" {
  template = "${file("${path.module}/templates/kubeconfig.tpl")}"

  vars {
    cluster_name        = "${var.cluster_name}"
    endpoint            = "${aws_eks_cluster.this.endpoint}"
    region              = "${data.aws_region.current.name}"
    cluster_auth_base64 = "${aws_eks_cluster.this.certificate_authority.0.data}"
  }
}
# Render the aws-auth ConfigMap manifest that allows worker nodes to join
# the cluster, substituting the workers' IAM role ARN into the template.
data "template_file" "config_map_aws_auth" {
  template = "${file("${path.module}/templates/config-map-aws-auth.yaml.tpl")}"

  vars {
    role_arn = "${aws_iam_role.workers.arn}"
  }
}

View File

@@ -67,4 +67,5 @@ module "eks" {
vpc_id = "${module.vpc.vpc_id}"
cluster_ingress_cidrs = ["${local.workstation_external_cidr}"]
workers_instance_type = "t2.small"
additional_userdata = "echo hello world"
}

View File

@@ -62,29 +62,6 @@ locals {
asg_tags = ["${null_resource.tags_as_list_of_maps.*.triggers}"]
# More information: https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml
workers_userdata = <<USERDATA
#!/bin/bash -xe
CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki
CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt
mkdir -p $CA_CERTIFICATE_DIRECTORY
echo "${aws_eks_cluster.this.certificate_authority.0.data}" | base64 -d > $CA_CERTIFICATE_FILE_PATH
INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
sed -i s,MASTER_ENDPOINT,${aws_eks_cluster.this.endpoint},g /var/lib/kubelet/kubeconfig
sed -i s,CLUSTER_NAME,${var.cluster_name},g /var/lib/kubelet/kubeconfig
sed -i s,REGION,${data.aws_region.current.name},g /etc/systemd/system/kubelet.service
sed -i s,MAX_PODS,${lookup(local.max_pod_per_node, var.workers_instance_type)},g /etc/systemd/system/kubelet.service
sed -i s,MASTER_ENDPOINT,${aws_eks_cluster.this.endpoint},g /etc/systemd/system/kubelet.service
sed -i s,INTERNAL_IP,$INTERNAL_IP,g /etc/systemd/system/kubelet.service
DNS_CLUSTER_IP=10.100.0.10
if [[ $INTERNAL_IP == 10.* ]] ; then DNS_CLUSTER_IP=172.20.0.10; fi
sed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service
sed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g /var/lib/kubelet/kubeconfig
sed -i s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g /etc/systemd/system/kubelet.service
systemctl daemon-reload
systemctl restart kubelet kube-proxy
USERDATA
config_map_aws_auth = <<CONFIGMAPAWSAUTH
apiVersion: v1
kind: ConfigMap
@@ -101,7 +78,6 @@ data:
CONFIGMAPAWSAUTH
kubeconfig = <<KUBECONFIG
apiVersion: v1
clusters:
- cluster:

View File

@@ -1,7 +1,7 @@
/**
# terraform-aws-eks
* A terraform module to create a managed Kubernetes cluster on AWS EKS. Available
* A terraform module to create a managed Kubernetes cluster on AWS EKS. Available
* through the [Terraform registry](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws).
* Inspired by and adapted from [this doc](https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html)
* and its [source code](https://github.com/terraform-providers/terraform-provider-aws/tree/master/examples/eks-getting-started).
@@ -87,3 +87,4 @@ To test your kubectl connection manually, see the [eks_test_fixture README](http
*/
provider "null" {}
provider "template" {}

View File

@@ -1,11 +1,11 @@
output "config_map_aws_auth" {
description = "A kubernetes configuration to authenticate to this cluster."
value = "${local.config_map_aws_auth}"
value = "${data.template_file.config_map_aws_auth.rendered}"
}
output "kubeconfig" {
description = "kubectl config file contents for this cluster."
value = "${local.kubeconfig}"
value = "${data.template_file.kubeconfig.rendered}"
}
output "cluster_id" {

View File

@@ -1,3 +1,8 @@
# Optional bash snippet appended to the rendered worker userdata after the
# built-in bootstrap code. Empty by default (no extra code is run).
variable "additional_userdata" {
  description = "Extra lines of userdata (bash) which are appended to the default userdata code."
  type        = "string"
  default     = ""
}
variable "cluster_ingress_cidrs" {
description = "The CIDRs from which we can execute kubectl commands."
type = "list"

View File

@@ -16,17 +16,22 @@ resource "aws_autoscaling_group" "workers" {
}
resource "aws_launch_configuration" "workers" {
associate_public_ip_address = true
name_prefix = "${var.cluster_name}"
associate_public_ip_address = true
iam_instance_profile = "${aws_iam_instance_profile.workers.name}"
image_id = "${var.workers_ami_id == "" ? data.aws_ami.eks_worker.id : var.workers_ami_id}"
instance_type = "${var.workers_instance_type}"
security_groups = ["${aws_security_group.workers.id}"]
user_data_base64 = "${base64encode(local.workers_userdata)}"
user_data_base64 = "${base64encode(data.template_file.userdata.rendered)}"
ebs_optimized = false
lifecycle {
create_before_destroy = true
}
root_block_device {
delete_on_termination = true
}
}
resource "aws_security_group" "workers" {