Add support for the new amazon-eks-node-* AMI (#100)

* add support for the new amazon-eks-node-* AMI

* add CHANGELOG

* remove kubelet_node_labels
This commit is contained in:
Touch Ungboriboonpisal
2018-09-04 03:19:02 -07:00
committed by Max Williams
parent 0736be4553
commit 21f43b8341
5 changed files with 10 additions and 99 deletions

1
.gitignore vendored
View File

@@ -10,5 +10,6 @@ terraform.tfstate.d/
eks-admin-cluster-role-binding.yaml
eks-admin-service-account.yaml
.idea/
*.iml
config-map-aws-auth*.yaml
kubeconfig_*

View File

@@ -10,10 +10,14 @@ project adheres to [Semantic Versioning](http://semver.org/).
### Added
- A useful addition (slam dunk, @self 🔥)
- add support for [`amazon-eks-node-*` AMI with bootstrap script](https://aws.amazon.com/blogs/opensource/improvements-eks-worker-node-provisioning/) (by @erks)
- expose `kubelet_extra_args` worker group option (replacing `kubelet_node_labels`) to allow specifying arbitrary kubelet options (e.g. taints and labels) (by @erks)
### Changed
- A subtle but thoughtful change. (Boomshakalaka, @self 🏀)
- **Breaking change** Removed support for `eks-worker-*` AMI. The cluster specifying a custom AMI based off of `eks-worker-*` AMI will have to rebuild the AMI from `amazon-eks-node-*`. (by @erks)
- **Breaking change** Removed `kubelet_node_labels` worker group option in favor of `kubelet_extra_args`. (by @erks)
## [[v1.5.0](https://github.com/terraform-aws-modules/terraform-aws-eks/compare/v1.4.0...v1.5.0)] - 2018-08-30

View File

@@ -18,7 +18,7 @@ data "aws_iam_policy_document" "workers_assume_role_policy" {
data "aws_ami" "eks_worker" {
filter {
name = "name"
values = ["eks-worker-*"]
values = ["amazon-eks-node-*"]
}
most_recent = true
@@ -74,13 +74,11 @@ data "template_file" "userdata" {
count = "${var.worker_group_count}"
vars {
region = "${data.aws_region.current.name}"
cluster_name = "${aws_eks_cluster.this.name}"
endpoint = "${aws_eks_cluster.this.endpoint}"
cluster_auth_base64 = "${aws_eks_cluster.this.certificate_authority.0.data}"
max_pod_count = "${lookup(local.max_pod_per_node, lookup(var.worker_groups[count.index], "instance_type", lookup(local.workers_group_defaults, "instance_type")))}"
pre_userdata = "${lookup(var.worker_groups[count.index], "pre_userdata",lookup(local.workers_group_defaults, "pre_userdata"))}"
additional_userdata = "${lookup(var.worker_groups[count.index], "additional_userdata",lookup(local.workers_group_defaults, "additional_userdata"))}"
kubelet_node_labels = "${lookup(var.worker_groups[count.index], "kubelet_node_labels",lookup(local.workers_group_defaults, "kubelet_node_labels"))}"
kubelet_extra_args = "${lookup(var.worker_groups[count.index], "kubelet_extra_args",lookup(local.workers_group_defaults, "kubelet_extra_args"))}"
}
}

View File

@@ -25,78 +25,13 @@ locals {
ebs_optimized = true # sets whether to use ebs optimization on supported types.
enable_monitoring = true # Enables/disables detailed monitoring.
public_ip = false # Associate a public ip address with a worker
kubelet_node_labels = "" # This string is passed directly to kubelet via --node-labels= if set. It should be comma delimited with no spaces. If left empty no --node-labels switch is added.
kubelet_extra_args = "" # This string is passed directly to kubelet if set. Useful for adding labels or taints.
subnets = "" # A comma delimited string of subnets to place the worker nodes in. i.e. subnet-123,subnet-456,subnet-789
autoscaling_enabled = false # Sets whether policy and matching tags will be added to allow autoscaling.
}
workers_group_defaults = "${merge(local.workers_group_defaults_defaults, var.workers_group_defaults)}"
# Mapping from the node type that we selected and the max number of pods that it can run
# Taken from https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml
max_pod_per_node = {
c4.large = 29
c4.xlarge = 58
c4.2xlarge = 58
c4.4xlarge = 234
c4.8xlarge = 234
c5.large = 29
c5.xlarge = 58
c5.2xlarge = 58
c5.4xlarge = 234
c5.9xlarge = 234
c5.18xlarge = 737
i3.large = 29
i3.xlarge = 58
i3.2xlarge = 58
i3.4xlarge = 234
i3.8xlarge = 234
i3.16xlarge = 737
m3.medium = 12
m3.large = 29
m3.xlarge = 58
m3.2xlarge = 118
m4.large = 20
m4.xlarge = 58
m4.2xlarge = 58
m4.4xlarge = 234
m4.10xlarge = 234
m5.large = 29
m5.xlarge = 58
m5.2xlarge = 58
m5.4xlarge = 234
m5.12xlarge = 234
m5.24xlarge = 737
p2.xlarge = 58
p2.8xlarge = 234
p2.16xlarge = 234
p3.2xlarge = 58
p3.8xlarge = 234
p3.16xlarge = 234
r3.xlarge = 58
r3.2xlarge = 58
r3.4xlarge = 234
r3.8xlarge = 234
r4.large = 29
r4.xlarge = 58
r4.2xlarge = 58
r4.4xlarge = 234
r4.8xlarge = 234
r4.16xlarge = 737
t2.small = 8
t2.medium = 17
t2.large = 35
t2.xlarge = 44
t2.2xlarge = 44
t3.small = 8
t3.medium = 17
t3.large = 35
t3.xlarge = 44
t3.2xlarge = 44
x1.16xlarge = 234
x1.32xlarge = 234
}
ebs_optimized = {
"c1.medium" = false
"c1.xlarge" = true

View File

@@ -3,35 +3,8 @@
# Allow user supplied pre userdata code
${pre_userdata}
# Certificate Authority config
CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki
CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt
mkdir -p $CA_CERTIFICATE_DIRECTORY
echo "${cluster_auth_base64}" | base64 -d >$CA_CERTIFICATE_FILE_PATH
# Set kubelet --node-labels if kubelet_node_labels were set
KUBELET_NODE_LABELS=${kubelet_node_labels}
if [[ $KUBELET_NODE_LABELS != "" ]]; then sed -i '/INTERNAL_IP/a \ \ --node-labels='"$KUBELET_NODE_LABELS"'\ \\' /etc/systemd/system/kubelet.service; fi
# Authentication
INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
sed -i s,MASTER_ENDPOINT,${endpoint},g /var/lib/kubelet/kubeconfig
sed -i s,CLUSTER_NAME,${cluster_name},g /var/lib/kubelet/kubeconfig
sed -i s,REGION,${region},g /etc/systemd/system/kubelet.service
sed -i s,MAX_PODS,${max_pod_count},g /etc/systemd/system/kubelet.service
sed -i s,MASTER_ENDPOINT,${endpoint},g /etc/systemd/system/kubelet.service
sed -i s,INTERNAL_IP,$INTERNAL_IP,g /etc/systemd/system/kubelet.service
# DNS cluster configuration
DNS_CLUSTER_IP=10.100.0.10
if [[ $INTERNAL_IP == 10.* ]]; then DNS_CLUSTER_IP=172.20.0.10; fi
sed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service
sed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g /var/lib/kubelet/kubeconfig
sed -i s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g /etc/systemd/system/kubelet.service
# start services
systemctl daemon-reload
systemctl restart kubelet
# Bootstrap and join the cluster
/etc/eks/bootstrap.sh --b64-cluster-ca '${cluster_auth_base64}' --apiserver-endpoint '${endpoint}' --kubelet-extra-args '${kubelet_extra_args}' '${cluster_name}'
# Allow user supplied userdata code
${additional_userdata}