mirror of
https://github.com/ysoftdevs/terraform-aws-eks.git
synced 2026-03-11 21:11:32 +01:00
workers can now be specified as multiple asgs of different flavors. BYO security group now possible for both workers and cluster
This commit is contained in:
18
cluster.tf
18
cluster.tf
@@ -4,7 +4,7 @@ resource "aws_eks_cluster" "this" {
|
||||
version = "${var.cluster_version}"
|
||||
|
||||
vpc_config {
|
||||
security_group_ids = ["${aws_security_group.cluster.id}"]
|
||||
security_group_ids = ["${local.cluster_security_group_id}"]
|
||||
subnet_ids = ["${var.subnets}"]
|
||||
}
|
||||
|
||||
@@ -16,39 +16,43 @@ resource "aws_eks_cluster" "this" {
|
||||
|
||||
resource "aws_security_group" "cluster" {
|
||||
name_prefix = "${var.cluster_name}"
|
||||
description = "Cluster communication with workers nodes"
|
||||
description = "EKS cluster security group."
|
||||
vpc_id = "${var.vpc_id}"
|
||||
tags = "${merge(var.tags, map("Name", "${var.cluster_name}-eks_cluster_sg"))}"
|
||||
count = "${var.cluster_security_group_id == "" ? 1 : 0}"
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "cluster_egress_internet" {
|
||||
description = "Allow cluster egress to the Internet."
|
||||
description = "Allow cluster egress access to the Internet."
|
||||
protocol = "-1"
|
||||
security_group_id = "${aws_security_group.cluster.id}"
|
||||
cidr_blocks = ["0.0.0.0/0"]
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
type = "egress"
|
||||
count = "${var.cluster_security_group_id == "" ? 1 : 0}"
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "cluster_https_worker_ingress" {
|
||||
description = "Allow pods to communicate with the cluster API Server."
|
||||
description = "Allow pods to communicate with the EKS cluster API."
|
||||
protocol = "tcp"
|
||||
security_group_id = "${aws_security_group.cluster.id}"
|
||||
source_security_group_id = "${aws_security_group.workers.id}"
|
||||
source_security_group_id = "${local.worker_security_group_id}"
|
||||
from_port = 443
|
||||
to_port = 443
|
||||
type = "ingress"
|
||||
count = "${var.cluster_security_group_id == "" ? 1 : 0}"
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "cluster_https_cidr_ingress" {
|
||||
cidr_blocks = ["${var.cluster_ingress_cidrs}"]
|
||||
description = "Allow communication with the cluster API Server."
|
||||
cidr_blocks = ["${local.workstation_external_cidr}"]
|
||||
description = "Allow kubectl communication with the EKS cluster API."
|
||||
protocol = "tcp"
|
||||
security_group_id = "${aws_security_group.cluster.id}"
|
||||
from_port = 443
|
||||
to_port = 443
|
||||
type = "ingress"
|
||||
count = "${var.cluster_security_group_id == "" ? 1 : 0}"
|
||||
}
|
||||
|
||||
resource "aws_iam_role" "cluster" {
|
||||
|
||||
28
data.tf
28
data.tf
@@ -1,5 +1,9 @@
|
||||
data "aws_region" "current" {}
|
||||
|
||||
data "http" "workstation_external_ip" {
|
||||
url = "http://icanhazip.com"
|
||||
}
|
||||
|
||||
data "aws_iam_policy_document" "workers_assume_role_policy" {
|
||||
statement {
|
||||
sid = "EKSWorkerAssumeRole"
|
||||
@@ -15,6 +19,16 @@ data "aws_iam_policy_document" "workers_assume_role_policy" {
|
||||
}
|
||||
}
|
||||
|
||||
data "aws_ami" "eks_worker" {
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["eks-worker-*"]
|
||||
}
|
||||
|
||||
most_recent = true
|
||||
owners = ["602401143452"] # Amazon
|
||||
}
|
||||
|
||||
data "aws_iam_policy_document" "cluster_assume_role_policy" {
|
||||
statement {
|
||||
sid = "EKSClusterAssumeRole"
|
||||
@@ -48,3 +62,17 @@ data template_file config_map_aws_auth {
|
||||
role_arn = "${aws_iam_role.workers.arn}"
|
||||
}
|
||||
}
|
||||
|
||||
data template_file userdata {
|
||||
template = "${file("${path.module}/templates/userdata.sh.tpl")}"
|
||||
count = "${length(var.worker_groups)}"
|
||||
|
||||
vars {
|
||||
region = "${data.aws_region.current.name}"
|
||||
cluster_name = "${var.cluster_name}"
|
||||
endpoint = "${aws_eks_cluster.this.endpoint}"
|
||||
cluster_auth_base64 = "${aws_eks_cluster.this.certificate_authority.0.data}"
|
||||
max_pod_count = "${lookup(local.max_pod_per_node, lookup(var.worker_groups[count.index], "instance_type", lookup(var.workers_group_defaults, "instance_type")))}"
|
||||
additional_userdata = "${lookup(var.worker_groups[count.index], "additional_userdata",lookup(var.workers_group_defaults, "additional_userdata"))}"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,18 +11,16 @@ provider "random" {
|
||||
version = "= 1.3.1"
|
||||
}
|
||||
|
||||
provider "http" {}
|
||||
provider "local" {}
|
||||
|
||||
data "aws_availability_zones" "available" {}
|
||||
|
||||
data "http" "workstation_external_ip" {
|
||||
url = "http://icanhazip.com"
|
||||
}
|
||||
|
||||
locals {
|
||||
workstation_external_cidr = "${chomp(data.http.workstation_external_ip.body)}/32"
|
||||
cluster_name = "test-eks-${random_string.suffix.result}"
|
||||
cluster_name = "test-eks-${random_string.suffix.result}"
|
||||
|
||||
worker_groups = "${list(
|
||||
map("instance_type","t2.small",
|
||||
"additional_userdata","echo foo bar"
|
||||
),
|
||||
)}"
|
||||
|
||||
tags = "${map("Environment", "test",
|
||||
"GithubRepo", "terraform-aws-eks",
|
||||
@@ -50,13 +48,10 @@ module "vpc" {
|
||||
}
|
||||
|
||||
module "eks" {
|
||||
source = "../.."
|
||||
cluster_name = "${local.cluster_name}"
|
||||
subnets = "${module.vpc.public_subnets}"
|
||||
tags = "${local.tags}"
|
||||
vpc_id = "${module.vpc.vpc_id}"
|
||||
cluster_ingress_cidrs = ["${local.workstation_external_cidr}"]
|
||||
workers_instance_type = "t2.small"
|
||||
additional_userdata = "echo hello world"
|
||||
configure_kubectl_session = true
|
||||
source = "../.."
|
||||
cluster_name = "${local.cluster_name}"
|
||||
subnets = "${module.vpc.public_subnets}"
|
||||
tags = "${local.tags}"
|
||||
vpc_id = "${module.vpc.vpc_id}"
|
||||
worker_groups = "${local.worker_groups}"
|
||||
}
|
||||
|
||||
@@ -3,9 +3,9 @@ output "cluster_endpoint" {
|
||||
value = "${module.eks.cluster_endpoint}"
|
||||
}
|
||||
|
||||
output "cluster_security_group_ids" {
|
||||
output "cluster_security_group_id" {
|
||||
description = "Security group ids attached to the cluster control plane."
|
||||
value = "${module.eks.cluster_security_group_ids}"
|
||||
value = "${module.eks.cluster_security_group_id}"
|
||||
}
|
||||
|
||||
output "kubectl_config" {
|
||||
|
||||
24
kubectl.tf
Normal file
24
kubectl.tf
Normal file
@@ -0,0 +1,24 @@
|
||||
resource "local_file" "kubeconfig" {
|
||||
content = "${data.template_file.kubeconfig.rendered}"
|
||||
filename = "${var.config_output_path}/kubeconfig"
|
||||
count = "${var.configure_kubectl_session ? 1 : 0}"
|
||||
}
|
||||
|
||||
resource "local_file" "config_map_aws_auth" {
|
||||
content = "${data.template_file.config_map_aws_auth.rendered}"
|
||||
filename = "${var.config_output_path}/config-map-aws-auth.yaml"
|
||||
count = "${var.configure_kubectl_session ? 1 : 0}"
|
||||
}
|
||||
|
||||
resource "null_resource" "configure_kubectl" {
|
||||
provisioner "local-exec" {
|
||||
command = "kubectl apply -f ${var.config_output_path}/config-map-aws-auth.yaml --kubeconfig ${var.config_output_path}/kubeconfig"
|
||||
}
|
||||
|
||||
triggers {
|
||||
config_map_rendered = "${data.template_file.config_map_aws_auth.rendered}"
|
||||
kubeconfig_rendered = "${data.template_file.kubeconfig.rendered}"
|
||||
}
|
||||
|
||||
count = "${var.configure_kubectl_session ? 1 : 0}"
|
||||
}
|
||||
210
local.tf
210
local.tf
@@ -1,44 +1,174 @@
|
||||
locals {
|
||||
# More information: https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml
|
||||
config_map_aws_auth = <<CONFIGMAPAWSAUTH
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: aws-auth
|
||||
namespace: kube-system
|
||||
data:
|
||||
mapRoles: |
|
||||
- rolearn: ${aws_iam_role.workers.arn}
|
||||
username: system:node:{{EC2PrivateDNSName}}
|
||||
groups:
|
||||
- system:bootstrappers
|
||||
- system:nodes
|
||||
CONFIGMAPAWSAUTH
|
||||
asg_tags = ["${null_resource.tags_as_list_of_maps.*.triggers}"]
|
||||
cluster_security_group_id = "${var.cluster_security_group_id == "" ? aws_security_group.cluster.id : var.cluster_security_group_id}"
|
||||
worker_security_group_id = "${var.worker_security_group_id == "" ? aws_security_group.workers.id : var.worker_security_group_id}"
|
||||
workstation_external_cidr = "${chomp(data.http.workstation_external_ip.body)}/32"
|
||||
|
||||
kubeconfig = <<KUBECONFIG
|
||||
apiVersion: v1
|
||||
clusters:
|
||||
- cluster:
|
||||
server: ${aws_eks_cluster.this.endpoint}
|
||||
certificate-authority-data: ${aws_eks_cluster.this.certificate_authority.0.data}
|
||||
name: kubernetes
|
||||
contexts:
|
||||
- context:
|
||||
cluster: kubernetes
|
||||
user: aws
|
||||
name: aws
|
||||
current-context: aws
|
||||
kind: Config
|
||||
preferences: {}
|
||||
users:
|
||||
- name: aws
|
||||
user:
|
||||
exec:
|
||||
apiVersion: client.authentication.k8s.io/v1alpha1
|
||||
command: heptio-authenticator-aws
|
||||
args:
|
||||
- "token"
|
||||
- "-i"
|
||||
- "${var.cluster_name}"
|
||||
KUBECONFIG
|
||||
# Mapping from the node type that we selected and the max number of pods that it can run
|
||||
# Taken from https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml
|
||||
max_pod_per_node = {
|
||||
c4.large = 29
|
||||
c4.xlarge = 58
|
||||
c4.2xlarge = 58
|
||||
c4.4xlarge = 234
|
||||
c4.8xlarge = 234
|
||||
c5.large = 29
|
||||
c5.xlarge = 58
|
||||
c5.2xlarge = 58
|
||||
c5.4xlarge = 234
|
||||
c5.9xlarge = 234
|
||||
c5.18xlarge = 737
|
||||
i3.large = 29
|
||||
i3.xlarge = 58
|
||||
i3.2xlarge = 58
|
||||
i3.4xlarge = 234
|
||||
i3.8xlarge = 234
|
||||
i3.16xlarge = 737
|
||||
m3.medium = 12
|
||||
m3.large = 29
|
||||
m3.xlarge = 58
|
||||
m3.2xlarge = 118
|
||||
m4.large = 20
|
||||
m4.xlarge = 58
|
||||
m4.2xlarge = 58
|
||||
m4.4xlarge = 234
|
||||
m4.10xlarge = 234
|
||||
m5.large = 29
|
||||
m5.xlarge = 58
|
||||
m5.2xlarge = 58
|
||||
m5.4xlarge = 234
|
||||
m5.12xlarge = 234
|
||||
m5.24xlarge = 737
|
||||
p2.xlarge = 58
|
||||
p2.8xlarge = 234
|
||||
p2.16xlarge = 234
|
||||
p3.2xlarge = 58
|
||||
p3.8xlarge = 234
|
||||
p3.16xlarge = 234
|
||||
r3.xlarge = 58
|
||||
r3.2xlarge = 58
|
||||
r3.4xlarge = 234
|
||||
r3.8xlarge = 234
|
||||
r4.large = 29
|
||||
r4.xlarge = 58
|
||||
r4.2xlarge = 58
|
||||
r4.4xlarge = 234
|
||||
r4.8xlarge = 234
|
||||
r4.16xlarge = 737
|
||||
t2.small = 8
|
||||
t2.medium = 17
|
||||
t2.large = 35
|
||||
t2.xlarge = 44
|
||||
t2.2xlarge = 44
|
||||
x1.16xlarge = 234
|
||||
x1.32xlarge = 234
|
||||
}
|
||||
|
||||
ebs_optimized_types = {
|
||||
"c4.large" = true
|
||||
"c4.xlarge" = true
|
||||
"c4.2xlarge" = true
|
||||
"c4.4xlarge" = true
|
||||
"c4.8xlarge" = true
|
||||
"c5.large" = true
|
||||
"c5.xlarge" = true
|
||||
"c5.2xlarge" = true
|
||||
"c5.4xlarge" = true
|
||||
"c5.9xlarge" = true
|
||||
"c5.18xlarge" = true
|
||||
"c5d.large" = true
|
||||
"c5d.xlarge" = true
|
||||
"c5d.2xlarge" = true
|
||||
"c5d.4xlarge" = true
|
||||
"c5d.9xlarge" = true
|
||||
"c5d.18xlarge" = true
|
||||
"d2.xlarge" = true
|
||||
"d2.2xlarge" = true
|
||||
"d2.4xlarge" = true
|
||||
"d2.8xlarge" = true
|
||||
"f1.2xlarge" = true
|
||||
"f1.16xlarge" = true
|
||||
"g3.4xlarge" = true
|
||||
"g3.8xlarge" = true
|
||||
"g3.16xlarge" = true
|
||||
"h1.2xlarge" = true
|
||||
"h1.4xlarge" = true
|
||||
"h1.8xlarge" = true
|
||||
"h1.16xlarge" = true
|
||||
"i3.large" = true
|
||||
"i3.xlarge" = true
|
||||
"i3.2xlarge" = true
|
||||
"i3.4xlarge" = true
|
||||
"i3.8xlarge" = true
|
||||
"i3.16xlarge" = true
|
||||
"i3.metal" = true
|
||||
"m4.large" = true
|
||||
"m4.xlarge" = true
|
||||
"m4.2xlarge" = true
|
||||
"m4.4xlarge" = true
|
||||
"m4.10xlarge" = true
|
||||
"m4.16xlarge" = true
|
||||
"m5.large" = true
|
||||
"m5.xlarge" = true
|
||||
"m5.2xlarge" = true
|
||||
"m5.4xlarge" = true
|
||||
"m5.12xlarge" = true
|
||||
"m5.24xlarge" = true
|
||||
"m5d.large" = true
|
||||
"m5d.xlarge" = true
|
||||
"m5d.2xlarge" = true
|
||||
"m5d.4xlarge" = true
|
||||
"m5d.12xlarge" = true
|
||||
"m5d.24xlarge" = true
|
||||
"p2.xlarge" = true
|
||||
"p2.8xlarge" = true
|
||||
"p2.16xlarge" = true
|
||||
"p3.2xlarge" = true
|
||||
"p3.8xlarge" = true
|
||||
"p3.16xlarge" = true
|
||||
"r4.large" = true
|
||||
"r4.xlarge" = true
|
||||
"r4.2xlarge" = true
|
||||
"r4.4xlarge" = true
|
||||
"r4.8xlarge" = true
|
||||
"r4.16xlarge" = true
|
||||
"x1.16xlarge" = true
|
||||
"x1.32xlarge" = true
|
||||
"x1e.xlarge" = true
|
||||
"x1e.2xlarge" = true
|
||||
"x1e.4xlarge" = true
|
||||
"x1e.8xlarge" = true
|
||||
"x1e.16xlarge" = true
|
||||
"x1e.32xlarge" = true
|
||||
"c5.large" = true
|
||||
"c5.xlarge" = true
|
||||
"c5.2xlarge" = true
|
||||
"c5d.large" = true
|
||||
"c5d.xlarge" = true
|
||||
"c5d.2xlarge" = true
|
||||
"m5.large" = true
|
||||
"m5.xlarge" = true
|
||||
"m5.2xlarge" = true
|
||||
"m5d.large" = true
|
||||
"m5d.xlarge" = true
|
||||
"m5d.2xlarge" = true
|
||||
"c1.xlarge" = true
|
||||
"c3.xlarge" = true
|
||||
"c3.2xlarge" = true
|
||||
"c3.4xlarge" = true
|
||||
"g2.2xlarge" = true
|
||||
"i2.xlarge" = true
|
||||
"i2.2xlarge" = true
|
||||
"i2.4xlarge" = true
|
||||
"m1.large" = true
|
||||
"m1.xlarge" = true
|
||||
"m2.2xlarge" = true
|
||||
"m2.4xlarge" = true
|
||||
"m3.xlarge" = true
|
||||
"m3.2xlarge" = true
|
||||
"r3.xlarge" = true
|
||||
"r3.2xlarge" = true
|
||||
"r3.4xlarge" = true
|
||||
}
|
||||
}
|
||||
|
||||
32
main.tf
32
main.tf
@@ -29,7 +29,6 @@
|
||||
* subnets = ["subnet-abcde012", "subnet-bcde012a"]
|
||||
* tags = "${map("Environment", "test")}"
|
||||
* vpc_id = "vpc-abcde012"
|
||||
* cluster_ingress_cidrs = ["24.18.23.91/32"]
|
||||
* }
|
||||
* ```
|
||||
|
||||
@@ -53,8 +52,9 @@ are installed and on your shell's PATH.
|
||||
* 3. Ensure your AWS environment is configured (i.e. credentials and region) for test.
|
||||
* 4. Test using `bundle exec kitchen test` from the root of the repo.
|
||||
|
||||
For now, connectivity to the kubernetes cluster is not tested but will be in the future.
|
||||
To test your kubectl connection manually, see the [eks_test_fixture README](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_test_fixture/README.md).
|
||||
* For now, connectivity to the kubernetes cluster is not tested but will be in the
|
||||
* future. If `configure_kubectl_session` is set `true`, once the test fixture has
|
||||
* converged, you can query the test cluster with `kubectl get nodes --watch --kubeconfig kubeconfig`.
|
||||
|
||||
* ## Doc generation
|
||||
|
||||
@@ -93,28 +93,4 @@ To test your kubectl connection manually, see the [eks_test_fixture README](http
|
||||
|
||||
provider "null" {}
|
||||
provider "template" {}
|
||||
|
||||
resource "local_file" "kubeconfig" {
|
||||
content = "${data.template_file.kubeconfig.rendered}"
|
||||
filename = "${var.config_output_path}/kubeconfig"
|
||||
count = "${var.configure_kubectl_session ? 1 : 0}"
|
||||
}
|
||||
|
||||
resource "local_file" "config_map_aws_auth" {
|
||||
content = "${data.template_file.config_map_aws_auth.rendered}"
|
||||
filename = "${var.config_output_path}/config-map-aws-auth.yaml"
|
||||
count = "${var.configure_kubectl_session ? 1 : 0}"
|
||||
}
|
||||
|
||||
resource "null_resource" "configure_kubectl" {
|
||||
provisioner "local-exec" {
|
||||
command = "kubectl apply -f ${var.config_output_path}/config-map-aws-auth.yaml --kubeconfig ${var.config_output_path}/kubeconfig"
|
||||
}
|
||||
|
||||
triggers {
|
||||
config_map_rendered = "${data.template_file.config_map_aws_auth.rendered}"
|
||||
kubeconfig_rendered = "${data.template_file.kubeconfig.rendered}"
|
||||
}
|
||||
|
||||
count = "${var.configure_kubectl_session ? 1 : 0}"
|
||||
}
|
||||
provider "http" {}
|
||||
|
||||
@@ -1,9 +0,0 @@
|
||||
data "aws_ami" "eks_worker" {
|
||||
filter {
|
||||
name = "name"
|
||||
values = ["eks-worker-*"]
|
||||
}
|
||||
|
||||
most_recent = true
|
||||
owners = ["602401143452"] # Amazon
|
||||
}
|
||||
@@ -1,170 +0,0 @@
|
||||
locals {
|
||||
asg_tags = ["${null_resource.tags_as_list_of_maps.*.triggers}"]
|
||||
|
||||
# Mapping from the node type that we selected and the max number of pods that it can run
|
||||
# Taken from https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml
|
||||
max_pod_per_node = {
|
||||
c4.large = 29
|
||||
c4.xlarge = 58
|
||||
c4.2xlarge = 58
|
||||
c4.4xlarge = 234
|
||||
c4.8xlarge = 234
|
||||
c5.large = 29
|
||||
c5.xlarge = 58
|
||||
c5.2xlarge = 58
|
||||
c5.4xlarge = 234
|
||||
c5.9xlarge = 234
|
||||
c5.18xlarge = 737
|
||||
i3.large = 29
|
||||
i3.xlarge = 58
|
||||
i3.2xlarge = 58
|
||||
i3.4xlarge = 234
|
||||
i3.8xlarge = 234
|
||||
i3.16xlarge = 737
|
||||
m3.medium = 12
|
||||
m3.large = 29
|
||||
m3.xlarge = 58
|
||||
m3.2xlarge = 118
|
||||
m4.large = 20
|
||||
m4.xlarge = 58
|
||||
m4.2xlarge = 58
|
||||
m4.4xlarge = 234
|
||||
m4.10xlarge = 234
|
||||
m5.large = 29
|
||||
m5.xlarge = 58
|
||||
m5.2xlarge = 58
|
||||
m5.4xlarge = 234
|
||||
m5.12xlarge = 234
|
||||
m5.24xlarge = 737
|
||||
p2.xlarge = 58
|
||||
p2.8xlarge = 234
|
||||
p2.16xlarge = 234
|
||||
p3.2xlarge = 58
|
||||
p3.8xlarge = 234
|
||||
p3.16xlarge = 234
|
||||
r3.xlarge = 58
|
||||
r3.2xlarge = 58
|
||||
r3.4xlarge = 234
|
||||
r3.8xlarge = 234
|
||||
r4.large = 29
|
||||
r4.xlarge = 58
|
||||
r4.2xlarge = 58
|
||||
r4.4xlarge = 234
|
||||
r4.8xlarge = 234
|
||||
r4.16xlarge = 737
|
||||
t2.small = 8
|
||||
t2.medium = 17
|
||||
t2.large = 35
|
||||
t2.xlarge = 44
|
||||
t2.2xlarge = 44
|
||||
x1.16xlarge = 234
|
||||
x1.32xlarge = 234
|
||||
}
|
||||
|
||||
ebs_optimized_types = {
|
||||
"c4.large" = true
|
||||
"c4.xlarge" = true
|
||||
"c4.2xlarge" = true
|
||||
"c4.4xlarge" = true
|
||||
"c4.8xlarge" = true
|
||||
"c5.large" = true
|
||||
"c5.xlarge" = true
|
||||
"c5.2xlarge" = true
|
||||
"c5.4xlarge" = true
|
||||
"c5.9xlarge" = true
|
||||
"c5.18xlarge" = true
|
||||
"c5d.large" = true
|
||||
"c5d.xlarge" = true
|
||||
"c5d.2xlarge" = true
|
||||
"c5d.4xlarge" = true
|
||||
"c5d.9xlarge" = true
|
||||
"c5d.18xlarge" = true
|
||||
"d2.xlarge" = true
|
||||
"d2.2xlarge" = true
|
||||
"d2.4xlarge" = true
|
||||
"d2.8xlarge" = true
|
||||
"f1.2xlarge" = true
|
||||
"f1.16xlarge" = true
|
||||
"g3.4xlarge" = true
|
||||
"g3.8xlarge" = true
|
||||
"g3.16xlarge" = true
|
||||
"h1.2xlarge" = true
|
||||
"h1.4xlarge" = true
|
||||
"h1.8xlarge" = true
|
||||
"h1.16xlarge" = true
|
||||
"i3.large" = true
|
||||
"i3.xlarge" = true
|
||||
"i3.2xlarge" = true
|
||||
"i3.4xlarge" = true
|
||||
"i3.8xlarge" = true
|
||||
"i3.16xlarge" = true
|
||||
"i3.metal" = true
|
||||
"m4.large" = true
|
||||
"m4.xlarge" = true
|
||||
"m4.2xlarge" = true
|
||||
"m4.4xlarge" = true
|
||||
"m4.10xlarge" = true
|
||||
"m4.16xlarge" = true
|
||||
"m5.large" = true
|
||||
"m5.xlarge" = true
|
||||
"m5.2xlarge" = true
|
||||
"m5.4xlarge" = true
|
||||
"m5.12xlarge" = true
|
||||
"m5.24xlarge" = true
|
||||
"m5d.large" = true
|
||||
"m5d.xlarge" = true
|
||||
"m5d.2xlarge" = true
|
||||
"m5d.4xlarge" = true
|
||||
"m5d.12xlarge" = true
|
||||
"m5d.24xlarge" = true
|
||||
"p2.xlarge" = true
|
||||
"p2.8xlarge" = true
|
||||
"p2.16xlarge" = true
|
||||
"p3.2xlarge" = true
|
||||
"p3.8xlarge" = true
|
||||
"p3.16xlarge" = true
|
||||
"r4.large" = true
|
||||
"r4.xlarge" = true
|
||||
"r4.2xlarge" = true
|
||||
"r4.4xlarge" = true
|
||||
"r4.8xlarge" = true
|
||||
"r4.16xlarge" = true
|
||||
"x1.16xlarge" = true
|
||||
"x1.32xlarge" = true
|
||||
"x1e.xlarge" = true
|
||||
"x1e.2xlarge" = true
|
||||
"x1e.4xlarge" = true
|
||||
"x1e.8xlarge" = true
|
||||
"x1e.16xlarge" = true
|
||||
"x1e.32xlarge" = true
|
||||
"c5.large" = true
|
||||
"c5.xlarge" = true
|
||||
"c5.2xlarge" = true
|
||||
"c5d.large" = true
|
||||
"c5d.xlarge" = true
|
||||
"c5d.2xlarge" = true
|
||||
"m5.large" = true
|
||||
"m5.xlarge" = true
|
||||
"m5.2xlarge" = true
|
||||
"m5d.large" = true
|
||||
"m5d.xlarge" = true
|
||||
"m5d.2xlarge" = true
|
||||
"c1.xlarge" = true
|
||||
"c3.xlarge" = true
|
||||
"c3.2xlarge" = true
|
||||
"c3.4xlarge" = true
|
||||
"g2.2xlarge" = true
|
||||
"i2.xlarge" = true
|
||||
"i2.2xlarge" = true
|
||||
"i2.4xlarge" = true
|
||||
"m1.large" = true
|
||||
"m1.xlarge" = true
|
||||
"m2.2xlarge" = true
|
||||
"m2.4xlarge" = true
|
||||
"m3.xlarge" = true
|
||||
"m3.2xlarge" = true
|
||||
"r3.xlarge" = true
|
||||
"r3.2xlarge" = true
|
||||
"r3.4xlarge" = true
|
||||
}
|
||||
}
|
||||
@@ -1,61 +0,0 @@
|
||||
resource "aws_autoscaling_group" "workers" {
|
||||
name_prefix = "${lookup(var.worker_groups[count.index], "name")}.${var.cluster_name}"
|
||||
launch_configuration = "${element(aws_launch_configuration.workers.*.id, count.index)}"
|
||||
desired_capacity = "${lookup(var.worker_groups[count.index], "asg_desired_capacity")}"
|
||||
max_size = "${lookup(var.worker_groups[count.index], "asg_max_size")}"
|
||||
min_size = "${lookup(var.worker_groups[count.index], "asg_min_size")}"
|
||||
vpc_zone_identifier = ["${var.subnets}"]
|
||||
count = "${length(var.worker_groups)}"
|
||||
|
||||
tags = ["${concat(
|
||||
list(
|
||||
map("key", "Name", "value", "${lookup(var.worker_groups[count.index], "name")}.${var.cluster_name}-eks_asg", "propagate_at_launch", true),
|
||||
map("key", "kubernetes.io/cluster/${var.cluster_name}", "value", "owned", "propagate_at_launch", true),
|
||||
),
|
||||
local.asg_tags)
|
||||
}"]
|
||||
}
|
||||
|
||||
resource "aws_launch_configuration" "workers" {
|
||||
name_prefix = "${lookup(var.worker_groups[count.index], "name")}.${lookup(var.worker_groups[count.index], "name")}.${var.cluster_name}"
|
||||
associate_public_ip_address = true
|
||||
iam_instance_profile = "${var.iam_instance_profile}"
|
||||
image_id = "${lookup(var.worker_groups[count.index], "ami_id") == "" ? data.aws_ami.eks_worker.id : lookup(var.worker_groups[count.index], "ami_id")}"
|
||||
instance_type = "${lookup(var.worker_groups[count.index], "instance_type")}"
|
||||
security_groups = ["${var.security_group_id}"]
|
||||
user_data_base64 = "${base64encode(element(data.template_file.userdata.*.rendered, count.index))}"
|
||||
ebs_optimized = "${var.ebs_optimized_workers ? lookup(local.ebs_optimized_types, lookup(var.worker_groups[count.index], "instance_type"), false) : false}"
|
||||
count = "${length(var.worker_groups)}"
|
||||
|
||||
lifecycle {
|
||||
create_before_destroy = true
|
||||
}
|
||||
|
||||
root_block_device {
|
||||
delete_on_termination = true
|
||||
}
|
||||
}
|
||||
|
||||
data template_file userdata {
|
||||
template = "${file("${path.module}/templates/userdata.sh.tpl")}"
|
||||
count = "${length(var.worker_groups)}"
|
||||
|
||||
vars {
|
||||
region = "${var.aws_region}"
|
||||
max_pod_count = "${lookup(local.max_pod_per_node, lookup(var.worker_groups[count.index], "instance_type"))}"
|
||||
cluster_name = "${var.cluster_name}"
|
||||
endpoint = "${var.endpoint}"
|
||||
cluster_auth_base64 = "${var.certificate_authority}"
|
||||
additional_userdata = "${var.additional_userdata}"
|
||||
}
|
||||
}
|
||||
|
||||
resource "null_resource" "tags_as_list_of_maps" {
|
||||
count = "${length(keys(var.tags))}"
|
||||
|
||||
triggers = "${map(
|
||||
"key", "${element(keys(var.tags), count.index)}",
|
||||
"value", "${element(values(var.tags), count.index)}",
|
||||
"propagate_at_launch", "true"
|
||||
)}"
|
||||
}
|
||||
@@ -1,64 +0,0 @@
|
||||
variable "additional_userdata" {
|
||||
description = "Extra lines of userdata (bash) which are appended to the default userdata code."
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "aws_region" {
|
||||
description = "The AWS region where the cluster resides."
|
||||
}
|
||||
|
||||
variable "certificate_authority" {
|
||||
description = "Base64 encoded certificate authority of the cluster."
|
||||
}
|
||||
|
||||
variable "cluster_name" {
|
||||
description = "Name of the EKS cluster which is also used as a prefix in names of related resources."
|
||||
}
|
||||
|
||||
variable "ebs_optimized_workers" {
|
||||
description = "If left at default of true, will use ebs optimization if available on the given instance type."
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "endpoint" {
|
||||
description = "API endpoint of the cluster."
|
||||
}
|
||||
|
||||
variable "iam_instance_profile" {
|
||||
description = "Worker IAM instance profile name."
|
||||
}
|
||||
|
||||
variable "security_group_id" {
|
||||
description = "Worker security group ID."
|
||||
}
|
||||
|
||||
variable "subnets" {
|
||||
description = "A list of subnets to associate with the cluster's underlying instances."
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "tags" {
|
||||
description = "A map of tags to add to all resources."
|
||||
default = {}
|
||||
}
|
||||
|
||||
variable "workers_ami_id" {
|
||||
description = "AMI ID for the eks workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI."
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "worker_groups" {
|
||||
description = "A list of maps defining worker group configurations."
|
||||
type = "list"
|
||||
|
||||
default = [
|
||||
{
|
||||
name = "nodes" # Name of the worker group.
|
||||
ami_id = "" # AMI ID for the eks workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI.
|
||||
asg_desired_capacity = "1" # Desired worker capacity in the autoscaling group.
|
||||
asg_max_size = "3" # Maximum worker capacity in the autoscaling group.
|
||||
asg_min_size = "1" # Minimum worker capacity in the autoscaling group.
|
||||
instance_type = "m4.large" # Size of the workers instances.
|
||||
},
|
||||
]
|
||||
}
|
||||
37
outputs.tf
37
outputs.tf
@@ -1,15 +1,5 @@
|
||||
output "config_map_aws_auth" {
|
||||
description = "A kubernetes configuration to authenticate to this cluster."
|
||||
value = "${data.template_file.config_map_aws_auth.rendered}"
|
||||
}
|
||||
|
||||
output "kubeconfig" {
|
||||
description = "kubectl config file contents for this cluster."
|
||||
value = "${data.template_file.kubeconfig.rendered}"
|
||||
}
|
||||
|
||||
output "cluster_id" {
|
||||
description = "The name/id of the cluster."
|
||||
description = "The name/id of the EKS cluster."
|
||||
value = "${aws_eks_cluster.this.id}"
|
||||
}
|
||||
|
||||
@@ -25,16 +15,31 @@ output "cluster_certificate_authority_data" {
|
||||
}
|
||||
|
||||
output "cluster_endpoint" {
|
||||
description = "The endpoint for your Kubernetes API server."
|
||||
description = "The endpoint for your EKS Kubernetes API."
|
||||
value = "${aws_eks_cluster.this.endpoint}"
|
||||
}
|
||||
|
||||
output "cluster_version" {
|
||||
description = "The Kubernetes server version for the cluster."
|
||||
description = "The Kubernetes server version for the EKS cluster."
|
||||
value = "${aws_eks_cluster.this.version}"
|
||||
}
|
||||
|
||||
output "cluster_security_group_ids" {
|
||||
description = "description"
|
||||
value = "${aws_eks_cluster.this.vpc_config.0.security_group_ids}"
|
||||
output "cluster_security_group_id" {
|
||||
description = "Security group ID attached to the EKS cluster."
|
||||
value = "${local.cluster_security_group_id}"
|
||||
}
|
||||
|
||||
output "config_map_aws_auth" {
|
||||
description = "A kubernetes configuration to authenticate to this EKS cluster."
|
||||
value = "${data.template_file.config_map_aws_auth.rendered}"
|
||||
}
|
||||
|
||||
output "kubeconfig" {
|
||||
description = "kubectl config file contents for this EKS cluster."
|
||||
value = "${data.template_file.kubeconfig.rendered}"
|
||||
}
|
||||
|
||||
output "worker_security_group_id" {
|
||||
description = "Security group ID attached to the EKS workers."
|
||||
value = "${local.worker_security_group_id}"
|
||||
}
|
||||
|
||||
66
variables.tf
66
variables.tf
@@ -1,19 +1,14 @@
|
||||
variable "additional_userdata" {
|
||||
description = "Extra lines of userdata (bash) which are appended to the default userdata code."
|
||||
variable "cluster_name" {
|
||||
description = "Name of the EKS cluster. Also used as a prefix in names of related resources."
|
||||
}
|
||||
|
||||
variable "cluster_security_group_id" {
|
||||
description = "If provided, the EKS cluster will be attached to this security group. If not given, a security group will be created with necessary ingres/egress to work with the workers and provide API access to your current IP/32."
|
||||
default = ""
|
||||
}
|
||||
|
||||
variable "cluster_ingress_cidrs" {
|
||||
description = "The CIDRs from which we can execute kubectl commands."
|
||||
type = "list"
|
||||
}
|
||||
|
||||
variable "cluster_name" {
|
||||
description = "Name of the EKS cluster which is also used as a prefix in names of related resources."
|
||||
}
|
||||
|
||||
variable "cluster_version" {
|
||||
description = "Kubernetes version to use for the cluster."
|
||||
description = "Kubernetes version to use for the EKS cluster."
|
||||
default = "1.10"
|
||||
}
|
||||
|
||||
@@ -23,17 +18,12 @@ variable "config_output_path" {
|
||||
}
|
||||
|
||||
variable "configure_kubectl_session" {
|
||||
description = "Configure the current session's kubectl to use the instantiated cluster."
|
||||
default = false
|
||||
}
|
||||
|
||||
variable "ebs_optimized_workers" {
|
||||
description = "If left at default of true, will use ebs optimization if available on the given instance type."
|
||||
description = "Configure the current session's kubectl to use the instantiated EKS cluster."
|
||||
default = true
|
||||
}
|
||||
|
||||
variable "subnets" {
|
||||
description = "A list of subnets to associate with the cluster's underlying instances."
|
||||
description = "A list of subnets to place the EKS cluster and workers within."
|
||||
type = "list"
|
||||
}
|
||||
|
||||
@@ -43,21 +33,35 @@ variable "tags" {
|
||||
}
|
||||
|
||||
variable "vpc_id" {
|
||||
description = "VPC id where the cluster and other resources will be deployed."
|
||||
description = "VPC where the cluster and workers will be deployed."
|
||||
}
|
||||
|
||||
variable "worker_groups" {
|
||||
description = "A list of maps defining worker group configurations."
|
||||
description = "A list of maps defining worker group configurations. See workers_group_defaults for valid keys."
|
||||
type = "list"
|
||||
|
||||
default = [
|
||||
{
|
||||
name = "nodes" # Name of the worker group.
|
||||
ami_id = "" # AMI ID for the eks workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI.
|
||||
asg_desired_capacity = "1" # Desired worker capacity in the autoscaling group.
|
||||
asg_max_size = "3" # Maximum worker capacity in the autoscaling group.
|
||||
asg_min_size = "1" # Minimum worker capacity in the autoscaling group.
|
||||
instance_type = "m4.large" # Size of the workers instances.
|
||||
},
|
||||
]
|
||||
default = [{
|
||||
"name" = "default"
|
||||
}]
|
||||
}
|
||||
|
||||
variable "workers_group_defaults" {
|
||||
description = "Default values for target groups as defined by the list of maps."
|
||||
type = "map"
|
||||
|
||||
default = {
|
||||
name = "count.index" # Name of the worker group. Literal count.index will never be used but if name is not set, the count.index interpolation will be used.
|
||||
ami_id = "" # AMI ID for the eks workers. If none is provided, Terraform will search for the latest version of their EKS optimized worker AMI.
|
||||
asg_desired_capacity = "1" # Desired worker capacity in the autoscaling group.
|
||||
asg_max_size = "3" # Maximum worker capacity in the autoscaling group.
|
||||
asg_min_size = "1" # Minimum worker capacity in the autoscaling group.
|
||||
instance_type = "m4.large" # Size of the workers instances.
|
||||
additional_userdata = "" # userdata to append to the default userdata.
|
||||
ebs_optimized = true # sets whether to use ebs optimization on supported types.
|
||||
}
|
||||
}
|
||||
|
||||
variable "worker_security_group_id" {
|
||||
description = "If provided, all workers will be attached to this security group. If not given, a security group will be created with necessary ingres/egress to work with the EKS cluster."
|
||||
default = ""
|
||||
}
|
||||
|
||||
63
workers.tf
63
workers.tf
@@ -1,20 +1,46 @@
|
||||
module "worker_groups" {
|
||||
source = "./modules/worker_groups"
|
||||
aws_region = "{data.aws_region.current.name}"
|
||||
cluster_name = "${var.cluster_name}"
|
||||
certificate_authority = "${aws_eks_cluster.this.certificate_authority.0.data}"
|
||||
endpoint = "${aws_eks_cluster.this.endpoint}"
|
||||
iam_instance_profile = "${aws_iam_instance_profile.workers.name}"
|
||||
security_group_id = "${aws_security_group.workers.id}"
|
||||
subnets = "${var.subnets}"
|
||||
tags = "${var.tags}"
|
||||
worker_groups = "${var.worker_groups}"
|
||||
# One autoscaling group per entry in var.worker_groups. Per-group settings
# fall back to var.workers_group_defaults when a key is absent from the map,
# via the lookup(group, key, lookup(defaults, key)) pattern.
resource "aws_autoscaling_group" "workers" {
  name_prefix          = "${var.cluster_name}-${lookup(var.worker_groups[count.index], "name", count.index)}"
  desired_capacity     = "${lookup(var.worker_groups[count.index], "asg_desired_capacity", lookup(var.workers_group_defaults, "asg_desired_capacity"))}"
  max_size             = "${lookup(var.worker_groups[count.index], "asg_max_size",lookup(var.workers_group_defaults, "asg_max_size"))}"
  min_size             = "${lookup(var.worker_groups[count.index], "asg_min_size",lookup(var.workers_group_defaults, "asg_min_size"))}"
  launch_configuration = "${element(aws_launch_configuration.workers.*.id, count.index)}"
  vpc_zone_identifier  = ["${var.subnets}"]
  count                = "${length(var.worker_groups)}"

  # The kubernetes.io/cluster/<name> = owned tag marks the nodes as belonging
  # to this EKS cluster. local.asg_tags is appended so user-supplied tags
  # propagate to instances -- NOTE(review): local.asg_tags is defined outside
  # this view; presumably built from null_resource.tags_as_list_of_maps,
  # confirm against locals.
  tags = ["${concat(
    list(
      map("key", "Name", "value", "${var.cluster_name}-${lookup(var.worker_groups[count.index], "name", count.index)}-eks_asg", "propagate_at_launch", true),
      map("key", "kubernetes.io/cluster/${var.cluster_name}", "value", "owned", "propagate_at_launch", true),
    ),
    local.asg_tags)
  }"]
}
|
||||
|
||||
# One launch configuration per worker group, consumed by the matching
# aws_autoscaling_group.workers entry at the same count.index.
resource "aws_launch_configuration" "workers" {
  name_prefix                 = "${var.cluster_name}-${lookup(var.worker_groups[count.index], "name", count.index)}-"
  associate_public_ip_address = true # NOTE(review): hardcoded -- every worker gets a public IP; confirm this is intended for private-subnet deployments.
  security_groups             = ["${local.worker_security_group_id}"] # Resolved SG: created by this module or the BYO var.worker_security_group_id.
  iam_instance_profile        = "${aws_iam_instance_profile.workers.id}"
  image_id                    = "${lookup(var.worker_groups[count.index], "ami_id", data.aws_ami.eks_worker.id)}" # Falls back to the latest EKS-optimized AMI lookup.
  instance_type               = "${lookup(var.worker_groups[count.index], "instance_type", lookup(var.workers_group_defaults, "instance_type"))}"
  user_data_base64            = "${base64encode(element(data.template_file.userdata.*.rendered, count.index))}"

  # EBS optimization: use the group's explicit ebs_optimized value when set;
  # otherwise consult local.ebs_optimized_types keyed by the effective
  # instance type, defaulting to false for unlisted types.
  ebs_optimized = "${lookup(var.worker_groups[count.index], "ebs_optimized", lookup(local.ebs_optimized_types, lookup(var.worker_groups[count.index], "instance_type", lookup(var.workers_group_defaults, "instance_type")), false))}"
  count         = "${length(var.worker_groups)}"

  # Launch configurations are immutable; create the replacement before
  # destroying the old one so the ASG always has a valid configuration.
  lifecycle {
    create_before_destroy = true
  }

  root_block_device {
    delete_on_termination = true
  }
}
|
||||
|
||||
# Worker-node security group. Only created when the caller did not bring
# their own via var.worker_security_group_id (count = 0 otherwise).
resource "aws_security_group" "workers" {
  name_prefix = "${var.cluster_name}"
  description = "Security group for all nodes in the cluster."
  vpc_id      = "${var.vpc_id}"
  count       = "${var.worker_security_group_id == "" ? 1 : 0}"

  # The kubernetes.io/cluster/<name> = owned tag identifies this SG as
  # belonging to the cluster.
  tags = "${merge(var.tags, map("Name", "${var.cluster_name}-eks_worker_sg", "kubernetes.io/cluster/${var.cluster_name}", "owned"
  ))}"
}
|
||||
@@ -27,6 +53,7 @@ resource "aws_security_group_rule" "workers_egress_internet" {
|
||||
from_port = 0
|
||||
to_port = 0
|
||||
type = "egress"
|
||||
count = "${var.worker_security_group_id == "" ? 1 : 0}"
|
||||
}
|
||||
|
||||
resource "aws_security_group_rule" "workers_ingress_self" {
|
||||
@@ -37,16 +64,18 @@ resource "aws_security_group_rule" "workers_ingress_self" {
|
||||
from_port = 0
|
||||
to_port = 65535
|
||||
type = "ingress"
|
||||
count = "${var.worker_security_group_id == "" ? 1 : 0}"
|
||||
}
|
||||
|
||||
# Allows the control plane to reach kubelets/pods on the workers. The rendered
# source assigned source_security_group_id twice (old and new diff lines both
# present), which is invalid HCL; keep only the locals-based value so the rule
# is correct whether the cluster SG was created here or supplied by the caller.
resource "aws_security_group_rule" "workers_ingress_cluster" {
  description              = "Allow workers Kubelets and pods to receive communication from the cluster control plane."
  protocol                 = "tcp"
  security_group_id        = "${aws_security_group.workers.id}"
  source_security_group_id = "${local.cluster_security_group_id}"
  from_port                = 1025
  to_port                  = 65535
  type                     = "ingress"

  # Only managed when this module created the worker security group.
  count = "${var.worker_security_group_id == "" ? 1 : 0}"
}
|
||||
|
||||
resource "aws_iam_role" "workers" {
|
||||
@@ -73,3 +102,13 @@ resource "aws_iam_role_policy_attachment" "workers_AmazonEC2ContainerRegistryRea
|
||||
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
|
||||
role = "${aws_iam_role.workers.name}"
|
||||
}
|
||||
|
||||
# Converts the var.tags map into the list-of-maps shape required by
# aws_autoscaling_group tags: one {key, value, propagate_at_launch} map per
# tag entry. The triggers are read back elsewhere -- presumably via
# null_resource.tags_as_list_of_maps.*.triggers to build local.asg_tags;
# confirm against the locals definition.
resource "null_resource" "tags_as_list_of_maps" {
  count = "${length(keys(var.tags))}"

  triggers = "${map(
    "key", "${element(keys(var.tags), count.index)}",
    "value", "${element(values(var.tags), count.index)}",
    "propagate_at_launch", "true"
  )}"
}
|
||||
|
||||
Reference in New Issue
Block a user