somehow missed fmt

brandoconnor
2018-06-06 22:32:15 -07:00
parent 5082ba17d6
commit 6a137f751e
3 changed files with 272 additions and 272 deletions


@@ -1,67 +1,67 @@
resource "aws_eks_cluster" "this" {
name = "${var.cluster_name}"
role_arn = "${aws_iam_role.cluster.arn}"
version = "${var.cluster_version}"
vpc_config {
security_group_ids = ["${aws_security_group.cluster.id}"]
subnet_ids = ["${var.subnets}"]
}
depends_on = [
"aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy",
"aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy",
]
}
resource "aws_security_group" "cluster" {
name_prefix = "${var.cluster_name}"
description = "Cluster communication with workers nodes"
vpc_id = "${var.vpc_id}"
tags = "${merge(var.tags, map("Name", "${var.cluster_name}-eks_cluster_sg"))}"
}
resource "aws_security_group_rule" "cluster_egress_internet" {
description = "Allow cluster egress to the Internet."
protocol = "-1"
security_group_id = "${aws_security_group.cluster.id}"
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "cluster_https_worker_ingress" {
description = "Allow pods to communicate with the cluster API Server."
protocol = "tcp"
security_group_id = "${aws_security_group.cluster.id}"
source_security_group_id = "${aws_security_group.workers.id}"
from_port = 443
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "cluster_https_cidr_ingress" {
cidr_blocks = ["${var.cluster_ingress_cidrs}"]
description = "Allow communication with the cluster API Server."
protocol = "tcp"
security_group_id = "${aws_security_group.cluster.id}"
from_port = 443
to_port = 443
type = "ingress"
}
resource "aws_iam_role" "cluster" {
name_prefix = "${var.cluster_name}"
assume_role_policy = "${data.aws_iam_policy_document.cluster_assume_role_policy.json}"
}
resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = "${aws_iam_role.cluster.name}"
}
resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSServicePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
role = "${aws_iam_role.cluster.name}"
}
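
For context, a minimal sketch of the input variables this file references (variable names are taken from the interpolations above; the types, defaults, and descriptions are assumptions, not part of this commit):

variable "cluster_name" {
  description = "Name of the EKS cluster; also used as a name prefix for related resources. (assumed description)"
}

variable "cluster_version" {
  description = "Kubernetes version for the EKS cluster. (assumed; 1.10 was current at the time)"
  default     = "1.10"
}

variable "cluster_ingress_cidrs" {
  description = "CIDR blocks allowed to reach the cluster API server. (assumed description)"
  type        = "list"
}

variable "subnets" {
  description = "Subnet IDs to place the cluster and workers in. (assumed description)"
  type        = "list"
}

variable "vpc_id" {
  description = "VPC in which the cluster and workers are provisioned. (assumed description)"
}

variable "tags" {
  description = "Map of tags applied to all taggable resources. (assumed description)"
  type        = "map"
  default     = {}
}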
resource "aws_eks_cluster" "this" {
name = "${var.cluster_name}"
role_arn = "${aws_iam_role.cluster.arn}"
version = "${var.cluster_version}"
vpc_config {
security_group_ids = ["${aws_security_group.cluster.id}"]
subnet_ids = ["${var.subnets}"]
}
depends_on = [
"aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy",
"aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy",
]
}
resource "aws_security_group" "cluster" {
name_prefix = "${var.cluster_name}"
description = "Cluster communication with workers nodes"
vpc_id = "${var.vpc_id}"
tags = "${merge(var.tags, map("Name", "${var.cluster_name}-eks_cluster_sg"))}"
}
resource "aws_security_group_rule" "cluster_egress_internet" {
description = "Allow cluster egress to the Internet."
protocol = "-1"
security_group_id = "${aws_security_group.cluster.id}"
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "cluster_https_worker_ingress" {
description = "Allow pods to communicate with the cluster API Server."
protocol = "tcp"
security_group_id = "${aws_security_group.cluster.id}"
source_security_group_id = "${aws_security_group.workers.id}"
from_port = 443
to_port = 443
type = "ingress"
}
resource "aws_security_group_rule" "cluster_https_cidr_ingress" {
cidr_blocks = ["${var.cluster_ingress_cidrs}"]
description = "Allow communication with the cluster API Server."
protocol = "tcp"
security_group_id = "${aws_security_group.cluster.id}"
from_port = 443
to_port = 443
type = "ingress"
}
resource "aws_iam_role" "cluster" {
name_prefix = "${var.cluster_name}"
assume_role_policy = "${data.aws_iam_policy_document.cluster_assume_role_policy.json}"
}
resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = "${aws_iam_role.cluster.name}"
}
resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSServicePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
role = "${aws_iam_role.cluster.name}"
}

data.tf

@@ -1,112 +1,112 @@
data "aws_region" "current" {}
data "aws_iam_policy_document" "workers_assume_role_policy" {
statement {
sid = "EKSWorkerAssumeRole"
actions = [
"sts:AssumeRole",
]
principals {
type = "Service"
identifiers = ["ec2.amazonaws.com"]
}
}
}
data "aws_iam_policy_document" "cluster_assume_role_policy" {
statement {
sid = "EKSClusterAssumeRole"
actions = [
"sts:AssumeRole",
]
principals {
type = "Service"
identifiers = ["eks.amazonaws.com"]
}
}
}
resource "null_resource" "tags_as_list_of_maps" {
count = "${length(keys(var.tags))}"
triggers = "${map(
"key", "${element(keys(var.tags), count.index)}",
"value", "${element(values(var.tags), count.index)}",
"propagate_at_launch", "true"
)}"
}
locals {
asg_tags = ["${null_resource.tags_as_list_of_maps.*.triggers}"]
# More information: https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml
workers_userdata = <<USERDATA
#!/bin/bash -xe
CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki
CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt
mkdir -p $CA_CERTIFICATE_DIRECTORY
echo "${aws_eks_cluster.this.certificate_authority.0.data}" | base64 -d > $CA_CERTIFICATE_FILE_PATH
INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
sed -i s,MASTER_ENDPOINT,${aws_eks_cluster.this.endpoint},g /var/lib/kubelet/kubeconfig
sed -i s,CLUSTER_NAME,${var.cluster_name},g /var/lib/kubelet/kubeconfig
sed -i s,REGION,${data.aws_region.current.name},g /etc/systemd/system/kubelet.service
sed -i s,MAX_PODS,20,g /etc/systemd/system/kubelet.service
sed -i s,MASTER_ENDPOINT,${aws_eks_cluster.this.endpoint},g /etc/systemd/system/kubelet.service
sed -i s,INTERNAL_IP,$INTERNAL_IP,g /etc/systemd/system/kubelet.service
DNS_CLUSTER_IP=10.100.0.10
if [[ $INTERNAL_IP == 10.* ]] ; then DNS_CLUSTER_IP=172.20.0.10; fi
sed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service
sed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g /var/lib/kubelet/kubeconfig
sed -i s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g /etc/systemd/system/kubelet.service
systemctl daemon-reload
systemctl restart kubelet kube-proxy
USERDATA
config_map_aws_auth = <<CONFIGMAPAWSAUTH
apiVersion: v1
kind: ConfigMap
metadata:
name: aws-auth
namespace: kube-system
data:
mapRoles: |
- rolearn: ${aws_iam_role.workers.arn}
username: system:node:{{EC2PrivateDNSName}}
groups:
- system:bootstrappers
- system:nodes
CONFIGMAPAWSAUTH
kubeconfig = <<KUBECONFIG
apiVersion: v1
clusters:
- cluster:
server: ${aws_eks_cluster.this.endpoint}
certificate-authority-data: ${aws_eks_cluster.this.certificate_authority.0.data}
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: aws
name: aws
current-context: aws
kind: Config
preferences: {}
users:
- name: aws
user:
exec:
apiVersion: client.authentication.k8s.io/v1alpha1
command: heptio-authenticator-aws
args:
- "token"
- "-i"
- "${var.cluster_name}"
KUBECONFIG
}
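
The `kubeconfig` and `config_map_aws_auth` locals are only useful once surfaced to the caller. A minimal sketch of how they might be exposed (the output names and the `local_file` resource from the `local` provider are assumptions, not part of this commit):

output "kubeconfig" {
  # Rendered kubeconfig for kubectl access to the cluster. (assumed output)
  value = "${local.kubeconfig}"
}

resource "local_file" "config_map_aws_auth" {
  # The aws-auth ConfigMap must be applied (kubectl apply -f) before workers can join. (assumed resource)
  content  = "${local.config_map_aws_auth}"
  filename = "config-map-aws-auth_${var.cluster_name}.yaml"
}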
data "aws_region" "current" {}
data "aws_iam_policy_document" "workers_assume_role_policy" {
statement {
sid = "EKSWorkerAssumeRole"
actions = [
"sts:AssumeRole",
]
principals {
type = "Service"
identifiers = ["ec2.amazonaws.com"]
}
}
}
data "aws_iam_policy_document" "cluster_assume_role_policy" {
statement {
sid = "EKSClusterAssumeRole"
actions = [
"sts:AssumeRole",
]
principals {
type = "Service"
identifiers = ["eks.amazonaws.com"]
}
}
}
resource "null_resource" "tags_as_list_of_maps" {
count = "${length(keys(var.tags))}"
triggers = "${map(
"key", "${element(keys(var.tags), count.index)}",
"value", "${element(values(var.tags), count.index)}",
"propagate_at_launch", "true"
)}"
}
locals {
asg_tags = ["${null_resource.tags_as_list_of_maps.*.triggers}"]
# More information: https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml
workers_userdata = <<USERDATA
#!/bin/bash -xe
CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki
CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt
mkdir -p $CA_CERTIFICATE_DIRECTORY
echo "${aws_eks_cluster.this.certificate_authority.0.data}" | base64 -d > $CA_CERTIFICATE_FILE_PATH
INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
sed -i s,MASTER_ENDPOINT,${aws_eks_cluster.this.endpoint},g /var/lib/kubelet/kubeconfig
sed -i s,CLUSTER_NAME,${var.cluster_name},g /var/lib/kubelet/kubeconfig
sed -i s,REGION,${data.aws_region.current.name},g /etc/systemd/system/kubelet.service
sed -i s,MAX_PODS,20,g /etc/systemd/system/kubelet.service
sed -i s,MASTER_ENDPOINT,${aws_eks_cluster.this.endpoint},g /etc/systemd/system/kubelet.service
sed -i s,INTERNAL_IP,$INTERNAL_IP,g /etc/systemd/system/kubelet.service
DNS_CLUSTER_IP=10.100.0.10
if [[ $INTERNAL_IP == 10.* ]] ; then DNS_CLUSTER_IP=172.20.0.10; fi
sed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service
sed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g /var/lib/kubelet/kubeconfig
sed -i s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g /etc/systemd/system/kubelet.service
systemctl daemon-reload
systemctl restart kubelet kube-proxy
USERDATA
config_map_aws_auth = <<CONFIGMAPAWSAUTH
apiVersion: v1
kind: ConfigMap
metadata:
name: aws-auth
namespace: kube-system
data:
mapRoles: |
- rolearn: ${aws_iam_role.workers.arn}
username: system:node:{{EC2PrivateDNSName}}
groups:
- system:bootstrappers
- system:nodes
CONFIGMAPAWSAUTH
kubeconfig = <<KUBECONFIG
apiVersion: v1
clusters:
- cluster:
server: ${aws_eks_cluster.this.endpoint}
certificate-authority-data: ${aws_eks_cluster.this.certificate_authority.0.data}
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: aws
name: aws
current-context: aws
kind: Config
preferences: {}
users:
- name: aws
user:
exec:
apiVersion: client.authentication.k8s.io/v1alpha1
command: heptio-authenticator-aws
args:
- "token"
- "-i"
- "${var.cluster_name}"
KUBECONFIG
}


@@ -1,93 +1,93 @@
resource "aws_autoscaling_group" "workers" {
name_prefix = "${var.cluster_name}"
launch_configuration = "${aws_launch_configuration.workers.id}"
desired_capacity = "${var.workers_asg_desired_capacity}"
max_size = "${var.workers_asg_max_size}"
min_size = "${var.workers_asg_min_size}"
vpc_zone_identifier = ["${var.subnets}"]
tags = ["${concat(
list(
map("key", "Name", "value", "${var.cluster_name}-eks_asg", "propagate_at_launch", true),
map("key", "kubernetes.io/cluster/${var.cluster_name}", "value", "owned", "propagate_at_launch", true),
),
local.asg_tags)
}"]
}
resource "aws_launch_configuration" "workers" {
associate_public_ip_address = true
name_prefix = "${var.cluster_name}"
iam_instance_profile = "${aws_iam_instance_profile.workers.name}"
image_id = "${var.workers_ami_id}"
instance_type = "${var.workers_instance_type}"
security_groups = ["${aws_security_group.workers.id}"]
user_data_base64 = "${base64encode(local.workers_userdata)}"
lifecycle {
create_before_destroy = true
}
}
resource "aws_security_group" "workers" {
name_prefix = "${var.cluster_name}"
description = "Security group for all nodes in the cluster."
vpc_id = "${var.vpc_id}"
tags = "${merge(var.tags, map("Name", "${var.cluster_name}-eks_worker_sg", "kubernetes.io/cluster/${var.cluster_name}", "owned"
))}"
}
resource "aws_security_group_rule" "workers_egress_internet" {
description = "Allow nodes all egress to the Internet."
protocol = "-1"
security_group_id = "${aws_security_group.workers.id}"
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "workers_ingress_self" {
description = "Allow node to communicate with each other."
protocol = "-1"
security_group_id = "${aws_security_group.workers.id}"
source_security_group_id = "${aws_security_group.workers.id}"
from_port = 0
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "workers_ingress_cluster" {
description = "Allow workers Kubelets and pods to receive communication from the cluster control plane."
protocol = "tcp"
security_group_id = "${aws_security_group.workers.id}"
source_security_group_id = "${aws_security_group.cluster.id}"
from_port = 1025
to_port = 65535
type = "ingress"
}
resource "aws_iam_role" "workers" {
name_prefix = "${var.cluster_name}"
assume_role_policy = "${data.aws_iam_policy_document.workers_assume_role_policy.json}"
}
resource "aws_iam_instance_profile" "workers" {
name_prefix = "${var.cluster_name}"
role = "${aws_iam_role.workers.name}"
}
resource "aws_iam_role_policy_attachment" "workers_AmazonEKSWorkerNodePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = "${aws_iam_role.workers.name}"
}
resource "aws_iam_role_policy_attachment" "workers_AmazonEKS_CNI_Policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = "${aws_iam_role.workers.name}"
}
resource "aws_iam_role_policy_attachment" "workers_AmazonEC2ContainerRegistryReadOnly" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = "${aws_iam_role.workers.name}"
}
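
Taken together, the three files form a reusable module. A minimal sketch of how a caller might instantiate it (the module source path and all values below are illustrative assumptions, not part of this commit):

module "eks" {
  source = "../.."   # path to this module; adjust as needed (assumed)

  cluster_name          = "example"
  cluster_version       = "1.10"
  cluster_ingress_cidrs = ["203.0.113.0/24"]
  subnets               = ["subnet-aaaa1111", "subnet-bbbb2222"]
  vpc_id                = "vpc-0123456789abcdef0"

  tags = {
    Environment = "test"
  }

  workers_ami_id               = "ami-12345678"   # EKS-optimized AMI for the region (assumed)
  workers_instance_type        = "m4.large"
  workers_asg_desired_capacity = "1"
  workers_asg_min_size         = "1"
  workers_asg_max_size         = "3"
}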
resource "aws_autoscaling_group" "workers" {
name_prefix = "${var.cluster_name}"
launch_configuration = "${aws_launch_configuration.workers.id}"
desired_capacity = "${var.workers_asg_desired_capacity}"
max_size = "${var.workers_asg_max_size}"
min_size = "${var.workers_asg_min_size}"
vpc_zone_identifier = ["${var.subnets}"]
tags = ["${concat(
list(
map("key", "Name", "value", "${var.cluster_name}-eks_asg", "propagate_at_launch", true),
map("key", "kubernetes.io/cluster/${var.cluster_name}", "value", "owned", "propagate_at_launch", true),
),
local.asg_tags)
}"]
}
resource "aws_launch_configuration" "workers" {
associate_public_ip_address = true
name_prefix = "${var.cluster_name}"
iam_instance_profile = "${aws_iam_instance_profile.workers.name}"
image_id = "${var.workers_ami_id}"
instance_type = "${var.workers_instance_type}"
security_groups = ["${aws_security_group.workers.id}"]
user_data_base64 = "${base64encode(local.workers_userdata)}"
lifecycle {
create_before_destroy = true
}
}
resource "aws_security_group" "workers" {
name_prefix = "${var.cluster_name}"
description = "Security group for all nodes in the cluster."
vpc_id = "${var.vpc_id}"
tags = "${merge(var.tags, map("Name", "${var.cluster_name}-eks_worker_sg", "kubernetes.io/cluster/${var.cluster_name}", "owned"
))}"
}
resource "aws_security_group_rule" "workers_egress_internet" {
description = "Allow nodes all egress to the Internet."
protocol = "-1"
security_group_id = "${aws_security_group.workers.id}"
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
to_port = 0
type = "egress"
}
resource "aws_security_group_rule" "workers_ingress_self" {
description = "Allow node to communicate with each other."
protocol = "-1"
security_group_id = "${aws_security_group.workers.id}"
source_security_group_id = "${aws_security_group.workers.id}"
from_port = 0
to_port = 65535
type = "ingress"
}
resource "aws_security_group_rule" "workers_ingress_cluster" {
description = "Allow workers Kubelets and pods to receive communication from the cluster control plane."
protocol = "tcp"
security_group_id = "${aws_security_group.workers.id}"
source_security_group_id = "${aws_security_group.cluster.id}"
from_port = 1025
to_port = 65535
type = "ingress"
}
resource "aws_iam_role" "workers" {
name_prefix = "${var.cluster_name}"
assume_role_policy = "${data.aws_iam_policy_document.workers_assume_role_policy.json}"
}
resource "aws_iam_instance_profile" "workers" {
name_prefix = "${var.cluster_name}"
role = "${aws_iam_role.workers.name}"
}
resource "aws_iam_role_policy_attachment" "workers_AmazonEKSWorkerNodePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = "${aws_iam_role.workers.name}"
}
resource "aws_iam_role_policy_attachment" "workers_AmazonEKS_CNI_Policy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
role = "${aws_iam_role.workers.name}"
}
resource "aws_iam_role_policy_attachment" "workers_AmazonEC2ContainerRegistryReadOnly" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
role = "${aws_iam_role.workers.name}"
}