Apply terraform fmt that was missed in the previous commit

This commit is contained in:
brandoconnor
2018-06-06 22:32:15 -07:00
parent 5082ba17d6
commit 6a137f751e
3 changed files with 272 additions and 272 deletions

View File

@@ -1,67 +1,67 @@
# EKS control plane. The control-plane ENIs are placed in the given subnets
# and guarded by the dedicated cluster security group.
resource "aws_eks_cluster" "this" {
  name     = "${var.cluster_name}"
  role_arn = "${aws_iam_role.cluster.arn}"
  version  = "${var.cluster_version}"

  vpc_config {
    security_group_ids = ["${aws_security_group.cluster.id}"]
    subnet_ids         = ["${var.subnets}"]
  }

  # The service role must have both managed policies attached before the
  # cluster is created, otherwise creation fails with an authorization error.
  depends_on = [
    "aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy",
    "aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy",
  ]
}
# Security group attached to the EKS control plane ENIs.
resource "aws_security_group" "cluster" {
  name_prefix = "${var.cluster_name}"
  description = "Cluster communication with workers nodes"
  vpc_id      = "${var.vpc_id}"
  tags        = "${merge(var.tags, map("Name", "${var.cluster_name}-eks_cluster_sg"))}"
}
# Unrestricted egress for the control-plane security group.
resource "aws_security_group_rule" "cluster_egress_internet" {
  description       = "Allow cluster egress to the Internet."
  type              = "egress"
  protocol          = "-1"
  from_port         = 0
  to_port           = 0
  cidr_blocks       = ["0.0.0.0/0"]
  security_group_id = "${aws_security_group.cluster.id}"
}
# Let worker-node pods reach the Kubernetes API server over HTTPS.
resource "aws_security_group_rule" "cluster_https_worker_ingress" {
  description              = "Allow pods to communicate with the cluster API Server."
  type                     = "ingress"
  protocol                 = "tcp"
  from_port                = 443
  to_port                  = 443
  security_group_id        = "${aws_security_group.cluster.id}"
  source_security_group_id = "${aws_security_group.workers.id}"
}
# Let the operator-supplied CIDR ranges reach the Kubernetes API server.
resource "aws_security_group_rule" "cluster_https_cidr_ingress" {
  description       = "Allow communication with the cluster API Server."
  type              = "ingress"
  protocol          = "tcp"
  from_port         = 443
  to_port           = 443
  cidr_blocks       = ["${var.cluster_ingress_cidrs}"]
  security_group_id = "${aws_security_group.cluster.id}"
}
# Service role assumed by the EKS control plane (eks.amazonaws.com).
resource "aws_iam_role" "cluster" {
  name_prefix        = "${var.cluster_name}"
  assume_role_policy = "${data.aws_iam_policy_document.cluster_assume_role_policy.json}"
}
# AWS-managed policy required by the EKS control plane.
resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
  role       = "${aws_iam_role.cluster.name}"
}
# AWS-managed policy required by the EKS service for cluster management.
resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSServicePolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
  role       = "${aws_iam_role.cluster.name}"
}

224
data.tf
View File

@@ -1,112 +1,112 @@
# Region of the configured AWS provider; used when templating worker userdata.
data "aws_region" "current" {}
# Trust policy allowing EC2 instances (the worker nodes) to assume the
# workers IAM role.
data "aws_iam_policy_document" "workers_assume_role_policy" {
  statement {
    sid     = "EKSWorkerAssumeRole"
    actions = ["sts:AssumeRole"]

    principals {
      type        = "Service"
      identifiers = ["ec2.amazonaws.com"]
    }
  }
}
# Trust policy allowing the EKS service to assume the cluster IAM role.
data "aws_iam_policy_document" "cluster_assume_role_policy" {
  statement {
    sid     = "EKSClusterAssumeRole"
    actions = ["sts:AssumeRole"]

    principals {
      type        = "Service"
      identifiers = ["eks.amazonaws.com"]
    }
  }
}
# Terraform 0.11 workaround: turn the var.tags map into the list-of-maps
# shape that aws_autoscaling_group tags require (one null_resource per key,
# its triggers map carrying key/value/propagate_at_launch).
resource "null_resource" "tags_as_list_of_maps" {
  count = "${length(keys(var.tags))}"

  triggers = "${map(
    "key", "${element(keys(var.tags), count.index)}",
    "value", "${element(values(var.tags), count.index)}",
    "propagate_at_launch", "true"
  )}"
}
locals {
  # ASG-shaped tag list built by null_resource.tags_as_list_of_maps.
  asg_tags = ["${null_resource.tags_as_list_of_maps.*.triggers}"]

  # Worker bootstrap script, adapted from the AWS EKS nodegroup template:
  # https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml
  workers_userdata = <<USERDATA
#!/bin/bash -xe
CA_CERTIFICATE_DIRECTORY=/etc/kubernetes/pki
CA_CERTIFICATE_FILE_PATH=$CA_CERTIFICATE_DIRECTORY/ca.crt
mkdir -p $CA_CERTIFICATE_DIRECTORY
echo "${aws_eks_cluster.this.certificate_authority.0.data}" | base64 -d > $CA_CERTIFICATE_FILE_PATH
INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
sed -i s,MASTER_ENDPOINT,${aws_eks_cluster.this.endpoint},g /var/lib/kubelet/kubeconfig
sed -i s,CLUSTER_NAME,${var.cluster_name},g /var/lib/kubelet/kubeconfig
sed -i s,REGION,${data.aws_region.current.name},g /etc/systemd/system/kubelet.service
sed -i s,MAX_PODS,20,g /etc/systemd/system/kubelet.service
sed -i s,MASTER_ENDPOINT,${aws_eks_cluster.this.endpoint},g /etc/systemd/system/kubelet.service
sed -i s,INTERNAL_IP,$INTERNAL_IP,g /etc/systemd/system/kubelet.service
DNS_CLUSTER_IP=10.100.0.10
if [[ $INTERNAL_IP == 10.* ]] ; then DNS_CLUSTER_IP=172.20.0.10; fi
sed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service
sed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g /var/lib/kubelet/kubeconfig
sed -i s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g /etc/systemd/system/kubelet.service
systemctl daemon-reload
systemctl restart kubelet kube-proxy
USERDATA

  # aws-auth ConfigMap that grants worker nodes access to join the cluster.
  # NOTE(review): heredoc indentation reconstructed from the standard AWS
  # template -- confirm against the original file before applying.
  config_map_aws_auth = <<CONFIGMAPAWSAUTH
apiVersion: v1
kind: ConfigMap
metadata:
  name: aws-auth
  namespace: kube-system
data:
  mapRoles: |
    - rolearn: ${aws_iam_role.workers.arn}
      username: system:node:{{EC2PrivateDNSName}}
      groups:
        - system:bootstrappers
        - system:nodes
CONFIGMAPAWSAUTH

  # kubectl config for reaching the cluster via heptio-authenticator-aws.
  kubeconfig = <<KUBECONFIG
apiVersion: v1
clusters:
- cluster:
    server: ${aws_eks_cluster.this.endpoint}
    certificate-authority-data: ${aws_eks_cluster.this.certificate_authority.0.data}
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: aws
  name: aws
current-context: aws
kind: Config
preferences: {}
users:
- name: aws
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1alpha1
      command: heptio-authenticator-aws
      args:
        - "token"
        - "-i"
        - "${var.cluster_name}"
KUBECONFIG
}

View File

@@ -1,93 +1,93 @@
# Auto Scaling group of worker nodes, tagged so Kubernetes recognizes them
# as owned by this cluster.
resource "aws_autoscaling_group" "workers" {
  name_prefix          = "${var.cluster_name}"
  launch_configuration = "${aws_launch_configuration.workers.id}"
  desired_capacity     = "${var.workers_asg_desired_capacity}"
  max_size             = "${var.workers_asg_max_size}"
  min_size             = "${var.workers_asg_min_size}"
  vpc_zone_identifier  = ["${var.subnets}"]

  # Built-in tags plus the user tags flattened by tags_as_list_of_maps.
  tags = ["${concat(
    list(
      map("key", "Name", "value", "${var.cluster_name}-eks_asg", "propagate_at_launch", true),
      map("key", "kubernetes.io/cluster/${var.cluster_name}", "value", "owned", "propagate_at_launch", true),
    ),
    local.asg_tags)
  }"]
}
# Launch configuration for worker nodes; userdata bootstraps kubelet.
resource "aws_launch_configuration" "workers" {
  name_prefix                 = "${var.cluster_name}"
  associate_public_ip_address = true
  iam_instance_profile        = "${aws_iam_instance_profile.workers.name}"
  image_id                    = "${var.workers_ami_id}"
  instance_type               = "${var.workers_instance_type}"
  security_groups             = ["${aws_security_group.workers.id}"]
  user_data_base64            = "${base64encode(local.workers_userdata)}"

  # Launch configurations are immutable; replace before destroy so the ASG
  # is never left without one.
  lifecycle {
    create_before_destroy = true
  }
}
# Security group shared by all worker nodes; the kubernetes.io/cluster tag
# lets the in-cluster AWS cloud provider discover it.
resource "aws_security_group" "workers" {
  name_prefix = "${var.cluster_name}"
  description = "Security group for all nodes in the cluster."
  vpc_id      = "${var.vpc_id}"

  tags = "${merge(var.tags, map("Name", "${var.cluster_name}-eks_worker_sg", "kubernetes.io/cluster/${var.cluster_name}", "owned"
  ))}"
}
# Unrestricted egress for worker nodes (image pulls, AWS APIs, etc.).
resource "aws_security_group_rule" "workers_egress_internet" {
  description       = "Allow nodes all egress to the Internet."
  type              = "egress"
  protocol          = "-1"
  from_port         = 0
  to_port           = 0
  cidr_blocks       = ["0.0.0.0/0"]
  security_group_id = "${aws_security_group.workers.id}"
}
# Node-to-node traffic (pod networking, kubelet, etc.) on all TCP/UDP ports.
resource "aws_security_group_rule" "workers_ingress_self" {
  description              = "Allow node to communicate with each other."
  type                     = "ingress"
  protocol                 = "-1"
  from_port                = 0
  to_port                  = 65535
  security_group_id        = "${aws_security_group.workers.id}"
  source_security_group_id = "${aws_security_group.workers.id}"
}
# Control plane -> kubelet/pod traffic on the ephemeral port range.
resource "aws_security_group_rule" "workers_ingress_cluster" {
  description              = "Allow workers Kubelets and pods to receive communication from the cluster control plane."
  type                     = "ingress"
  protocol                 = "tcp"
  from_port                = 1025
  to_port                  = 65535
  security_group_id        = "${aws_security_group.workers.id}"
  source_security_group_id = "${aws_security_group.cluster.id}"
}
# IAM role assumed by worker-node EC2 instances.
resource "aws_iam_role" "workers" {
  name_prefix        = "${var.cluster_name}"
  assume_role_policy = "${data.aws_iam_policy_document.workers_assume_role_policy.json}"
}
# Instance profile exposing the workers role to EC2 instances.
resource "aws_iam_instance_profile" "workers" {
  name_prefix = "${var.cluster_name}"
  role        = "${aws_iam_role.workers.name}"
}
# AWS-managed policy letting nodes connect to the EKS control plane.
resource "aws_iam_role_policy_attachment" "workers_AmazonEKSWorkerNodePolicy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
  role       = "${aws_iam_role.workers.name}"
}
# AWS-managed policy required by the VPC CNI plugin to manage ENIs/IPs.
resource "aws_iam_role_policy_attachment" "workers_AmazonEKS_CNI_Policy" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
  role       = "${aws_iam_role.workers.name}"
}
# AWS-managed policy allowing nodes to pull images from ECR.
resource "aws_iam_role_policy_attachment" "workers_AmazonEC2ContainerRegistryReadOnly" {
  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
  role       = "${aws_iam_role.workers.name}"
}