From 309e7f70832270f7a40bcaf751380242515c2989 Mon Sep 17 00:00:00 2001 From: brandoconnor Date: Wed, 6 Jun 2018 20:55:44 -0700 Subject: [PATCH] testing initial work now --- README.md | 34 +++--- cluster.tf | 106 +++++++----------- data.tf | 62 ++++++++-- examples/eks_test_fixture/README.md | 68 +---------- examples/eks_test_fixture/data.tf | 21 ---- examples/eks_test_fixture/locals.tf | 7 -- examples/eks_test_fixture/main.tf | 55 ++++++--- examples/eks_test_fixture/outputs.tf | 5 + main.tf | 21 ++-- outputs.tf | 16 +-- variables.tf | 43 +++++-- worker_nodes.tf | 162 --------------------------- workers.tf | 85 ++++++++++++++ 13 files changed, 300 insertions(+), 385 deletions(-) delete mode 100644 examples/eks_test_fixture/data.tf delete mode 100644 examples/eks_test_fixture/locals.tf delete mode 100644 worker_nodes.tf create mode 100644 workers.tf diff --git a/README.md b/README.md index caf76e2..ecd047d 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,8 @@ A terraform module to create a managed Kubernetes cluster on AWS EKS. Available through the [Terraform registry](https://registry.terraform.io/modules/terraform-aws-modules/eks/aws). +Inspired by and adapted from [this doc](https://www.terraform.io/docs/providers/aws/guides/eks-getting-started.html) +and its [source code](https://github.com/terraform-providers/terraform-provider-aws/tree/master/examples/eks-getting-started). | Branch | Build status | | ------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------- | @@ -13,8 +15,7 @@ through the [Terraform registry](https://registry.terraform.io/modules/terraform * You've created a Virtual Private Cloud (VPC) and subnets where you intend to put this EKS. It's recommended you use this module with [terraform-aws-vpc](https://registry.terraform.io/modules/terraform-aws-modules/vpc/aws), -[terraform-aws-security-group](https://registry.terraform.io/modules/terraform-aws-modules/security-group/aws), and -[terraform-aws-autoscaling](https://registry.terraform.io/modules/terraform-aws-modules/autoscaling/aws/). +and [terraform-aws-security-group](https://registry.terraform.io/modules/terraform-aws-modules/security-group/aws). ## Usage example @@ -22,13 +23,14 @@ A full example leveraging other community modules is contained in the [examples/ ```hcl module "eks" { - source = "terraform-aws-modules/eks/aws" - version = "0.1.0" - cluster_name = "test-eks-cluster" - security_groups = ["sg-edcd9784", "sg-edcd9785"] - subnets = ["subnet-abcde012", "subnet-bcde012a"] - tags = "${map("Environment", "test")}" - vpc_id = "vpc-abcde012" + source = "terraform-aws-modules/eks/aws" + version = "0.1.0" + cluster_name = "test-eks-cluster" + subnets = ["subnet-abcde012", "subnet-bcde012a"] + tags = "${map("Environment", "test")}" + vpc_id = "vpc-abcde012" + workers_ami_id = "ami-123456" + cluster_ingress_cidrs = ["24.18.23.91/32"] } ``` @@ -84,22 +86,26 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a | Name | Description | Type | Default | Required | |------|-------------|:----:|:-----:|:-----:| +| cluster_ingress_cidrs | The CIDRs from which we can execute kubectl commands. | list | - | yes | | cluster_name | Name of the EKS cluster. | string | - | yes | -| security_groups | The security groups to attach to the EKS cluster instances | list | - | yes | +| cluster_version | Kubernetes version to use for the cluster. 
| string | `1.10` | no |
| subnets | A list of subnets to associate with the cluster's underlying instances. | list | - | yes |
| tags | A map of tags to add to all resources | string | `` | no |
| vpc_id | VPC id where the cluster and other resources will be deployed. | string | - | yes |
+| workers_ami_id | AMI ID for the EKS workers. | string | - | yes |
+| workers_asg_desired_capacity | Desired number of worker instances in the autoscaling group. | string | `1` | no |
+| workers_asg_max_size | Maximum number of worker instances in the autoscaling group. | string | `3` | no |
+| workers_asg_min_size | Minimum number of worker instances in the autoscaling group. | string | `1` | no |
+| workers_instance_type | Instance type for the EKS workers. | string | `m4.large` | no |

## Outputs

| Name | Description |
|------|-------------|
-| cluster_arn | The Amazon Resource Name (ARN) of the cluster. |
-| cluster_certificate_authority | Nested attribute containing certificate-authority-data for your cluster |
-| cluster_data | The base64 encoded certificate data required to communicate with your cluster. Add this to the certificate-authority-data section of the kubeconfig file for your cluster. |
+| cluster_certificate_authority_data | Nested attribute containing certificate-authority-data for your cluster. This is the base64 encoded certificate data required to communicate with your cluster. |
| cluster_endpoint | The endpoint for your Kubernetes API server. |
| cluster_id | The name of the cluster. |
+| cluster_security_group_ids | Security group IDs attached to the EKS cluster. |
| cluster_version | The Kubernetes server version for the cluster. |
-| cluster_vpc_config | description |
| config_map_aws_auth | description |
| kubeconfig | description |
diff --git a/cluster.tf b/cluster.tf
index 2dc7572..4ad3f44 100644
--- a/cluster.tf
+++ b/cluster.tf
@@ -1,87 +1,67 @@
-#
-# EKS Cluster Resources
-#  * IAM Role to allow EKS service to manage other AWS services
-#  * EC2 Security Group to allow networking traffic with EKS cluster
-#  * EKS Cluster
-#
-
-resource "aws_eks_cluster" "demo" {
+resource "aws_eks_cluster" "this" {
   name     = "${var.cluster_name}"
-  role_arn = "${aws_iam_role.demo-cluster.arn}"
+  role_arn = "${aws_iam_role.cluster.arn}"
+  version  = "${var.cluster_version}"

   vpc_config {
-    security_group_ids = ["${aws_security_group.demo-cluster.id}"]
+    security_group_ids = ["${aws_security_group.cluster.id}"]
     subnet_ids         = ["${var.subnets}"]
   }

   depends_on = [
-    "aws_iam_role_policy_attachment.demo-cluster-AmazonEKSClusterPolicy",
-    "aws_iam_role_policy_attachment.demo-cluster-AmazonEKSServicePolicy",
+    "aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy",
+    "aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy",
   ]
 }

-resource "aws_iam_role" "demo-cluster" {
-  name = "terraform-eks-demo-cluster"
-
-  assume_role_policy = < $CA_CERTIFICATE_FILE_PATH
+INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4)
+sed -i s,MASTER_ENDPOINT,${aws_eks_cluster.this.endpoint},g /var/lib/kubelet/kubeconfig
+sed -i s,CLUSTER_NAME,${var.cluster_name},g /var/lib/kubelet/kubeconfig
+sed -i s,REGION,${data.aws_region.current.name},g /etc/systemd/system/kubelet.service
+sed -i s,MAX_PODS,20,g /etc/systemd/system/kubelet.service
+sed -i s,MASTER_ENDPOINT,${aws_eks_cluster.this.endpoint},g /etc/systemd/system/kubelet.service
+sed -i s,INTERNAL_IP,$INTERNAL_IP,g /etc/systemd/system/kubelet.service
+DNS_CLUSTER_IP=10.100.0.10
+if [[ $INTERNAL_IP == 10.* ]] ; then DNS_CLUSTER_IP=172.20.0.10; fi
+sed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service
+sed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g /var/lib/kubelet/kubeconfig
+sed -i 
s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g /etc/systemd/system/kubelet.service +systemctl daemon-reload +systemctl restart kubelet kube-proxy +USERDATA config_map_aws_auth = < $CA_CERTIFICATE_FILE_PATH -INTERNAL_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) -sed -i s,MASTER_ENDPOINT,${aws_eks_cluster.demo.endpoint},g /var/lib/kubelet/kubeconfig -sed -i s,CLUSTER_NAME,${var.cluster_name},g /var/lib/kubelet/kubeconfig -sed -i s,REGION,${data.aws_region.current.name},g /etc/systemd/system/kubelet.service -sed -i s,MAX_PODS,20,g /etc/systemd/system/kubelet.service -sed -i s,MASTER_ENDPOINT,${aws_eks_cluster.demo.endpoint},g /etc/systemd/system/kubelet.service -sed -i s,INTERNAL_IP,$INTERNAL_IP,g /etc/systemd/system/kubelet.service -DNS_CLUSTER_IP=10.100.0.10 -if [[ $INTERNAL_IP == 10.* ]] ; then DNS_CLUSTER_IP=172.20.0.10; fi -sed -i s,DNS_CLUSTER_IP,$DNS_CLUSTER_IP,g /etc/systemd/system/kubelet.service -sed -i s,CERTIFICATE_AUTHORITY_FILE,$CA_CERTIFICATE_FILE_PATH,g /var/lib/kubelet/kubeconfig -sed -i s,CLIENT_CA_FILE,$CA_CERTIFICATE_FILE_PATH,g /etc/systemd/system/kubelet.service -systemctl daemon-reload -systemctl restart kubelet kube-proxy -USERDATA -} - -resource "aws_launch_configuration" "demo" { - associate_public_ip_address = true - iam_instance_profile = "${aws_iam_instance_profile.demo-node.name}" - image_id = "${data.aws_ami.eks-worker.id}" - instance_type = "m4.large" - name_prefix = "terraform-eks-demo" - security_groups = ["${aws_security_group.demo-node.id}"] - user_data_base64 = "${base64encode(local.demo-node-userdata)}" - - lifecycle { - create_before_destroy = true - } -} - -resource "aws_autoscaling_group" "demo" { - desired_capacity = 2 - launch_configuration = "${aws_launch_configuration.demo.id}" - max_size = 2 - min_size = 1 - name = "terraform-eks-demo" - vpc_zone_identifier = ["${var.subnets}"] - - tag { - key = "Name" - value = "terraform-eks-demo" - propagate_at_launch = true - } - - tag { - key = "kubernetes.io/cluster/${var.cluster_name}" - value = "owned" - propagate_at_launch = true - } -} diff --git a/workers.tf b/workers.tf new file mode 100644 index 0000000..1f19221 --- /dev/null +++ b/workers.tf @@ -0,0 +1,85 @@ +resource "aws_autoscaling_group" "workers" { + name_prefix = "${var.cluster_name}" + launch_configuration = "${aws_launch_configuration.workers.id}" + desired_capacity = "${var.workers_asg_desired_capacity}" + max_size = "${var.workers_asg_max_size}" + min_size = "${var.workers_asg_min_size}" + vpc_zone_identifier = ["${var.subnets}"] + tags = ["${merge(var.tags, map("Name", "${var.cluster_name}-eks_asg", "propagate_at_launch", "true"))}"] +} + +resource "aws_launch_configuration" "workers" { + associate_public_ip_address = true + name_prefix = "${var.cluster_name}" + iam_instance_profile = "${aws_iam_instance_profile.workers.name}" + image_id = "${var.workers_ami_id}" + instance_type = "${var.workers_instance_type}" + security_groups = ["${aws_security_group.workers.id}"] + user_data_base64 = "${base64encode(local.workers_userdata)}" + + lifecycle { + create_before_destroy = true + } +} + +resource "aws_security_group" "workers" { + name_prefix = "${var.cluster_name}" + description = "Security group for all nodes in the cluster." + vpc_id = "${var.vpc_id}" + tags = "${merge(var.tags, map("Name", "${var.cluster_name}-eks_worker_sg"))}" +} + +resource "aws_security_group_rule" "workers_egress_internet" { + description = "Allow nodes all egress to the Internet." 
+  protocol                 = "-1"
+  security_group_id        = "${aws_security_group.workers.id}"
+  cidr_blocks              = ["0.0.0.0/0"]
+  from_port                = 0
+  to_port                  = 0
+  type                     = "egress"
+}
+
+resource "aws_security_group_rule" "workers_ingress_self" {
+  description              = "Allow nodes to communicate with each other."
+  protocol                 = "-1"
+  security_group_id        = "${aws_security_group.workers.id}"
+  source_security_group_id = "${aws_security_group.workers.id}"
+  from_port                = 0
+  to_port                  = 65535
+  type                     = "ingress"
+}
+
+resource "aws_security_group_rule" "workers_ingress_cluster" {
+  description              = "Allow worker Kubelets and pods to receive communication from the cluster control plane."
+  protocol                 = "tcp"
+  security_group_id        = "${aws_security_group.workers.id}"
+  source_security_group_id = "${aws_security_group.cluster.id}"
+  from_port                = 1025
+  to_port                  = 65535
+  type                     = "ingress"
+}
+
+resource "aws_iam_role" "workers" {
+  name_prefix        = "${var.cluster_name}"
+  assume_role_policy = "${data.aws_iam_policy_document.workers_assume_role_policy.json}"
+}
+
+resource "aws_iam_instance_profile" "workers" {
+  name_prefix = "${var.cluster_name}"
+  role        = "${aws_iam_role.workers.name}"
+}
+
+resource "aws_iam_role_policy_attachment" "workers_AmazonEKSWorkerNodePolicy" {
+  policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
+  role       = "${aws_iam_role.workers.name}"
+}
+
+resource "aws_iam_role_policy_attachment" "workers_AmazonEKS_CNI_Policy" {
+  policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
+  role       = "${aws_iam_role.workers.name}"
+}
+
+resource "aws_iam_role_policy_attachment" "workers_AmazonEC2ContainerRegistryReadOnly" {
+  policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
+  role       = "${aws_iam_role.workers.name}"
+}
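For reference: `aws_iam_role.workers` in workers.tf consumes `data.aws_iam_policy_document.workers_assume_role_policy`, which is defined in data.tf and not visible in the hunks shown in this patch. Below is a minimal sketch of the trust policy that reference needs to resolve to so the worker EC2 instances can assume the role; only the data source name is taken from workers.tf, and the exact contents in data.tf are an assumption.

```hcl
# Hypothetical sketch (not part of this diff): trust policy that lets EC2
# instances, i.e. the EKS worker nodes, assume the worker IAM role.
data "aws_iam_policy_document" "workers_assume_role_policy" {
  statement {
    sid     = "EKSWorkerAssumeRole"
    actions = ["sts:AssumeRole"]

    principals {
      type        = "Service"
      identifiers = ["ec2.amazonaws.com"]
    }
  }
}
```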
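The README section of this patch also documents a `config_map_aws_auth` output, the aws-auth ConfigMap that worker nodes need before they can register with the cluster. A hedged, consumer-side sketch of one way to apply that output follows; the `local_file`/`null_resource` wiring, the file names, and the assumption that the module's `kubeconfig` output has already been written to a local `./kubeconfig` file are illustrative, not part of this PR.

```hcl
# Illustrative consumer-side wiring (not part of this diff): render the
# module's aws-auth ConfigMap to disk and apply it with kubectl so the
# worker nodes can join the cluster.
resource "local_file" "config_map_aws_auth" {
  content  = "${module.eks.config_map_aws_auth}"
  filename = "config-map-aws-auth.yaml"
}

resource "null_resource" "apply_config_map_aws_auth" {
  provisioner "local-exec" {
    # Assumes the module's kubeconfig output was saved to ./kubeconfig.
    command = "kubectl apply -f ${local_file.config_map_aws_auth.filename} --kubeconfig ./kubeconfig"
  }

  depends_on = ["local_file.config_map_aws_auth"]
}
```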