cleaning up before initial release

This commit is contained in:
brandoconnor
2018-06-06 21:58:12 -07:00
parent 309e7f7083
commit 283e9b203c
9 changed files with 109 additions and 26 deletions

View File

@@ -1,9 +1,17 @@
# See http://pre-commit.com for more information
# See http://pre-commit.com/hooks.html for more hooks
repos:
- repo: https://github.com/pre-commit/pre-commit-hooks
sha: v0.9.2
hooks:
- repo: git://github.com/antonbabenko/pre-commit-terraform
rev: v1.7.1
sha: 091f8b15d7b458e5a0aca642483deb2205e7db02
hooks:
- id: terraform_fmt
# - id: terraform_docs
- repo: git://github.com/pre-commit/pre-commit-hooks
rev: v1.2.3
sha: 92e1570c282e3c69a1f8b5b8dd8d286fe27cfaa7
hooks:
- id: check-merge-conflict
- id: trailing-whitespace
# - id: end-of-file-fixer
- id: check-yaml

48
.travis.yml Normal file
View File

@@ -0,0 +1,48 @@
# Travis CI pipeline: lints/validates the Terraform module, runs a Test Kitchen
# integration test, and deploys from master via ci/deploy.sh.
language: ruby
sudo: required
dist: trusty
# Docker is needed for the tflint container used in the script phase.
services:
- docker
rvm:
- 2.4.2
before_install:
- echo "before_install"
install:
- echo "install"
# bundler provides `bundle exec kitchen` used below.
- gem install bundler --no-rdoc --no-ri
- bundle install
before_script:
- echo 'before_script'
- export AWS_REGION='us-west-2'
- export TF_VAR_region=${AWS_REGION}
- echo "using AWS_REGION=${AWS_REGION}"
- export TF_WARN_OUTPUT_ERRORS=1
# Pin Terraform 0.11.7 and verify the download against a known sha256
# before putting the binary on PATH.
- curl --silent --output terraform.zip https://releases.hashicorp.com/terraform/0.11.7/terraform_0.11.7_linux_amd64.zip
- sha256sum terraform.zip | grep "6b8ce67647a59b2a3f70199c304abca0ddec0e49fd060944c26f666298e23418"
- unzip terraform.zip ; rm -f terraform.zip; chmod +x terraform
- mkdir -p ${HOME}/bin ; export PATH=${PATH}:${HOME}/bin; mv terraform ${HOME}/bin/
- terraform -v
script:
- echo 'script'
# Static checks on the module root: fmt (check-only), validate with
# placeholder variable values, and tflint via its Docker image.
- terraform init
- terraform fmt -check=true
- terraform validate -var "region=${AWS_REGION}" -var "subnets=[]" -var "vpc_id=vpc-abcde012" -var "load_balancer_name=my-lb" -var "log_bucket_name=my-log-bucket" -var "security_groups=[]"
- docker run --rm -v $(pwd):/app/ --workdir=/app/ -t wata727/tflint --error-with-issues
# Repeat fmt/validate against the example fixture, then run the
# integration test (instances are destroyed even on failure).
- cd examples/eks_test_fixture
- terraform init
- terraform fmt -check=true
- terraform validate
- cd -
- terraform -v
- bundle exec kitchen test --destroy always
# Deploy step runs only on the master branch.
deploy:
  provider: script
  script: ci/deploy.sh
  on:
    branch: master
notifications:
  email:
    recipients:
    - brandon@atscale.run
    on_success: change
    on_failure: change

View File

@@ -14,9 +14,6 @@ and its [source code](https://github.com/terraform-providers/terraform-provider-
* You want to create a set of resources around an EKS cluster: namely an autoscaling group of workers and a security group for them.
* You've created a Virtual Private Cloud (VPC) and subnets where you intend to put this EKS.
It's recommended you use this module with [terraform-aws-vpc](https://registry.terraform.io/modules/terraform-aws-modules/vpc/aws),
and [terraform-aws-security-group](https://registry.terraform.io/modules/terraform-aws-modules/security-group/aws).
## Usage example
A full example leveraging other community modules is contained in the [examples/eks_test_fixture directory](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_test_fixture). Here's the gist of using it via the Terraform registry:
@@ -104,8 +101,8 @@ MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-a
|------|-------------|
| cluster_certificate_authority_data | Nested attribute containing certificate-authority-data for your cluster. This is the base64 encoded certificate data required to communicate with your cluster. |
| cluster_endpoint | The endpoint for your Kubernetes API server. |
| cluster_id | The name of the cluster. |
| cluster_id | The name/id of the cluster. |
| cluster_security_group_ids | description |
| cluster_version | The Kubernetes server version for the cluster. |
| config_map_aws_auth | description |
| kubeconfig | description |
| config_map_aws_auth | |
| kubeconfig | kubectl config file contents for this cluster. |

View File

@@ -14,11 +14,6 @@ resource "aws_eks_cluster" "this" {
]
}
resource "aws_iam_role" "cluster" {
name_prefix = "${var.cluster_name}"
assume_role_policy = "${data.aws_iam_policy_document.cluster_assume_role_policy.json}"
}
resource "aws_security_group" "cluster" {
name_prefix = "${var.cluster_name}"
description = "Cluster communication with workers nodes"
@@ -56,6 +51,11 @@ resource "aws_security_group_rule" "cluster_https_cidr_ingress" {
type = "ingress"
}
resource "aws_iam_role" "cluster" {
name_prefix = "${var.cluster_name}"
assume_role_policy = "${data.aws_iam_policy_document.cluster_assume_role_policy.json}"
}
resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = "${aws_iam_role.cluster.name}"

12
data.tf
View File

@@ -30,7 +30,19 @@ data "aws_iam_policy_document" "cluster_assume_role_policy" {
}
}
resource "null_resource" "tags_as_list_of_maps" {
count = "${length(keys(var.tags))}"
triggers = "${map(
"key", "${element(keys(var.tags), count.index)}",
"value", "${element(values(var.tags), count.index)}",
"propagate_at_launch", "true"
)}"
}
locals {
asg_tags = ["${null_resource.tags_as_list_of_maps.*.triggers}"]
# More information: https://amazon-eks.s3-us-west-2.amazonaws.com/1.10.3/2018-06-05/amazon-eks-nodegroup.yaml
workers_userdata = <<USERDATA
#!/bin/bash -xe

View File

@@ -1,9 +1,24 @@
output "cluster_endpoint" {
description = "Endpoint for EKS controlplane."
description = "Endpoint for EKS control plane."
value = "${module.eks.cluster_endpoint}"
}
output "cluster_security_group_ids" {
description = "."
description = "Security group ids attached to the cluster control plane."
value = "${module.eks.cluster_security_group_ids}"
}
output "kubectl_config" {
description = "kubectl config as generated by the module."
value = "${module.eks.kubeconfig}"
}
output "config_map_aws_auth" {
description = ""
value = "${module.eks.config_map_aws_auth}"
}
output "region" {
description = "AWS region."
value = "${var.region}"
}

View File

@@ -15,9 +15,6 @@
** You want to create a set of resources around an EKS cluster: namely an autoscaling group of workers and a security group for them.
** You've created a Virtual Private Cloud (VPC) and subnets where you intend to put this EKS.
* It's recommended you use this module with [terraform-aws-vpc](https://registry.terraform.io/modules/terraform-aws-modules/vpc/aws),
* and [terraform-aws-security-group](https://registry.terraform.io/modules/terraform-aws-modules/security-group/aws).
* ## Usage example
* A full example leveraging other community modules is contained in the [examples/eks_test_fixture directory](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/examples/eks_test_fixture). Here's the gist of using it via the Terraform registry:
@@ -84,4 +81,4 @@
* MIT Licensed. See [LICENSE](https://github.com/terraform-aws-modules/terraform-aws-eks/tree/master/LICENSE) for full details.
*/
provider "http" {}
provider "null" {}

View File

@@ -1,15 +1,15 @@
output "config_map_aws_auth" {
description = "description"
description = ""
value = "${local.config_map_aws_auth}"
}
output "kubeconfig" {
description = "description"
description = "kubectl config file contents for this cluster."
value = "${local.kubeconfig}"
}
output "cluster_id" {
description = "The name of the cluster."
description = "The name/id of the cluster."
value = "${aws_eks_cluster.this.id}"
}

View File

@@ -5,7 +5,13 @@ resource "aws_autoscaling_group" "workers" {
max_size = "${var.workers_asg_max_size}"
min_size = "${var.workers_asg_min_size}"
vpc_zone_identifier = ["${var.subnets}"]
tags = ["${merge(var.tags, map("Name", "${var.cluster_name}-eks_asg", "propagate_at_launch", "true"))}"]
tags = ["${concat(
list(
map("key", "Name", "value", "${var.cluster_name}-eks_asg", "propagate_at_launch", true),
),
local.asg_tags)
}"]
}
resource "aws_launch_configuration" "workers" {
@@ -69,8 +75,8 @@ resource "aws_iam_instance_profile" "workers" {
role = "${aws_iam_role.workers.name}"
}
resource "aws_iam_role_policy_attachment" "workers_AmazonEKSworkersNodePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSworkersNodePolicy"
resource "aws_iam_role_policy_attachment" "workers_AmazonEKSWorkerNodePolicy" {
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
role = "${aws_iam_role.workers.name}"
}