terraform-aws-eks/cluster.tf
Shaun Cutts · d79c8ab6f2 · 2020-01-07 12:28:56 +01:00
Wait cluster responsive (#639)

* wait for cluster to respond before creating auth config map
* adds changelog entry
* fixup tf format
* fixup kubernetes required version
* fixup missing local for kubeconfig_filename
* combine wait for cluster into provisioner on cluster; change status check to /healthz on endpoint
* fix: make kubernetes provider version more permissive


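# CloudWatch log group for EKS control-plane logs; created only when at least
# one control-plane log type is enabled and the module is creating the cluster.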
resource "aws_cloudwatch_log_group" "this" {
count = length(var.cluster_enabled_log_types) > 0 && var.create_eks ? 1 : 0
name = "/aws/eks/${var.cluster_name}/cluster"
retention_in_days = var.cluster_log_retention_in_days
kms_key_id = var.cluster_log_kms_key_id
tags = var.tags
}
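
# The EKS control plane. Logging, networking, and timeouts are driven by module
# variables; depends_on ensures the IAM policies and log group exist before the
# cluster is created.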
resource "aws_eks_cluster" "this" {
count = var.create_eks ? 1 : 0
name = var.cluster_name
enabled_cluster_log_types = var.cluster_enabled_log_types
role_arn = local.cluster_iam_role_arn
version = var.cluster_version
tags = var.tags
vpc_config {
security_group_ids = [local.cluster_security_group_id]
subnet_ids = var.subnets
endpoint_private_access = var.cluster_endpoint_private_access
endpoint_public_access = var.cluster_endpoint_public_access
}
timeouts {
create = var.cluster_create_timeout
delete = var.cluster_delete_timeout
}
depends_on = [
aws_iam_role_policy_attachment.cluster_AmazonEKSClusterPolicy,
aws_iam_role_policy_attachment.cluster_AmazonEKSServicePolicy,
aws_cloudwatch_log_group.this
]
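
  # The wait added by #639: poll /healthz on the cluster endpoint until the API
  # server responds, so anything that talks to the cluster afterwards (e.g. the
  # aws-auth config map) only runs against a reachable endpoint. -k skips TLS
  # verification, since the cluster CA is typically not in the local trust store.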
provisioner "local-exec" {
command = <<EOT
until curl -k ${aws_eks_cluster.this[0].endpoint}/healthz >/dev/null; do sleep 4; done
EOT
}
}
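
# Security group for the control plane, created only when the caller has not
# supplied an existing cluster security group ID.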
resource "aws_security_group" "cluster" {
count = var.cluster_security_group_id == "" && var.create_eks ? 1 : 0
name_prefix = var.cluster_name
description = "EKS cluster security group."
vpc_id = var.vpc_id
tags = merge(
var.tags,
{
"Name" = "${var.cluster_name}-eks_cluster_sg"
},
)
}
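
# Allow all outbound traffic from the module-managed cluster security group.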
resource "aws_security_group_rule" "cluster_egress_internet" {
count = var.cluster_security_group_id == "" && var.create_eks ? 1 : 0
description = "Allow cluster egress access to the Internet."
protocol = "-1"
security_group_id = local.cluster_security_group_id
cidr_blocks = ["0.0.0.0/0"]
from_port = 0
to_port = 0
type = "egress"
}
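
# Allow worker nodes to reach the cluster API server over HTTPS.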
resource "aws_security_group_rule" "cluster_https_worker_ingress" {
count = var.create_eks ? 1 : 0
description = "Allow pods to communicate with the EKS cluster API."
protocol = "tcp"
security_group_id = local.cluster_security_group_id
source_security_group_id = local.worker_security_group_id
from_port = 443
to_port = 443
type = "ingress"
}
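
# IAM role assumed by the EKS control plane; created only when the module is
# asked to manage cluster IAM resources (otherwise the caller supplies a role
# via local.cluster_iam_role_arn).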
resource "aws_iam_role" "cluster" {
count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
name_prefix = var.cluster_name
assume_role_policy = data.aws_iam_policy_document.cluster_assume_role_policy.json
permissions_boundary = var.permissions_boundary
path = var.iam_path
force_detach_policies = true
tags = var.tags
}
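
# Attach the AWS-managed policies required by the EKS control plane.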
resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSClusterPolicy" {
count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
role = local.cluster_iam_role_name
}
resource "aws_iam_role_policy_attachment" "cluster_AmazonEKSServicePolicy" {
count = var.manage_cluster_iam_resources && var.create_eks ? 1 : 0
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
role = local.cluster_iam_role_name
}