# terraform-aws-eks/main.tf
data "aws_partition" "current" {}
################################################################################
# Cluster
################################################################################
resource "aws_eks_cluster" "this" {
count = var.create ? 1 : 0
name = var.cluster_name
role_arn = try(aws_iam_role.this[0].arn, var.iam_role_arn)
version = var.cluster_version
enabled_cluster_log_types = var.cluster_enabled_log_types
vpc_config {
security_group_ids = distinct(concat(var.cluster_additional_security_group_ids, [local.cluster_security_group_id]))
subnet_ids = var.subnet_ids
endpoint_private_access = var.cluster_endpoint_private_access
endpoint_public_access = var.cluster_endpoint_public_access
public_access_cidrs = var.cluster_endpoint_public_access_cidrs
}
kubernetes_network_config {
ip_family = var.cluster_ip_family
service_ipv4_cidr = var.cluster_service_ipv4_cidr
}
dynamic "encryption_config" {
for_each = toset(var.cluster_encryption_config)
content {
provider {
key_arn = encryption_config.value["provider_key_arn"]
}
resources = encryption_config.value["resources"]
}
}
tags = merge(
var.tags,
var.cluster_tags,
)
timeouts {
create = lookup(var.cluster_timeouts, "create", null)
delete = lookup(var.cluster_timeouts, "update", null)
update = lookup(var.cluster_timeouts, "delete", null)
}
depends_on = [
aws_iam_role_policy_attachment.this,
aws_security_group_rule.cluster,
aws_security_group_rule.node,
aws_cloudwatch_log_group.this
]
}
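# Illustrative sketch of caller-side values consumed above (hypothetical
# example values, assuming the variable shapes implied by the lookup()/try()
# calls in this resource):
#
# cluster_encryption_config = [{
#   provider_key_arn = "<kms-key-arn>"
#   resources        = ["secrets"]
# }]
#
# cluster_timeouts = {
#   create = "30m"
#   update = "60m"
#   delete = "15m"
# }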
resource "aws_cloudwatch_log_group" "this" {
count = var.create && var.create_cloudwatch_log_group ? 1 : 0
name = "/aws/eks/${var.cluster_name}/cluster"
retention_in_days = var.cloudwatch_log_group_retention_in_days
kms_key_id = var.cloudwatch_log_group_kms_key_id
tags = var.tags
}
################################################################################
# Cluster Security Group
# Defaults follow https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html
################################################################################
locals {
  cluster_sg_name   = coalesce(var.cluster_security_group_name, "${var.cluster_name}-cluster")
  create_cluster_sg = var.create && var.create_cluster_security_group

  cluster_security_group_id = local.create_cluster_sg ? aws_security_group.cluster[0].id : var.cluster_security_group_id

  cluster_security_group_rules = {
    ingress_nodes_443 = {
      description                = "Node groups to cluster API"
      protocol                   = "tcp"
      from_port                  = 443
      to_port                    = 443
      type                       = "ingress"
      source_node_security_group = true
    }
    egress_nodes_443 = {
      description                = "Cluster API to node groups"
      protocol                   = "tcp"
      from_port                  = 443
      to_port                    = 443
      type                       = "egress"
      source_node_security_group = true
    }
    egress_nodes_kubelet = {
      description                = "Cluster API to node kubelets"
      protocol                   = "tcp"
      from_port                  = 10250
      to_port                    = 10250
      type                       = "egress"
      source_node_security_group = true
    }
  }
}
resource "aws_security_group" "cluster" {
count = local.create_cluster_sg ? 1 : 0
name = var.cluster_security_group_use_name_prefix ? null : local.cluster_sg_name
name_prefix = var.cluster_security_group_use_name_prefix ? "${local.cluster_sg_name}${var.prefix_separator}" : null
description = var.cluster_security_group_description
vpc_id = var.vpc_id
tags = merge(
var.tags,
{ "Name" = local.cluster_sg_name },
var.cluster_security_group_tags
)
}
resource "aws_security_group_rule" "cluster" {
for_each = { for k, v in merge(local.cluster_security_group_rules, var.cluster_security_group_additional_rules) : k => v if local.create_cluster_sg }
# Required
security_group_id = aws_security_group.cluster[0].id
protocol = each.value.protocol
from_port = each.value.from_port
to_port = each.value.to_port
type = each.value.type
# Optional
description = try(each.value.description, null)
cidr_blocks = try(each.value.cidr_blocks, null)
ipv6_cidr_blocks = try(each.value.ipv6_cidr_blocks, null)
prefix_list_ids = try(each.value.prefix_list_ids, [])
self = try(each.value.self, null)
source_security_group_id = try(
each.value.source_security_group_id,
try(each.value.source_node_security_group, false) ? local.node_security_group_id : null
)
}
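# Illustrative sketch of an entry for var.cluster_security_group_additional_rules
# (hypothetical key and values; the attributes mirror those read via try() above):
#
# cluster_security_group_additional_rules = {
#   ingress_admin_https = {
#     description = "Admin CIDR to cluster API"
#     protocol    = "tcp"
#     from_port   = 443
#     to_port     = 443
#     type        = "ingress"
#     cidr_blocks = ["10.0.0.0/8"]
#   }
# }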
################################################################################
# IRSA
# Note - this is different from EKS identity provider
################################################################################
data "tls_certificate" "this" {
count = var.create && var.enable_irsa ? 1 : 0
url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer
}
resource "aws_iam_openid_connect_provider" "oidc_provider" {
count = var.create && var.enable_irsa ? 1 : 0
client_id_list = distinct(compact(concat(["sts.${data.aws_partition.current.dns_suffix}"], var.openid_connect_audiences)))
thumbprint_list = [data.tls_certificate.this[0].certificates[0].sha1_fingerprint]
url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer
tags = merge(
{ Name = "${var.cluster_name}-eks-irsa" },
var.tags
)
}
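# Illustrative sketch (an assumption, not part of this module): a workload IAM
# role using IRSA would trust this provider through a federated principal, e.g.
#
# data "aws_iam_policy_document" "irsa_assume" {
#   statement {
#     actions = ["sts:AssumeRoleWithWebIdentity"]
#
#     principals {
#       type        = "Federated"
#       identifiers = [aws_iam_openid_connect_provider.oidc_provider[0].arn]
#     }
#   }
# }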
################################################################################
# IAM Role
################################################################################
locals {
  iam_role_name     = coalesce(var.iam_role_name, "${var.cluster_name}-cluster")
  policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"
}
data "aws_iam_policy_document" "assume_role_policy" {
count = var.create && var.create_iam_role ? 1 : 0
statement {
sid = "EKSClusterAssumeRole"
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = ["eks.amazonaws.com"]
}
}
}
resource "aws_iam_role" "this" {
count = var.create && var.create_iam_role ? 1 : 0
name = var.iam_role_use_name_prefix ? null : local.iam_role_name
name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}${var.prefix_separator}" : null
path = var.iam_role_path
description = var.iam_role_description
assume_role_policy = data.aws_iam_policy_document.assume_role_policy[0].json
permissions_boundary = var.iam_role_permissions_boundary
force_detach_policies = true
tags = merge(var.tags, var.iam_role_tags)
}
# Policies attached ref https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group
resource "aws_iam_role_policy_attachment" "this" {
for_each = var.create && var.create_iam_role ? toset(compact(distinct(concat([
"${local.policy_arn_prefix}/AmazonEKSClusterPolicy",
"${local.policy_arn_prefix}/AmazonEKSVPCResourceController",
], var.iam_role_additional_policies)))) : toset([])
policy_arn = each.value
role = aws_iam_role.this[0].name
}
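# Illustrative sketch of var.iam_role_additional_policies (hypothetical example;
# any further managed policy ARNs are appended to the two defaults above):
#
# iam_role_additional_policies = [
#   "arn:aws:iam::aws:policy/CloudWatchAgentServerPolicy",
# ]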
################################################################################
# EKS Addons
################################################################################
resource "aws_eks_addon" "this" {
for_each = { for k, v in var.cluster_addons : k => v if var.create }
cluster_name = aws_eks_cluster.this[0].name
addon_name = try(each.value.name, each.key)
addon_version = lookup(each.value, "addon_version", null)
resolve_conflicts = lookup(each.value, "resolve_conflicts", null)
service_account_role_arn = lookup(each.value, "service_account_role_arn", null)
lifecycle {
ignore_changes = [
modified_at
]
}
tags = var.tags
}
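# Illustrative sketch of the map expected by var.cluster_addons (map keys are
# used as addon names unless a "name" attribute is set; hypothetical values):
#
# cluster_addons = {
#   coredns = {
#     resolve_conflicts = "OVERWRITE"
#   }
#   kube-proxy = {}
# }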
################################################################################
# EKS Identity Provider
# Note - this is different from IRSA
################################################################################
resource "aws_eks_identity_provider_config" "this" {
for_each = { for k, v in var.cluster_identity_providers : k => v if var.create }
cluster_name = aws_eks_cluster.this[0].name
oidc {
client_id = each.value.client_id
groups_claim = lookup(each.value, "groups_claim", null)
groups_prefix = lookup(each.value, "groups_prefix", null)
identity_provider_config_name = try(each.value.identity_provider_config_name, each.key)
issuer_url = each.value.issuer_url
required_claims = lookup(each.value, "required_claims", null)
username_claim = lookup(each.value, "username_claim", null)
username_prefix = lookup(each.value, "username_prefix", null)
}
tags = var.tags
}
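# Illustrative sketch of var.cluster_identity_providers (hypothetical provider;
# client_id and issuer_url are the attributes read directly above, the rest are
# optional and fall back to null):
#
# cluster_identity_providers = {
#   example_oidc = {
#     client_id  = "<client-id>"
#     issuer_url = "https://example.com/oidc"
#   }
# }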