data "aws_partition" "current" {}
|
|
|
|
locals {
|
|
create = var.create && var.putin_khuylo
|
|
}
|
|
|
|
################################################################################
|
|
# Cluster
|
|
################################################################################
|
|
|
|
resource "aws_eks_cluster" "this" {
|
|
count = local.create ? 1 : 0
|
|
|
|
name = var.cluster_name
|
|
role_arn = try(aws_iam_role.this[0].arn, var.iam_role_arn)
|
|
version = var.cluster_version
|
|
enabled_cluster_log_types = var.cluster_enabled_log_types
|
|
|
|
vpc_config {
|
|
security_group_ids = compact(distinct(concat(var.cluster_additional_security_group_ids, [local.cluster_security_group_id])))
|
|
subnet_ids = var.subnet_ids
|
|
endpoint_private_access = var.cluster_endpoint_private_access
|
|
endpoint_public_access = var.cluster_endpoint_public_access
|
|
public_access_cidrs = var.cluster_endpoint_public_access_cidrs
|
|
}
|
|
|
|
kubernetes_network_config {
|
|
ip_family = var.cluster_ip_family
|
|
service_ipv4_cidr = var.cluster_service_ipv4_cidr
|
|
}
|
|
|
|
dynamic "encryption_config" {
|
|
for_each = toset(var.cluster_encryption_config)
|
|
|
|
content {
|
|
provider {
|
|
key_arn = encryption_config.value.provider_key_arn
|
|
}
|
|
resources = encryption_config.value.resources
|
|
}
|
|
}
|
|
|
|
tags = merge(
|
|
var.tags,
|
|
var.cluster_tags,
|
|
)
|
|
|
|
timeouts {
|
|
create = lookup(var.cluster_timeouts, "create", null)
|
|
update = lookup(var.cluster_timeouts, "update", null)
|
|
delete = lookup(var.cluster_timeouts, "delete", null)
|
|
}
|
|
|
|
depends_on = [
|
|
aws_iam_role_policy_attachment.this,
|
|
aws_security_group_rule.cluster,
|
|
aws_security_group_rule.node,
|
|
aws_cloudwatch_log_group.this
|
|
]
|
|
}
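
# For reference, a minimal sketch of caller inputs consumed by the blocks above
# (`cluster_encryption_config` and `cluster_timeouts`); the key ARN and the
# durations are hypothetical placeholders, not module defaults:
#
#   cluster_encryption_config = [{
#     provider_key_arn = "arn:aws:kms:eu-west-1:111122223333:key/00000000-0000-0000-0000-000000000000"
#     resources        = ["secrets"]
#   }]
#
#   cluster_timeouts = {
#     create = "30m"
#     delete = "15m"
#   }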

resource "aws_ec2_tag" "cluster_primary_security_group" {
  for_each = { for k, v in merge(var.tags, var.cluster_tags) : k => v if local.create }

  resource_id = aws_eks_cluster.this[0].vpc_config[0].cluster_security_group_id
  key         = each.key
  value       = each.value
}

resource "aws_cloudwatch_log_group" "this" {
  count = local.create && var.create_cloudwatch_log_group ? 1 : 0

  name              = "/aws/eks/${var.cluster_name}/cluster"
  retention_in_days = var.cloudwatch_log_group_retention_in_days
  kms_key_id        = var.cloudwatch_log_group_kms_key_id

  tags = var.tags
}

################################################################################
# Cluster Security Group
# Defaults follow https://docs.aws.amazon.com/eks/latest/userguide/sec-group-reqs.html
################################################################################

locals {
  cluster_sg_name   = coalesce(var.cluster_security_group_name, "${var.cluster_name}-cluster")
  create_cluster_sg = local.create && var.create_cluster_security_group

  cluster_security_group_id = local.create_cluster_sg ? aws_security_group.cluster[0].id : var.cluster_security_group_id

  cluster_security_group_rules = {
    ingress_nodes_443 = {
      description                = "Node groups to cluster API"
      protocol                   = "tcp"
      from_port                  = 443
      to_port                    = 443
      type                       = "ingress"
      source_node_security_group = true
    }
    egress_nodes_443 = {
      description                = "Cluster API to node groups"
      protocol                   = "tcp"
      from_port                  = 443
      to_port                    = 443
      type                       = "egress"
      source_node_security_group = true
    }
    egress_nodes_kubelet = {
      description                = "Cluster API to node kubelets"
      protocol                   = "tcp"
      from_port                  = 10250
      to_port                    = 10250
      type                       = "egress"
      source_node_security_group = true
    }
  }
}

resource "aws_security_group" "cluster" {
  count = local.create_cluster_sg ? 1 : 0

  name        = var.cluster_security_group_use_name_prefix ? null : local.cluster_sg_name
  name_prefix = var.cluster_security_group_use_name_prefix ? "${local.cluster_sg_name}${var.prefix_separator}" : null
  description = var.cluster_security_group_description
  vpc_id      = var.vpc_id

  tags = merge(
    var.tags,
    { "Name" = local.cluster_sg_name },
    var.cluster_security_group_tags
  )

  lifecycle {
    create_before_destroy = true
  }
}

resource "aws_security_group_rule" "cluster" {
  for_each = { for k, v in merge(local.cluster_security_group_rules, var.cluster_security_group_additional_rules) : k => v if local.create_cluster_sg }

  # Required
  security_group_id = aws_security_group.cluster[0].id
  protocol          = each.value.protocol
  from_port         = each.value.from_port
  to_port           = each.value.to_port
  type              = each.value.type

  # Optional
  description      = try(each.value.description, null)
  cidr_blocks      = try(each.value.cidr_blocks, null)
  ipv6_cidr_blocks = try(each.value.ipv6_cidr_blocks, null)
  prefix_list_ids  = try(each.value.prefix_list_ids, [])
  self             = try(each.value.self, null)
  source_security_group_id = try(
    each.value.source_security_group_id,
    try(each.value.source_node_security_group, false) ? local.node_security_group_id : null
  )
}
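
# A minimal sketch of an entry a caller might pass via
# `cluster_security_group_additional_rules`; it follows the same shape as the
# default rules above (the key, description, and security group ID here are
# hypothetical):
#
#   cluster_security_group_additional_rules = {
#     ingress_bastion_443 = {
#       description              = "Bastion host to cluster API"
#       protocol                 = "tcp"
#       from_port                = 443
#       to_port                  = 443
#       type                     = "ingress"
#       source_security_group_id = "sg-0123456789abcdef0"
#     }
#   }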

################################################################################
# IRSA
# Note - this is different from the EKS identity provider
################################################################################

data "tls_certificate" "this" {
  count = local.create && var.enable_irsa ? 1 : 0

  url = aws_eks_cluster.this[0].identity[0].oidc[0].issuer
}

resource "aws_iam_openid_connect_provider" "oidc_provider" {
  count = local.create && var.enable_irsa ? 1 : 0

  client_id_list  = distinct(compact(concat(["sts.${data.aws_partition.current.dns_suffix}"], var.openid_connect_audiences)))
  thumbprint_list = concat([data.tls_certificate.this[0].certificates[0].sha1_fingerprint], var.custom_oidc_thumbprints)
  url             = aws_eks_cluster.this[0].identity[0].oidc[0].issuer

  tags = merge(
    { Name = "${var.cluster_name}-eks-irsa" },
    var.tags
  )
}
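
# For reference only: a role used with IRSA trusts this OIDC provider via
# sts:AssumeRoleWithWebIdentity. A sketch of such a trust policy statement
# (the namespace and service account in the subject are hypothetical):
#
#   {
#     "Effect": "Allow",
#     "Principal": { "Federated": "<arn of aws_iam_openid_connect_provider.oidc_provider>" },
#     "Action": "sts:AssumeRoleWithWebIdentity",
#     "Condition": {
#       "StringEquals": {
#         "<issuer URL without https://>:sub": "system:serviceaccount:kube-system:my-service-account"
#       }
#     }
#   }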

################################################################################
# IAM Role
################################################################################

locals {
  create_iam_role   = local.create && var.create_iam_role
  iam_role_name     = coalesce(var.iam_role_name, "${var.cluster_name}-cluster")
  policy_arn_prefix = "arn:${data.aws_partition.current.partition}:iam::aws:policy"

  cluster_encryption_policy_name = coalesce(var.cluster_encryption_policy_name, "${local.iam_role_name}-ClusterEncryption")

  # TODO - hopefully this can be removed once the AWS endpoint is named properly in China
  # https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1904
  dns_suffix = coalesce(var.cluster_iam_role_dns_suffix, data.aws_partition.current.dns_suffix)
}

data "aws_iam_policy_document" "assume_role_policy" {
  count = local.create && var.create_iam_role ? 1 : 0

  statement {
    sid     = "EKSClusterAssumeRole"
    actions = ["sts:AssumeRole"]

    principals {
      type        = "Service"
      identifiers = ["eks.${local.dns_suffix}"]
    }
  }
}

resource "aws_iam_role" "this" {
  count = local.create_iam_role ? 1 : 0

  name        = var.iam_role_use_name_prefix ? null : local.iam_role_name
  name_prefix = var.iam_role_use_name_prefix ? "${local.iam_role_name}${var.prefix_separator}" : null
  path        = var.iam_role_path
  description = var.iam_role_description

  assume_role_policy    = data.aws_iam_policy_document.assume_role_policy[0].json
  permissions_boundary  = var.iam_role_permissions_boundary
  force_detach_policies = true

  # https://github.com/terraform-aws-modules/terraform-aws-eks/issues/920
  # Resources running on the cluster are still generating logs while the module's resources are
  # being destroyed, which results in the log group being re-created even after Terraform destroys
  # it. Denying the cluster role the ability to create the log group prevents it from being
  # re-created outside of Terraform by services that are still emitting logs during the destroy
  # process.
  dynamic "inline_policy" {
    for_each = var.create_cloudwatch_log_group ? [1] : []
    content {
      name = local.iam_role_name

      policy = jsonencode({
        Version = "2012-10-17"
        Statement = [
          {
            Action   = ["logs:CreateLogGroup"]
            Effect   = "Deny"
            Resource = aws_cloudwatch_log_group.this[0].arn
          },
        ]
      })
    }
  }

  tags = merge(var.tags, var.iam_role_tags)
}

# Policies attached ref https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/eks_node_group
resource "aws_iam_role_policy_attachment" "this" {
  for_each = local.create_iam_role ? toset(compact(distinct(concat([
    "${local.policy_arn_prefix}/AmazonEKSClusterPolicy",
    "${local.policy_arn_prefix}/AmazonEKSVPCResourceController",
  ], var.iam_role_additional_policies)))) : toset([])

  policy_arn = each.value
  role       = aws_iam_role.this[0].name
}
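
# Callers can extend the attached policy set; a minimal sketch (the ARN shown
# is a hypothetical customer-managed policy, not a module default):
#
#   iam_role_additional_policies = [
#     "arn:aws:iam::111122223333:policy/additional-cluster-policy"
#   ]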

# Using separate attachment due to `The "for_each" value depends on resource attributes that cannot be determined until apply`
resource "aws_iam_role_policy_attachment" "cluster_encryption" {
  count = local.create_iam_role && var.attach_cluster_encryption_policy && length(var.cluster_encryption_config) > 0 ? 1 : 0

  policy_arn = aws_iam_policy.cluster_encryption[0].arn
  role       = aws_iam_role.this[0].name
}

resource "aws_iam_policy" "cluster_encryption" {
  count = local.create_iam_role && var.attach_cluster_encryption_policy && length(var.cluster_encryption_config) > 0 ? 1 : 0

  name        = var.cluster_encryption_policy_use_name_prefix ? null : local.cluster_encryption_policy_name
  name_prefix = var.cluster_encryption_policy_use_name_prefix ? local.cluster_encryption_policy_name : null
  description = var.cluster_encryption_policy_description
  path        = var.cluster_encryption_policy_path

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Action = [
          "kms:Encrypt",
          "kms:Decrypt",
          "kms:ListGrants",
          "kms:DescribeKey",
        ]
        Effect   = "Allow"
        Resource = [for config in var.cluster_encryption_config : config.provider_key_arn]
      },
    ]
  })

  tags = merge(var.tags, var.cluster_encryption_policy_tags)
}

################################################################################
# EKS Addons
################################################################################

resource "aws_eks_addon" "this" {
  for_each = { for k, v in var.cluster_addons : k => v if local.create }

  cluster_name = aws_eks_cluster.this[0].name
  addon_name   = try(each.value.name, each.key)

  addon_version            = lookup(each.value, "addon_version", null)
  resolve_conflicts        = lookup(each.value, "resolve_conflicts", null)
  service_account_role_arn = lookup(each.value, "service_account_role_arn", null)

  lifecycle {
    ignore_changes = [
      modified_at
    ]
  }

  depends_on = [
    module.fargate_profile,
    module.eks_managed_node_group,
    module.self_managed_node_group,
  ]

  tags = var.tags
}
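
# A minimal sketch of the `cluster_addons` input this resource iterates over
# (the addon version and role ARN shown are hypothetical placeholders):
#
#   cluster_addons = {
#     coredns = {
#       resolve_conflicts = "OVERWRITE"
#     }
#     vpc-cni = {
#       addon_version            = "v1.11.0-eksbuild.1"
#       service_account_role_arn = "arn:aws:iam::111122223333:role/vpc-cni-irsa"
#     }
#   }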

################################################################################
# EKS Identity Provider
# Note - this is different from IRSA
################################################################################

resource "aws_eks_identity_provider_config" "this" {
  for_each = { for k, v in var.cluster_identity_providers : k => v if local.create }

  cluster_name = aws_eks_cluster.this[0].name

  oidc {
    client_id                     = each.value.client_id
    groups_claim                  = lookup(each.value, "groups_claim", null)
    groups_prefix                 = lookup(each.value, "groups_prefix", null)
    identity_provider_config_name = try(each.value.identity_provider_config_name, each.key)
    issuer_url                    = each.value.issuer_url
    required_claims               = lookup(each.value, "required_claims", null)
    username_claim                = lookup(each.value, "username_claim", null)
    username_prefix               = lookup(each.value, "username_prefix", null)
  }

  tags = var.tags
}
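
# A minimal sketch of a `cluster_identity_providers` entry; per the lookups
# above, everything except `client_id` and `issuer_url` falls back to null
# (the key, client ID, and issuer URL here are hypothetical):
#
#   cluster_identity_providers = {
#     example-oidc = {
#       client_id  = "my-client-id"
#       issuer_url = "https://oidc.example.com"
#     }
#   }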

################################################################################
# aws-auth configmap
################################################################################

locals {
  node_iam_role_arns_non_windows = compact(concat(
    [for group in module.eks_managed_node_group : group.iam_role_arn],
    [for group in module.self_managed_node_group : group.iam_role_arn if group.platform != "windows"],
    var.aws_auth_node_iam_role_arns_non_windows,
  ))

  node_iam_role_arns_windows = compact(concat(
    [for group in module.self_managed_node_group : group.iam_role_arn if group.platform == "windows"],
    var.aws_auth_node_iam_role_arns_windows,
  ))

  fargate_profile_pod_execution_role_arns = compact(concat(
    [for group in module.fargate_profile : group.fargate_profile_pod_execution_role_arn],
    var.aws_auth_fargate_profile_pod_execution_role_arns,
  ))

  aws_auth_configmap_data = {
    mapRoles = yamlencode(concat(
      [for role_arn in local.node_iam_role_arns_non_windows : {
        rolearn  = role_arn
        username = "system:node:{{EC2PrivateDNSName}}"
        groups = [
          "system:bootstrappers",
          "system:nodes",
        ]
      }],
      [for role_arn in local.node_iam_role_arns_windows : {
        rolearn  = role_arn
        username = "system:node:{{EC2PrivateDNSName}}"
        groups = [
          "eks:kube-proxy-windows",
          "system:bootstrappers",
          "system:nodes",
        ]
      }],
      # Fargate profile
      [for role_arn in local.fargate_profile_pod_execution_role_arns : {
        rolearn  = role_arn
        username = "system:node:{{SessionName}}"
        groups = [
          "system:bootstrappers",
          "system:nodes",
          "system:node-proxier",
        ]
      }],
      var.aws_auth_roles
    ))
    mapUsers    = yamlencode(var.aws_auth_users)
    mapAccounts = yamlencode(var.aws_auth_accounts)
  }
}
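
# For illustration, a single non-Windows node role renders into `mapRoles`
# roughly as follows (the account ID and role name are hypothetical):
#
#   - rolearn: arn:aws:iam::111122223333:role/example-node-role
#     username: system:node:{{EC2PrivateDNSName}}
#     groups:
#       - system:bootstrappers
#       - system:nodes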

resource "kubernetes_config_map" "aws_auth" {
  count = var.create && var.create_aws_auth_configmap ? 1 : 0

  metadata {
    name      = "aws-auth"
    namespace = "kube-system"
  }

  data = local.aws_auth_configmap_data

  lifecycle {
    # We are ignoring the data here since we will manage it with the resource below
    # This is only intended to be used in scenarios where the configmap does not exist
    ignore_changes = [data]
  }
}

resource "kubernetes_config_map_v1_data" "aws_auth" {
  count = var.create && var.manage_aws_auth_configmap ? 1 : 0

  force = true

  metadata {
    name      = "aws-auth"
    namespace = "kube-system"
  }

  data = local.aws_auth_configmap_data

  depends_on = [
    # Required for instances where the configmap does not exist yet, to avoid a race condition
    kubernetes_config_map.aws_auth,
  ]
}