terraform-aws-eks/data.tf
Thierno IB. BARRY 6d7d6f6f5a feat: Drop random pets from Managed Node Groups (#1372)
BREAKING CHANGES: We have decided to remove the `random_pet` resources from Managed Node Groups (MNG). They were used to recreate an MNG when something changed, and also to simulate the newly added `node_group_name_prefix` argument, but they caused a lot of trouble. To upgrade the module without recreating your MNGs, you will need to explicitly reuse their previous names by setting them in each MNG's `name` argument. Please see the [upgrade docs](https://github.com/terraform-aws-modules/terraform-aws-eks/blob/master/docs/upgrades.md#upgrade-module-to-v1700-for-managed-node-groups) for more details.
2021-05-28 01:50:16 +02:00
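
For illustration, a minimal sketch of the upgrade path described above, assuming a hypothetical node group keyed `example` whose `random_pet` previously generated the name `my-cluster-example-sticky-bison` (check `terraform state show` for the actual value in your state); pinning that name in the `name` argument keeps Terraform from destroying and recreating the group:

  node_groups = {
    example = {
      # Reuse the exact name previously generated by random_pet so the
      # managed node group is not replaced on upgrade.
      name             = "my-cluster-example-sticky-bison"
      desired_capacity = 1
      max_capacity     = 3
      min_capacity     = 1
    }
  }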

100 lines · 2.3 KiB · HCL

data "aws_partition" "current" {}
data "aws_caller_identity" "current" {}
data "aws_iam_policy_document" "workers_assume_role_policy" {
statement {
sid = "EKSWorkerAssumeRole"
actions = [
"sts:AssumeRole",
]
principals {
type = "Service"
identifiers = [local.ec2_principal]
}
}
}
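
# For context, a rough sketch (not part of this file) of how this document is
# consumed: its rendered JSON becomes the assume-role policy of the worker IAM
# role created elsewhere in the module, roughly:
#
#   resource "aws_iam_role" "workers" {
#     assume_role_policy = data.aws_iam_policy_document.workers_assume_role_policy.json
#   }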
data "aws_ami" "eks_worker" {
count = local.worker_has_linux_ami ? 1 : 0
filter {
name = "name"
values = [local.worker_ami_name_filter]
}
most_recent = true
owners = [var.worker_ami_owner_id]
}
data "aws_ami" "eks_worker_windows" {
count = local.worker_has_windows_ami ? 1 : 0
filter {
name = "name"
values = [local.worker_ami_name_filter_windows]
}
filter {
name = "platform"
values = ["windows"]
}
most_recent = true
owners = [var.worker_ami_owner_id_windows]
}
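
# For context, a hedged sketch (not part of this file) of how these lookups
# typically serve as fallbacks in the module's locals: an explicitly supplied
# ami_id wins, otherwise the queried AMI is used. The local name here is
# illustrative.
#
#   default_ami_id_linux = local.workers_group_defaults.ami_id != "" ? local.workers_group_defaults.ami_id : concat(data.aws_ami.eks_worker.*.id, [""])[0]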
data "aws_iam_policy_document" "cluster_assume_role_policy" {
statement {
sid = "EKSClusterAssumeRole"
actions = [
"sts:AssumeRole",
]
principals {
type = "Service"
identifiers = ["eks.amazonaws.com"]
}
}
}
data "aws_iam_role" "custom_cluster_iam_role" {
count = var.manage_cluster_iam_resources ? 0 : 1
name = var.cluster_iam_role_name
}
data "aws_iam_instance_profile" "custom_worker_group_iam_instance_profile" {
count = var.manage_worker_iam_resources ? 0 : local.worker_group_count
name = lookup(
var.worker_groups[count.index],
"iam_instance_profile_name",
local.workers_group_defaults["iam_instance_profile_name"],
)
}
data "aws_iam_instance_profile" "custom_worker_group_launch_template_iam_instance_profile" {
count = var.manage_worker_iam_resources ? 0 : local.worker_group_launch_template_count
name = lookup(
var.worker_groups_launch_template[count.index],
"iam_instance_profile_name",
local.workers_group_defaults["iam_instance_profile_name"],
)
}
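
# For context, a hedged sketch (not part of this file) of the caller-side input
# these lookups expect when worker IAM is managed externally; the profile name
# here is purely illustrative.
#
#   module "eks" {
#     # ...
#     manage_worker_iam_resources = false
#     worker_groups = [
#       {
#         name                      = "workers"
#         iam_instance_profile_name = "my-preexisting-worker-profile"
#       },
#     ]
#   }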
data "http" "wait_for_cluster" {
count = var.create_eks && var.manage_aws_auth ? 1 : 0
url = format("%s/healthz", aws_eks_cluster.this[0].endpoint)
ca_certificate = base64decode(coalescelist(aws_eks_cluster.this[*].certificate_authority[0].data, [""])[0])
timeout = 300
depends_on = [
aws_eks_cluster.this,
aws_security_group_rule.cluster_private_access,
]
}
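
# For context, a sketch (not part of this file) of why this check exists: the
# aws-auth ConfigMap can only be applied once the API server responds, so the
# module's ConfigMap resource depends on this data source, roughly:
#
#   resource "kubernetes_config_map" "aws_auth" {
#     # ...
#     depends_on = [data.http.wait_for_cluster]
#   }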