diff --git a/examples/launch_templates/main.tf b/examples/launch_templates/main.tf
index 2501d6d..98cac24 100644
--- a/examples/launch_templates/main.tf
+++ b/examples/launch_templates/main.tf
@@ -60,5 +60,12 @@ module "eks" {
       asg_desired_capacity = 1
       public_ip            = true
     },
+    {
+      name                          = "worker-group-3"
+      instance_type                 = "t2.large"
+      asg_desired_capacity          = 1
+      public_ip                     = true
+      elastic_inference_accelerator = "eia2.medium"
+    },
   ]
 }
diff --git a/local.tf b/local.tf
index 517fe7a..f60ef1c 100644
--- a/local.tf
+++ b/local.tf
@@ -94,6 +94,7 @@ locals {
     spot_instance_pools           = 10   # "Number of Spot pools per availability zone to allocate capacity. EC2 Auto Scaling selects the cheapest Spot pools and evenly allocates Spot capacity across the number of Spot pools that you specify."
     spot_max_price                = ""   # Maximum price per unit hour that the user is willing to pay for the Spot instances. Default is the on-demand price
     max_instance_lifetime         = 0    # Maximum number of seconds instances can run in the ASG. 0 is unlimited.
+    elastic_inference_accelerator = null # Type of elastic inference accelerator to be attached. Example values are eia1.medium, eia2.large, etc.
   }
 
   workers_group_defaults = merge(
diff --git a/workers_launch_template.tf b/workers_launch_template.tf
index c9626f4..bdf07af 100644
--- a/workers_launch_template.tf
+++ b/workers_launch_template.tf
@@ -290,6 +290,18 @@ resource "aws_launch_template" "workers_launch_template" {
     "instance_type",
     local.workers_group_defaults["instance_type"],
   )
+
+  dynamic "elastic_inference_accelerator" {
+    for_each = lookup(
+      var.worker_groups_launch_template[count.index],
+      "elastic_inference_accelerator",
+      local.workers_group_defaults["elastic_inference_accelerator"]
+    ) != null ? [lookup(var.worker_groups_launch_template[count.index], "elastic_inference_accelerator", local.workers_group_defaults["elastic_inference_accelerator"])] : []
+    content {
+      type = elastic_inference_accelerator.value
+    }
+  }
+
   key_name = lookup(
     var.worker_groups_launch_template[count.index],
     "key_name",