feat: Add support for enabling addons before data plane compute is created #2478
@@ -19,6 +19,25 @@ locals {
   }
 }
 
+# This sleep resource is used to provide a timed gap between the cluster creation and the downstream dependencies
Review comment (on the added time_sleep block): Is there any documented reason we cannot use a [...]? If there was an actual dependency in place, there could be additional benefits (e.g. creating the VPC CNI with IRSA and eliminating the need for removing any instance roles after cluster creation). Anecdotally [...]. The secondary reason I ask is concerns over downstream dependencies on deletion.
+# that consume the outputs from here. Any of the values that are used as triggers can be used in dependencies
+# to ensure that the downstream resources are created after both the cluster is ready and the sleep time has passed.
+# This was primarily added to give addons that need to be configured BEFORE data plane compute resources
+# enough time to create and configure themselves before the data plane compute resources are created.
+resource "time_sleep" "this" {
+  count = var.create ? 1 : 0
+
+  create_duration = var.dataplane_wait_duration
+
+  triggers = {
+    cluster_name                       = aws_eks_cluster.this[0].name
Review comment (on the trigger values): because all of these would trigger a downstream update, they make valid triggers here to ensure the nodegroups/Fargate profiles wait for this duration before proceeding. (antonbabenko marked this conversation as resolved.)
+    cluster_endpoint                   = aws_eks_cluster.this[0].endpoint
+    cluster_version                    = aws_eks_cluster.this[0].version
+
+    cluster_certificate_authority_data = aws_eks_cluster.this[0].certificate_authority[0].data
+  }
+}
+
 ################################################################################
 # EKS IPV6 CNI Policy
 # TODO - hopefully AWS releases a managed policy which can replace this
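The remaining hunks only rewire existing references so they are read through the sleep's triggers map. The mechanism itself is easier to see in isolation: below is a minimal, self-contained sketch of the same pattern, assuming the hashicorp/time and hashicorp/aws providers; the resource names, variables, and duration are illustrative and not part of this PR. Because the node group reads the cluster name through time_sleep's triggers, it implicitly depends on both the cluster and the completed sleep.

# Minimal sketch of the timed-gap pattern (illustrative; not from this PR)
terraform {
  required_providers {
    aws  = { source = "hashicorp/aws" }
    time = { source = "hashicorp/time" }
  }
}

variable "cluster_role_arn" { type = string }
variable "node_role_arn"    { type = string }
variable "subnet_ids"       { type = list(string) }

resource "aws_eks_cluster" "example" {
  name     = "example"
  role_arn = var.cluster_role_arn

  vpc_config {
    subnet_ids = var.subnet_ids
  }
}

# Starts counting only once its trigger values are known, i.e. after the cluster exists
resource "time_sleep" "example" {
  create_duration = "30s"

  triggers = {
    cluster_name = aws_eks_cluster.example.name
  }
}

resource "aws_eks_node_group" "example" {
  # Reading the name through the sleep (instead of aws_eks_cluster.example.name)
  # delays node group creation until the sleep has completed
  cluster_name    = time_sleep.example.triggers["cluster_name"]
  node_group_name = "example"
  node_role_arn   = var.node_role_arn
  subnet_ids      = var.subnet_ids

  scaling_config {
    desired_size = 1
    max_size     = 1
    min_size     = 1
  }
}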
@@ -220,7 +239,7 @@ module "fargate_profile" {
   create = try(each.value.create, true)
 
   # Fargate Profile
-  cluster_name      = aws_eks_cluster.this[0].name
+  cluster_name      = time_sleep.this[0].triggers["cluster_name"]
   cluster_ip_family = var.cluster_ip_family
   name              = try(each.value.name, each.key)
   subnet_ids        = try(each.value.subnet_ids, var.fargate_profile_defaults.subnet_ids, var.subnet_ids)
@@ -255,8 +274,8 @@ module "eks_managed_node_group" {
 
   create = try(each.value.create, true)
 
-  cluster_name      = aws_eks_cluster.this[0].name
-  cluster_version   = try(each.value.cluster_version, var.eks_managed_node_group_defaults.cluster_version, aws_eks_cluster.this[0].version)
+  cluster_name      = time_sleep.this[0].triggers["cluster_name"]
+  cluster_version   = try(each.value.cluster_version, var.eks_managed_node_group_defaults.cluster_version, time_sleep.this[0].triggers["cluster_version"])
   cluster_ip_family = var.cluster_ip_family
 
   # EKS Managed Node Group
@@ -286,8 +305,8 @@
 
   # User data
   platform                   = try(each.value.platform, var.eks_managed_node_group_defaults.platform, "linux")
-  cluster_endpoint           = try(aws_eks_cluster.this[0].endpoint, "")
-  cluster_auth_base64        = try(aws_eks_cluster.this[0].certificate_authority[0].data, "")
+  cluster_endpoint           = try(time_sleep.this[0].triggers["cluster_endpoint"], "")
+  cluster_auth_base64        = try(time_sleep.this[0].triggers["cluster_certificate_authority_data"], "")
   cluster_service_ipv4_cidr  = var.cluster_service_ipv4_cidr
   enable_bootstrap_user_data = try(each.value.enable_bootstrap_user_data, var.eks_managed_node_group_defaults.enable_bootstrap_user_data, false)
   pre_bootstrap_user_data    = try(each.value.pre_bootstrap_user_data, var.eks_managed_node_group_defaults.pre_bootstrap_user_data, "")
@@ -362,7 +381,7 @@ module "self_managed_node_group" {
 
   create = try(each.value.create, true)
 
-  cluster_name      = aws_eks_cluster.this[0].name
+  cluster_name      = time_sleep.this[0].triggers["cluster_name"]
   cluster_ip_family = var.cluster_ip_family
 
   # Autoscaling Group
@@ -415,8 +434,8 @@
 
   # User data
   platform                 = try(each.value.platform, var.self_managed_node_group_defaults.platform, "linux")
-  cluster_endpoint         = try(aws_eks_cluster.this[0].endpoint, "")
-  cluster_auth_base64      = try(aws_eks_cluster.this[0].certificate_authority[0].data, "")
+  cluster_endpoint         = try(time_sleep.this[0].triggers["cluster_endpoint"], "")
+  cluster_auth_base64      = try(time_sleep.this[0].triggers["cluster_certificate_authority_data"], "")
   pre_bootstrap_user_data  = try(each.value.pre_bootstrap_user_data, var.self_managed_node_group_defaults.pre_bootstrap_user_data, "")
   post_bootstrap_user_data = try(each.value.post_bootstrap_user_data, var.self_managed_node_group_defaults.post_bootstrap_user_data, "")
   bootstrap_extra_args     = try(each.value.bootstrap_extra_args, var.self_managed_node_group_defaults.bootstrap_extra_args, "")
@@ -436,7 +455,7 @@
 
   ebs_optimized   = try(each.value.ebs_optimized, var.self_managed_node_group_defaults.ebs_optimized, null)
   ami_id          = try(each.value.ami_id, var.self_managed_node_group_defaults.ami_id, "")
-  cluster_version = try(each.value.cluster_version, var.self_managed_node_group_defaults.cluster_version, aws_eks_cluster.this[0].version)
+  cluster_version = try(each.value.cluster_version, var.self_managed_node_group_defaults.cluster_version, time_sleep.this[0].triggers["cluster_version"])
   instance_type   = try(each.value.instance_type, var.self_managed_node_group_defaults.instance_type, "m6i.large")
   key_name        = try(each.value.key_name, var.self_managed_node_group_defaults.key_name, null)
 
Review comment: this addition now allows the resources to be cleaned up properly when users elect to create the IPv6 policy with the module.
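From a module user's perspective, the knob introduced by this diff is var.dataplane_wait_duration, which sets the gap between cluster readiness and the creation of node groups and Fargate profiles. A rough usage sketch follows; the module source, version constraint, addon entry, and surrounding inputs are assumptions for illustration, not values taken from this PR.

variable "vpc_id"     { type = string }
variable "subnet_ids" { type = list(string) }

module "eks" {
  # Assumed source/version; use a release that includes this change
  source  = "terraform-aws-modules/eks/aws"
  version = "~> 19.0"

  cluster_name    = "example"
  cluster_version = "1.24"

  vpc_id     = var.vpc_id
  subnet_ids = var.subnet_ids

  # Timed gap between cluster creation and the data plane compute resources,
  # giving addons that must be configured first time to finish; the variable
  # name comes from this diff, the value here is illustrative
  dataplane_wait_duration = "2m"

  cluster_addons = {
    # e.g. configure the VPC CNI before any nodes exist
    vpc-cni = {}
  }

  eks_managed_node_groups = {
    default = {
      min_size     = 1
      max_size     = 3
      desired_size = 1
    }
  }
}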