Skip to content

Commit 39a75a3

Browse files
committed
chore: consolidate configmap data blocks
1 parent 3530a65 commit 39a75a3

File tree

2 files changed

+18
-54
lines changed

2 files changed

+18
-54
lines changed

examples/self_managed_node_group/main.tf

+3-6
Original file line number | Diff line number | Diff line change
@@ -41,12 +41,9 @@ module "eks" {
4141
cluster_endpoint_public_access = true
4242

4343
cluster_addons = {
44-
# Note: launching a new cluster where the module manages the aws-auth configmap
45-
# and tries to manage the CoreDNS will fail due to order of deploy. Managed CoreDNS addon
46-
# after aws-auth has been updated with node IAM roles
47-
# coredns = {
48-
# resolve_conflicts = "OVERWRITE"
49-
# }
44+
coredns = {
45+
resolve_conflicts = "OVERWRITE"
46+
}
5047
kube-proxy = {}
5148
vpc-cni = {
5249
resolve_conflicts = "OVERWRITE"

main.tf

+15-48
Original file line number | Diff line number | Diff line change
@@ -368,17 +368,8 @@ locals {
368368
[for group in module.fargate_profile : group.fargate_profile_pod_execution_role_arn],
369369
var.aws_auth_fargate_profile_pod_execution_role_arns,
370370
))
371-
}
372-
373-
resource "kubernetes_config_map" "aws_auth" {
374-
count = var.create && var.create_aws_auth_configmap ? 1 : 0
375-
376-
metadata {
377-
name = "aws-auth"
378-
namespace = "kube-system"
379-
}
380371

381-
data = {
372+
aws_auth_configmap_data = {
382373
mapRoles = yamlencode(concat(
383374
[for role_arn in local.node_iam_role_arns_non_windows : {
384375
rolearn = role_arn
@@ -415,6 +406,17 @@ resource "kubernetes_config_map" "aws_auth" {
415406
mapUsers = yamlencode(var.aws_auth_users)
416407
mapAccounts = yamlencode(var.aws_auth_accounts)
417408
}
409+
}
410+
411+
resource "kubernetes_config_map" "aws_auth" {
412+
count = var.create && var.create_aws_auth_configmap ? 1 : 0
413+
414+
metadata {
415+
name = "aws-auth"
416+
namespace = "kube-system"
417+
}
418+
419+
data = local.aws_auth_configmap_data
418420

419421
lifecycle {
420422
# We are ignoring the data here since we will manage it with the resource below
@@ -426,52 +428,17 @@ resource "kubernetes_config_map" "aws_auth" {
426428
resource "kubernetes_config_map_v1_data" "aws_auth" {
427429
count = var.create && var.manage_aws_auth_configmap ? 1 : 0
428430

429-
force = false
431+
force = true
430432

431433
metadata {
432434
name = "aws-auth"
433435
namespace = "kube-system"
434436
}
435437

436-
data = {
437-
mapRoles = yamlencode(concat(
438-
[for role_arn in local.node_iam_role_arns_non_windows : {
439-
rolearn = role_arn
440-
username = "system:node:{{EC2PrivateDNSName}}"
441-
groups = [
442-
"system:bootstrappers",
443-
"system:nodes",
444-
]
445-
}
446-
],
447-
[for role_arn in local.node_iam_role_arns_windows : {
448-
rolearn = role_arn
449-
username = "system:node:{{EC2PrivateDNSName}}"
450-
groups = [
451-
"eks:kube-proxy-windows",
452-
"system:bootstrappers",
453-
"system:nodes",
454-
]
455-
}
456-
],
457-
# Fargate profile
458-
[for role_arn in local.fargate_profile_pod_execution_role_arns : {
459-
rolearn = role_arn
460-
username = "system:node:{{SessionName}}"
461-
groups = [
462-
"system:bootstrappers",
463-
"system:nodes",
464-
"system:node-proxier",
465-
]
466-
}
467-
],
468-
var.aws_auth_roles
469-
))
470-
mapUsers = yamlencode(var.aws_auth_users)
471-
mapAccounts = yamlencode(var.aws_auth_accounts)
472-
}
438+
data = local.aws_auth_configmap_data
473439

474440
depends_on = [
441+
# Required for instances where the configmap does not exist yet to avoid race condition
475442
kubernetes_config_map.aws_auth,
476443
]
477444
}

0 commit comments

Comments (0)