diff --git a/docs/faq.md b/docs/faq.md index fe3903b6d7..9fba65b7fa 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -197,3 +197,9 @@ Reference the `--node-labels` argument for your version of Kubenetes for the all `worker_groups` are [self-managed nodes](https://docs.aws.amazon.com/eks/latest/userguide/worker.html) (provisions a typical "Autoscaling group" on EC2). It gives you full control over nodes in the cluster like using custom AMI for the nodes. As AWS says, "with worker groups the customer controls the data plane & AWS controls the control plane". Both can be used together in the same cluster. + +## I'm using both AWS-Managed node groups and Self-Managed worker groups and pods scheduled on AWS-Managed node groups are unable to resolve DNS (even communication between pods) + +This happens because CoreDNS can be scheduled on Self-Managed worker groups and, by default, the terraform module doesn't create security group rules to ensure communication between pods scheduled on a Self-Managed worker group and AWS-Managed node groups. + +You can set `var.worker_create_cluster_primary_security_group_rules` to `true` to create the required rules. diff --git a/examples/managed_node_groups/main.tf b/examples/managed_node_groups/main.tf index 6d66aa21ea..eee25b9e01 100644 --- a/examples/managed_node_groups/main.tf +++ b/examples/managed_node_groups/main.tf @@ -112,6 +112,21 @@ module "eks" { } } + # Create security group rules to allow communication between pods on workers and pods in managed node groups. + # Set this to true if you have AWS-Managed node groups and Self-Managed worker groups. 
+ # See https://github.com/terraform-aws-modules/terraform-aws-eks/issues/1089 + + # worker_create_cluster_primary_security_group_rules = true + + # worker_groups_launch_template = [ + # { + # name = "worker-group-1" + # instance_type = "t2.small" + # asg_desired_capacity = 2 + # public_ip = true + # } + # ] + map_roles = var.map_roles map_users = var.map_users map_accounts = var.map_accounts