Skip to content

Commit 1731f2a

Browse files
jschwinger233 authored and pchaigno committed
contrib: Set IPv6 for Kubernetes nodeIP on dual-stack dev VM
The development setup using the provided Vagrantfile will give a dual-stack Kubernetes cluster in the dev VM, but a hostNetwork pod deployed on it cannot show an IPv6 address. This is because the kubelet is deployed using the incomplete `--node-ip` parameter: to enable dual-stack mode, we must pass both IPv6 address and IPv4 address via `--node-ip` to kubelet. This commit completes the `--node-ip` parameter and fixes the issue. Fixes: cilium#23503 Signed-off-by: Zhichuan Liang <[email protected]>
1 parent 3aa42ea commit 1731f2a

File tree

3 files changed

+26
-9
lines changed

3 files changed

+26
-9
lines changed

Vagrantfile

+8-7
Original file line numberDiff line numberDiff line change
@@ -201,6 +201,7 @@ Vagrant.configure(2) do |config|
201201
master_vm_name = "#{$vm_base_name}1#{$build_id_name}#{$vm_kernel}"
202202
config.vm.define master_vm_name, primary: true do |cm|
203203
node_ip = "#{$master_ip}"
204+
node_ipv6 = "#{$master_ipv6}"
204205
cm.vm.network "forwarded_port", guest: 6443, host: 7443, auto_correct: true
205206
cm.vm.network "forwarded_port", guest: 9081, host: 9081, auto_correct: true
206207
# 2345 is the default delv server port
@@ -241,7 +242,7 @@ Vagrant.configure(2) do |config|
241242
cm.vm.provision "k8s-install-master-part-1",
242243
type: "shell",
243244
run: "always",
244-
env: {"node_ip" => node_ip},
245+
env: {"node_ip" => node_ip, "node_ipv6" => node_ipv6},
245246
privileged: true,
246247
path: k8sinstall
247248
end
@@ -255,7 +256,7 @@ Vagrant.configure(2) do |config|
255256
cm.vm.provision "k8s-install-master-part-2",
256257
type: "shell",
257258
run: "always",
258-
env: {"node_ip" => node_ip},
259+
env: {"node_ip" => node_ip, "node_ipv6" => node_ipv6},
259260
privileged: true,
260261
path: k8sinstall
261262
end
@@ -268,16 +269,16 @@ Vagrant.configure(2) do |config|
268269
node_hostname = "#{$vm_base_name}#{n+2}"
269270
config.vm.define node_vm_name do |node|
270271
node_ip = $workers_ipv4_addrs[n]
272+
node_ipv6 = $workers_ipv6_addrs[n]
271273
node.vm.network "private_network", ip: "#{node_ip}",
272274
virtualbox__intnet: "cilium-test-#{$build_id}"
273275
nfs_ipv4_addr = $workers_ipv4_addrs_nfs[n]
274-
ipv6_addr = $workers_ipv6_addrs[n]
275276
node.vm.network "private_network", ip: "#{nfs_ipv4_addr}", bridge: "enp0s9"
276277
# Add IPv6 address this way or we get hit by a virtualbox bug
277278
node.vm.provision "ipv6-config",
278279
type: "shell",
279280
run: "always",
280-
inline: "ip -6 a a #{ipv6_addr}/16 dev enp0s9"
281+
inline: "ip -6 a a #{node_ipv6}/16 dev enp0s9"
281282

282283
# Interface for the IPv6 NAT Service. The IP address doesn't matter
283284
# as it won't be used. We use an IPv4 address as newer versions of
@@ -294,7 +295,7 @@ Vagrant.configure(2) do |config|
294295
inline: "ip -6 r a default via fd17:625c:f037:2::1 dev enp0s10 || true"
295296

296297
if ENV["IPV6_EXT"] then
297-
node_ip = "#{ipv6_addr}"
298+
node_ip = "#{node_ipv6}"
298299
end
299300
node.vm.hostname = "#{node_hostname}"
300301
if ENV['CILIUM_TEMP'] then
@@ -303,7 +304,7 @@ Vagrant.configure(2) do |config|
303304
node.vm.provision "k8s-install-node-part-1",
304305
type: "shell",
305306
run: "always",
306-
env: {"node_ip" => node_ip},
307+
env: {"node_ip" => node_ip, "node_ipv6" => node_ipv6},
307308
privileged: true,
308309
path: k8sinstall
309310
end
@@ -314,7 +315,7 @@ Vagrant.configure(2) do |config|
314315
node.vm.provision "k8s-install-node-part-2",
315316
type: "shell",
316317
run: "always",
317-
env: {"node_ip" => node_ip},
318+
env: {"node_ip" => node_ip, "node_ipv6" => node_ipv6},
318319
privileged: true,
319320
path: k8sinstall
320321
end

contrib/k8s/k8s-extract-clustermesh-nodeport-secret.sh

+1-1
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@ set -e
77

88
NAMESPACE=$(kubectl get pod -l k8s-app=clustermesh-apiserver -o jsonpath='{.items[0].metadata.namespace}' --all-namespaces)
99
NODE_NAME=$(kubectl -n $NAMESPACE get pod -l k8s-app=clustermesh-apiserver -o jsonpath='{.items[0].spec.nodeName}')
10-
NODE_IP=$(kubectl -n $NAMESPACE get node $NODE_NAME -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}')
10+
NODE_IP=$(kubectl -n $NAMESPACE get node $NODE_NAME -o jsonpath='{.status.addresses[?(@.type=="InternalIP")].address}' | awk '{print $1}')
1111
NODE_PORT=$(kubectl -n $NAMESPACE get svc clustermesh-apiserver -o jsonpath='{.spec.ports[0].nodePort}')
1212
CLUSTER_NAME=$(kubectl -n $NAMESPACE get cm cilium-config -o jsonpath='{.data.cluster-name}')
1313
# TODO: once v1.10 is the minimum version supported, we can replace the

contrib/vagrant/scripts/03-install-kubernetes-worker.sh

+17-1
Original file line numberDiff line numberDiff line change
@@ -65,6 +65,22 @@ EOF
6565
sudo docker ps
6666
}
6767

68+
# node_ip_addresses returns the parameter for kubelet --node-ip
69+
# need to cover 3 scenarios:
70+
# 1. $node_ipv6 == "", this happens when ipv6 is disabled
71+
# 2. $node_ip == $node_ipv6, this happens when IPV6_EXT=1
72+
# 3. $node_ip != $node_ipv6 && $node_ipv6 != ""
73+
# we concatenate two vars on scenario 3 and return the non-empty var for the others
74+
function node_ip_addresses() {
75+
if [[ -z "$node_ipv6" ]]; then
76+
echo -n $node_ip
77+
elif [[ "$node_ipv6" == "$node_ip" ]]; then
78+
echo -n $node_ipv6
79+
else
80+
echo -n "$node_ip,$node_ipv6"
81+
fi
82+
}
83+
6884
log "Installing kubernetes worker components..."
6985

7086
set -e
@@ -335,7 +351,7 @@ ExecStart=/usr/bin/kubelet \\
335351
--kubeconfig=/var/lib/kubelet/kubelet.kubeconfig \\
336352
--fail-swap-on=false \\
337353
--make-iptables-util-chains=false \\
338-
--node-ip=${node_ip} \\
354+
--node-ip=$(node_ip_addresses) \\
339355
--register-node=true \\
340356
--serialize-image-pulls=false \\
341357
--tls-cert-file=/var/lib/kubelet/kubelet-kubelet-${hostname}.pem \\

0 commit comments

Comments
 (0)