Skip to content

Commit d0b684b

Browse files
authored
fix replace wait errors and rate limiting (rancher#30)
This is a decent-sized refactor, primarily in how the controller is set up, that I think enhances readability. Additionally, wrangler has been upgraded which now supports openapi schema creation so norman has been dropped. Primarily, however, this change is meant to address rancher#25 (and possibly rancher#36) caused by a bug in wrangler that: - prevented the controller from properly replacing jobs that could not be patched (pod template spec changes) - prevented the controller from replacing a job generated with the `apply.WithNoDelete` option - prevented the controller from processing external plan edits in a timely manner (no more long waits due to spurious replace-wait errors) Fixes rancher#25 Fixes rancher#34 Depends on rancher/wrangler#66
1 parent 0d5d520 commit d0b684b

File tree

935 files changed

+50936
-51179
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

935 files changed

+50936
-51179
lines changed

Dockerfile.dapper

+1
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@ ENV DAPPER_OUTPUT ./bin ./dist
3939
ENV DAPPER_DOCKER_SOCKET true
4040
ENV HOME ${DAPPER_SOURCE}
4141
ENV KUBECONFIG /root/.kube/config
42+
ENV KUBEHOST 172.17.0.1
4243
ENV GO111MODULE off
4344
WORKDIR ${DAPPER_SOURCE}
4445

Makefile

+2-1
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,8 @@ TARGETS := $(shell ls scripts)
1010
$(TARGETS): .dapper
1111
./.dapper $@
1212

13-
e2e: | e2e-sonobuoy e2e-verify
13+
e2e: e2e-sonobuoy
14+
$(MAKE) e2e-verify
1415

1516
clean:
1617
rm -rvf ./bin ./dist

e2e/cluster/local/images/k3s/Dockerfile

+1
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ RUN set -x \
1919
&& k3s --version
2020

2121
COPY scratch/*-${ARCH}.tar /var/lib/rancher/k3s/agent/images/
22+
#ADD https://github.com/rancher/k3s/releases/download/${K3S_VERSION}/k3s-airgap-images-${ARCH}.tar /var/lib/rancher/k3s/agent/images/
2223

2324
VOLUME /var/lib/cni
2425
VOLUME /var/lib/kubelet

e2e/cluster/local/scripts/cluster-get-kubeconfig

+1-1
Original file line numberDiff line numberDiff line change
@@ -5,5 +5,5 @@ set -e -o pipefail
55
cd $(dirname $0)/..
66

77
mkdir -vp "$(dirname ${KUBECONFIG:="${HOME}/.kube/config"})"
8-
docker exec local-leader kubectl config view --raw | sed -e 's/127.0.0.1/172.17.0.1/g' > "${KUBECONFIG}"
8+
docker exec local-leader kubectl config view --raw | sed -e "s/127.0.0.1/${KUBEHOST:=127.0.0.1}/g" > "${KUBECONFIG}"
99
#echo "${KUBECONFIG}" >&2

e2e/framework/controller/deployment.go

+1-2
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,6 @@ import (
88
appsv1 "k8s.io/api/apps/v1"
99
corev1 "k8s.io/api/core/v1"
1010
clientset "k8s.io/client-go/kubernetes"
11-
schedulerapi "k8s.io/kubernetes/pkg/scheduler/api"
1211
e2eframework "k8s.io/kubernetes/test/e2e/framework"
1312
e2edeployment "k8s.io/kubernetes/test/e2e/framework/deployment"
1413
)
@@ -59,7 +58,7 @@ func DeploymentWithTolerations(toleration ...corev1.Toleration) DeploymentOption
5958
func DeploymentDefaultTolerations() DeploymentOption {
6059
return DeploymentWithTolerations(
6160
corev1.Toleration{
62-
Key: schedulerapi.TaintNodeUnschedulable,
61+
Key: corev1.TaintNodeUnschedulable,
6362
Operator: corev1.TolerationOpExists,
6463
Effect: corev1.TaintEffectNoSchedule,
6564
},

e2e/framework/framework.go

+32-5
Original file line numberDiff line numberDiff line change
@@ -7,14 +7,17 @@ import (
77

88
"github.com/onsi/ginkgo"
99
"github.com/rancher/system-upgrade-controller/e2e/framework/controller"
10+
upgradeapi "github.com/rancher/system-upgrade-controller/pkg/apis/upgrade.cattle.io"
1011
upgradeapiv1 "github.com/rancher/system-upgrade-controller/pkg/apis/upgrade.cattle.io/v1"
11-
upgrade "github.com/rancher/system-upgrade-controller/pkg/generated/clientset/versioned"
12+
upgradecln "github.com/rancher/system-upgrade-controller/pkg/generated/clientset/versioned"
1213
upgradescheme "github.com/rancher/system-upgrade-controller/pkg/generated/clientset/versioned/scheme"
1314
"github.com/rancher/wrangler/pkg/condition"
1415
appsv1 "k8s.io/api/apps/v1"
16+
batchv1 "k8s.io/api/batch/v1"
1517
corev1 "k8s.io/api/core/v1"
1618
rbacv1 "k8s.io/api/rbac/v1"
1719
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
20+
"k8s.io/apimachinery/pkg/labels"
1821
"k8s.io/apimachinery/pkg/runtime/schema"
1922
"k8s.io/apimachinery/pkg/util/wait"
2023
"k8s.io/apimachinery/pkg/watch"
@@ -38,7 +41,7 @@ type Options struct {
3841
type Client struct {
3942
framework.Framework
4043

41-
UpgradeClientSet *upgrade.Clientset
44+
UpgradeClientSet *upgradecln.Clientset
4245

4346
controllerDeployment *appsv1.Deployment
4447
controllerServiceAccount *corev1.ServiceAccount
@@ -116,8 +119,8 @@ func (c *Client) DeletePlans(options *metav1.DeleteOptions, listOpts metav1.List
116119
return c.UpgradeClientSet.UpgradeV1().Plans(c.Namespace.Name).DeleteCollection(options, listOpts)
117120
}
118121

119-
func (c *Client) PollPlanCondition(name string, cond condition.Cond, interval, timeout time.Duration) (plan *upgradeapiv1.Plan, err error) {
120-
return plan, wait.Poll(interval, timeout, func() (bool, error) {
122+
func (c *Client) WaitForPlanCondition(name string, cond condition.Cond, timeout time.Duration) (plan *upgradeapiv1.Plan, err error) {
123+
return plan, wait.Poll(time.Second, timeout, func() (bool, error) {
121124
plan, err = c.GetPlan(name, metav1.GetOptions{})
122125
if err != nil {
123126
return false, err
@@ -126,6 +129,30 @@ func (c *Client) PollPlanCondition(name string, cond condition.Cond, interval, t
126129
})
127130
}
128131

132+
func (c *Client) WaitForPlanJobs(plan *upgradeapiv1.Plan, count int, timeout time.Duration) (jobs []batchv1.Job, err error) {
133+
complete := condition.Cond(batchv1.JobComplete)
134+
failed := condition.Cond(batchv1.JobFailed)
135+
136+
labelSelector := labels.SelectorFromSet(labels.Set{
137+
upgradeapi.LabelPlan: plan.Name,
138+
})
139+
140+
return jobs, wait.Poll(5*time.Second, timeout, func() (bool, error) {
141+
list, err := c.ClientSet.BatchV1().Jobs(plan.Namespace).List(metav1.ListOptions{
142+
LabelSelector: labelSelector.String(),
143+
})
144+
if err != nil {
145+
return false, err
146+
}
147+
for _, item := range list.Items {
148+
if failed.IsTrue(&item) || complete.IsTrue(&item) {
149+
jobs = append(jobs, item)
150+
}
151+
}
152+
return len(jobs) >= count, nil
153+
})
154+
}
155+
129156
func (c *Client) BeforeEach() {
130157
c.beforeFramework()
131158
c.Framework.BeforeEach()
@@ -204,7 +231,7 @@ func (c *Client) beforeFramework() {
204231
config.NegotiatedSerializer = upgradescheme.Codecs
205232
}
206233

207-
c.UpgradeClientSet, err = upgrade.NewForConfig(config)
234+
c.UpgradeClientSet, err = upgradecln.NewForConfig(config)
208235
framework.ExpectNoError(err)
209236

210237
restClient, err := rest.RESTClientFor(config)

e2e/suite/job_generate_test.go

+62
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,62 @@
1+
package suite_test
2+
3+
import (
4+
"time"
5+
6+
batchv1 "k8s.io/api/batch/v1"
7+
8+
"github.com/rancher/system-upgrade-controller/e2e/framework"
9+
upgradeapiv1 "github.com/rancher/system-upgrade-controller/pkg/apis/upgrade.cattle.io/v1"
10+
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
11+
)
12+
13+
var _ = Describe("Job Generation", func() {
14+
e2e := framework.New("generate")
15+
16+
When("fails because of a bad plan", func() {
17+
var (
18+
err error
19+
plan *upgradeapiv1.Plan
20+
jobs []batchv1.Job
21+
)
22+
BeforeEach(func() {
23+
plan = e2e.NewPlan("fail-then-succeed-", "library/alpine:3.11", []string{"sh", "-c"}, "exit 1")
24+
plan.Spec.Version = "latest"
25+
plan.Spec.Concurrency = 1
26+
plan.Spec.NodeSelector = &metav1.LabelSelector{
27+
MatchExpressions: []metav1.LabelSelectorRequirement{{
28+
Key: "node-role.kubernetes.io/master",
29+
Operator: metav1.LabelSelectorOpDoesNotExist,
30+
}},
31+
}
32+
plan, err = e2e.CreatePlan(plan)
33+
Expect(err).ToNot(HaveOccurred())
34+
35+
plan, err = e2e.WaitForPlanCondition(plan.Name, upgradeapiv1.PlanLatestResolved, 30*time.Second)
36+
Expect(err).ToNot(HaveOccurred())
37+
Expect(upgradeapiv1.PlanLatestResolved.IsTrue(plan)).To(BeTrue())
38+
39+
jobs, err = e2e.WaitForPlanJobs(plan, 1, 120*time.Second)
40+
Expect(err).ToNot(HaveOccurred())
41+
Expect(jobs).To(HaveLen(1))
42+
Expect(jobs[0].Status.Succeeded).To(BeNumerically("==", 0))
43+
Expect(jobs[0].Status.Failed).To(BeNumerically(">=", 1))
44+
45+
plan, err = e2e.GetPlan(plan.Name, metav1.GetOptions{})
46+
Expect(err).ToNot(HaveOccurred())
47+
48+
plan.Spec.Upgrade.Args = []string{"exit 0"}
49+
plan, err = e2e.UpdatePlan(plan)
50+
Expect(err).ToNot(HaveOccurred())
51+
52+
jobs, err = e2e.WaitForPlanJobs(plan, 1, 120*time.Second)
53+
Expect(err).ToNot(HaveOccurred())
54+
Expect(jobs).To(HaveLen(1))
55+
})
56+
It("should apply successfully after edit", func() {
57+
Expect(jobs).To(HaveLen(1))
58+
Expect(jobs[0].Status.Succeeded).To(BeNumerically("==", 1))
59+
Expect(jobs[0].Status.Failed).To(BeNumerically("==", 0))
60+
})
61+
})
62+
})

e2e/suite/plan_resolve_test.go

+21-16
Original file line numberDiff line numberDiff line change
@@ -9,19 +9,20 @@ import (
99
upgradeapiv1 "github.com/rancher/system-upgrade-controller/pkg/apis/upgrade.cattle.io/v1"
1010
)
1111

12-
var _ = Describe("Upgrade", func() {
12+
var _ = Describe("Plan Resolution", func() {
1313
e2e := framework.New("resolve")
1414

15-
When("plan missing channel and version", func() {
15+
When("missing channel and version", func() {
1616
var (
1717
err error
18-
plan = e2e.NewPlan("missing-", "", nil)
18+
plan *upgradeapiv1.Plan
1919
)
2020
BeforeEach(func() {
21+
plan = e2e.NewPlan("missing-", "", nil)
2122
plan, err = e2e.CreatePlan(plan)
2223
Expect(err).ToNot(HaveOccurred())
2324

24-
plan, err = e2e.PollPlanCondition(plan.Name, upgradeapiv1.PlanLatestResolved, 2*time.Second, 30*time.Second)
25+
plan, err = e2e.WaitForPlanCondition(plan.Name, upgradeapiv1.PlanLatestResolved, 30*time.Second)
2526
Expect(err).ToNot(HaveOccurred())
2627
})
2728
It("should not resolve", func() {
@@ -31,18 +32,19 @@ var _ = Describe("Upgrade", func() {
3132
})
3233
})
3334

34-
When("plan has version", func() {
35+
When("has version", func() {
3536
var (
3637
err error
37-
plan = e2e.NewPlan("version-", "", nil)
38+
plan *upgradeapiv1.Plan
3839
)
3940
BeforeEach(func() {
41+
plan = e2e.NewPlan("version-", "", nil)
4042
plan.Spec.Version = "test"
4143

4244
plan, err = e2e.CreatePlan(plan)
4345
Expect(err).ToNot(HaveOccurred())
4446

45-
plan, err = e2e.PollPlanCondition(plan.Name, upgradeapiv1.PlanLatestResolved, 2*time.Second, 30*time.Second)
47+
plan, err = e2e.WaitForPlanCondition(plan.Name, upgradeapiv1.PlanLatestResolved, 30*time.Second)
4648
Expect(err).ToNot(HaveOccurred())
4749
})
4850
It("should resolve", func() {
@@ -53,19 +55,20 @@ var _ = Describe("Upgrade", func() {
5355
})
5456
})
5557

56-
When("plan has version with semver+metadata", func() {
58+
When("has version with semver+metadata", func() {
5759
var (
5860
err error
59-
plan = e2e.NewPlan("version-semver-metadata-", "", nil)
61+
plan *upgradeapiv1.Plan
6062
semver = "v1.2.3+test"
6163
)
6264
BeforeEach(func() {
65+
plan = e2e.NewPlan("version-semver-metadata-", "", nil)
6366
plan.Spec.Version = semver
6467

6568
plan, err = e2e.CreatePlan(plan)
6669
Expect(err).ToNot(HaveOccurred())
6770

68-
plan, err = e2e.PollPlanCondition(plan.Name, upgradeapiv1.PlanLatestResolved, 2*time.Second, 30*time.Second)
71+
plan, err = e2e.WaitForPlanCondition(plan.Name, upgradeapiv1.PlanLatestResolved, 30*time.Second)
6972
Expect(err).ToNot(HaveOccurred())
7073
})
7174
It("should resolve", func() {
@@ -78,22 +81,23 @@ var _ = Describe("Upgrade", func() {
7881
})
7982
})
8083

81-
When("plan has channel", func() {
84+
When("has channel", func() {
8285
var (
8386
err error
84-
plan = e2e.NewPlan("channel-", "", nil)
87+
plan *upgradeapiv1.Plan
8588
channelSrv *httptest.Server
8689
channelTag = "test"
8790
)
8891
BeforeEach(func() {
8992
channelSrv = framework.ChannelServer(path.Join("/local", channelTag))
93+
plan = e2e.NewPlan("channel-", "", nil)
9094
plan.Spec.Channel = channelSrv.URL
9195
Expect(plan.Spec.Channel).ToNot(BeEmpty())
9296

9397
plan, err = e2e.CreatePlan(plan)
9498
Expect(err).ToNot(HaveOccurred())
9599

96-
plan, err = e2e.PollPlanCondition(plan.Name, upgradeapiv1.PlanLatestResolved, 2*time.Second, 30*time.Second)
100+
plan, err = e2e.WaitForPlanCondition(plan.Name, upgradeapiv1.PlanLatestResolved, 30*time.Second)
97101
Expect(err).ToNot(HaveOccurred())
98102
})
99103
AfterEach(func() {
@@ -109,22 +113,23 @@ var _ = Describe("Upgrade", func() {
109113
})
110114
})
111115

112-
When("plan has channel with semver+metadata", func() {
116+
When("has channel with semver+metadata", func() {
113117
var (
114118
err error
115-
plan = e2e.NewPlan("channel-semver-metadata-", "", nil)
119+
plan *upgradeapiv1.Plan
116120
channelSrv *httptest.Server
117121
channelTag = "v1.2.3+test"
118122
)
119123
BeforeEach(func() {
120124
channelSrv = framework.ChannelServer(path.Join("/local/test", channelTag))
125+
plan = e2e.NewPlan("channel-semver-metadata-", "", nil)
121126
plan.Spec.Channel = channelSrv.URL
122127
Expect(plan.Spec.Channel).ToNot(BeEmpty())
123128

124129
plan, err = e2e.CreatePlan(plan)
125130
Expect(err).ToNot(HaveOccurred())
126131

127-
plan, err = e2e.PollPlanCondition(plan.Name, upgradeapiv1.PlanLatestResolved, 2*time.Second, 30*time.Second)
132+
plan, err = e2e.WaitForPlanCondition(plan.Name, upgradeapiv1.PlanLatestResolved, 30*time.Second)
128133
Expect(err).ToNot(HaveOccurred())
129134
})
130135
AfterEach(func() {

go.mod

+28-29
Original file line numberDiff line numberDiff line change
@@ -5,40 +5,39 @@ go 1.13
55
require (
66
github.com/onsi/ginkgo v1.10.1
77
github.com/onsi/gomega v1.7.0
8-
github.com/rancher/norman v0.0.0-20191015045301-1fdd5cbabdd9
9-
github.com/rancher/wrangler v0.3.1
8+
github.com/rancher/wrangler v0.4.2-0.20200225192203-e5307acd8846
109
github.com/rancher/wrangler-api v0.2.1-0.20191025043713-b1ca9c21825a
1110
github.com/sirupsen/logrus v1.4.2
1211
github.com/urfave/cli v1.22.2
13-
k8s.io/api v0.0.0
14-
k8s.io/apimachinery v0.0.0
12+
k8s.io/api v0.17.2
13+
k8s.io/apimachinery v0.17.2
1514
k8s.io/client-go v11.0.1-0.20190409021438-1a26190bd76a+incompatible
16-
k8s.io/kubernetes v1.16.3
15+
k8s.io/kubernetes v1.17.2
1716
)
1817

1918
replace (
20-
k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.16.3-k3s.2
21-
k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.16.3-k3s.2
22-
k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.16.3-k3s.2
23-
k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.16.3-k3s.2
24-
k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.16.3-k3s.2
25-
k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.16.3-k3s.2
26-
k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.16.3-k3s.2
27-
k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.16.3-k3s.2
28-
k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.16.3-k3s.2
29-
k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.16.3-k3s.2
30-
k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.16.3-k3s.2
31-
k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.16.3-k3s.2
32-
k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.16.3-k3s.2
33-
k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.16.3-k3s.2
34-
k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.16.3-k3s.2
35-
k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.16.3-k3s.2
36-
k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.16.3-k3s.2
37-
k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.16.3-k3s.2
38-
k8s.io/kubernetes => github.com/rancher/kubernetes v1.16.3-k3s.2
39-
k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.16.3-k3s.2
40-
k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.16.3-k3s.2
41-
k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.16.3-k3s.2
42-
k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.16.3-k3s.2
43-
k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.16.3-k3s.2
19+
k8s.io/api => github.com/rancher/kubernetes/staging/src/k8s.io/api v1.17.2-k3s1
20+
k8s.io/apiextensions-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiextensions-apiserver v1.17.2-k3s1
21+
k8s.io/apimachinery => github.com/rancher/kubernetes/staging/src/k8s.io/apimachinery v1.17.2-k3s1
22+
k8s.io/apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/apiserver v1.17.2-k3s1
23+
k8s.io/cli-runtime => github.com/rancher/kubernetes/staging/src/k8s.io/cli-runtime v1.17.2-k3s1
24+
k8s.io/client-go => github.com/rancher/kubernetes/staging/src/k8s.io/client-go v1.17.2-k3s1
25+
k8s.io/cloud-provider => github.com/rancher/kubernetes/staging/src/k8s.io/cloud-provider v1.17.2-k3s1
26+
k8s.io/cluster-bootstrap => github.com/rancher/kubernetes/staging/src/k8s.io/cluster-bootstrap v1.17.2-k3s1
27+
k8s.io/code-generator => github.com/rancher/kubernetes/staging/src/k8s.io/code-generator v1.17.2-k3s1
28+
k8s.io/component-base => github.com/rancher/kubernetes/staging/src/k8s.io/component-base v1.17.2-k3s1
29+
k8s.io/cri-api => github.com/rancher/kubernetes/staging/src/k8s.io/cri-api v1.17.2-k3s1
30+
k8s.io/csi-translation-lib => github.com/rancher/kubernetes/staging/src/k8s.io/csi-translation-lib v1.17.2-k3s1
31+
k8s.io/kube-aggregator => github.com/rancher/kubernetes/staging/src/k8s.io/kube-aggregator v1.17.2-k3s1
32+
k8s.io/kube-controller-manager => github.com/rancher/kubernetes/staging/src/k8s.io/kube-controller-manager v1.17.2-k3s1
33+
k8s.io/kube-proxy => github.com/rancher/kubernetes/staging/src/k8s.io/kube-proxy v1.17.2-k3s1
34+
k8s.io/kube-scheduler => github.com/rancher/kubernetes/staging/src/k8s.io/kube-scheduler v1.17.2-k3s1
35+
k8s.io/kubectl => github.com/rancher/kubernetes/staging/src/k8s.io/kubectl v1.17.2-k3s1
36+
k8s.io/kubelet => github.com/rancher/kubernetes/staging/src/k8s.io/kubelet v1.17.2-k3s1
37+
k8s.io/kubernetes => github.com/rancher/kubernetes v1.17.2-k3s1
38+
k8s.io/legacy-cloud-providers => github.com/rancher/kubernetes/staging/src/k8s.io/legacy-cloud-providers v1.17.2-k3s1
39+
k8s.io/metrics => github.com/rancher/kubernetes/staging/src/k8s.io/metrics v1.17.2-k3s1
40+
k8s.io/sample-apiserver => github.com/rancher/kubernetes/staging/src/k8s.io/sample-apiserver v1.17.2-k3s1
41+
k8s.io/sample-cli-plugin => github.com/rancher/kubernetes/staging/src/k8s.io/sample-cli-plugin v1.17.2-k3s1
42+
k8s.io/sample-controller => github.com/rancher/kubernetes/staging/src/k8s.io/sample-controller v1.17.2-k3s1
4443
)

0 commit comments

Comments
 (0)