diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 19e96fdf2..ae394094f 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -9,7 +9,7 @@ on:
       - main
       - 'release-*'
 env:
-  K8S_VERSION: 1.26.1
+  K8S_VERSION: 1.27.1
 permissions:
   contents: read
diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml
index 15a88db5b..7c4984181 100644
--- a/.github/workflows/e2e.yml
+++ b/.github/workflows/e2e.yml
@@ -29,10 +29,6 @@ jobs:
             targetjob: e2e-dsa
             runner: simics-gnr
             images: intel-dsa-plugin intel-idxd-config-initcontainer accel-config-demo intel-deviceplugin-operator
-          - name: e2e-dlb-gnr
-            targetjob: e2e-dlb
-            runner: simics-gnr
-            images: intel-dlb-plugin intel-dlb-initcontainer dlb-libdlb-demo
           - name: e2e-fpga
             runner: fpga
             images: intel-fpga-plugin intel-fpga-initcontainer intel-fpga-admissionwebhook opae-nlb-demo
diff --git a/Jenkinsfile b/Jenkinsfile
index 44ea0440a..78f304752 100644
--- a/Jenkinsfile
+++ b/Jenkinsfile
@@ -8,7 +8,7 @@ pipeline {
     environment {
         GO111MODULE="on"
         REG="cloud-native-image-registry.westus.cloudapp.azure.com/"
-        K8S_VERSION="1.26.1"
+        K8S_VERSION="1.27.1"
         GOLANGCI_LINT_VERSION="v1.52.2"
         GO_VERSION="1.20"
         GO_TAR="go${GO_VERSION}.linux-amd64.tar.gz"
diff --git a/Makefile b/Makefile
index 073c96f16..dfe44790d 100644
--- a/Makefile
+++ b/Makefile
@@ -140,28 +140,28 @@ TAG?=devel
 export TAG

 e2e-fpga:
-	@$(GO) test -v ./test/e2e/... -ginkgo.v -ginkgo.progress -ginkgo.focus "FPGA" -delete-namespace-on-failure=false
+	@$(GO) test -v ./test/e2e/... -ginkgo.v -ginkgo.show-node-events -ginkgo.focus "FPGA" -delete-namespace-on-failure=false

 e2e-qat:
-	@$(GO) test -v ./test/e2e/... -ginkgo.v -ginkgo.progress -ginkgo.focus "QAT Gen2" -delete-namespace-on-failure=false
+	@$(GO) test -v ./test/e2e/... -ginkgo.v -ginkgo.show-node-events -ginkgo.focus "QAT Gen2" -delete-namespace-on-failure=false

 e2e-qat4:
-	@$(GO) test -v ./test/e2e/... -ginkgo.v -ginkgo.progress -ginkgo.focus "QAT Gen4" -ginkgo.skip "dpdk crypto-perf" -delete-namespace-on-failure=false
+	@$(GO) test -v ./test/e2e/... -ginkgo.v -ginkgo.show-node-events -ginkgo.focus "QAT Gen4" -ginkgo.skip "dpdk crypto-perf" -delete-namespace-on-failure=false

 e2e-sgx:
-	@$(GO) test -v ./test/e2e/... -ginkgo.v -ginkgo.progress -ginkgo.focus "SGX" -delete-namespace-on-failure=false
+	@$(GO) test -v ./test/e2e/... -ginkgo.v -ginkgo.show-node-events -ginkgo.focus "SGX" -delete-namespace-on-failure=false

 e2e-gpu:
-	@$(GO) test -v ./test/e2e/... -ginkgo.v -ginkgo.progress -ginkgo.focus "GPU" -delete-namespace-on-failure=false
+	@$(GO) test -v ./test/e2e/... -ginkgo.v -ginkgo.show-node-events -ginkgo.focus "GPU" -delete-namespace-on-failure=false

 e2e-dsa:
-	@$(GO) test -v ./test/e2e/... -ginkgo.v -ginkgo.progress -ginkgo.focus "DSA" -delete-namespace-on-failure=false
+	@$(GO) test -v ./test/e2e/... -ginkgo.v -ginkgo.show-node-events -ginkgo.focus "DSA" -delete-namespace-on-failure=false

 e2e-iaa:
-	@$(GO) test -v ./test/e2e/... -ginkgo.v -ginkgo.progress -ginkgo.focus "IAA" -delete-namespace-on-failure=false
+	@$(GO) test -v ./test/e2e/... -ginkgo.v -ginkgo.show-node-events -ginkgo.focus "IAA" -delete-namespace-on-failure=false

 e2e-dlb:
-	@$(GO) test -v ./test/e2e/... -ginkgo.v -ginkgo.progress -ginkgo.focus "DLB" -delete-namespace-on-failure=false
+	@$(GO) test -v ./test/e2e/... -ginkgo.v -ginkgo.show-node-events -ginkgo.focus "DLB" -delete-namespace-on-failure=false

 terrascan:
 	@ls deployments/*/kustomization.yaml | while read f ; \
diff --git a/cmd/gpu_plugin/rm/gpu_plugin_resource_manager_test.go b/cmd/gpu_plugin/rm/gpu_plugin_resource_manager_test.go
index cc5e6f542..8201c8c57 100644
--- a/cmd/gpu_plugin/rm/gpu_plugin_resource_manager_test.go
+++ b/cmd/gpu_plugin/rm/gpu_plugin_resource_manager_test.go
@@ -85,6 +85,12 @@ func (w *mockPodResources) GetAllocatableResources(ctx context.Context,
 	return nil, nil
 }

+func (w *mockPodResources) Get(ctx context.Context,
+	in *podresourcesv1.GetPodResourcesRequest,
+	opts ...grpc.CallOption) (*podresourcesv1.GetPodResourcesResponse, error) {
+	return nil, nil
+}
+
 func newMockResourceManager(pods []v1.Pod) ResourceManager {
 	client, err := grpc.Dial("fake", grpc.WithTransportCredentials(insecure.NewCredentials()))
 	if err != nil {
diff --git a/cmd/operator/main.go b/cmd/operator/main.go
index 705fc76b6..c2f4b0a14 100644
--- a/cmd/operator/main.go
+++ b/cmd/operator/main.go
@@ -20,6 +20,7 @@ import (
 	"os"
 	"strings"

+	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	clientgoscheme "k8s.io/client-go/kubernetes/scheme"
@@ -41,6 +42,7 @@ import (
 	"github.com/intel/intel-device-plugins-for-kubernetes/pkg/fpgacontroller"
 	"github.com/intel/intel-device-plugins-for-kubernetes/pkg/fpgacontroller/patcher"
 	sgxwebhook "github.com/intel/intel-device-plugins-for-kubernetes/pkg/webhooks/sgx"
+	"sigs.k8s.io/controller-runtime/pkg/builder"
 )

 var (
@@ -163,9 +165,13 @@ func main() {
 	}

 	if contains(devices, "sgx") {
-		mgr.GetWebhookServer().Register("/pods-sgx", &webhook.Admission{
-			Handler: &sgxwebhook.Mutator{Client: mgr.GetClient()},
-		})
+		if err = builder.WebhookManagedBy(mgr).
+			For(&corev1.Pod{}).
+			WithDefaulter(&sgxwebhook.Mutator{}).
+			Complete(); err != nil {
+			setupLog.Error(err, "unable to create webhook", "webhook", "Pod")
+			os.Exit(1)
+		}
 	}

 	if contains(devices, "fpga") {
diff --git a/cmd/sgx_admissionwebhook/main.go b/cmd/sgx_admissionwebhook/main.go
index c73997506..0463dc43f 100644
--- a/cmd/sgx_admissionwebhook/main.go
+++ b/cmd/sgx_admissionwebhook/main.go
@@ -18,15 +18,15 @@ import (
 	"os"

 	sgxwebhook "github.com/intel/intel-device-plugins-for-kubernetes/pkg/webhooks/sgx"
-	"k8s.io/apimachinery/pkg/runtime"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/klog/v2"
 	"k8s.io/klog/v2/klogr"
 	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/builder"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
 )

 var (
-	scheme   = runtime.NewScheme()
 	setupLog = ctrl.Log.WithName("setup")
 )

@@ -43,7 +43,6 @@ func main() {
 	}

 	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
-		Scheme:             scheme,
 		MetricsBindAddress: "0",
 		Logger:             ctrl.Log.WithName("SgxAdmissionWebhook"),
 		WebhookServer:      webHook,
@@ -53,9 +52,13 @@ func main() {
 		os.Exit(1)
 	}

-	mgr.GetWebhookServer().Register("/pods-sgx", &webhook.Admission{
-		Handler: &sgxwebhook.Mutator{Client: mgr.GetClient()},
-	})
+	if err := builder.WebhookManagedBy(mgr).
+		For(&corev1.Pod{}).
+		WithDefaulter(&sgxwebhook.Mutator{}).
+		Complete(); err != nil {
+		setupLog.Error(err, "unable to create webhook", "webhook", "Pod")
+		os.Exit(1)
+	}

 	setupLog.Info("starting manager")
diff --git a/deployments/operator/webhook/manifests.yaml b/deployments/operator/webhook/manifests.yaml
index 862e809fc..a45c27943 100644
--- a/deployments/operator/webhook/manifests.yaml
+++ b/deployments/operator/webhook/manifests.yaml
@@ -171,7 +171,7 @@ webhooks:
     service:
       name: webhook-service
       namespace: system
-      path: /pods-sgx
+      path: /mutate--v1-pod
   failurePolicy: Ignore
   name: sgx.mutator.webhooks.intel.com
   reinvocationPolicy: IfNeeded
diff --git a/deployments/sgx_admissionwebhook/webhook/manifests.yaml b/deployments/sgx_admissionwebhook/webhook/manifests.yaml
index 7d2d85af3..685d40289 100644
--- a/deployments/sgx_admissionwebhook/webhook/manifests.yaml
+++ b/deployments/sgx_admissionwebhook/webhook/manifests.yaml
@@ -10,7 +10,7 @@ webhooks:
     service:
       name: webhook-service
       namespace: system
-      path: /pods-sgx
+      path: /mutate--v1-pod
   failurePolicy: Ignore
   name: sgx.mutator.webhooks.intel.com
   reinvocationPolicy: IfNeeded
diff --git a/go.mod b/go.mod
index 335006b72..1a3632de5 100644
--- a/go.mod
+++ b/go.mod
@@ -9,33 +9,38 @@ require (
 	github.com/google/go-cmp v0.5.9
 	github.com/google/gousb v1.1.2
 	github.com/klauspost/cpuid/v2 v2.2.4
-	github.com/onsi/ginkgo/v2 v2.6.0
-	github.com/onsi/gomega v1.24.1
+	github.com/onsi/ginkgo/v2 v2.9.2
+	github.com/onsi/gomega v1.27.6
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_model v0.4.0
 	github.com/prometheus/common v0.43.0
 	golang.org/x/sys v0.8.0
 	golang.org/x/text v0.9.0
 	google.golang.org/grpc v1.55.0
-	k8s.io/api v0.26.1
-	k8s.io/apimachinery v0.26.1
-	k8s.io/client-go v1.5.2
-	k8s.io/component-base v0.26.1
-	k8s.io/klog/v2 v2.80.1
-	k8s.io/kubelet v1.26.1
-	k8s.io/kubernetes v1.26.1
+	k8s.io/api v0.27.1
+	k8s.io/apimachinery v0.27.1
+	k8s.io/client-go v0.27.1
+	k8s.io/component-base v0.27.1
+	k8s.io/klog/v2 v2.90.1
+	k8s.io/kubelet v1.27.1
+	k8s.io/kubernetes v1.27.1
 	k8s.io/pod-security-admission v0.0.0
-	k8s.io/utils v0.0.0-20221128185143-99ec85e7a448
-	sigs.k8s.io/controller-runtime v0.14.6
+	k8s.io/utils v0.0.0-20230209194617-a36077c30491
+	sigs.k8s.io/controller-runtime v0.15.0-alpha.0
 	sigs.k8s.io/yaml v1.3.0
 )

 require (
 	github.com/Microsoft/go-winio v0.4.17 // indirect
+	github.com/NYTimes/gziphandler v1.1.1 // indirect
+	github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
+	github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/blang/semver/v4 v4.0.0 // indirect
 	github.com/cenkalti/backoff/v4 v4.1.3 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
+	github.com/coreos/go-semver v0.3.0 // indirect
+	github.com/coreos/go-systemd/v22 v22.4.0 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/docker/distribution v2.8.1+incompatible // indirect
 	github.com/emicklei/go-restful/v3 v3.9.0 // indirect
@@ -43,22 +48,27 @@ require (
 	github.com/evanphx/json-patch/v5 v5.6.0 // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect - github.com/go-openapi/jsonpointer v0.19.5 // indirect - github.com/go-openapi/jsonreference v0.20.0 // indirect - github.com/go-openapi/swag v0.19.14 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonreference v0.20.1 // indirect + github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect + github.com/google/cel-go v0.12.6 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect github.com/google/uuid v1.3.0 // indirect + github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect github.com/imdario/mergo v0.3.12 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect - github.com/mailru/easyjson v0.7.6 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/mitchellh/mapstructure v1.4.1 // indirect github.com/moby/spdystream v0.2.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect @@ -68,7 +78,12 @@ require ( github.com/prometheus/procfs v0.9.0 // indirect github.com/spf13/cobra v1.6.0 // indirect github.com/spf13/pflag v1.0.5 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 // indirect + github.com/stoewer/go-strcase v1.2.0 // indirect + go.etcd.io/etcd/api/v3 v3.5.7 // indirect + go.etcd.io/etcd/client/pkg/v3 v3.5.7 // indirect + go.etcd.io/etcd/client/v3 v3.5.7 // indirect + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 // indirect go.opentelemetry.io/otel v1.10.0 // indirect go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.10.0 // indirect @@ -77,60 +92,65 @@ require ( go.opentelemetry.io/otel/sdk v1.10.0 // indirect go.opentelemetry.io/otel/trace v1.10.0 // indirect go.opentelemetry.io/proto/otlp v0.19.0 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.24.0 // indirect golang.org/x/crypto v0.1.0 // indirect golang.org/x/net v0.9.0 // indirect golang.org/x/oauth2 v0.7.0 // indirect + 
golang.org/x/sync v0.1.0 // indirect golang.org/x/term v0.7.0 // indirect golang.org/x/time v0.3.0 // indirect + golang.org/x/tools v0.7.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect google.golang.org/protobuf v1.30.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.26.1 // indirect - k8s.io/apiserver v0.26.1 // indirect - k8s.io/component-helpers v0.26.1 // indirect - k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect + k8s.io/apiextensions-apiserver v0.27.1 // indirect + k8s.io/apiserver v0.27.1 // indirect + k8s.io/cloud-provider v0.0.0 // indirect + k8s.io/component-helpers v0.27.1 // indirect + k8s.io/controller-manager v0.27.1 // indirect + k8s.io/kms v0.27.1 // indirect + k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a // indirect k8s.io/kubectl v0.0.0 // indirect - sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35 // indirect - sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect + sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.1 // indirect + sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect ) replace ( - k8s.io/api => k8s.io/api v0.26.1 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.1 - k8s.io/apimachinery => k8s.io/apimachinery v0.26.2-rc.0 - k8s.io/apiserver => k8s.io/apiserver v0.26.1 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.1 - k8s.io/client-go => k8s.io/client-go v0.26.1 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.1 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.1 - k8s.io/code-generator => k8s.io/code-generator v0.26.2-rc.0 - k8s.io/component-base => k8s.io/component-base v0.26.1 - k8s.io/component-helpers => k8s.io/component-helpers v0.26.1 - k8s.io/controller-manager => k8s.io/controller-manager v0.26.1 - k8s.io/cri-api => k8s.io/cri-api v0.26.2-rc.0 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.1 - k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.26.1 - k8s.io/kms => k8s.io/kms v0.26.2-rc.0 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.1 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.1 - k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.1 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.1 - k8s.io/kubectl => k8s.io/kubectl v0.26.1 - k8s.io/kubelet => k8s.io/kubelet v0.26.1 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.1 - k8s.io/metrics => k8s.io/metrics v0.26.1 - k8s.io/mount-utils => k8s.io/mount-utils v0.26.2-rc.0 - k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.26.1 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.1 - k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.26.1 - k8s.io/sample-controller => k8s.io/sample-controller v0.26.1 + k8s.io/api => k8s.io/api v0.27.1 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.27.1 + k8s.io/apimachinery => k8s.io/apimachinery v0.28.0-alpha.0 + k8s.io/apiserver => k8s.io/apiserver v0.27.1 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.27.1 + k8s.io/client-go => k8s.io/client-go v0.27.1 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.27.1 + k8s.io/cluster-bootstrap => 
k8s.io/cluster-bootstrap v0.27.1 + k8s.io/code-generator => k8s.io/code-generator v0.27.1 + k8s.io/component-base => k8s.io/component-base v0.27.1 + k8s.io/component-helpers => k8s.io/component-helpers v0.27.1 + k8s.io/controller-manager => k8s.io/controller-manager v0.27.1 + k8s.io/cri-api => k8s.io/cri-api v0.28.0-alpha.0 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.27.1 + k8s.io/dynamic-resource-allocation => k8s.io/dynamic-resource-allocation v0.27.1 + k8s.io/kms => k8s.io/kms v0.27.1 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.27.1 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.27.1 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.27.1 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.27.1 + k8s.io/kubectl => k8s.io/kubectl v0.27.1 + k8s.io/kubelet => k8s.io/kubelet v0.27.1 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.27.1 + k8s.io/metrics => k8s.io/metrics v0.27.1 + k8s.io/mount-utils => k8s.io/mount-utils v0.27.1 + k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.27.1 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.27.1 + k8s.io/sample-cli-plugin => k8s.io/sample-cli-plugin v0.27.1 + k8s.io/sample-controller => k8s.io/sample-controller v0.27.1 ) - -replace github.com/onsi/gomega v1.24.1 => github.com/onsi/gomega v1.24.0 - -replace github.com/onsi/ginkgo/v2 v2.6.0 => github.com/onsi/ginkgo/v2 v2.4.0 diff --git a/go.sum b/go.sum index e306c1338..29cab82e6 100644 --- a/go.sum +++ b/go.sum @@ -13,12 +13,15 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/compute v1.15.1 h1:7UGq3QknM33pw5xATlpzeoomNxsacIVvTqTTvbfajmE= +cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -31,13 +34,21 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= 
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w= github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= +github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cqUQ3I= +github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= +github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 h1:yL7+Jz0jTC6yykIK/Wh74gnTJnrGr5AyrNMXuA0gves= +github.com/antlr/antlr4/runtime/Go/antlr v1.4.10/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= @@ -60,6 +71,10 @@ github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWH github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= +github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd/v22 v22.4.0 h1:y9YHcjnjynCd/DVbg5j9L/33jQM3MxJlbj/zWskzfGU= +github.com/coreos/go-systemd/v22 v22.4.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -68,7 +83,7 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= github.com/docker/distribution 
v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -100,16 +115,18 @@ github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/zapr v1.2.3 h1:a9vnzlIBPQBBkeaR9IuMUfmVOrQlkoC4YfPoFkX3T7A= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= -github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng= -github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonreference v0.20.1 h1:FBLnyygC4/IZZr893oiomc9XaghoveYTrLC1F86HID8= +github.com/go-openapi/jsonreference v0.20.1/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= +github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/golang-jwt/jwt/v4 v4.4.2 h1:rcc4lwaZgFMCZ5jxF9ABolDcIHdBytAFgqFPbSJQAYs= github.com/golang/glog 
v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= @@ -146,6 +163,9 @@ github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/cel-go v0.12.6 h1:kjeKudqV0OygrAqA9fX6J55S8gj+Jre2tckIm5RoG4M= +github.com/google/cel-go v0.12.6/go.mod h1:Jk7ljRzLBhkmiAwBoUxB1sZSCVBAzkqPF25olK/iRDw= github.com/google/gnostic v0.5.7-v3refs h1:FhTMOKj2VhjpouxvWJAV1TL304uMlb9zcDqkl6cEI54= github.com/google/gnostic v0.5.7-v3refs/go.mod h1:73MKFl6jIHelAJNaBGFzt3SPtZULs9dYrGFt8OiIsHQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -173,24 +193,33 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= 
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= github.com/inconshreveable/mousetrap v1.0.1 h1:U3uMjPSQEBMNp1lFxmllqCPM6P5u/Xq7Pgzkat/bFNc= github.com/inconshreveable/mousetrap v1.0.1/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -203,17 +232,18 @@ github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZX github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod 
h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag= +github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -223,11 +253,10 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= -github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= -github.com/onsi/gomega v1.24.0 h1:+0glovB9Jd6z3VR+ScSwQqXVTIfJcGA9UBM8yzQxhqg= -github.com/onsi/gomega v1.24.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/onsi/ginkgo/v2 v2.9.2 h1:BA2GMJOtfGAfagzYtrAlufIP0lq6QERkFmHLMLPwFSU= +github.com/onsi/ginkgo/v2 v2.9.2/go.mod h1:WHcJJG2dIlcCqVfBAwUCrJxSPFb6v4azBwgxeMeDuts= +github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -249,31 +278,54 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= +github.com/soheilhy/cmux v0.1.5 h1:jjzc5WVemNEDTLwv9tlmemhC73tI08BNOIGwBOo10Js= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/cobra v1.6.0 
h1:42a0n6jwCot1pUmomAp4T7DeMD+20LFv4Q54pxLf2LI= github.com/spf13/cobra v1.6.0/go.mod h1:IOw/AERYS7UzyrGinqmz6HLUo219MORXGxhbaJUqzrY= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/tmc/grpc-websocket-proxy v0.0.0-20220101234140-673ab2c3ae75 h1:6fotK7otjonDflCTK0BCfls4SPy3NcCVb5dqqmbRknE= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +go.etcd.io/bbolt v1.3.6 h1:/ecaJf0sk1l4l6V4awd65v2C3ILy7MSj+s/x1ADCIMU= +go.etcd.io/etcd/api/v3 v3.5.7 h1:sbcmosSVesNrWOJ58ZQFitHMdncusIifYcrBfwrlJSY= +go.etcd.io/etcd/api/v3 v3.5.7/go.mod h1:9qew1gCdDDLu+VwmeG+iFpL+QlpHTo7iubavdVDgCAA= +go.etcd.io/etcd/client/pkg/v3 v3.5.7 h1:y3kf5Gbp4e4q7egZdn5T7W9TSHUvkClN6u+Rq9mEOmg= +go.etcd.io/etcd/client/pkg/v3 v3.5.7/go.mod h1:o0Abi1MK86iad3YrWhgUsbGx1pmTS+hrORWc2CamuhY= +go.etcd.io/etcd/client/v2 v2.305.7 h1:AELPkjNR3/igjbO7CjyF1fPuVPjrblliiKj+Y6xSGOU= +go.etcd.io/etcd/client/v3 v3.5.7 h1:u/OhpiuCgYY8awOHlhIhmGIGpxfBU/GZBUP3m/3/Iz4= +go.etcd.io/etcd/client/v3 v3.5.7/go.mod h1:sOWmj9DZUMyAngS7QQwCyAXXAL6WhgTOPLNS/NabQgw= +go.etcd.io/etcd/pkg/v3 v3.5.7 h1:obOzeVwerFwZ9trMWapU/VjDcYUJb5OfgC1zqEGWO/0= 
+go.etcd.io/etcd/raft/v3 v3.5.7 h1:aN79qxLmV3SvIq84aNTliYGmjwsW6NqJSnqmI1HLJKc= +go.etcd.io/etcd/server/v3 v3.5.7 h1:BTBD8IJUV7YFgsczZMHhMTS67XuA4KpRquL0MFOJGRk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0 h1:Ajldaqhxqw/gNzQA45IKFWLdG7jZuXX/wBW1d5qvbUI= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0 h1:xFSRQBbXF6VvYRf2lqMJXxoB72XI1K/azav8TekHHSw= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.35.0/go.mod h1:h8TWwRAhQpOd0aM5nYsRD8+flnkj+526GEIVlarH7eY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1 h1:sxoY9kG1s1WpSYNyzm24rlwH4lnRYFXUVVBmKMBfRgw= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= go.opentelemetry.io/otel v1.10.0 h1:Y7DTJMR6zs1xkS/upamJYk0SxxN4C9AqRd77jmZnyY4= go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0 h1:TaB+1rQhddO1sF71MpZOZAuSPW1klK2M8XxfrBMfK7Y= @@ -292,9 +344,12 @@ go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqe go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= -go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= +go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -379,6 +434,8 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -472,6 +529,8 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.7.0 h1:W4OVu8VVOaIO0yzWMNdepAulS7YfoS3Zabrm8DOXXU4= +golang.org/x/tools v0.7.0/go.mod h1:4pg6aUX35JBAogB10C9AtvVL+qowtN4pT3CGSQex14s= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -572,11 +631,13 @@ google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -594,43 +655,49 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -k8s.io/api v0.26.1 h1:f+SWYiPd/GsiWwVRz+NbFyCgvv75Pk9NK6dlkZgpCRQ= -k8s.io/api v0.26.1/go.mod h1:xd/GBNgR0f707+ATNyPmQ1oyKSgndzXij81FzWGsejg= -k8s.io/apiextensions-apiserver v0.26.1 h1:cB8h1SRk6e/+i3NOrQgSFij1B2S0Y0wDoNl66bn8RMI= -k8s.io/apiextensions-apiserver v0.26.1/go.mod h1:AptjOSXDGuE0JICx/Em15PaoO7buLwTs0dGleIHixSM= -k8s.io/apimachinery v0.26.2-rc.0 h1:f9BARTuEy0MguW4KGK6VwEBT9BCe03lYde0wnWxBilk= -k8s.io/apimachinery v0.26.2-rc.0/go.mod h1:tnPmbONNJ7ByJNz9+n9kMjNP8ON+1qoAIIC70lztu74= -k8s.io/apiserver v0.26.1 h1:6vmnAqCDO194SVCPU3MU8NcDgSqsUA62tBUSWrFXhsc= -k8s.io/apiserver v0.26.1/go.mod h1:wr75z634Cv+sifswE9HlAo5FQ7UoUauIICRlOE+5dCg= -k8s.io/client-go 
v0.26.1 h1:87CXzYJnAMGaa/IDDfRdhTzxk/wzGZ+/HUQpqgVSZXU= -k8s.io/client-go v0.26.1/go.mod h1:IWNSglg+rQ3OcvDkhY6+QLeasV4OYHDjdqeWkDQZwGE= -k8s.io/component-base v0.26.1 h1:4ahudpeQXHZL5kko+iDHqLj/FSGAEUnSVO0EBbgDd+4= -k8s.io/component-base v0.26.1/go.mod h1:VHrLR0b58oC035w6YQiBSbtsf0ThuSwXP+p5dD/kAWU= -k8s.io/component-helpers v0.26.1 h1:Y5h1OYUJTGyHZlSAsc7mcfNsWF08S/MlrQyF/vn93mU= -k8s.io/component-helpers v0.26.1/go.mod h1:jxNTnHb1axLe93MyVuvKj9T/+f4nxBVrj/xf01/UNFk= -k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4= -k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E= -k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= -k8s.io/kubectl v0.26.1 h1:K8A0Jjlwg8GqrxOXxAbjY5xtmXYeYjLU96cHp2WMQ7s= -k8s.io/kubectl v0.26.1/go.mod h1:miYFVzldVbdIiXMrHZYmL/EDWwJKM+F0sSsdxsATFPo= -k8s.io/kubelet v0.26.1 h1:wQyCQYmLW6GN3v7gVTxnc3jAE4zMYDlzdF3FZV4rKas= -k8s.io/kubelet v0.26.1/go.mod h1:gFVZ1Ab4XdjtnYdVRATwGwku7FhTxo6LVEZwYoQaDT8= -k8s.io/kubernetes v1.26.1 h1:N+qxlptxpSU/VSLvqBGWyyw/kNhJRpEn1b5YP57+5rk= -k8s.io/kubernetes v1.26.1/go.mod h1:dEfAfGVZBOr2uZLeVazLPj/8E+t8jYFbQqCiBudkB8o= -k8s.io/pod-security-admission v0.26.1 h1:EDIxsYFeKMzNvN/JB0PgQcuwBP6fIkIG2O8ZWJhzOp4= -k8s.io/pod-security-admission v0.26.1/go.mod h1:hCbYTG5UtLlivmukkMPjAWf23PUBUHzEvR60xNVWN4c= -k8s.io/utils v0.0.0-20221128185143-99ec85e7a448 h1:KTgPnR10d5zhztWptI952TNtt/4u5h3IzDXkdIMuo2Y= -k8s.io/utils v0.0.0-20221128185143-99ec85e7a448/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/api v0.27.1 h1:Z6zUGQ1Vd10tJ+gHcNNNgkV5emCyW+v2XTmn+CLjSd0= +k8s.io/api v0.27.1/go.mod h1:z5g/BpAiD+f6AArpqNjkY+cji8ueZDU/WV1jcj5Jk4E= +k8s.io/apiextensions-apiserver v0.27.1 h1:Hp7B3KxKHBZ/FxmVFVpaDiXI6CCSr49P1OJjxKO6o4g= +k8s.io/apiextensions-apiserver v0.27.1/go.mod h1:8jEvRDtKjVtWmdkhOqE84EcNWJt/uwF8PC4627UZghY= +k8s.io/apimachinery v0.28.0-alpha.0 h1:GZf6I49h9Sjl2Rjc+jY72nEYApr1pCKEHoOP/KxWWrA= +k8s.io/apimachinery v0.28.0-alpha.0/go.mod h1:5ikh59fK3AJ287GUvpUsryoMFtH9zj/ARfWCo3AyXTM= +k8s.io/apiserver v0.27.1 h1:phY+BtXjjzd+ta3a4kYbomC81azQSLa1K8jo9RBw7Lg= +k8s.io/apiserver v0.27.1/go.mod h1:UGrOjLY2KsieA9Fw6lLiTObxTb8Z1xEba4uqSuMY0WU= +k8s.io/client-go v0.27.1 h1:oXsfhW/qncM1wDmWBIuDzRHNS2tLhK3BZv512Nc59W8= +k8s.io/client-go v0.27.1/go.mod h1:f8LHMUkVb3b9N8bWturc+EDtVVVwZ7ueTVquFAJb2vA= +k8s.io/cloud-provider v0.27.1 h1:482W9e2Yp8LDgTUKrXAxT+nH4pHS2TiBElI/CnfGWac= +k8s.io/cloud-provider v0.27.1/go.mod h1:oN7Zci2Ls2dorwSNd2fMiW/6DA40+F4o2QL70p63bqo= +k8s.io/component-base v0.27.1 h1:kEB8p8lzi4gCs5f2SPU242vOumHJ6EOsOnDM3tTuDTM= +k8s.io/component-base v0.27.1/go.mod h1:UGEd8+gxE4YWoigz5/lb3af3Q24w98pDseXcXZjw+E0= +k8s.io/component-helpers v0.27.1 h1:uY63v834MAHuf3fBiKGQGPq/cToU5kY5SW/58Xv0gl4= +k8s.io/component-helpers v0.27.1/go.mod h1:oOpwSYW1AdL+pU7abHADwX1ZcJl+5c8mnIkvoFZNFWA= +k8s.io/controller-manager v0.27.1 h1:+4OGWAzg4JVLEauPSmyQFIfrYrYQoUsC4MbHmRuPaFU= +k8s.io/controller-manager v0.27.1/go.mod h1:oe9vKl0RPiedlCXmeVbhkDV2yX8r7C4K/B8OGaKdYtY= +k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw= +k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kms v0.27.1 h1:JTSQbJb+mcobScQwF0bOmZhIwP17k8GvBsiLlA6SQqw= +k8s.io/kms v0.27.1/go.mod h1:VuTsw0uHlSycKLCkypCGxfFCjLfzf/5YMeATECd/zJA= +k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a 
h1:gmovKNur38vgoWfGtP5QOGNOA7ki4n6qNYoFAgMlNvg= +k8s.io/kube-openapi v0.0.0-20230308215209-15aac26d736a/go.mod h1:y5VtZWM9sHHc2ZodIH/6SHzXj+TPU5USoA8lcIeKEKY= +k8s.io/kubectl v0.27.1 h1:9T5c5KdpburYiW8XKQSH0Uly1kMNE90aGSnbYUZNdcA= +k8s.io/kubectl v0.27.1/go.mod h1:QsAkSmrRsKTPlAFzF8kODGDl4p35BIwQnc9XFhkcsy8= +k8s.io/kubelet v0.27.1 h1:IkfZ0N9CX/g6EDis7nJw8ZsOuHcpFA6cm0pXQx0g5TY= +k8s.io/kubelet v0.27.1/go.mod h1:g3cIhpZPawo/MvsdnmcLmqDJvDPdbUFkzfyLNz03nQg= +k8s.io/kubernetes v1.27.1 h1:DFeW4Lv+kh5DyYcezOzwmQAbC3VqXAxnMyZabALiRSc= +k8s.io/kubernetes v1.27.1/go.mod h1:TTwPjSCKQ+a/NTiFKRGjvOnEaQL8wIG40nsYH8Er4bA= +k8s.io/pod-security-admission v0.27.1 h1:if4d1zzcpNOZNvljvJ0nTCshFPUmnkIsy7KYJg7FP08= +k8s.io/pod-security-admission v0.27.1/go.mod h1:dICAHAC4DE0q+yrGuPJ8kuJ5dEsWtqNkclzCDckHj/s= +k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY= +k8s.io/utils v0.0.0-20230209194617-a36077c30491/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35 h1:+xBL5uTc+BkPBwmMi3vYfUJjq+N3K+H6PXeETwf5cPI= -sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.35/go.mod h1:WxjusMwXlKzfAs4p9km6XJRndVt2FROgMVCE4cdohFo= -sigs.k8s.io/controller-runtime v0.14.6 h1:oxstGVvXGNnMvY7TAESYk+lzr6S3V5VFxQ6d92KcwQA= -sigs.k8s.io/controller-runtime v0.14.6/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S6qbPc9s68h+0= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= -sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.1 h1:MB1zkK+WMOmfLxEpjr1wEmkpcIhZC7kfTkZ0stg5bog= +sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.1.1/go.mod h1:/4NLd21PQY0B+H+X0aDZdwUiVXYJQl/2NXA5KVtDiP4= +sigs.k8s.io/controller-runtime v0.15.0-alpha.0 h1:ukmgReObs7FEUNBcn2NLxn/DiEQ8g1yC8YvpX0HGiyE= +sigs.k8s.io/controller-runtime v0.15.0-alpha.0/go.mod h1:icJQ1mtZAutJ9iOzS2V2VJQCBVV2ir+xahBeTHCCZGs= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= +sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= diff --git a/pkg/apis/deviceplugin/v1/dlbdeviceplugin_webhook.go b/pkg/apis/deviceplugin/v1/dlbdeviceplugin_webhook.go index 1b35eb561..dd5cfd02f 100644 --- a/pkg/apis/deviceplugin/v1/dlbdeviceplugin_webhook.go +++ b/pkg/apis/deviceplugin/v1/dlbdeviceplugin_webhook.go @@ -20,6 +20,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" "github.com/intel/intel-device-plugins-for-kubernetes/pkg/controllers" ) @@ -60,28 +61,28 @@ func (r *DlbDevicePlugin) Default() { var _ webhook.Validator = &DlbDevicePlugin{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. 
-func (r *DlbDevicePlugin) ValidateCreate() error {
+func (r *DlbDevicePlugin) ValidateCreate() (admission.Warnings, error) {
 	dlbdevicepluginlog.Info("validate create", "name", r.Name)

 	if controllers.GetDevicePluginCount(dlbPluginKind) > 0 {
-		return errors.Errorf("an instance of %q already exists in the cluster", dlbPluginKind)
+		return nil, errors.Errorf("an instance of %q already exists in the cluster", dlbPluginKind)
 	}

-	return r.validatePlugin()
+	return nil, r.validatePlugin()
 }

 // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (r *DlbDevicePlugin) ValidateUpdate(old runtime.Object) error {
+func (r *DlbDevicePlugin) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
 	dlbdevicepluginlog.Info("validate update", "name", r.Name)

-	return r.validatePlugin()
+	return nil, r.validatePlugin()
 }

 // ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (r *DlbDevicePlugin) ValidateDelete() error {
+func (r *DlbDevicePlugin) ValidateDelete() (admission.Warnings, error) {
 	dlbdevicepluginlog.Info("validate delete", "name", r.Name)

-	return nil
+	return nil, nil
 }

 func (r *DlbDevicePlugin) validatePlugin() error {
diff --git a/pkg/apis/deviceplugin/v1/dsadeviceplugin_webhook.go b/pkg/apis/deviceplugin/v1/dsadeviceplugin_webhook.go
index eb0982fee..a5226c66f 100644
--- a/pkg/apis/deviceplugin/v1/dsadeviceplugin_webhook.go
+++ b/pkg/apis/deviceplugin/v1/dsadeviceplugin_webhook.go
@@ -20,6 +20,7 @@ import (
 	ctrl "sigs.k8s.io/controller-runtime"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

 	"github.com/intel/intel-device-plugins-for-kubernetes/pkg/controllers"
 )
@@ -60,28 +61,28 @@ func (r *DsaDevicePlugin) Default() {

 var _ webhook.Validator = &DsaDevicePlugin{}

 // ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *DsaDevicePlugin) ValidateCreate() error {
+func (r *DsaDevicePlugin) ValidateCreate() (admission.Warnings, error) {
 	dsadevicepluginlog.Info("validate create", "name", r.Name)

 	if controllers.GetDevicePluginCount(dsaPluginKind) > 0 {
-		return errors.Errorf("an instance of %q already exists in the cluster", dsaPluginKind)
+		return nil, errors.Errorf("an instance of %q already exists in the cluster", dsaPluginKind)
 	}

-	return r.validatePlugin()
+	return nil, r.validatePlugin()
 }

 // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (r *DsaDevicePlugin) ValidateUpdate(old runtime.Object) error {
+func (r *DsaDevicePlugin) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
 	dsadevicepluginlog.Info("validate update", "name", r.Name)

-	return r.validatePlugin()
+	return nil, r.validatePlugin()
 }

 // ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (r *DsaDevicePlugin) ValidateDelete() error {
+func (r *DsaDevicePlugin) ValidateDelete() (admission.Warnings, error) {
 	dsadevicepluginlog.Info("validate delete", "name", r.Name)

-	return nil
+	return nil, nil
 }

 func (r *DsaDevicePlugin) validatePlugin() error {
diff --git a/pkg/apis/deviceplugin/v1/fpgadeviceplugin_webhook.go b/pkg/apis/deviceplugin/v1/fpgadeviceplugin_webhook.go
index 5d3413b64..ba96b7c66 100644
--- a/pkg/apis/deviceplugin/v1/fpgadeviceplugin_webhook.go
+++ b/pkg/apis/deviceplugin/v1/fpgadeviceplugin_webhook.go
@@ -20,6 +20,7 @@ import (
 	ctrl "sigs.k8s.io/controller-runtime"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

 	"github.com/intel/intel-device-plugins-for-kubernetes/pkg/controllers"
 )
@@ -64,28 +65,28 @@ func (r *FpgaDevicePlugin) Default() {

 var _ webhook.Validator = &FpgaDevicePlugin{}

 // ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *FpgaDevicePlugin) ValidateCreate() error {
+func (r *FpgaDevicePlugin) ValidateCreate() (admission.Warnings, error) {
 	fpgadevicepluginlog.Info("validate create", "name", r.Name)

 	if controllers.GetDevicePluginCount(fpgaPluginKind) > 0 {
-		return errors.Errorf("an instance of %q already exists in the cluster", fpgaPluginKind)
+		return nil, errors.Errorf("an instance of %q already exists in the cluster", fpgaPluginKind)
 	}

-	return r.validatePlugin()
+	return nil, r.validatePlugin()
 }

 // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (r *FpgaDevicePlugin) ValidateUpdate(old runtime.Object) error {
+func (r *FpgaDevicePlugin) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
 	fpgadevicepluginlog.Info("validate update", "name", r.Name)

-	return r.validatePlugin()
+	return nil, r.validatePlugin()
 }

 // ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (r *FpgaDevicePlugin) ValidateDelete() error {
+func (r *FpgaDevicePlugin) ValidateDelete() (admission.Warnings, error) {
 	fpgadevicepluginlog.Info("validate delete", "name", r.Name)

-	return nil
+	return nil, nil
 }

 func (r *FpgaDevicePlugin) validatePlugin() error {
diff --git a/pkg/apis/deviceplugin/v1/gpudeviceplugin_webhook.go b/pkg/apis/deviceplugin/v1/gpudeviceplugin_webhook.go
index 91deb57c1..8a7285a6a 100644
--- a/pkg/apis/deviceplugin/v1/gpudeviceplugin_webhook.go
+++ b/pkg/apis/deviceplugin/v1/gpudeviceplugin_webhook.go
@@ -20,6 +20,7 @@ import (
 	ctrl "sigs.k8s.io/controller-runtime"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

 	"github.com/intel/intel-device-plugins-for-kubernetes/pkg/controllers"
 )
@@ -60,28 +61,28 @@ func (r *GpuDevicePlugin) Default() {

 var _ webhook.Validator = &GpuDevicePlugin{}

 // ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *GpuDevicePlugin) ValidateCreate() error {
+func (r *GpuDevicePlugin) ValidateCreate() (admission.Warnings, error) {
 	gpudevicepluginlog.Info("validate create", "name", r.Name)

 	if controllers.GetDevicePluginCount(gpuPluginKind) > 0 {
-		return errors.Errorf("an instance of %q already exists in the cluster", gpuPluginKind)
+		return nil, errors.Errorf("an instance of %q already exists in the cluster", gpuPluginKind)
 	}

-	return r.validatePlugin()
+	return nil, r.validatePlugin()
 }

 // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (r *GpuDevicePlugin) ValidateUpdate(old runtime.Object) error {
+func (r *GpuDevicePlugin) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
 	gpudevicepluginlog.Info("validate update", "name", r.Name)

-	return r.validatePlugin()
+	return nil, r.validatePlugin()
 }

 // ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (r *GpuDevicePlugin) ValidateDelete() error {
+func (r *GpuDevicePlugin) ValidateDelete() (admission.Warnings, error) {
 	gpudevicepluginlog.Info("validate delete", "name", r.Name)

-	return nil
+	return nil, nil
 }

 func (r *GpuDevicePlugin) validatePlugin() error {
diff --git a/pkg/apis/deviceplugin/v1/iaadeviceplugin_webhook.go b/pkg/apis/deviceplugin/v1/iaadeviceplugin_webhook.go
index f3588b2ab..f31eb1dc0 100644
--- a/pkg/apis/deviceplugin/v1/iaadeviceplugin_webhook.go
+++ b/pkg/apis/deviceplugin/v1/iaadeviceplugin_webhook.go
@@ -20,6 +20,7 @@ import (
 	ctrl "sigs.k8s.io/controller-runtime"
 	logf "sigs.k8s.io/controller-runtime/pkg/log"
 	"sigs.k8s.io/controller-runtime/pkg/webhook"
+	"sigs.k8s.io/controller-runtime/pkg/webhook/admission"

 	"github.com/intel/intel-device-plugins-for-kubernetes/pkg/controllers"
 )
@@ -60,28 +61,28 @@ func (r *IaaDevicePlugin) Default() {

 var _ webhook.Validator = &IaaDevicePlugin{}

 // ValidateCreate implements webhook.Validator so a webhook will be registered for the type.
-func (r *IaaDevicePlugin) ValidateCreate() error {
+func (r *IaaDevicePlugin) ValidateCreate() (admission.Warnings, error) {
 	iaadevicepluginlog.Info("validate create", "name", r.Name)

 	if controllers.GetDevicePluginCount(iaaPluginKind) > 0 {
-		return errors.Errorf("an instance of %q already exists in the cluster", iaaPluginKind)
+		return nil, errors.Errorf("an instance of %q already exists in the cluster", iaaPluginKind)
 	}

-	return r.validatePlugin()
+	return nil, r.validatePlugin()
 }

 // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type.
-func (r *IaaDevicePlugin) ValidateUpdate(old runtime.Object) error {
+func (r *IaaDevicePlugin) ValidateUpdate(old runtime.Object) (admission.Warnings, error) {
 	iaadevicepluginlog.Info("validate update", "name", r.Name)

-	return r.validatePlugin()
+	return nil, r.validatePlugin()
 }

 // ValidateDelete implements webhook.Validator so a webhook will be registered for the type.
-func (r *IaaDevicePlugin) ValidateDelete() error { +func (r *IaaDevicePlugin) ValidateDelete() (admission.Warnings, error) { iaadevicepluginlog.Info("validate delete", "name", r.Name) - return nil + return nil, nil } func (r *IaaDevicePlugin) validatePlugin() error { diff --git a/pkg/apis/deviceplugin/v1/qatdeviceplugin_webhook.go b/pkg/apis/deviceplugin/v1/qatdeviceplugin_webhook.go index a71f6136e..7a1e2204c 100644 --- a/pkg/apis/deviceplugin/v1/qatdeviceplugin_webhook.go +++ b/pkg/apis/deviceplugin/v1/qatdeviceplugin_webhook.go @@ -20,6 +20,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" "github.com/intel/intel-device-plugins-for-kubernetes/pkg/controllers" ) @@ -60,28 +61,28 @@ func (r *QatDevicePlugin) Default() { var _ webhook.Validator = &QatDevicePlugin{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. -func (r *QatDevicePlugin) ValidateCreate() error { +func (r *QatDevicePlugin) ValidateCreate() (admission.Warnings, error) { qatdevicepluginlog.Info("validate create", "name", r.Name) if controllers.GetDevicePluginCount(qatPluginKind) > 0 { - return errors.Errorf("an instance of %q already exists in the cluster", qatPluginKind) + return nil, errors.Errorf("an instance of %q already exists in the cluster", qatPluginKind) } - return r.validatePlugin() + return nil, r.validatePlugin() } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. -func (r *QatDevicePlugin) ValidateUpdate(old runtime.Object) error { +func (r *QatDevicePlugin) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { qatdevicepluginlog.Info("validate update", "name", r.Name) - return r.validatePlugin() + return nil, r.validatePlugin() } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. -func (r *QatDevicePlugin) ValidateDelete() error { +func (r *QatDevicePlugin) ValidateDelete() (admission.Warnings, error) { qatdevicepluginlog.Info("validate delete", "name", r.Name) - return nil + return nil, nil } func (r *QatDevicePlugin) validatePlugin() error { diff --git a/pkg/apis/deviceplugin/v1/sgxdeviceplugin_webhook.go b/pkg/apis/deviceplugin/v1/sgxdeviceplugin_webhook.go index 83eef2fa4..c2c21223a 100644 --- a/pkg/apis/deviceplugin/v1/sgxdeviceplugin_webhook.go +++ b/pkg/apis/deviceplugin/v1/sgxdeviceplugin_webhook.go @@ -20,6 +20,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" logf "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/webhook" + "sigs.k8s.io/controller-runtime/pkg/webhook/admission" "github.com/intel/intel-device-plugins-for-kubernetes/pkg/controllers" ) @@ -60,28 +61,28 @@ func (r *SgxDevicePlugin) Default() { var _ webhook.Validator = &SgxDevicePlugin{} // ValidateCreate implements webhook.Validator so a webhook will be registered for the type. 
-func (r *SgxDevicePlugin) ValidateCreate() error { +func (r *SgxDevicePlugin) ValidateCreate() (admission.Warnings, error) { sgxdevicepluginlog.Info("validate create", "name", r.Name) if controllers.GetDevicePluginCount(sgxPluginKind) > 0 { - return errors.Errorf("an instance of %q already exists in the cluster", sgxPluginKind) + return nil, errors.Errorf("an instance of %q already exists in the cluster", sgxPluginKind) } - return r.validatePlugin() + return nil, r.validatePlugin() } // ValidateUpdate implements webhook.Validator so a webhook will be registered for the type. -func (r *SgxDevicePlugin) ValidateUpdate(old runtime.Object) error { +func (r *SgxDevicePlugin) ValidateUpdate(old runtime.Object) (admission.Warnings, error) { sgxdevicepluginlog.Info("validate update", "name", r.Name) - return r.validatePlugin() + return nil, r.validatePlugin() } // ValidateDelete implements webhook.Validator so a webhook will be registered for the type. -func (r *SgxDevicePlugin) ValidateDelete() error { +func (r *SgxDevicePlugin) ValidateDelete() (admission.Warnings, error) { sgxdevicepluginlog.Info("validate delete", "name", r.Name) - return nil + return nil, nil } func (r *SgxDevicePlugin) validatePlugin() error { diff --git a/pkg/fpgacontroller/mocks_test.go b/pkg/fpgacontroller/mocks_test.go index 807403598..df9d8754b 100644 --- a/pkg/fpgacontroller/mocks_test.go +++ b/pkg/fpgacontroller/mocks_test.go @@ -22,12 +22,13 @@ import ( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/config/v1alpha1" + "sigs.k8s.io/controller-runtime/pkg/config" "sigs.k8s.io/controller-runtime/pkg/healthz" "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/webhook" @@ -81,6 +82,14 @@ func (c *mockClient) RESTMapper() meta.RESTMapper { return nil } +func (c *mockClient) GroupVersionKindFor(runtime.Object) (schema.GroupVersionKind, error) { + return schema.GroupVersionKind{}, nil +} + +func (c *mockClient) IsObjectNamespaced(runtime.Object) (bool, error) { + return false, nil +} + type mockManager struct { scheme *runtime.Scheme log logr.Logger @@ -146,8 +155,8 @@ func (m *mockManager) GetLogger() logr.Logger { return m.log } -func (m *mockManager) GetControllerOptions() v1alpha1.ControllerConfigurationSpec { - return v1alpha1.ControllerConfigurationSpec{} +func (m *mockManager) GetControllerOptions() config.Controller { + return config.Controller{} } func (m *mockManager) SetFields(interface{}) error { @@ -157,3 +166,7 @@ func (m *mockManager) SetFields(interface{}) error { func (m *mockManager) Start(context.Context) error { return nil } + +func (m *mockManager) GetHTTPClient() *http.Client { + return nil +} diff --git a/pkg/webhooks/sgx/sgx.go b/pkg/webhooks/sgx/sgx.go index 406681a71..a9c96da0f 100644 --- a/pkg/webhooks/sgx/sgx.go +++ b/pkg/webhooks/sgx/sgx.go @@ -16,24 +16,23 @@ package sgx import ( "context" - "encoding/json" - "net/http" + "errors" + "fmt" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" - "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/webhook/admission" + "k8s.io/apimachinery/pkg/runtime" + logf "sigs.k8s.io/controller-runtime/pkg/log" "github.com/intel/intel-device-plugins-for-kubernetes/pkg/internal/containers" ) -// 
+kubebuilder:webhook:path=/pods-sgx,mutating=true,failurePolicy=ignore,groups="",resources=pods,verbs=create;update,versions=v1,name=sgx.mutator.webhooks.intel.com,sideEffects=None,admissionReviewVersions=v1,reinvocationPolicy=IfNeeded +var ErrObjectType = errors.New("invalid runtime object type") + +// +kubebuilder:webhook:path=/mutate--v1-pod,mutating=true,failurePolicy=ignore,groups="",resources=pods,verbs=create;update,versions=v1,name=sgx.mutator.webhooks.intel.com,sideEffects=None,admissionReviewVersions=v1,reinvocationPolicy=IfNeeded // Mutator annotates Pods. -type Mutator struct { - Client client.Client - decoder *admission.Decoder -} +type Mutator struct{} const ( namespace = "sgx.intel.com" @@ -96,22 +95,6 @@ func createAesmdVolumeIfNotExists(needsAesmd bool, epcUserCount int32, aesmdPres return vol } -func warnWrongResources(resources map[string]int64) []string { - warnings := make([]string, 0) - - _, ok := resources[encl] - if ok { - warnings = append(warnings, encl+" should not be used in Pod spec directly") - } - - _, ok = resources[provision] - if ok { - warnings = append(warnings, provision+" should not be used in Pod spec directly") - } - - return warnings -} - func volumeMountExists(path string, container *corev1.Container) bool { if container.VolumeMounts != nil { for _, vm := range container.VolumeMounts { @@ -132,18 +115,20 @@ func createNewVolumeMounts(container *corev1.Container, volumeMount *corev1.Volu return append(container.VolumeMounts, *volumeMount) } -// Handle implements controller-runtimes's admission.Handler inteface. -func (s *Mutator) Handle(ctx context.Context, req admission.Request) admission.Response { - pod := &corev1.Pod{} +func (s *Mutator) Default(ctx context.Context, obj runtime.Object) error { + var pod *corev1.Pod + + log := logf.FromContext(ctx) - if err := s.decoder.Decode(req, pod); err != nil { - return admission.Errored(http.StatusBadRequest, err) + pod, ok := obj.(*corev1.Pod) + + if !ok { + return fmt.Errorf("%w: expected a Pod but got a %T", ErrObjectType, obj) } totalEpc := int64(0) epcUserCount := int32(0) aesmdPresent := bool(false) - warnings := make([]string, 0) if pod.Annotations == nil { pod.Annotations = make(map[string]string) @@ -154,11 +139,9 @@ func (s *Mutator) Handle(ctx context.Context, req admission.Request) admission.R for idx, container := range pod.Spec.Containers { requestedResources, err := containers.GetRequestedResources(container, namespace) if err != nil { - return admission.Errored(http.StatusInternalServerError, err) + return err } - warnings = append(warnings, warnWrongResources(requestedResources)...) - // the container has no sgx.intel.com/epc epcSize, ok := requestedResources[epc] if !ok { @@ -242,17 +225,7 @@ func (s *Mutator) Handle(ctx context.Context, req admission.Request) admission.R pod.Annotations["sgx.intel.com/epc"] = quantity.String() } - marshaledPod, err := json.Marshal(pod) - if err != nil { - return admission.Errored(http.StatusInternalServerError, err) - } - - return admission.PatchResponseFromRaw(req.Object.Raw, marshaledPod).WithWarnings(warnings...) -} + log.Info("Mutated SGX Pod") -// InjectDecoder implements controller-runtime's admission.DecoderInjector interface. -// A decoder will be automatically injected. 
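The rewritten mutator above is now a plain controller-runtime custom defaulter — Default(ctx, obj) mutates the Pod in place instead of building a JSON patch — which also makes it callable directly, without an admission request. A hypothetical unit-test sketch along those lines; the test name, image and the deliberately loose assertion are mine, while the Mutator type, the sgx.intel.com/epc resource name and the pod-level annotation key come from the hunk above.

package sgx_test

import (
	"context"
	"testing"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	sgxwebhook "github.com/intel/intel-device-plugins-for-kubernetes/pkg/webhooks/sgx"
)

// TestDefaultSetsEpcAnnotation calls the defaulter the same way
// controller-runtime would once the webhook is registered via WithDefaulter.
func TestDefaultSetsEpcAnnotation(t *testing.T) {
	epc := resource.MustParse("1Mi")
	pod := &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: "sgx-app"},
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:  "enclave-app",
				Image: "example/enclave-app", // placeholder image
				Resources: corev1.ResourceRequirements{
					Requests: corev1.ResourceList{"sgx.intel.com/epc": epc},
					Limits:   corev1.ResourceList{"sgx.intel.com/epc": epc},
				},
			}},
		},
	}

	if err := (&sgxwebhook.Mutator{}).Default(context.Background(), pod); err != nil {
		t.Fatalf("Default returned error: %v", err)
	}

	// The mutator aggregates per-container EPC requests into a pod-level
	// annotation; the exact value depends on the request above, so only
	// check that the annotation was set at all.
	if pod.Annotations["sgx.intel.com/epc"] == "" {
		t.Error("expected sgx.intel.com/epc annotation to be set")
	}
}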
-func (s *Mutator) InjectDecoder(d *admission.Decoder) error { - s.decoder = d return nil } diff --git a/test/e2e/deviceplugins_suite_test.go b/test/e2e/deviceplugins_suite_test.go index b73beee62..09991f880 100644 --- a/test/e2e/deviceplugins_suite_test.go +++ b/test/e2e/deviceplugins_suite_test.go @@ -15,6 +15,7 @@ package e2e_test import ( + "context" "flag" "os" "testing" @@ -51,7 +52,7 @@ func init() { ginkgo.SynchronizedBeforeSuite(setupFirstNode, func(data []byte) {}) } -func setupFirstNode() []byte { +func setupFirstNode(ctx context.Context) []byte { c, err := framework.LoadClientset() if err != nil { framework.Failf("Error loading client: %v", err) @@ -59,35 +60,38 @@ func setupFirstNode() []byte { // Delete any namespaces except those created by the system. This ensures no // lingering resources are left over from a previous test run. - deleted, err := framework.DeleteNamespaces(c, nil, /* deleteFilter */ - []string{ - metav1.NamespaceSystem, - metav1.NamespaceDefault, - metav1.NamespacePublic, - v1.NamespaceNodeLease, - "cert-manager", - }) - if err != nil { - framework.Failf("Error deleting orphaned namespaces: %v", err) + if framework.TestContext.CleanStart { + deleted, err2 := framework.DeleteNamespaces(ctx, c, nil, /* deleteFilter */ + []string{ + metav1.NamespaceSystem, + metav1.NamespaceDefault, + metav1.NamespacePublic, + v1.NamespaceNodeLease, + "cert-manager", + }) + if err2 != nil { + framework.Failf("Error deleting orphaned namespaces: %v", err2) + } + + framework.Logf("Waiting for deletion of the following namespaces: %v", deleted) + + if err2 = framework.WaitForNamespacesDeleted(ctx, c, deleted, e2epod.DefaultPodDeletionTimeout); err2 != nil { + framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err2) + } } - framework.Logf("Waiting for deletion of the following namespaces: %v", deleted) - - if err = framework.WaitForNamespacesDeleted(c, deleted, e2epod.DefaultPodDeletionTimeout); err != nil { - framework.Failf("Failed to delete orphaned namespaces %v: %v", deleted, err) - } + timeouts := framework.NewTimeoutContext() - framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(c, framework.TestContext.NodeSchedulableTimeout)) + framework.ExpectNoError(e2enode.WaitForAllNodesSchedulable(ctx, c, timeouts.NodeSchedulable)) // Ensure all pods are running and ready before starting tests (otherwise, // cluster infrastructure pods that are being pulled or started can block // test pods from running, and tests that ensure all pods are running and // ready will fail). 
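The suite setup above now runs as a context-aware Ginkgo v2 node: Ginkgo passes a context that is cancelled on interrupt or timeout, and the Kubernetes 1.27 e2e helpers take it as their first argument, which is why ctx is threaded through every wait call in the test hunks below. A minimal, self-contained sketch of that pattern, with a hypothetical polling helper standing in for framework calls such as utils.WaitForNodesWithResource.

package e2esketch

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
)

var _ = ginkgo.Describe("context-aware specs", func() {
	// The spec body receives a context that Ginkgo cancels when the spec is
	// interrupted or exceeds SpecTimeout, so long waits can abort cleanly.
	ginkgo.It("threads ctx into polling helpers", ginkgo.SpecTimeout(5*time.Minute),
		func(ctx context.Context) {
			if err := waitForCondition(ctx); err != nil {
				ginkgo.Fail("condition not met: " + err.Error())
			}
		})
})

// waitForCondition polls until the (stubbed) condition holds or the spec
// context is done, mirroring how the real helpers honour cancellation.
func waitForCondition(ctx context.Context) error {
	for {
		if conditionMet() {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(time.Second):
		}
	}
}

func conditionMet() bool { return true } // stub so the sketch terminates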
- if err = e2epod.WaitForPodsRunningReady(c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), - int32(framework.TestContext.AllowedNotReadyNodes), framework.TestContext.SystemPodsStartupTimeout, - map[string]string{}); err != nil { - e2edebug.DumpAllNamespaceInfo(c, metav1.NamespaceSystem) - e2ekubectl.LogFailedContainers(c, metav1.NamespaceSystem, framework.Logf) + if err = e2epod.WaitForPodsRunningReady(ctx, c, metav1.NamespaceSystem, int32(framework.TestContext.MinStartupPods), + int32(framework.TestContext.AllowedNotReadyNodes), timeouts.SystemPodsStartup); err != nil { + e2edebug.DumpAllNamespaceInfo(ctx, c, metav1.NamespaceSystem) + e2ekubectl.LogFailedContainers(ctx, c, metav1.NamespaceSystem, framework.Logf) framework.Failf("Error waiting for all pods to be running and ready: %v", err) } @@ -107,8 +111,8 @@ func setupFirstNode() []byte { utils.Kubectl("node-feature-discovery", "apply", "-k", "deployments/nfd/overlays/node-feature-rules/kustomization.yaml") - if err = e2epod.WaitForPodsRunningReady(c, "node-feature-discovery", 2, 0, - 300*time.Second, map[string]string{}); err != nil { + if err = e2epod.WaitForPodsRunningReady(ctx, c, "node-feature-discovery", 2, 0, + 300*time.Second); err != nil { framework.Failf("unable to wait for NFD pods to be running and ready: %v", err) } diff --git a/test/e2e/dlb/dlb.go b/test/e2e/dlb/dlb.go index e606348ef..a4479a4a9 100644 --- a/test/e2e/dlb/dlb.go +++ b/test/e2e/dlb/dlb.go @@ -15,6 +15,7 @@ package dlb import ( + "context" "path/filepath" "strings" "time" @@ -51,16 +52,16 @@ func describe() { var dpPodName string - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("deploying DLB plugin") e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-k", filepath.Dir(kustomizationPath)) ginkgo.By("waiting for DLB plugin's availability") - podList, err := e2epod.WaitForPodsWithLabelRunningReady(f.ClientSet, f.Namespace.Name, + podList, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, f.ClientSet, f.Namespace.Name, labels.Set{"app": "intel-dlb-plugin"}.AsSelector(), 1 /* one replica */, 100*time.Second) if err != nil { - e2edebug.DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name) - e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf) + e2edebug.DumpAllNamespaceInfo(ctx, f.ClientSet, f.Namespace.Name) + e2ekubectl.LogFailedContainers(ctx, f.ClientSet, f.Namespace.Name, framework.Logf) framework.Failf("unable to wait for all pods to be running and ready: %v", err) } dpPodName = podList.Items[0].Name @@ -71,42 +72,42 @@ func describe() { } }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { ginkgo.By("undeploying DLB plugin") e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "delete", "-k", filepath.Dir(kustomizationPath)) - if err := e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, dpPodName, f.Namespace.Name, 30*time.Second); err != nil { + if err := e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, dpPodName, f.Namespace.Name, 30*time.Second); err != nil { framework.Failf("failed to terminate pod: %v", err) } }) ginkgo.Context("When PF resources are available", func() { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { resource := v1.ResourceName("dlb.intel.com/pf") - if err := utils.WaitForNodesWithResource(f.ClientSet, resource, 30*time.Second); err != nil { + if err := utils.WaitForNodesWithResource(ctx, f.ClientSet, resource, 30*time.Second); err != nil { framework.Failf("unable to wait for nodes to have 
positive allocatable resource %s: %v", resource, err) } }) - ginkgo.It("can run demo app", func() { - runDemoApp("PF", demoPFYaml, f) + ginkgo.It("can run demo app", func(ctx context.Context) { + runDemoApp(ctx, "PF", demoPFYaml, f) }) }) ginkgo.Context("When VF resources are available", func() { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { resource := v1.ResourceName("dlb.intel.com/vf") - if err := utils.WaitForNodesWithResource(f.ClientSet, resource, 30*time.Second); err != nil { + if err := utils.WaitForNodesWithResource(ctx, f.ClientSet, resource, 30*time.Second); err != nil { framework.Failf("unable to wait for nodes to have positive allocatable resource %s: %v", resource, err) } }) - ginkgo.It("can run demo app", func() { - runDemoApp("VF", demoVFYaml, f) + ginkgo.It("can run demo app", func(ctx context.Context) { + runDemoApp(ctx, "VF", demoVFYaml, f) }) }) } -func runDemoApp(function, yaml string, f *framework.Framework) { +func runDemoApp(ctx context.Context, function, yaml string, f *framework.Framework) { demoPath, err := utils.LocateRepoFile(yaml) if err != nil { framework.Failf("unable to locate %q: %v", yaml, err) @@ -118,11 +119,11 @@ func runDemoApp(function, yaml string, f *framework.Framework) { e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-f", demoPath) ginkgo.By("waiting for the DLB demo to succeed") - e2epod.NewPodClient(f).WaitForSuccess(podName, 200*time.Second) + e2epod.NewPodClient(f).WaitForSuccess(ctx, podName, 200*time.Second) ginkgo.By("getting workload log") - log, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName) + log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, podName) if err != nil { framework.Failf("unable to get log from pod: %v", err) diff --git a/test/e2e/dsa/dsa.go b/test/e2e/dsa/dsa.go index 410e8654e..a61770562 100644 --- a/test/e2e/dsa/dsa.go +++ b/test/e2e/dsa/dsa.go @@ -15,6 +15,7 @@ package dsa import ( + "context" "path/filepath" "time" @@ -63,18 +64,18 @@ func describe() { var dpPodName string ginkgo.Describe("Without using operator", func() { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("deploying DSA plugin") e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "create", "configmap", "intel-dsa-config", "--from-file="+configmap) e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-k", filepath.Dir(kustomizationPath)) ginkgo.By("waiting for DSA plugin's availability") - podList, err := e2epod.WaitForPodsWithLabelRunningReady(f.ClientSet, f.Namespace.Name, + podList, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, f.ClientSet, f.Namespace.Name, labels.Set{"app": "intel-dsa-plugin"}.AsSelector(), 1 /* one replica */, 300*time.Second) if err != nil { - e2edebug.DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name) - e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf) + e2edebug.DumpAllNamespaceInfo(ctx, f.ClientSet, f.Namespace.Name) + e2ekubectl.LogFailedContainers(ctx, f.ClientSet, f.Namespace.Name, framework.Logf) framework.Failf("unable to wait for all pods to be running and ready: %v", err) } dpPodName = podList.Items[0].Name @@ -85,30 +86,30 @@ func describe() { } }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { ginkgo.By("undeploying DSA plugin") e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "delete", "-k", filepath.Dir(kustomizationPath)) - if err := e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, dpPodName, f.Namespace.Name, 30*time.Second); err != nil 
{ + if err := e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, dpPodName, f.Namespace.Name, 30*time.Second); err != nil { framework.Failf("failed to terminate pod: %v", err) } }) ginkgo.Context("When DSA resources are available", func() { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("checking if the resource is allocatable") - if err := utils.WaitForNodesWithResource(f.ClientSet, "dsa.intel.com/wq-user-dedicated", 300*time.Second); err != nil { + if err := utils.WaitForNodesWithResource(ctx, f.ClientSet, "dsa.intel.com/wq-user-dedicated", 300*time.Second); err != nil { framework.Failf("unable to wait for nodes to have positive allocatable resource: %v", err) } }) - ginkgo.It("deploys a demo app", func() { + ginkgo.It("deploys a demo app", func(ctx context.Context) { e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-f", demoPath) ginkgo.By("waiting for the DSA demo to succeed") - e2epod.NewPodClient(f).WaitForSuccess(podName, 200*time.Second) + e2epod.NewPodClient(f).WaitForSuccess(ctx, podName, 200*time.Second) ginkgo.By("getting workload log") - log, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName) + log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, podName) if err != nil { framework.Failf("unable to get log from pod: %v", err) @@ -120,20 +121,20 @@ func describe() { }) ginkgo.Describe("With using operator", func() { - ginkgo.It("deploys DSA plugin with operator", func() { + ginkgo.It("deploys DSA plugin with operator", func(ctx context.Context) { utils.Kubectl("", "apply", "-k", "deployments/operator/default/kustomization.yaml") - if _, err := e2epod.WaitForPodsWithLabelRunningReady(f.ClientSet, ns, labels.Set{"control-plane": "controller-manager"}.AsSelector(), 1, timeout); err != nil { + if _, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, f.ClientSet, ns, labels.Set{"control-plane": "controller-manager"}.AsSelector(), 1, timeout); err != nil { framework.Failf("unable to wait for all pods to be running and ready: %v", err) } utils.Kubectl("", "apply", "-f", "deployments/operator/samples/deviceplugin_v1_dsadeviceplugin.yaml") - if _, err := e2epod.WaitForPodsWithLabelRunningReady(f.ClientSet, ns, labels.Set{"app": "intel-dsa-plugin"}.AsSelector(), 1, timeout); err != nil { + if _, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, f.ClientSet, ns, labels.Set{"app": "intel-dsa-plugin"}.AsSelector(), 1, timeout); err != nil { framework.Failf("unable to wait for all pods to be running and ready: %v", err) } - if err := utils.WaitForNodesWithResource(f.ClientSet, "dsa.intel.com/wq-user-dedicated", timeout); err != nil { + if err := utils.WaitForNodesWithResource(ctx, f.ClientSet, "dsa.intel.com/wq-user-dedicated", timeout); err != nil { framework.Failf("unable to wait for nodes to have positive allocatable resource: %v", err) } diff --git a/test/e2e/fpga/fpga.go b/test/e2e/fpga/fpga.go index ee574ba11..18917eb45 100644 --- a/test/e2e/fpga/fpga.go +++ b/test/e2e/fpga/fpga.go @@ -63,16 +63,16 @@ func describe() { fmw := framework.NewDefaultFramework("fpgaplugin-e2e") fmw.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged - ginkgo.It("Run FPGA plugin tests", func() { + ginkgo.It("Run FPGA plugin tests", func(ctx context.Context) { // Run region test case twice to ensure that device is reprogrammed at least once - runTestCase(fmw, pluginKustomizationPath, mappingsCollectionPath, "region", arria10NodeResource, nlb3PodResource, "nlb3", "nlb0") - runTestCase(fmw, 
pluginKustomizationPath, mappingsCollectionPath, "region", arria10NodeResource, nlb0PodResource, "nlb0", "nlb3") + runTestCase(ctx, fmw, pluginKustomizationPath, mappingsCollectionPath, "region", arria10NodeResource, nlb3PodResource, "nlb3", "nlb0") + runTestCase(ctx, fmw, pluginKustomizationPath, mappingsCollectionPath, "region", arria10NodeResource, nlb0PodResource, "nlb0", "nlb3") // Run af test case - runTestCase(fmw, pluginKustomizationPath, mappingsCollectionPath, "af", nlb0NodeResource, nlb0PodResourceAF, "nlb0", "nlb3") + runTestCase(ctx, fmw, pluginKustomizationPath, mappingsCollectionPath, "af", nlb0NodeResource, nlb0PodResourceAF, "nlb0", "nlb3") }) } -func runTestCase(fmw *framework.Framework, pluginKustomizationPath, mappingsCollectionPath, pluginMode, nodeResource, podResource, cmd1, cmd2 string) { +func runTestCase(ctx context.Context, fmw *framework.Framework, pluginKustomizationPath, mappingsCollectionPath, pluginMode, nodeResource, podResource, cmd1, cmd2 string) { tmpDir, err := os.MkdirTemp("", "fpgaplugine2etest-"+fmw.Namespace.Name) if err != nil { framework.Failf("unable to create temp directory: %v", err) @@ -91,13 +91,13 @@ func runTestCase(fmw *framework.Framework, pluginKustomizationPath, mappingsColl ginkgo.By("deploying mappings") e2ekubectl.RunKubectlOrDie(fmw.Namespace.Name, "apply", "-f", mappingsCollectionPath) - waitForPod(fmw, "intel-fpga-plugin") + waitForPod(ctx, fmw, "intel-fpga-plugin") resource := v1.ResourceName(nodeResource) ginkgo.By("checking if the resource is allocatable") - if err := utils.WaitForNodesWithResource(fmw.ClientSet, resource, 30*time.Second); err != nil { + if err := utils.WaitForNodesWithResource(ctx, fmw.ClientSet, resource, 30*time.Second); err != nil { framework.Failf("unable to wait for nodes to have positive allocatable resource: %v", err) } @@ -106,10 +106,10 @@ func runTestCase(fmw *framework.Framework, pluginKustomizationPath, mappingsColl ginkgo.By("submitting a pod requesting correct FPGA resources") - pod := createPod(fmw, fmt.Sprintf("fpgaplugin-%s-%s-%s-correct", pluginMode, cmd1, cmd2), resource, image, []string{cmd1, "-S0"}) + pod := createPod(ctx, fmw, fmt.Sprintf("fpgaplugin-%s-%s-%s-correct", pluginMode, cmd1, cmd2), resource, image, []string{cmd1, "-S0"}) ginkgo.By("waiting the pod to finish successfully") - e2epod.NewPodClient(fmw).WaitForSuccess(pod.ObjectMeta.Name, 60*time.Second) + e2epod.NewPodClient(fmw).WaitForSuccess(ctx, pod.ObjectMeta.Name, 60*time.Second) // If WaitForSuccess fails, ginkgo doesn't show the logs of the failed container. 
// Replacing WaitForSuccess with WaitForFinish + 'kubelet logs' would show the logs //fmw.PodClient().WaitForFinish(pod.ObjectMeta.Name, 60*time.Second) @@ -117,13 +117,13 @@ func runTestCase(fmw *framework.Framework, pluginKustomizationPath, mappingsColl ginkgo.By("submitting a pod requesting incorrect FPGA resources") - pod = createPod(fmw, fmt.Sprintf("fpgaplugin-%s-%s-%s-incorrect", pluginMode, cmd1, cmd2), resource, image, []string{cmd2, "-S0"}) + pod = createPod(ctx, fmw, fmt.Sprintf("fpgaplugin-%s-%s-%s-incorrect", pluginMode, cmd1, cmd2), resource, image, []string{cmd2, "-S0"}) ginkgo.By("waiting the pod failure") - utils.WaitForPodFailure(fmw, pod.ObjectMeta.Name, 60*time.Second) + utils.WaitForPodFailure(ctx, fmw, pod.ObjectMeta.Name, 60*time.Second) } -func createPod(fmw *framework.Framework, name string, resourceName v1.ResourceName, image string, command []string) *v1.Pod { +func createPod(ctx context.Context, fmw *framework.Framework, name string, resourceName v1.ResourceName, image string, command []string) *v1.Pod { resourceList := v1.ResourceList{resourceName: resource.MustParse("1"), "cpu": resource.MustParse("1"), "hugepages-2Mi": resource.MustParse("20Mi")} @@ -150,21 +150,21 @@ func createPod(fmw *framework.Framework, name string, resourceName v1.ResourceNa }, } - pod, err := fmw.ClientSet.CoreV1().Pods(fmw.Namespace.Name).Create(context.TODO(), + pod, err := fmw.ClientSet.CoreV1().Pods(fmw.Namespace.Name).Create(ctx, podSpec, metav1.CreateOptions{}) framework.ExpectNoError(err, "pod Create API error") return pod } -func waitForPod(fmw *framework.Framework, name string) { +func waitForPod(ctx context.Context, fmw *framework.Framework, name string) { ginkgo.By(fmt.Sprintf("waiting for %s availability", name)) - podList, err := e2epod.WaitForPodsWithLabelRunningReady(fmw.ClientSet, fmw.Namespace.Name, + podList, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, fmw.ClientSet, fmw.Namespace.Name, labels.Set{"app": name}.AsSelector(), 1, 100*time.Second) if err != nil { - e2edebug.DumpAllNamespaceInfo(fmw.ClientSet, fmw.Namespace.Name) - e2ekubectl.LogFailedContainers(fmw.ClientSet, fmw.Namespace.Name, framework.Logf) + e2edebug.DumpAllNamespaceInfo(ctx, fmw.ClientSet, fmw.Namespace.Name) + e2ekubectl.LogFailedContainers(ctx, fmw.ClientSet, fmw.Namespace.Name, framework.Logf) framework.Failf("unable to wait for all pods to be running and ready: %v", err) } diff --git a/test/e2e/fpgaadmissionwebhook/fpgaadmissionwebhook.go b/test/e2e/fpgaadmissionwebhook/fpgaadmissionwebhook.go index 71d95bde5..f97e84fb5 100644 --- a/test/e2e/fpgaadmissionwebhook/fpgaadmissionwebhook.go +++ b/test/e2e/fpgaadmissionwebhook/fpgaadmissionwebhook.go @@ -42,32 +42,32 @@ func init() { func describe() { f := framework.NewDefaultFramework("webhook") - ginkgo.It("mutates created pods to reference resolved AFs", func() { - checkPodMutation(f, f.Namespace.Name, "fpga.intel.com/d5005-nlb3-preprogrammed", + ginkgo.It("mutates created pods to reference resolved AFs", func(ctx context.Context) { + checkPodMutation(ctx, f, f.Namespace.Name, "fpga.intel.com/d5005-nlb3-preprogrammed", "fpga.intel.com/af-bfa.f7d.v6xNhR7oVv6MlYZc4buqLfffQFy9es9yIvFEsLk6zRg") }) - ginkgo.It("mutates created pods to reference resolved Regions", func() { - checkPodMutation(f, f.Namespace.Name, "fpga.intel.com/arria10.dcp1.0-nlb0-orchestrated", + ginkgo.It("mutates created pods to reference resolved Regions", func(ctx context.Context) { + checkPodMutation(ctx, f, f.Namespace.Name, 
"fpga.intel.com/arria10.dcp1.0-nlb0-orchestrated", "fpga.intel.com/region-ce48969398f05f33946d560708be108a") }) - ginkgo.It("mutates created pods to reference resolved Regions in regiondevel mode", func() { - checkPodMutation(f, f.Namespace.Name, "fpga.intel.com/arria10.dcp1.0", + ginkgo.It("mutates created pods to reference resolved Regions in regiondevel mode", func(ctx context.Context) { + checkPodMutation(ctx, f, f.Namespace.Name, "fpga.intel.com/arria10.dcp1.0", "fpga.intel.com/region-ce48969398f05f33946d560708be108a") }) - ginkgo.It("doesn't mutate a pod if it's created in a namespace different from mappings'", func() { + ginkgo.It("doesn't mutate a pod if it's created in a namespace different from mappings'", func(ctx context.Context) { ginkgo.By("create another namespace for mappings") - ns, err := f.CreateNamespace("mappings", nil) + ns, err := f.CreateNamespace(ctx, "mappings", nil) framework.ExpectNoError(err, "unable to create a namespace") - checkPodMutation(f, ns.Name, "fpga.intel.com/arria10.dcp1.0-nlb0-orchestrated", + checkPodMutation(ctx, f, ns.Name, "fpga.intel.com/arria10.dcp1.0-nlb0-orchestrated", "fpga.intel.com/arria10.dcp1.0-nlb0-orchestrated") }) } -func checkPodMutation(f *framework.Framework, mappingsNamespace string, source, expectedMutation v1.ResourceName) { +func checkPodMutation(ctx context.Context, f *framework.Framework, mappingsNamespace string, source, expectedMutation v1.ResourceName) { kustomizationPath, err := utils.LocateRepoFile(kustomizationYaml) if err != nil { framework.Failf("unable to locate %q: %v", kustomizationYaml, err) @@ -75,7 +75,7 @@ func checkPodMutation(f *framework.Framework, mappingsNamespace string, source, ginkgo.By("deploying webhook") - _ = utils.DeployWebhook(f, kustomizationPath) + _ = utils.DeployWebhook(ctx, f, kustomizationPath) ginkgo.By("deploying mappings") e2ekubectl.RunKubectlOrDie(mappingsNamespace, "apply", "-f", filepath.Dir(kustomizationPath)+"/../mappings-collection.yaml") @@ -109,11 +109,11 @@ func checkPodMutation(f *framework.Framework, mappingsNamespace string, source, }, }, } - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, podSpec, metav1.CreateOptions{}) if source.String() == expectedMutation.String() { - framework.ExpectError(err, "pod mistakenly got accepted") + gomega.Expect(err).To(gomega.HaveOccurred(), "pod mistakenly got accepted") return } @@ -123,8 +123,8 @@ func checkPodMutation(f *framework.Framework, mappingsNamespace string, source, q, ok := pod.Spec.Containers[0].Resources.Limits[expectedMutation] if !ok { - e2edebug.DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name) - e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf) + e2edebug.DumpAllNamespaceInfo(ctx, f.ClientSet, f.Namespace.Name) + e2ekubectl.LogFailedContainers(ctx, f.ClientSet, f.Namespace.Name, framework.Logf) framework.Fail("pod hasn't been mutated") } diff --git a/test/e2e/gpu/gpu.go b/test/e2e/gpu/gpu.go index f80b82733..d4c5aba3b 100644 --- a/test/e2e/gpu/gpu.go +++ b/test/e2e/gpu/gpu.go @@ -52,16 +52,16 @@ func describe() { framework.Failf("unable to locate %q: %v", kustomizationYaml, err) } - ginkgo.It("checks availability of GPU resources", func() { + ginkgo.It("checks availability of GPU resources", func(ctx context.Context) { ginkgo.By("deploying GPU plugin") e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-k", filepath.Dir(kustomizationPath)) ginkgo.By("waiting for GPU plugin's 
availability") - podList, err := e2epod.WaitForPodsWithLabelRunningReady(f.ClientSet, f.Namespace.Name, + podList, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, f.ClientSet, f.Namespace.Name, labels.Set{"app": "intel-gpu-plugin"}.AsSelector(), 1 /* one replica */, 100*time.Second) if err != nil { - e2edebug.DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name) - e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf) + e2edebug.DumpAllNamespaceInfo(ctx, f.ClientSet, f.Namespace.Name) + e2ekubectl.LogFailedContainers(ctx, f.ClientSet, f.Namespace.Name, framework.Logf) framework.Failf("unable to wait for all pods to be running and ready: %v", err) } @@ -71,7 +71,7 @@ func describe() { } ginkgo.By("checking if the resource is allocatable") - if err = utils.WaitForNodesWithResource(f.ClientSet, "gpu.intel.com/i915", 30*time.Second); err != nil { + if err = utils.WaitForNodesWithResource(ctx, f.ClientSet, "gpu.intel.com/i915", 30*time.Second); err != nil { framework.Failf("unable to wait for nodes to have positive allocatable resource: %v", err) } @@ -94,14 +94,14 @@ func describe() { RestartPolicy: v1.RestartPolicyNever, }, } - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), podSpec, metav1.CreateOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, podSpec, metav1.CreateOptions{}) framework.ExpectNoError(err, "pod Create API error") ginkgo.By("waiting the pod to finish successfully") - e2epod.NewPodClient(f).WaitForSuccess(pod.ObjectMeta.Name, 60*time.Second) + e2epod.NewPodClient(f).WaitForSuccess(ctx, pod.ObjectMeta.Name, 60*time.Second) ginkgo.By("checking log output") - log, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.Name, containerName) + log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.Name, containerName) if err != nil { framework.Failf("unable to get log from pod: %v", err) diff --git a/test/e2e/iaa/iaa.go b/test/e2e/iaa/iaa.go index 891c1e18a..379064dae 100644 --- a/test/e2e/iaa/iaa.go +++ b/test/e2e/iaa/iaa.go @@ -15,6 +15,7 @@ package iaa import ( + "context" "path/filepath" "time" @@ -63,18 +64,18 @@ func describe() { var dpPodName string ginkgo.Describe("Without using operator", func() { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("deploying IAA plugin") e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "create", "configmap", "intel-iaa-config", "--from-file="+configmap) e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-k", filepath.Dir(kustomizationPath)) ginkgo.By("waiting for IAA plugin's availability") - podList, err := e2epod.WaitForPodsWithLabelRunningReady(f.ClientSet, f.Namespace.Name, + podList, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, f.ClientSet, f.Namespace.Name, labels.Set{"app": "intel-iaa-plugin"}.AsSelector(), 1 /* one replica */, 300*time.Second) if err != nil { - e2edebug.DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name) - e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf) + e2edebug.DumpAllNamespaceInfo(ctx, f.ClientSet, f.Namespace.Name) + e2ekubectl.LogFailedContainers(ctx, f.ClientSet, f.Namespace.Name, framework.Logf) framework.Failf("unable to wait for all pods to be running and ready: %v", err) } dpPodName = podList.Items[0].Name @@ -85,30 +86,30 @@ func describe() { } }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { ginkgo.By("undeploying IAA plugin") e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "delete", "-k", 
filepath.Dir(kustomizationPath)) - if err := e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, dpPodName, f.Namespace.Name, 30*time.Second); err != nil { + if err := e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, dpPodName, f.Namespace.Name, 30*time.Second); err != nil { framework.Failf("failed to terminate pod: %v", err) } }) ginkgo.Context("When IAA resources are available", func() { - ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { ginkgo.By("checking if the resource is allocatable") - if err := utils.WaitForNodesWithResource(f.ClientSet, "iaa.intel.com/wq-user-dedicated", 300*time.Second); err != nil { + if err := utils.WaitForNodesWithResource(ctx, f.ClientSet, "iaa.intel.com/wq-user-dedicated", 300*time.Second); err != nil { framework.Failf("unable to wait for nodes to have positive allocatable resource: %v", err) } }) - ginkgo.It("deploys a demo app", func() { + ginkgo.It("deploys a demo app", func(ctx context.Context) { e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-f", demoPath) ginkgo.By("waiting for the IAA demo to succeed") - e2epod.NewPodClient(f).WaitForSuccess(podName, 300*time.Second) + e2epod.NewPodClient(f).WaitForSuccess(ctx, podName, 300*time.Second) ginkgo.By("getting workload log") - log, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, podName, podName) + log, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, podName, podName) if err != nil { framework.Failf("unable to get log from pod: %v", err) @@ -120,20 +121,20 @@ func describe() { }) ginkgo.Describe("With using operator", func() { - ginkgo.It("deploys IAA plugin with operator", func() { + ginkgo.It("deploys IAA plugin with operator", func(ctx context.Context) { utils.Kubectl("", "apply", "-k", "deployments/operator/default/kustomization.yaml") - if _, err := e2epod.WaitForPodsWithLabelRunningReady(f.ClientSet, ns, labels.Set{"control-plane": "controller-manager"}.AsSelector(), 1, timeout); err != nil { + if _, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, f.ClientSet, ns, labels.Set{"control-plane": "controller-manager"}.AsSelector(), 1, timeout); err != nil { framework.Failf("unable to wait for all pods to be running and ready: %v", err) } utils.Kubectl("", "apply", "-f", "deployments/operator/samples/deviceplugin_v1_iaadeviceplugin.yaml") - if _, err := e2epod.WaitForPodsWithLabelRunningReady(f.ClientSet, ns, labels.Set{"app": "intel-iaa-plugin"}.AsSelector(), 1, timeout); err != nil { + if _, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, f.ClientSet, ns, labels.Set{"app": "intel-iaa-plugin"}.AsSelector(), 1, timeout); err != nil { framework.Failf("unable to wait for all pods to be running and ready: %v", err) } - if err := utils.WaitForNodesWithResource(f.ClientSet, "iaa.intel.com/wq-user-dedicated", timeout); err != nil { + if err := utils.WaitForNodesWithResource(ctx, f.ClientSet, "iaa.intel.com/wq-user-dedicated", timeout); err != nil { framework.Failf("unable to wait for nodes to have positive allocatable resource: %v", err) } diff --git a/test/e2e/operator/operator.go b/test/e2e/operator/operator.go index f278c3162..78b14fdc0 100644 --- a/test/e2e/operator/operator.go +++ b/test/e2e/operator/operator.go @@ -16,6 +16,8 @@ package inteldevicepluginsoperator import ( + "context" + "github.com/intel/intel-device-plugins-for-kubernetes/test/e2e/utils" "github.com/onsi/ginkgo/v2" "github.com/onsi/gomega" @@ -38,18 +40,18 @@ func describe() { var webhook v1.Pod - 
ginkgo.BeforeEach(func() { + ginkgo.BeforeEach(func(ctx context.Context) { kustomizationPath, err := utils.LocateRepoFile(kustomizationYaml) if err != nil { framework.Failf("unable to locate %q: %v", kustomizationYaml, err) } - webhook = utils.DeployWebhook(f, kustomizationPath) + webhook = utils.DeployWebhook(ctx, f, kustomizationPath) }) - ginkgo.It("checks the operator webhook pod is safely configured", func() { + ginkgo.It("checks the operator webhook pod is safely configured", func(ctx context.Context) { err := utils.TestContainersRunAsNonRoot([]v1.Pod{webhook}) gomega.Expect(err).To(gomega.BeNil()) - err = utils.TestWebhookServerTLS(f, "https://inteldeviceplugins-webhook-service") + err = utils.TestWebhookServerTLS(ctx, f, "https://inteldeviceplugins-webhook-service") gomega.Expect(err).To(gomega.BeNil()) }) } diff --git a/test/e2e/qat/qatplugin_dpdk.go b/test/e2e/qat/qatplugin_dpdk.go index 82c007b37..274517a79 100644 --- a/test/e2e/qat/qatplugin_dpdk.go +++ b/test/e2e/qat/qatplugin_dpdk.go @@ -71,16 +71,16 @@ func describeQatDpdkPlugin() { var resourceName v1.ResourceName - ginkgo.JustBeforeEach(func() { + ginkgo.JustBeforeEach(func(ctx context.Context) { ginkgo.By("deploying QAT plugin in DPDK mode") e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-k", filepath.Dir(kustomizationPath)) ginkgo.By("waiting for QAT plugin's availability") - podList, err := e2epod.WaitForPodsWithLabelRunningReady(f.ClientSet, f.Namespace.Name, + podList, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, f.ClientSet, f.Namespace.Name, labels.Set{"app": "intel-qat-plugin"}.AsSelector(), 1 /* one replica */, 100*time.Second) if err != nil { - e2edebug.DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name) - e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf) + e2edebug.DumpAllNamespaceInfo(ctx, f.ClientSet, f.Namespace.Name) + e2ekubectl.LogFailedContainers(ctx, f.ClientSet, f.Namespace.Name, framework.Logf) framework.Failf("unable to wait for all pods to be running and ready: %v", err) } dpPodName = podList.Items[0].Name @@ -91,15 +91,15 @@ func describeQatDpdkPlugin() { } ginkgo.By("checking if the resource is allocatable") - if err := utils.WaitForNodesWithResource(f.ClientSet, resourceName, 30*time.Second); err != nil { + if err := utils.WaitForNodesWithResource(ctx, f.ClientSet, resourceName, 30*time.Second); err != nil { framework.Failf("unable to wait for nodes to have positive allocatable resource: %v", err) } }) - ginkgo.AfterEach(func() { + ginkgo.AfterEach(func(ctx context.Context) { ginkgo.By("undeploying QAT plugin") e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "delete", "-k", filepath.Dir(kustomizationPath)) - if err := e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, dpPodName, f.Namespace.Name, 30*time.Second); err != nil { + if err := e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, dpPodName, f.Namespace.Name, 30*time.Second); err != nil { framework.Failf("failed to terminate pod: %v", err) } }) @@ -114,25 +114,25 @@ func describeQatDpdkPlugin() { resourceName = "qat.intel.com/cy" }) - ginkgo.It("deploys a crypto pod (openssl) requesting QAT resources", func() { - runCpaSampleCode(f, "4", resourceName) + ginkgo.It("deploys a crypto pod (openssl) requesting QAT resources", func(ctx context.Context) { + runCpaSampleCode(ctx, f, "4", resourceName) }) - ginkgo.It("deploys a crypto pod (dpdk crypto-perf) requesting QAT resources", func() { + ginkgo.It("deploys a crypto pod (dpdk crypto-perf) requesting QAT resources", func(ctx context.Context) { 
ginkgo.By("submitting a crypto pod requesting QAT resources") e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-k", filepath.Dir(cryptoTestGen4YamlPath)) ginkgo.By("waiting the crypto pod to finish successfully") - e2epod.NewPodClient(f).WaitForSuccess("qat-dpdk-test-crypto-perf-tc1-gen4", 300*time.Second) - output, _ := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, "qat-dpdk-test-crypto-perf-tc1-gen4", "crypto-perf") + e2epod.NewPodClient(f).WaitForSuccess(ctx, "qat-dpdk-test-crypto-perf-tc1-gen4", 300*time.Second) + + output, _ := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, "qat-dpdk-test-crypto-perf-tc1-gen4", "crypto-perf") framework.Logf("crypto-perf output:\n %s", output) }) }) ginkgo.Context("When QAT Gen4 resources are available with compress (dc) services enabled", func() { - // This BeforeEach runs even before the JustBeforeEach above. ginkgo.BeforeEach(func() { ginkgo.By("creating a configMap before plugin gets deployed") e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "create", "configmap", "--from-literal", "qat.conf=ServicesEnabled=dc", "qat-config") @@ -141,8 +141,8 @@ func describeQatDpdkPlugin() { resourceName = "qat.intel.com/dc" }) - ginkgo.It("deploys a compress pod (openssl) requesting QAT resources", func() { - runCpaSampleCode(f, "32", resourceName) + ginkgo.It("deploys a compress pod (openssl) requesting QAT resources", func(ctx context.Context) { + runCpaSampleCode(ctx, f, "32", resourceName) }) }) @@ -152,25 +152,25 @@ func describeQatDpdkPlugin() { resourceName = "qat.intel.com/generic" }) - ginkgo.It("deploys a crypto pod requesting QAT resources", func() { + ginkgo.It("deploys a crypto pod requesting QAT resources", func(ctx context.Context) { ginkgo.By("submitting a crypto pod requesting QAT resources") e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-k", filepath.Dir(cryptoTestYamlPath)) ginkgo.By("waiting the crypto pod to finish successfully") - e2epod.NewPodClient(f).WaitForSuccess("qat-dpdk-test-crypto-perf-tc1", 60*time.Second) + e2epod.NewPodClient(f).WaitForSuccess(ctx, "qat-dpdk-test-crypto-perf-tc1", 60*time.Second) }) - ginkgo.It("deploys a compress pod requesting QAT resources", func() { + ginkgo.It("deploys a compress pod requesting QAT resources", func(ctx context.Context) { ginkgo.By("submitting a compress pod requesting QAT resources") e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-k", filepath.Dir(compressTestYamlPath)) ginkgo.By("waiting the compress pod to finish successfully") - e2epod.NewPodClient(f).WaitForSuccess("qat-dpdk-test-compress-perf-tc1", 60*time.Second) + e2epod.NewPodClient(f).WaitForSuccess(ctx, "qat-dpdk-test-compress-perf-tc1", 60*time.Second) }) }) } -func runCpaSampleCode(f *framework.Framework, runTests string, resourceName v1.ResourceName) { +func runCpaSampleCode(ctx context.Context, f *framework.Framework, runTests string, resourceName v1.ResourceName) { ginkgo.By("submitting a pod requesting QAT" + resourceName.String() + "resources") podSpec := &v1.Pod{ ObjectMeta: metav1.ObjectMeta{Name: "openssl-qat-engine"}, @@ -194,13 +194,13 @@ func runCpaSampleCode(f *framework.Framework, runTests string, resourceName v1.R RestartPolicy: v1.RestartPolicyNever, }, } - pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), podSpec, metav1.CreateOptions{}) + pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, podSpec, metav1.CreateOptions{}) framework.ExpectNoError(err, "pod Create API error") ginkgo.By("waiting the cpa_sample_code pod for the resource" + 
resourceName.String() + "to finish successfully")
-	e2epod.NewPodClient(f).WaitForSuccess(pod.ObjectMeta.Name, 300*time.Second)
+	e2epod.NewPodClient(f).WaitForSuccess(ctx, pod.ObjectMeta.Name, 300*time.Second)
-	output, _ := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, pod.Spec.Containers[0].Name)
+	output, _ := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, pod.ObjectMeta.Name, pod.Spec.Containers[0].Name)
 	framework.Logf("cpa_sample_code output:\n %s", output)
 }
diff --git a/test/e2e/qat/qatplugin_kernel.go b/test/e2e/qat/qatplugin_kernel.go
index 29275e019..227a6980e 100644
--- a/test/e2e/qat/qatplugin_kernel.go
+++ b/test/e2e/qat/qatplugin_kernel.go
@@ -51,16 +51,16 @@ func describeQatKernelPlugin() {
 
 	var dpPodName string
 
-	ginkgo.BeforeEach(func() {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		ginkgo.By("deploying QAT plugin in kernel mode")
 		e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "create", "-f", yamlPath)
 
 		ginkgo.By("waiting for QAT plugin's availability")
-		podList, err := e2epod.WaitForPodsWithLabelRunningReady(f.ClientSet, f.Namespace.Name,
+		podList, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, f.ClientSet, f.Namespace.Name,
 			labels.Set{"app": "intel-qat-kernel-plugin"}.AsSelector(), 1 /* one replica */, 100*time.Second)
 		if err != nil {
-			e2edebug.DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
-			e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
+			e2edebug.DumpAllNamespaceInfo(ctx, f.ClientSet, f.Namespace.Name)
+			e2ekubectl.LogFailedContainers(ctx, f.ClientSet, f.Namespace.Name, framework.Logf)
 			framework.Failf("unable to wait for all pods to be running and ready: %v", err)
 		}
 		dpPodName = podList.Items[0].Name
@@ -71,23 +71,23 @@ func describeQatKernelPlugin() {
 		}
 	})
 
-	ginkgo.AfterEach(func() {
+	ginkgo.AfterEach(func(ctx context.Context) {
 		ginkgo.By("undeploying QAT plugin")
 		e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "delete", "-f", yamlPath)
-		if err := e2epod.WaitForPodNotFoundInNamespace(f.ClientSet, dpPodName, f.Namespace.Name, 30*time.Second); err != nil {
+		if err := e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, dpPodName, f.Namespace.Name, 30*time.Second); err != nil {
 			framework.Failf("failed to terminate pod: %v", err)
 		}
 	})
 
 	ginkgo.Context("When QAT resources are available", func() {
-		ginkgo.BeforeEach(func() {
+		ginkgo.BeforeEach(func(ctx context.Context) {
 			ginkgo.By("checking if the resource is allocatable")
-			if err := utils.WaitForNodesWithResource(f.ClientSet, "qat.intel.com/cy1_dc0", 30*time.Second); err != nil {
+			if err := utils.WaitForNodesWithResource(ctx, f.ClientSet, "qat.intel.com/cy1_dc0", 30*time.Second); err != nil {
 				framework.Failf("unable to wait for nodes to have positive allocatable resource: %v", err)
 			}
 		})
 
-		ginkgo.It("deploys a pod requesting QAT resources", func() {
+		ginkgo.It("deploys a pod requesting QAT resources", func(ctx context.Context) {
 			ginkgo.By("submitting a pod requesting QAT resources")
 			podSpec := &v1.Pod{
 				ObjectMeta: metav1.ObjectMeta{Name: "qatplugin-tester"},
@@ -107,12 +107,12 @@ func describeQatKernelPlugin() {
 					RestartPolicy: v1.RestartPolicyNever,
 				},
 			}
-			pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(),
+			pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx,
 				podSpec, metav1.CreateOptions{})
 			framework.ExpectNoError(err, "pod Create API error")
 
 			ginkgo.By("waiting the pod to finish successfully")
-			e2epod.NewPodClient(f).WaitForFinish(pod.ObjectMeta.Name, 60*time.Second)
+			e2epod.NewPodClient(f).WaitForFinish(ctx, pod.ObjectMeta.Name, 60*time.Second)
 		})
 	})
 }
diff --git a/test/e2e/sgx/sgx.go b/test/e2e/sgx/sgx.go
index 408c1bc8b..36a297892 100644
--- a/test/e2e/sgx/sgx.go
+++ b/test/e2e/sgx/sgx.go
@@ -48,7 +48,7 @@ func describe() {
 	f := framework.NewDefaultFramework("sgxplugin")
 	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged
 
-	ginkgo.It("checks availability of SGX resources", func() {
+	ginkgo.It("checks availability of SGX resources", func(ctx context.Context) {
 		ginkgo.By("deploying SGX plugin")
 
 		deploymentWebhookPath, err := utils.LocateRepoFile(kustomizationWebhook)
@@ -56,7 +56,7 @@ func describe() {
 			framework.Failf("unable to locate %q: %v", kustomizationWebhook, err)
 		}
 
-		_ = utils.DeployWebhook(f, deploymentWebhookPath)
+		_ = utils.DeployWebhook(ctx, f, deploymentWebhookPath)
 
 		deploymentPluginPath, err := utils.LocateRepoFile(kustomizationPlugin)
 		if err != nil {
@@ -66,27 +66,27 @@ func describe() {
 		e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-k", filepath.Dir(deploymentPluginPath))
 
 		ginkgo.By("waiting for SGX plugin's availability")
-		podList, err := e2epod.WaitForPodsWithLabelRunningReady(f.ClientSet, f.Namespace.Name,
+		podList, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, f.ClientSet, f.Namespace.Name,
 			labels.Set{"app": "intel-sgx-plugin"}.AsSelector(), 1 /* one replica */, 100*time.Second)
 		if err != nil {
-			e2edebug.DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
-			e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
+			e2edebug.DumpAllNamespaceInfo(ctx, f.ClientSet, f.Namespace.Name)
+			e2ekubectl.LogFailedContainers(ctx, f.ClientSet, f.Namespace.Name, framework.Logf)
 			framework.Failf("unable to wait for all pods to be running and ready: %v", err)
 		}
 
-		ginkgo.By("checking DLB plugin's securityContext")
+		ginkgo.By("checking SGX plugin's securityContext")
 		if err = utils.TestPodsFileSystemInfo(podList.Items); err != nil {
 			framework.Failf("container filesystem info checks failed: %v", err)
 		}
 
 		ginkgo.By("checking if the resource is allocatable")
-		if err = utils.WaitForNodesWithResource(f.ClientSet, "sgx.intel.com/epc", 150*time.Second); err != nil {
+		if err = utils.WaitForNodesWithResource(ctx, f.ClientSet, "sgx.intel.com/epc", 150*time.Second); err != nil {
 			framework.Failf("unable to wait for nodes to have positive allocatable epc resource: %v", err)
 		}
-		if err = utils.WaitForNodesWithResource(f.ClientSet, "sgx.intel.com/enclave", 30*time.Second); err != nil {
+		if err = utils.WaitForNodesWithResource(ctx, f.ClientSet, "sgx.intel.com/enclave", 30*time.Second); err != nil {
 			framework.Failf("unable to wait for nodes to have positive allocatable enclave resource: %v", err)
 		}
-		if err = utils.WaitForNodesWithResource(f.ClientSet, "sgx.intel.com/provision", 30*time.Second); err != nil {
+		if err = utils.WaitForNodesWithResource(ctx, f.ClientSet, "sgx.intel.com/provision", 30*time.Second); err != nil {
 			framework.Failf("unable to wait for nodes to have positive allocatable provision resource: %v", err)
 		}
 
@@ -109,37 +109,38 @@ func describe() {
 				RestartPolicy: v1.RestartPolicyNever,
 			},
 		}
-		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), podSpec, metav1.CreateOptions{})
+		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, podSpec, metav1.CreateOptions{})
 		framework.ExpectNoError(err, "pod Create API error")
 
 		ginkgo.By("waiting the pod to finish successfully")
-		e2epod.NewPodClient(f).WaitForSuccess(pod.ObjectMeta.Name, 60*time.Second)
+
+		e2epod.NewPodClient(f).WaitForSuccess(ctx, pod.ObjectMeta.Name, 60*time.Second)
 
 		e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "delete", "-k", filepath.Dir(deploymentPluginPath))
 	})
 
-	ginkgo.It("deploys SGX plugin with operator", func() {
+	ginkgo.It("deploys SGX plugin with operator", func(ctx context.Context) {
 		utils.Kubectl("", "apply", "-k", "deployments/operator/default/kustomization.yaml")
 
-		if _, err := e2epod.WaitForPodsWithLabelRunningReady(f.ClientSet, ns, labels.Set{"control-plane": "controller-manager"}.AsSelector(), 1, timeout); err != nil {
+		if _, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, f.ClientSet, ns, labels.Set{"control-plane": "controller-manager"}.AsSelector(), 1, timeout); err != nil {
 			framework.Failf("unable to wait for all pods to be running and ready: %v", err)
 		}
 
 		utils.Kubectl("", "apply", "-f", "deployments/operator/samples/deviceplugin_v1_sgxdeviceplugin.yaml")
 
-		if _, err := e2epod.WaitForPodsWithLabelRunningReady(f.ClientSet, ns, labels.Set{"app": "intel-sgx-plugin"}.AsSelector(), 1, timeout); err != nil {
+		if _, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, f.ClientSet, ns, labels.Set{"app": "intel-sgx-plugin"}.AsSelector(), 1, timeout); err != nil {
 			framework.Failf("unable to wait for all pods to be running and ready: %v", err)
 		}
 
-		if err := utils.WaitForNodesWithResource(f.ClientSet, "sgx.intel.com/epc", 150*time.Second); err != nil {
+		if err := utils.WaitForNodesWithResource(ctx, f.ClientSet, "sgx.intel.com/epc", 150*time.Second); err != nil {
 			framework.Failf("unable to wait for nodes to have positive allocatable epc resource: %v", err)
 		}
 
-		if err := utils.WaitForNodesWithResource(f.ClientSet, "sgx.intel.com/enclave", 30*time.Second); err != nil {
+		if err := utils.WaitForNodesWithResource(ctx, f.ClientSet, "sgx.intel.com/enclave", 30*time.Second); err != nil {
 			framework.Failf("unable to wait for nodes to have positive allocatable enclave resource: %v", err)
 		}
 
-		if err := utils.WaitForNodesWithResource(f.ClientSet, "sgx.intel.com/provision", 30*time.Second); err != nil {
+		if err := utils.WaitForNodesWithResource(ctx, f.ClientSet, "sgx.intel.com/provision", 30*time.Second); err != nil {
 			framework.Failf("unable to wait for nodes to have positive allocatable provision resource: %v", err)
 		}
diff --git a/test/e2e/sgxadmissionwebhook/sgxaadmissionwebhook.go b/test/e2e/sgxadmissionwebhook/sgxaadmissionwebhook.go
index 72ae7968f..987f8cacd 100644
--- a/test/e2e/sgxadmissionwebhook/sgxaadmissionwebhook.go
+++ b/test/e2e/sgxadmissionwebhook/sgxaadmissionwebhook.go
@@ -46,46 +46,46 @@ func describe() {
 
 	var webhook v1.Pod
 
-	ginkgo.BeforeEach(func() {
+	ginkgo.BeforeEach(func(ctx context.Context) {
 		kustomizationPath, err := utils.LocateRepoFile(kustomizationYaml)
 		if err != nil {
 			framework.Failf("unable to locate %q: %v", kustomizationYaml, err)
 		}
-		webhook = utils.DeployWebhook(f, kustomizationPath)
+		webhook = utils.DeployWebhook(ctx, f, kustomizationPath)
 	})
-	ginkgo.It("checks the webhook pod is safely configured", func() {
+	ginkgo.It("checks the webhook pod is safely configured", func(ctx context.Context) {
 		err := utils.TestContainersRunAsNonRoot([]v1.Pod{webhook})
 		gomega.Expect(err).To(gomega.BeNil())
-		err = utils.TestWebhookServerTLS(f, "https://intelsgxwebhook-webhook-service")
+		err = utils.TestWebhookServerTLS(ctx, f, "https://intelsgxwebhook-webhook-service")
 		gomega.Expect(err).To(gomega.BeNil())
 	})
-	ginkgo.It("mutates created pods when no quote generation is needed", func() {
+	ginkgo.It("mutates created pods when no quote generation is needed", func(ctx context.Context) {
 		ginkgo.By("submitting the pod")
-		pod := submitPod(f, []string{"test"}, "")
+		pod := submitPod(ctx, f, []string{"test"}, "")
 		ginkgo.By("checking the container resources have been mutated")
-		checkMutatedResources(f, pod.Spec.Containers[0].Resources, []v1.ResourceName{"sgx.intel.com/enclave"}, []v1.ResourceName{"sgx.intel.com/provision"})
+		checkMutatedResources(ctx, f, pod.Spec.Containers[0].Resources, []v1.ResourceName{"sgx.intel.com/enclave"}, []v1.ResourceName{"sgx.intel.com/provision"})
 		ginkgo.By("checking the pod total EPC size annotation is correctly set")
 		gomega.Expect(pod.Annotations["sgx.intel.com/epc"]).To(gomega.Equal("1Mi"))
 	})
-	ginkgo.It("mutates created pods when the container contains the quote generation libraries", func() {
+	ginkgo.It("mutates created pods when the container contains the quote generation libraries", func(ctx context.Context) {
 		ginkgo.By("submitting the pod")
-		pod := submitPod(f, []string{"test"}, "test")
+		pod := submitPod(ctx, f, []string{"test"}, "test")
 		ginkgo.By("checking the container resources have been mutated")
-		checkMutatedResources(f, pod.Spec.Containers[0].Resources, []v1.ResourceName{"sgx.intel.com/enclave", "sgx.intel.com/provision"}, []v1.ResourceName{})
+		checkMutatedResources(ctx, f, pod.Spec.Containers[0].Resources, []v1.ResourceName{"sgx.intel.com/enclave", "sgx.intel.com/provision"}, []v1.ResourceName{})
 		ginkgo.By("checking the pod total EPC size annotation is correctly set")
 		gomega.Expect(pod.Annotations["sgx.intel.com/epc"]).To(gomega.Equal("1Mi"))
 	})
-	ginkgo.It("mutates created pods when the container uses aesmd from a side-car container to generate quotes", func() {
+	ginkgo.It("mutates created pods when the container uses aesmd from a side-car container to generate quotes", func(ctx context.Context) {
 		ginkgo.By("submitting the pod")
-		pod := submitPod(f, []string{"test", "aesmd"}, "aesmd")
+		pod := submitPod(ctx, f, []string{"test", "aesmd"}, "aesmd")
 		ginkgo.By("checking the container resources have been mutated")
-		checkMutatedResources(f, pod.Spec.Containers[0].Resources, []v1.ResourceName{"sgx.intel.com/enclave"}, []v1.ResourceName{"sgx.intel.com/provision"})
-		checkMutatedResources(f, pod.Spec.Containers[1].Resources, []v1.ResourceName{"sgx.intel.com/enclave", "sgx.intel.com/provision"}, []v1.ResourceName{})
+		checkMutatedResources(ctx, f, pod.Spec.Containers[0].Resources, []v1.ResourceName{"sgx.intel.com/enclave"}, []v1.ResourceName{"sgx.intel.com/provision"})
+		checkMutatedResources(ctx, f, pod.Spec.Containers[1].Resources, []v1.ResourceName{"sgx.intel.com/enclave", "sgx.intel.com/provision"}, []v1.ResourceName{})
 		ginkgo.By("checking the container volumes have been mutated")
 		checkMutatedVolumes(f, pod, "aesmd-socket", v1.EmptyDirVolumeSource{})
 		ginkgo.By("checking the container envvars have been mutated")
@@ -94,11 +94,11 @@ func describe() {
 		ginkgo.By("checking the pod total EPC size annotation is correctly set")
 		gomega.Expect(pod.Annotations["sgx.intel.com/epc"]).To(gomega.Equal("2Mi"))
 	})
-	ginkgo.It("mutates created pods where one container uses host/daemonset aesmd to generate quotes", func() {
+	ginkgo.It("mutates created pods where one container uses host/daemonset aesmd to generate quotes", func(ctx context.Context) {
 		ginkgo.By("submitting the pod")
-		pod := submitPod(f, []string{"test"}, "aesmd")
+		pod := submitPod(ctx, f, []string{"test"}, "aesmd")
 		ginkgo.By("checking the container resources have been mutated")
-		checkMutatedResources(f, pod.Spec.Containers[0].Resources, []v1.ResourceName{"sgx.intel.com/enclave"}, []v1.ResourceName{"sgx.intel.com/provision"})
+		checkMutatedResources(ctx, f, pod.Spec.Containers[0].Resources, []v1.ResourceName{"sgx.intel.com/enclave"}, []v1.ResourceName{"sgx.intel.com/provision"})
 		ginkgo.By("checking the container volumes have been mutated")
 		checkMutatedVolumes(f, pod, "aesmd-socket", v1.HostPathVolumeSource{})
 		ginkgo.By("checking the container envvars have been mutated")
@@ -107,13 +107,13 @@ func describe() {
 		ginkgo.By("checking the pod total EPC size annotation is correctly set")
 		gomega.Expect(pod.Annotations["sgx.intel.com/epc"]).To(gomega.Equal("1Mi"))
 	})
-	ginkgo.It("mutates created pods where three containers use host/daemonset aesmd to generate quotes", func() {
+	ginkgo.It("mutates created pods where three containers use host/daemonset aesmd to generate quotes", func(ctx context.Context) {
 		ginkgo.By("submitting the pod")
-		pod := submitPod(f, []string{"test1", "test2", "test3"}, "aesmd")
+		pod := submitPod(ctx, f, []string{"test1", "test2", "test3"}, "aesmd")
 		ginkgo.By("checking the container resources have been mutated")
-		checkMutatedResources(f, pod.Spec.Containers[0].Resources, []v1.ResourceName{"sgx.intel.com/enclave"}, []v1.ResourceName{"sgx.intel.com/provision"})
-		checkMutatedResources(f, pod.Spec.Containers[1].Resources, []v1.ResourceName{"sgx.intel.com/enclave"}, []v1.ResourceName{"sgx.intel.com/provision"})
-		checkMutatedResources(f, pod.Spec.Containers[2].Resources, []v1.ResourceName{"sgx.intel.com/enclave"}, []v1.ResourceName{"sgx.intel.com/provision"})
+		checkMutatedResources(ctx, f, pod.Spec.Containers[0].Resources, []v1.ResourceName{"sgx.intel.com/enclave"}, []v1.ResourceName{"sgx.intel.com/provision"})
+		checkMutatedResources(ctx, f, pod.Spec.Containers[1].Resources, []v1.ResourceName{"sgx.intel.com/enclave"}, []v1.ResourceName{"sgx.intel.com/provision"})
+		checkMutatedResources(ctx, f, pod.Spec.Containers[2].Resources, []v1.ResourceName{"sgx.intel.com/enclave"}, []v1.ResourceName{"sgx.intel.com/provision"})
 		ginkgo.By("checking the container volumes have been mutated")
 		checkMutatedVolumes(f, pod, "aesmd-socket", v1.HostPathVolumeSource{})
 		ginkgo.By("checking the container envvars have been mutated")
@@ -126,7 +126,7 @@ func describe() {
 		ginkgo.By("checking the pod total EPC size annotation is correctly set")
 		gomega.Expect(pod.Annotations["sgx.intel.com/epc"]).To(gomega.Equal("3Mi"))
 	})
-	ginkgo.It("checks that Volumes and VolumeMounts are created only once", func() {
+	ginkgo.It("checks that Volumes and VolumeMounts are created only once", func(ctx context.Context) {
 		ginkgo.By("submitting the pod")
 		podSpec := createPodSpec([]string{"test"}, "aesmd")
 		podSpec.Spec.Volumes = make([]v1.Volume, 0)
@@ -143,7 +143,7 @@ func describe() {
 			Name: "aesmd-socket",
 			MountPath: "/var/run/aesmd",
 		})
-		pod := submitCustomPod(f, podSpec)
+		pod := submitCustomPod(ctx, f, podSpec)
 		ginkgo.By("checking the container volumes have been not mutated")
 		checkMutatedVolumes(f, pod, "aesmd-socket", v1.EmptyDirVolumeSource{})
 	})
@@ -167,12 +167,12 @@ func checkMutatedVolumes(f *framework.Framework, pod *v1.Pod, volumeName string,
 		}
 	}
 }
 
-func checkMutatedResources(f *framework.Framework, r v1.ResourceRequirements, expectedResources, forbiddenResources []v1.ResourceName) {
+func checkMutatedResources(ctx context.Context, f *framework.Framework, r v1.ResourceRequirements, expectedResources, forbiddenResources []v1.ResourceName) {
 	for _, res := range expectedResources {
 		q, ok := r.Limits[res]
 		if !ok {
-			e2edebug.DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
-			e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
+			e2edebug.DumpAllNamespaceInfo(ctx, f.ClientSet, f.Namespace.Name)
+			e2ekubectl.LogFailedContainers(ctx, f.ClientSet, f.Namespace.Name, framework.Logf)
 			framework.Fail("the pod has missing resources")
 		}
@@ -182,15 +182,15 @@ func checkMutatedResources(f *framework.Framework, r v1.ResourceRequirements, ex
 	for _, res := range forbiddenResources {
 		_, ok := r.Limits[res]
 		if ok {
-			e2edebug.DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
-			e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
+			e2edebug.DumpAllNamespaceInfo(ctx, f.ClientSet, f.Namespace.Name)
+			e2ekubectl.LogFailedContainers(ctx, f.ClientSet, f.Namespace.Name, framework.Logf)
 			framework.Fail("the pod has extra resources")
 		}
 	}
 }
 
-func submitCustomPod(f *framework.Framework, podSpec *v1.Pod) *v1.Pod {
-	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(),
+func submitCustomPod(ctx context.Context, f *framework.Framework, podSpec *v1.Pod) *v1.Pod {
+	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx,
 		podSpec, metav1.CreateOptions{})
 	framework.ExpectNoError(err, "pod Create API error")
 
@@ -230,6 +230,6 @@ func createPodSpec(containerNames []string, quoteProvider string) *v1.Pod {
 	return podSpec
 }
 
-func submitPod(f *framework.Framework, containerNames []string, quoteProvider string) *v1.Pod {
-	return submitCustomPod(f, createPodSpec(containerNames, quoteProvider))
+func submitPod(ctx context.Context, f *framework.Framework, containerNames []string, quoteProvider string) *v1.Pod {
+	return submitCustomPod(ctx, f, createPodSpec(containerNames, quoteProvider))
 }
diff --git a/test/e2e/utils/utils.go b/test/e2e/utils/utils.go
index 66fdd1e79..2edf2e9d6 100644
--- a/test/e2e/utils/utils.go
+++ b/test/e2e/utils/utils.go
@@ -41,15 +41,15 @@ const (
 )
 
 // WaitForNodesWithResource waits for nodes to have positive allocatable resource.
-func WaitForNodesWithResource(c clientset.Interface, res v1.ResourceName, timeout time.Duration) error {
+func WaitForNodesWithResource(ctx context.Context, c clientset.Interface, res v1.ResourceName, timeout time.Duration) error {
 	framework.Logf("Waiting up to %s for any positive allocatable resource %q", timeout, res)
 
 	start := time.Now()
 
-	err := wait.Poll(poll, timeout,
-		func() (bool, error) {
+	err := wait.PollUntilContextTimeout(ctx, poll, timeout, true,
+		func(ctx context.Context) (bool, error) {
 			for t := time.Now(); time.Since(t) < timeout; time.Sleep(poll) {
-				nodelist, err := c.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})
+				nodelist, err := c.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
 				if err != nil {
 					return false, err
 				}
@@ -75,8 +75,8 @@ func WaitForNodesWithResource(c clientset.Interface, res v1.ResourceName, timeou
 // WaitForPodFailure waits for a pod to fail.
 // This function used to be a part of k8s e2e framework, but was deleted in
 // https://github.com/kubernetes/kubernetes/pull/86732.
-func WaitForPodFailure(f *framework.Framework, name string, timeout time.Duration) {
-	gomega.Expect(e2epod.WaitForPodCondition(f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
+func WaitForPodFailure(ctx context.Context, f *framework.Framework, name string, timeout time.Duration) {
+	gomega.Expect(e2epod.WaitForPodCondition(ctx, f.ClientSet, f.Namespace.Name, name, "success or failure", timeout,
 		func(pod *v1.Pod) (bool, error) {
 			switch pod.Status.Phase {
 			case v1.PodFailed:
@@ -132,8 +132,8 @@ func CreateKustomizationOverlay(namespace, base, overlay string) error {
 }
 
 // DeployWebhook deploys an admission webhook to a framework-specific namespace.
-func DeployWebhook(f *framework.Framework, kustomizationPath string) v1.Pod {
-	if _, err := e2epod.WaitForPodsWithLabelRunningReady(f.ClientSet, "cert-manager",
+func DeployWebhook(ctx context.Context, f *framework.Framework, kustomizationPath string) v1.Pod {
+	if _, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, f.ClientSet, "cert-manager",
 		labels.Set{"app.kubernetes.io/name": "cert-manager"}.AsSelector(), 1 /* one replica */, 10*time.Second); err != nil {
 		framework.Failf("unable to detect running cert-manager: %v", err)
 	}
@@ -152,11 +152,11 @@ func DeployWebhook(f *framework.Framework, kustomizationPath string) v1.Pod {
 
 	e2ekubectl.RunKubectlOrDie(f.Namespace.Name, "apply", "-k", tmpDir)
 
-	podList, err := e2epod.WaitForPodsWithLabelRunningReady(f.ClientSet, f.Namespace.Name,
+	podList, err := e2epod.WaitForPodsWithLabelRunningReady(ctx, f.ClientSet, f.Namespace.Name,
 		labels.Set{"control-plane": "controller-manager"}.AsSelector(), 1 /* one replica */, 60*time.Second)
 	if err != nil {
-		e2edebug.DumpAllNamespaceInfo(f.ClientSet, f.Namespace.Name)
-		e2ekubectl.LogFailedContainers(f.ClientSet, f.Namespace.Name, framework.Logf)
+		e2edebug.DumpAllNamespaceInfo(ctx, f.ClientSet, f.Namespace.Name)
+		e2ekubectl.LogFailedContainers(ctx, f.ClientSet, f.Namespace.Name, framework.Logf)
 		framework.Failf("unable to wait for all pods to be running and ready: %v", err)
 	}
 
@@ -212,7 +212,7 @@ func TestPodsFileSystemInfo(pods []v1.Pod) error {
 	return nil
 }
 
-func TestWebhookServerTLS(f *framework.Framework, serviceName string) error {
+func TestWebhookServerTLS(ctx context.Context, f *framework.Framework, serviceName string) error {
 	podSpec := &v1.Pod{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: "testssl-tester",
@@ -240,12 +240,12 @@ func TestWebhookServerTLS(f *framework.Framework, serviceName string) error {
 		},
 	}
 
-	_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), podSpec, metav1.CreateOptions{})
+	_, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(ctx, podSpec, metav1.CreateOptions{})
 	framework.ExpectNoError(err, "pod Create API error")
 
-	waitErr := e2epod.WaitForPodSuccessInNamespaceTimeout(f.ClientSet, "testssl-tester", f.Namespace.Name, 180*time.Second)
+	waitErr := e2epod.WaitForPodSuccessInNamespaceTimeout(ctx, f.ClientSet, "testssl-tester", f.Namespace.Name, 180*time.Second)
 
-	output, err := e2epod.GetPodLogs(f.ClientSet, f.Namespace.Name, "testssl-tester", "testssl-container")
+	output, err := e2epod.GetPodLogs(ctx, f.ClientSet, f.Namespace.Name, "testssl-tester", "testssl-container")
 	if err != nil {
 		return errors.Wrap(err, "failed to get output for testssl.sh run")
 	}