diff --git a/.snyk b/.snyk index 0554e2db5..b63aa87ff 100644 --- a/.snyk +++ b/.snyk @@ -11,6 +11,7 @@ exclude: - "vendor/github.com/k8stopologyawareschedwg/deployer/pkg/kubeletconfig/*" - "vendor/github.com/golang/**" - "vendor/github.com/google/**" + - "vendor/github.com/go-task/slim-sprig/**" - "vendor/github.com/openshift/**" - "vendor/github.com/onsi/gomega**" - "vendor/github.com/onsi/ginkgo/**" diff --git a/controllers/numaresourcesscheduler_controller.go b/controllers/numaresourcesscheduler_controller.go index 3d161aba1..109864457 100644 --- a/controllers/numaresourcesscheduler_controller.go +++ b/controllers/numaresourcesscheduler_controller.go @@ -39,6 +39,8 @@ import ( "github.com/pkg/errors" + k8swgmanifests "github.com/k8stopologyawareschedwg/deployer/pkg/manifests" + nropv1 "github.com/openshift-kni/numaresources-operator/api/numaresourcesoperator/v1" "github.com/openshift-kni/numaresources-operator/pkg/apply" "github.com/openshift-kni/numaresources-operator/pkg/hash" @@ -194,7 +196,24 @@ func (r *NUMAResourcesSchedulerReconciler) syncNUMASchedulerResources(ctx contex schedSpec := instance.Spec.Normalize() cacheResyncPeriod := unpackAPIResyncPeriod(schedSpec.CacheResyncPeriod) - if err := schedupdate.SchedulerConfigWithFilter(r.SchedulerManifests.ConfigMap, schedSpec.SchedulerName, schedupdate.CleanSchedulerConfig, cacheResyncPeriod); err != nil { + + resyncPeriod := int64(cacheResyncPeriod.Seconds()) + params := k8swgmanifests.ConfigParams{ + ProfileName: schedSpec.SchedulerName, + Cache: &k8swgmanifests.ConfigCacheParams{ + ResyncPeriodSeconds: &resyncPeriod, + }, + } + + schedName, ok := schedstate.SchedulerNameFromObject(r.SchedulerManifests.ConfigMap) + if !ok { + err := fmt.Errorf("missing scheduler name in builtin config map") + klog.V(2).ErrorS(err, "cannot find the scheduler profile name") + return nropv1.NUMAResourcesSchedulerStatus{}, err + } + klog.V(4).InfoS("detected scheduler profile", "profileName", schedName) + + if err := schedupdate.SchedulerConfig(r.SchedulerManifests.ConfigMap, schedName, &params); err != nil { return nropv1.NUMAResourcesSchedulerStatus{}, err } diff --git a/controllers/numaresourcesscheduler_controller_test.go b/controllers/numaresourcesscheduler_controller_test.go index 22df2dcb4..6d1652675 100644 --- a/controllers/numaresourcesscheduler_controller_test.go +++ b/controllers/numaresourcesscheduler_controller_test.go @@ -145,7 +145,7 @@ var _ = ginkgo.Describe("Test NUMAResourcesScheduler Reconcile", func() { name, found := sched.SchedulerNameFromObject(cm) gomega.Expect(found).To(gomega.BeTrue()) - gomega.Expect(name).To(gomega.BeEquivalentTo(testSchedulerName)) + gomega.Expect(name).To(gomega.BeEquivalentTo(testSchedulerName), "found scheduler %q expected %q", name, testSchedulerName) }) ginkgo.It("should expose the resync period in status", func() { @@ -405,11 +405,11 @@ func pop(m map[string]string, k string) string { } func diffYAML(want, got string) (string, error) { - cfgWant, err := depmanifests.DecodeSchedulerConfigFromData([]byte(want)) + cfgWant, err := depmanifests.DecodeSchedulerProfilesFromData([]byte(want)) if err != nil { return "", err } - cfgGot, err := depmanifests.DecodeSchedulerConfigFromData([]byte(got)) + cfgGot, err := depmanifests.DecodeSchedulerProfilesFromData([]byte(got)) if err != nil { return "", err } diff --git a/go.mod b/go.mod index c8967ca19..a1f35117f 100644 --- a/go.mod +++ b/go.mod @@ -9,7 +9,7 @@ require ( github.com/go-logr/logr v1.2.4 github.com/google/go-cmp v0.5.9 github.com/jaypipes/ghw v0.9.0 - 
github.com/k8stopologyawareschedwg/deployer v0.12.1-0.20230322120411-111a4d4522b1 + github.com/k8stopologyawareschedwg/deployer v0.12.2 github.com/k8stopologyawareschedwg/noderesourcetopology-api v0.1.1 github.com/k8stopologyawareschedwg/podfingerprint v0.2.2 github.com/k8stopologyawareschedwg/resource-topology-exporter v0.14.3 @@ -26,14 +26,12 @@ require ( k8s.io/apiextensions-apiserver v0.27.2 k8s.io/apimachinery v0.27.2 k8s.io/client-go v0.27.2 - k8s.io/code-generator v0.26.7 + k8s.io/code-generator v0.26.10 k8s.io/klog/v2 v2.90.1 k8s.io/kubectl v0.25.1 - k8s.io/kubelet v0.26.7 - k8s.io/kubernetes v1.26.7 + k8s.io/kubelet v0.26.10 kubevirt.io/qe-tools v0.1.8 sigs.k8s.io/controller-runtime v0.15.0 - sigs.k8s.io/scheduler-plugins v0.24.9 sigs.k8s.io/yaml v1.3.0 ) @@ -59,12 +57,14 @@ require ( github.com/go-openapi/jsonpointer v0.19.6 // indirect github.com/go-openapi/jsonreference v0.20.1 // indirect github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/glog v1.1.0 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/google/gnostic v0.5.7-v3refs // indirect github.com/google/gofuzz v1.2.0 // indirect + github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect github.com/google/uuid v1.3.0 // indirect github.com/imdario/mergo v0.3.13 // indirect github.com/inconshreveable/mousetrap v1.0.1 // indirect @@ -90,7 +90,7 @@ require ( github.com/vincent-petithory/dataurl v1.0.0 // indirect go.uber.org/ratelimit v0.2.0 // indirect golang.org/x/mod v0.11.0 // indirect - golang.org/x/net v0.12.0 // indirect + golang.org/x/net v0.17.0 // indirect golang.org/x/oauth2 v0.10.0 // indirect golang.org/x/sys v0.13.0 // indirect golang.org/x/term v0.13.0 // indirect @@ -107,11 +107,9 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect howett.net/plist v1.0.0 // indirect - k8s.io/apiserver v0.26.7 // indirect k8s.io/component-base v0.27.2 // indirect k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f // indirect - k8s.io/kube-scheduler v0.26.7 // indirect k8s.io/utils v0.0.0-20230209194617-a36077c30491 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect @@ -120,41 +118,36 @@ require ( replace ( github.com/gogo/protobuf => github.com/gogo/protobuf v1.3.2 golang.org/x/text => golang.org/x/text v0.3.8 - k8s.io/api => k8s.io/api v0.26.7 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.7 - k8s.io/apimachinery => k8s.io/apimachinery v0.26.7 - k8s.io/apiserver => k8s.io/apiserver v0.26.7 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.7 - k8s.io/client-go => k8s.io/client-go v0.26.7 - k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.7 - k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.7 - k8s.io/code-generator => k8s.io/code-generator v0.26.7 - k8s.io/component-base => k8s.io/component-base v0.26.7 - k8s.io/component-helpers => k8s.io/component-helpers v0.26.7 - k8s.io/controller-manager => k8s.io/controller-manager v0.26.7 - k8s.io/cri-api => k8s.io/cri-api v0.26.7 - k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.7 - k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.7 - k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.7 - 
k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.7 - k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.7 - k8s.io/kubectl => k8s.io/kubectl v0.26.7 - k8s.io/kubelet => k8s.io/kubelet v0.26.7 - k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.7 - k8s.io/metrics => k8s.io/metrics v0.26.7 - k8s.io/mount-utils => k8s.io/mount-utils v0.26.7 - k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.26.7 - k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.7 + k8s.io/api => k8s.io/api v0.26.10 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.10 + k8s.io/apimachinery => k8s.io/apimachinery v0.26.10 + k8s.io/apiserver => k8s.io/apiserver v0.26.10 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.10 + k8s.io/client-go => k8s.io/client-go v0.26.10 + k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.10 + k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.10 + k8s.io/code-generator => k8s.io/code-generator v0.26.10 + k8s.io/component-base => k8s.io/component-base v0.26.10 + k8s.io/component-helpers => k8s.io/component-helpers v0.26.10 + k8s.io/controller-manager => k8s.io/controller-manager v0.26.10 + k8s.io/cri-api => k8s.io/cri-api v0.26.10 + k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.10 + k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.10 + k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.10 + k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.10 + k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.10 + k8s.io/kubectl => k8s.io/kubectl v0.26.10 + k8s.io/kubelet => k8s.io/kubelet v0.26.10 + k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.10 + k8s.io/metrics => k8s.io/metrics v0.26.10 + k8s.io/mount-utils => k8s.io/mount-utils v0.26.10 + k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.26.10 + k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.10 sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.14.6 ) // local pinning replace ( github.com/containerd/containerd => github.com/containerd/containerd v1.4.11 - github.com/onsi/ginkgo/v2 => github.com/onsi/ginkgo/v2 v2.4.0 - github.com/onsi/gomega => github.com/onsi/gomega v1.23.0 - github.com/openshift/machine-config-operator => github.com/openshift/machine-config-operator v0.0.1-0.20230724174830-7b54f1dcce4e // release-4.13 - golang.org/x/net => golang.org/x/net v0.17.0 - golang.org/x/sys => golang.org/x/sys v0.13.0 google.golang.org/grpc => google.golang.org/grpc v1.58.3 ) diff --git a/go.sum b/go.sum index e07e197fe..d122045a3 100644 --- a/go.sum +++ b/go.sum @@ -796,8 +796,6 @@ github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/diktyo-io/appgroup-api v0.0.9-alpha h1:rqRNOl4CRD313a+ugZJ2rJAN2sNe5GGd4PfyaV44Dy4= -github.com/diktyo-io/networktopology-api v0.0.8-alpha h1:8bSVk6JdUgYd719mXfHgkGZ2eXOfTF436mLu/TQ7WHM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/drone/envsubst v1.0.3 h1:PCIBwNDYjs50AsLZPYdfhSATKaRg/FJmDc2D6+C2x8g= github.com/drone/envsubst v1.0.3/go.mod h1:N2jZmlMufstn1KEqvbHjw40h1KyTmnVzHcSc9bFiJ2g= @@ -825,7 +823,6 @@ github.com/evanphx/json-patch/v5 v5.6.0/go.mod 
h1:G79N1coSVB93tBe7j6PhzjmR3/2Vvl github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= -github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= @@ -881,6 +878,8 @@ github.com/go-pdf/fpdf v0.5.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhO github.com/go-pdf/fpdf v0.6.0/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhOh5M= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= +github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -970,6 +969,7 @@ github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLe github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/s2a-go v0.1.0/go.mod h1:OJpEgntRZo8ugHpF9hkoLJbS5dSI20XZeXJ9JVywLlM= @@ -1007,6 +1007,7 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4Zs github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -1023,7 +1024,6 @@ github.com/jaypipes/pcidb v1.0.0/go.mod h1:TnYUvqhPBzCKnH34KrIX22kAeEbDCSRJ9cqLR github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod 
h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -1037,8 +1037,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/jung-kurt/gofpdf v1.0.0/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes= -github.com/k8stopologyawareschedwg/deployer v0.12.1-0.20230322120411-111a4d4522b1 h1:whbW4RyuashGgR76tQtIKygoBS67cnt12SiDV3hW26A= -github.com/k8stopologyawareschedwg/deployer v0.12.1-0.20230322120411-111a4d4522b1/go.mod h1:oE2sLyAaP5IkKmmHh4+gZHLK/RSPvgW68jQO9eKB7PQ= +github.com/k8stopologyawareschedwg/deployer v0.12.2 h1:8iuh/DbGnk2FkyyvahjhOVcllDm12tDCm22sFFvji+Q= +github.com/k8stopologyawareschedwg/deployer v0.12.2/go.mod h1:x0WEm2x698xNV9cqGHkeS1PqIs6NQ4CVUwYONVvtHe4= github.com/k8stopologyawareschedwg/noderesourcetopology-api v0.1.1 h1:BI3L7hNqRvXtB42FO4NI/0ZjDDVRPOMBDFLShhFtf28= github.com/k8stopologyawareschedwg/noderesourcetopology-api v0.1.1/go.mod h1:AkACMQGiTgCt0lQw3m7TTU8PLH9lYKNK5e9DqFf5VuM= github.com/k8stopologyawareschedwg/podfingerprint v0.2.2 h1:iFHPfZInM9pz2neye5RdmORMp1hPmte1EGJYpOOzZVg= @@ -1106,26 +1106,41 @@ github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8m github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= -github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4= +github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod 
h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= +github.com/onsi/ginkgo/v2 v2.1.6/go.mod h1:MEH45j8TBi6u9BMogfbp0stKC5cdGjumZj5Y7AG4VIk= +github.com/onsi/ginkgo/v2 v2.3.0/go.mod h1:Eew0uilEqZmIEZr8JrvYlvOM7Rr6xzTmMV8AyFNU9d0= github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo= -github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys= +github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= +github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.20.1/go.mod h1:DtrZpjmvpn2mPm4YWQa0/ALMDj9v4YxLgojwPeREyVo= +github.com/onsi/gomega v1.21.1/go.mod h1:iYAIXgPSaDHak0LCMA+AWBpIKBr8WZicMxnE8luStNc= +github.com/onsi/gomega v1.22.1/go.mod h1:x6n7VNe4hw0vkyYUM4mjIXx3JbLiPaBPNgB7PRQ1tuM= github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg= +github.com/onsi/gomega v1.27.7 h1:fVih9JD6ogIiHUN6ePK7HJidyEDpWGVB5mzM7cWNXoU= +github.com/onsi/gomega v1.27.7/go.mod h1:1p8OOlwo2iUUDsHnOrjE5UKYJ+e3W8eQ3qSlRahPmr4= github.com/openshift/api v0.0.0-20230330150608-05635858d40f h1:mGpCtfoehMcvmg/sSYLiv6nCbTl04cmtkUfYzP7H1AQ= github.com/openshift/api v0.0.0-20230330150608-05635858d40f/go.mod h1:ctXNyWanKEjGj8sss1KjjHQ3ENKFm33FFnS5BKaIPh4= github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c h1:CV76yFOTXmq9VciBR3Bve5ZWzSxdft7gaMVB3kS0rwg= github.com/openshift/client-go v0.0.0-20221019143426-16aed247da5c/go.mod h1:lFMO8mLHXWFzSdYvGNo8ivF9SfF6zInA8ZGw4phRnUE= github.com/openshift/machine-config-operator v0.0.1-0.20230724174830-7b54f1dcce4e h1:lgQ2Iy0NHk/iBaR9yvqSqROCiOovpkbc8MRYSIpHf+M= github.com/openshift/machine-config-operator v0.0.1-0.20230724174830-7b54f1dcce4e/go.mod h1:xwAAOZMhwF91Ii8/yfOwp+yH5+GJS7VX90NI39RmSyo= -github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc= -github.com/paypal/load-watcher v0.2.2 h1:tKCy3Ts8LnihFSLKmyrOODrdt9ulbRXe0MFbxe9U2Bk= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= github.com/phpdave11/gofpdi v1.0.12/go.mod h1:vBmVV0Do6hSBHC8uKUQ71JGW+ZGQq74llk/7bXwjDoI= @@ -1187,7 +1202,6 @@ github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeV github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= -github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= @@ -1240,7 +1254,7 @@ 
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.0/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.35.1/go.mod h1:9NiG9I2aHTKkcxqCILhjtyNA1QEiCjdBACv4IvrFQ+c= go.opentelemetry.io/otel v1.8.0/go.mod h1:2pkj+iMj0o03Y+cW6/m8Y4WkRdYN3AvCXCnzRMp9yvM= go.opentelemetry.io/otel v1.10.0/go.mod h1:NbvWjCthWHKBEUMpf0/v8ZRZlni86PpGFEMA9pnQSnQ= go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.10.0/go.mod h1:78XhIg8Ht9vR4tbLNUhXsiOnE2HOuSeKAiAcoVQEpOY= @@ -1267,10 +1281,12 @@ go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= @@ -1278,6 +1294,8 @@ golang.org/x/crypto v0.0.0-20220314234659-1baeb1ce4c0b/go.mod h1:IxCIyHEi3zRg3s0 golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/crypto v0.10.0/go.mod h1:o4eNf7Ede1fv+hwOwZsTHl9EsPFO6q6ZvYR8vYfY45I= +golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio= golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1340,6 +1358,74 @@ golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net 
v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221012135044-0b7e1fb9d458/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.0.0-20221014081412-f15817d10f9b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk= +golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= +golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= +golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.11.0/go.mod h1:2L/ixqYpgIVXmeoSA/4Lu7BzTG4KIyPIryS4IsOd1oQ= +golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA= golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 
v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1372,6 +1458,7 @@ golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4 golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1389,12 +1476,118 @@ golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200917073148-efd3b9a0ff20/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210304124612-50617c2ba197/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220615213510-4f61da869c0c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220624220833-87e55d714810/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220829200755-d48e67d00261/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.9.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.13.0 h1:Af8nKPmuFypiUBjVoU9V20FiaFXOcuZI21p0ycVYYGE= golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= +golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.9.0/go.mod h1:M6DEAAIenWoTxdKrOltXcmDY3rSplQUkrvaDU5FcQyo= +golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o= golang.org/x/term v0.13.0 h1:bb+I9cTfFazGW51MZqBVmZy7+JEJMouUHTUSKVQLBek= golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY= @@ -1457,6 +1650,7 @@ golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4f golang.org/x/tools v0.0.0-20201124115921-2c860bdd6e78/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -1751,6 +1945,7 @@ gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= 
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -1783,21 +1978,18 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las= howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= -k8s.io/api v0.26.7 h1:Lf4iEBEJb5OFNmawtBfSZV/UNi9riSJ0t1qdhyZqI40= -k8s.io/api v0.26.7/go.mod h1:Vk9bMadzA49UHPmHB//lX7VRCQSXGoVwfLd3Sc1SSXI= -k8s.io/apiextensions-apiserver v0.26.7 h1:L7ImW831auK1ZQBtNJPaG5qyrFxeISEBlgfZJfnaIPk= -k8s.io/apiextensions-apiserver v0.26.7/go.mod h1:3DbZBfS3kO7/Jaj7qqzcBtPcKP0/KcklyAnk8zc+fs4= -k8s.io/apimachinery v0.26.7 h1:590jSBwaSHCAFCqltaEogY/zybFlhGsnLteLpuF2wig= -k8s.io/apimachinery v0.26.7/go.mod h1:qYzLkrQ9lhrZRh0jNKo2cfvf/R1/kQONnSiyB7NUJU0= -k8s.io/apiserver v0.26.7 h1:NX/zBZZn4R+Cq6shwyn8Pn8REd0yJJ16dbtv9WkEVEU= -k8s.io/apiserver v0.26.7/go.mod h1:r0wDRWHI7VL/KlQLTkJJBVGZ3KeNfv+VetlyRtr86xs= -k8s.io/client-go v0.26.7 h1:hyU9aKHlwVOykgyxzGYkrDSLCc4+mimZVyUJjPyUn1E= -k8s.io/client-go v0.26.7/go.mod h1:okYjy0jtq6sdeztALDvCh24tg4opOQS1XNvsJlERDAo= -k8s.io/code-generator v0.26.7 h1:bZkIAVLFfhYOfMXb2nRb0xKFmBTI6o2phX/4Q0ay87g= -k8s.io/code-generator v0.26.7/go.mod h1:seNkA/wYpeG1GT1REW1xHk4MCjQcVbx6FFAXKwCgdlE= -k8s.io/component-base v0.26.7 h1:uqsOyZh0Zqoaup8tmHa491D/CvgFdGUs+X2H/inNUKM= -k8s.io/component-base v0.26.7/go.mod h1:CZe1HTmX/DQdeBrb9XYOXzs96jXth8ZbFvhLMsoJLUg= -k8s.io/component-helpers v0.26.7 h1:WWdNnJfRQcsxP1NCcMhGMpi9vdlOKPFzZzx48wREvzE= +k8s.io/api v0.26.10 h1:skTnrDR0r8dg4MMLf6YZIzugxNM0BjFsWKPkNc5kOvk= +k8s.io/api v0.26.10/go.mod h1:ou/H3yviqrHtP/DSPVTfsc7qNfmU06OhajytJfYXkXw= +k8s.io/apiextensions-apiserver v0.26.10 h1:wAriTUc6l7gUqJKOxhmXnYo/VNJzk4oh4QLCUR4Uq+k= +k8s.io/apiextensions-apiserver v0.26.10/go.mod h1:N2qhlxkhJLSoC4f0M1/1lNG627b45SYqnOPEVFoQXw4= +k8s.io/apimachinery v0.26.10 h1:aE+J2KIbjctFqPp3Y0q4Wh2PD+l1p2g3Zp4UYjSvtGU= +k8s.io/apimachinery v0.26.10/go.mod h1:iT1ZP4JBP34wwM+ZQ8ByPEQ81u043iqAcsJYftX9amM= +k8s.io/client-go v0.26.10 h1:4mDzl+1IrfRxh4Ro0s65JRGJp14w77gSMUTjACYWVRo= +k8s.io/client-go v0.26.10/go.mod h1:sh74ig838gCckU4ElYclWb24lTesPdEDPnlyg5vcbkA= +k8s.io/code-generator v0.26.10 h1:YHyiMDqabyW+S4s6WglcfsUJMl5GlpNPoFEwrS7/tIY= +k8s.io/code-generator v0.26.10/go.mod h1:+IHzChHYqL6v5M5KVRglocWMzdSzH3I2jRXZK05yZ9I= +k8s.io/component-base v0.26.10 h1:vl3Gfe5aC09mNxfnQtTng7u3rnBVrShOK3MAkqEleb0= +k8s.io/component-base v0.26.10/go.mod h1:/IDdENUHG5uGxqcofZajovYXE9KSPzJ4yQbkYQt7oN0= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= @@ -1809,15 +2001,10 @@ k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4= k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f 
h1:2kWPakN3i/k81b0gvD5C5FJ2kxm1WrQFanWchyKuqGg= k8s.io/kube-openapi v0.0.0-20230501164219-8b0f38b5fd1f/go.mod h1:byini6yhqGC14c3ebc/QwanvYwhuMWF6yz2F8uwW8eg= -k8s.io/kube-scheduler v0.26.7 h1:WW7vTe0CqziQ5rwRsrMkhgYye5qgl9vWKwxVcdIONxI= -k8s.io/kube-scheduler v0.26.7/go.mod h1:lkxnrzPiHUllMiBKuV9Gsxvkmm+kk6Mafvnyq+z8DKs= -k8s.io/kubectl v0.26.7 h1:s24r6MjKDMW4sMOsuBLaNYQHlweTZeDC0BPkMiom8s0= -k8s.io/kubectl v0.26.7/go.mod h1:4PGqS2bPQ5yGE0ZSQajzYdWKFUAi8HiuWBZQ2/iEFHg= -k8s.io/kubelet v0.26.7 h1:YzVtyhQzKcpbZktVyeOMzE5d77fFUKf5TiWD/fDKMnc= -k8s.io/kubelet v0.26.7/go.mod h1:X/T8bnhE26lE5hOkpAVXx0MQQfXg1dwlsngz7avDWyI= -k8s.io/kubernetes v1.26.7 h1:524bMbtT/JVip9yO/nm0vrNsS/pswo0BS5U11nvjN6U= -k8s.io/kubernetes v1.26.7/go.mod h1:EBE8dfGfk2sZ3yzZVQjr1wQ/k28/wwaajL/1+77Cjmg= -k8s.io/metrics v0.26.7 h1:GziC+HlH1Gpbh4xrI5Vfz8QxBmy5nXzzRiul2HS5Ioc= +k8s.io/kubectl v0.26.10 h1:UoHA2Apb/Ack+B3evJjokbQ1shq6WdAmVi9AtWiY1B8= +k8s.io/kubectl v0.26.10/go.mod h1:U8Zb+jkWVI3H/LSbCDHQ0d70uYmOJtNQk9V2fmg7tGw= +k8s.io/kubelet v0.26.10 h1:/ChL4fCohFNPEvZbpt6qFXMrwFgLw0dgRaseMQ1wehw= +k8s.io/kubelet v0.26.10/go.mod h1:CptPtpIILi3Z0Z2522hMBF+gnDW/rwWGTg3fteoK0Qk= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20221107191617-1a15be271d1d/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20230209194617-a36077c30491 h1:r0BAOLElQnnFhE/ApUsg3iHdVYYPBjNSSOMowRZxxsY= @@ -1884,8 +2071,6 @@ sigs.k8s.io/controller-runtime v0.14.6/go.mod h1:WqIdsAY6JBsjfc/CqO0CORmNtoCtE4S sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/scheduler-plugins v0.24.9 h1:9oGtwk6uh7mZMCX8+O+PipQzBiRq9d2+E3xq1cn7zbc= -sigs.k8s.io/scheduler-plugins v0.24.9/go.mod h1:0u2b/0SwY2ozDhOD/f1S3e5IbStoDFLUK8yP5dJTaQ8= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/pkg/numaresourcesscheduler/manifests/yaml/configmap.nrt.yaml b/pkg/numaresourcesscheduler/manifests/yaml/configmap.nrt.yaml index 2dfd8172c..534fde4ba 100644 --- a/pkg/numaresourcesscheduler/manifests/yaml/configmap.nrt.yaml +++ b/pkg/numaresourcesscheduler/manifests/yaml/configmap.nrt.yaml @@ -43,5 +43,7 @@ data: pluginConfig: - name: NodeResourceTopologyMatch args: + apiVersion: kubescheduler.config.k8s.io/v1beta2 + kind: NodeResourceTopologyMatchArgs scoringStrategy: type: LeastAllocated diff --git a/pkg/numaresourcesscheduler/objectstate/sched/sched.go b/pkg/numaresourcesscheduler/objectstate/sched/sched.go index 6f404589d..ec8541d3b 100644 --- a/pkg/numaresourcesscheduler/objectstate/sched/sched.go +++ b/pkg/numaresourcesscheduler/objectstate/sched/sched.go @@ -22,6 +22,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/klog/v2" "sigs.k8s.io/controller-runtime/pkg/client" "github.com/k8stopologyawareschedwg/deployer/pkg/manifests" @@ -154,17 +155,20 @@ func SchedulerNameFromObject(obj client.Object) (string, bool) { if !ok { return "", false } - schedCfg, err := manifests.KubeSchedulerConfigurationFromData([]byte(data)) + + 
allParams, err := manifests.DecodeSchedulerProfilesFromData([]byte(data)) if err != nil { return "", false } - for _, schedProf := range schedCfg.Profiles { - // TODO: actually check this profile refers to a NodeResourceTopologyMatch - if schedProf.SchedulerName != nil { - return *schedProf.SchedulerName, true - } + if len(allParams) == 0 { + return "", false + } + + params := allParams[0] + if len(allParams) > 1 { + klog.InfoS("detected more params than expected, using first", "profileName", params.ProfileName, "count", len(allParams)) } - return "", false + return params.ProfileName, true } func NewSchedConfigVolume(schedVolumeConfigName, configMapName string) corev1.Volume { diff --git a/pkg/objectupdate/sched/compat.go b/pkg/objectupdate/sched/compat.go deleted file mode 100644 index 90f984f3c..000000000 --- a/pkg/objectupdate/sched/compat.go +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright 2023 Red Hat, Inc. - */ - -package sched - -import ( - "bytes" - "fmt" - - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/klog/v2" - - "sigs.k8s.io/yaml" - - schedscheme "sigs.k8s.io/scheduler-plugins/apis/config/scheme" - schedapiv1beta2 "sigs.k8s.io/scheduler-plugins/apis/config/v1beta2" - - schedstate "github.com/openshift-kni/numaresources-operator/pkg/numaresourcesscheduler/objectstate/sched" -) - -// all this pain because in the internal unversioned types cacheResyncPeriodSeconds is NOT a pointer type :\ -func CleanSchedulerConfig(data []byte) []byte { - var r unstructured.Unstructured - if err := yaml.Unmarshal(data, &r.Object); err != nil { - klog.ErrorS(err, "cannot unmarshal scheduler config, passing through") - return data - } - - profiles, ok, err := unstructured.NestedSlice(r.Object, "profiles") - if !ok || err != nil { - klog.ErrorS(err, "failed to process unstructured data", "profiles", ok) - return data - } - for _, prof := range profiles { - profile, ok := prof.(map[string]interface{}) - if !ok { - klog.V(1).InfoS("unexpected profile data") - return data - } - - pluginConfigs, ok, err := unstructured.NestedSlice(profile, "pluginConfig") - if !ok || err != nil { - klog.ErrorS(err, "failed to process unstructured data", "pluginConfig", ok) - return data - } - for _, plConf := range pluginConfigs { - pluginConf, ok := plConf.(map[string]interface{}) - if !ok { - klog.V(1).InfoS("unexpected profile coonfig data") - return data - } - - name, ok, err := unstructured.NestedString(pluginConf, "name") - if !ok || err != nil { - klog.ErrorS(err, "failed to process unstructured data", "name", ok) - return data - } - if name != schedstate.SchedulerPluginName { - continue - } - args, ok, err := unstructured.NestedMap(pluginConf, "args") - if !ok || err != nil { - klog.ErrorS(err, "failed to process unstructured data", "args", ok) - return data - } - - // TODO - resyncPeriod, ok, err := unstructured.NestedFloat64(args, "cacheResyncPeriodSeconds") - if !ok || err != nil { - klog.ErrorS(err, 
"failed to process unstructured data", "cacheResyncPeriodSeconds", ok) - return data - } - - if resyncPeriod > 0 { - return data // nothing to do! - } - - delete(args, "cacheResyncPeriodSeconds") - - if err := unstructured.SetNestedMap(pluginConf, args, "args"); err != nil { - klog.ErrorS(err, "failed to override unstructured data", "data", "args") - return data - } - } - - if err := unstructured.SetNestedSlice(profile, pluginConfigs, "pluginConfig"); err != nil { - klog.ErrorS(err, "failed to override unstructured data", "data", "pluginConfig") - return data - } - } - - if err := unstructured.SetNestedSlice(r.Object, profiles, "profiles"); err != nil { - klog.ErrorS(err, "failed to override unstructured data", "data", "profiles") - return data - } - - newData, err := encodeUnstructuredSchedulerConfigToData(r) - if err != nil { - klog.ErrorS(err, "cannot re-encode scheduler config, passing through") - return data // silent passthrough - } - return newData -} - -func encodeUnstructuredSchedulerConfigToData(r unstructured.Unstructured) ([]byte, error) { - yamlInfo, ok := runtime.SerializerInfoForMediaType(schedscheme.Codecs.SupportedMediaTypes(), runtime.ContentTypeYAML) - if !ok { - return nil, fmt.Errorf("unable to locate encoder -- %q is not a supported media type", runtime.ContentTypeYAML) - } - - encoder := schedscheme.Codecs.EncoderForVersion(yamlInfo.Serializer, schedapiv1beta2.SchemeGroupVersion) - - var buf bytes.Buffer - err := encoder.Encode(&r, &buf) - if err != nil { - return nil, err - } - - return buf.Bytes(), nil -} diff --git a/pkg/objectupdate/sched/compat_test.go b/pkg/objectupdate/sched/compat_test.go deleted file mode 100644 index 9d8599301..000000000 --- a/pkg/objectupdate/sched/compat_test.go +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * Copyright 2023 Red Hat, Inc. 
- */ - -package sched - -import ( - "testing" - "time" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - schedstate "github.com/openshift-kni/numaresources-operator/pkg/numaresourcesscheduler/objectstate/sched" -) - -func TestCleanSchedulerConfig(t *testing.T) { - type testCase struct { - name string - cacheResyncPeriod time.Duration - isErrExpected bool - configMap corev1.ConfigMap - expectedYAML string - } - - testCases := []testCase{ - { - name: "enable-reserve", - cacheResyncPeriod: 3 * time.Second, - configMap: corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "test-ns", - }, - Data: map[string]string{ - "config.yaml": schedConfig, - }, - }, - expectedYAML: expectedYAMLWithReconcilePeriod, - }, - { - name: "tune-reserve", - cacheResyncPeriod: 3 * time.Second, - configMap: corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "test-ns", - }, - Data: map[string]string{ - "config.yaml": schedConfigWithPeriod, - }, - }, - expectedYAML: expectedYAMLWithReconcilePeriod, - }, - { - name: "keep-reserve-enabled", - cacheResyncPeriod: 3 * time.Second, - configMap: corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "test-ns", - }, - Data: map[string]string{ - "config.yaml": schedConfigWithParams, - }, - }, - expectedYAML: expectedYAMLWithReconcilePeriod, - }, - { - name: "keep-reserve-disabled", - configMap: corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "test-ns", - }, - Data: map[string]string{ - "config.yaml": schedConfig, - }, - }, - expectedYAML: expectedYAMLWithoutReconcile, - }, - { - name: "disable-reconcile", - configMap: corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "test-ns", - }, - Data: map[string]string{ - "config.yaml": schedConfigWithParams, - }, - }, - expectedYAML: expectedYAMLWithoutReconcile, - }, - { - name: "remove-zero-reconcile", - configMap: corev1.ConfigMap{ - ObjectMeta: metav1.ObjectMeta{ - Name: "test-cm", - Namespace: "test-ns", - }, - Data: map[string]string{ - "config.yaml": expectedYAMLWithZeroReconcile, - }, - }, - expectedYAML: expectedYAMLWithoutReconcile, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - if err := SchedulerConfigWithFilter(&tc.configMap, "", CleanSchedulerConfig, tc.cacheResyncPeriod); err != nil { - if !tc.isErrExpected { - t.Errorf("test %q: failed with error: %v", tc.name, err) - } - } - - gotYAML, ok := tc.configMap.Data[schedstate.SchedulerConfigFileName] - if !ok { - t.Fatalf("test %q failed: malformed config map", tc.name) - } - - yamlCompare(t, tc.name, gotYAML, tc.expectedYAML) - }) - } -} diff --git a/pkg/objectupdate/sched/fakeconfs.go b/pkg/objectupdate/sched/fakeconfs.go index 94a2f6ae3..203805475 100644 --- a/pkg/objectupdate/sched/fakeconfs.go +++ b/pkg/objectupdate/sched/fakeconfs.go @@ -29,44 +29,38 @@ kind: KubeSchedulerConfiguration leaderElection: leaderElect: false profiles: - - schedulerName: test-topo-aware-sched - plugins: - filter: - enabled: - - name: NodeResourceTopologyMatch - reserve: - enabled: - - name: NodeResourceTopologyMatch - score: - enabled: - - name: NodeResourceTopologyMatch - # optional plugin configs - pluginConfig: - - name: NodeResourceTopologyMatch - args: - scoringStrategy: - type: LeastAllocated` +- pluginConfig: + - args: + apiVersion: kubescheduler.config.k8s.io/v1beta2 + cacheResyncPeriodSeconds: 3 + kind: NodeResourceTopologyMatchArgs + scoringStrategy: + 
resources: + - name: cpu + weight: 1 + - name: memory + weight: 1 + type: LeastAllocated + name: NodeResourceTopologyMatch + plugins: + filter: + enabled: + - name: NodeResourceTopologyMatch + reserve: + enabled: + - name: NodeResourceTopologyMatch + score: + enabled: + - name: NodeResourceTopologyMatch + schedulerName: test-topo-aware-sched` - schedConfigWithParams = `apiVersion: kubescheduler.config.k8s.io/v1beta2 + schedConfigWithPeriod = `apiVersion: kubescheduler.config.k8s.io/v1beta2 kind: KubeSchedulerConfiguration leaderElection: leaderElect: false profiles: - - schedulerName: test-topo-aware-sched - plugins: - filter: - enabled: - - name: NodeResourceTopologyMatch - reserve: - enabled: - - name: NodeResourceTopologyMatch - score: - enabled: - - name: NodeResourceTopologyMatch - # optional plugin configs - pluginConfig: - - name: NodeResourceTopologyMatch - args: + - pluginConfig: + - args: apiVersion: kubescheduler.config.k8s.io/v1beta2 cacheResyncPeriodSeconds: 3 kind: NodeResourceTopologyMatchArgs @@ -76,58 +70,26 @@ profiles: weight: 1 - name: memory weight: 1 - type: LeastAllocated` - - schedConfigWithPeriod = `apiVersion: kubescheduler.config.k8s.io/v1beta2 -kind: KubeSchedulerConfiguration -leaderElection: - leaderElect: false -profiles: - - schedulerName: test-topo-aware-sched + type: LeastAllocated + name: NodeResourceTopologyMatch plugins: filter: enabled: - - name: NodeResourceTopologyMatch + - name: NodeResourceTopologyMatch reserve: enabled: - - name: NodeResourceTopologyMatch + - name: NodeResourceTopologyMatch score: enabled: - - name: NodeResourceTopologyMatch - # optional plugin configs - pluginConfig: - - name: NodeResourceTopologyMatch - args: - cacheResyncPeriodSeconds: 10 - scoringStrategy: - type: LeastAllocated` + - name: NodeResourceTopologyMatch + schedulerName: test-topo-aware-sched` ) const ( expectedYAMLWithReconcilePeriod = `apiVersion: kubescheduler.config.k8s.io/v1beta2 -clientConnection: - acceptContentTypes: "" - burst: 100 - contentType: application/vnd.kubernetes.protobuf - kubeconfig: "" - qps: 50 -enableContentionProfiling: true -enableProfiling: true -healthzBindAddress: "" kind: KubeSchedulerConfiguration leaderElection: leaderElect: false - leaseDuration: 15s - renewDeadline: 10s - resourceLock: leases - resourceName: kube-scheduler - resourceNamespace: kube-system - retryPeriod: 2s -metricsBindAddress: "" -parallelism: 16 -percentageOfNodesToScore: 0 -podInitialBackoffSeconds: 1 -podMaxBackoffSeconds: 10 profiles: - pluginConfig: - args: @@ -142,373 +104,23 @@ profiles: weight: 1 type: LeastAllocated name: NodeResourceTopologyMatch - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - kind: DefaultPreemptionArgs - minCandidateNodesAbsolute: 100 - minCandidateNodesPercentage: 10 - name: DefaultPreemption - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - hardPodAffinityWeight: 1 - kind: InterPodAffinityArgs - name: InterPodAffinity - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - kind: NodeAffinityArgs - name: NodeAffinity - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - kind: NodeResourcesBalancedAllocationArgs - resources: - - name: cpu - weight: 1 - - name: memory - weight: 1 - name: NodeResourcesBalancedAllocation - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - kind: NodeResourcesFitArgs - scoringStrategy: - resources: - - name: cpu - weight: 1 - - name: memory - weight: 1 - type: LeastAllocated - name: NodeResourcesFit - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - 
defaultingType: System - kind: PodTopologySpreadArgs - name: PodTopologySpread - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - bindTimeoutSeconds: 600 - kind: VolumeBindingArgs - name: VolumeBinding plugins: - bind: - enabled: - - name: DefaultBinder - weight: 0 filter: enabled: - - name: NodeUnschedulable - weight: 0 - - name: NodeName - weight: 0 - - name: TaintToleration - weight: 0 - - name: NodeAffinity - weight: 0 - - name: NodePorts - weight: 0 - - name: NodeResourcesFit - weight: 0 - - name: VolumeRestrictions - weight: 0 - - name: EBSLimits - weight: 0 - - name: GCEPDLimits - weight: 0 - - name: NodeVolumeLimits - weight: 0 - - name: AzureDiskLimits - weight: 0 - - name: VolumeBinding - weight: 0 - - name: VolumeZone - weight: 0 - - name: PodTopologySpread - weight: 0 - - name: InterPodAffinity - weight: 0 - name: NodeResourceTopologyMatch - weight: 0 - multiPoint: {} - permit: {} - postBind: {} - postFilter: - enabled: - - name: DefaultPreemption - weight: 0 - preBind: - enabled: - - name: VolumeBinding - weight: 0 - preEnqueue: {} - preFilter: - enabled: - - name: NodeResourcesFit - weight: 0 - - name: NodePorts - weight: 0 - - name: VolumeRestrictions - weight: 0 - - name: PodTopologySpread - weight: 0 - - name: InterPodAffinity - weight: 0 - - name: VolumeBinding - weight: 0 - - name: NodeAffinity - weight: 0 - preScore: - enabled: - - name: InterPodAffinity - weight: 0 - - name: PodTopologySpread - weight: 0 - - name: TaintToleration - weight: 0 - - name: NodeAffinity - weight: 0 - queueSort: - enabled: - - name: PrioritySort - weight: 0 reserve: enabled: - - name: VolumeBinding - weight: 0 - name: NodeResourceTopologyMatch - weight: 0 score: enabled: - - name: NodeResourcesBalancedAllocation - weight: 1 - - name: ImageLocality - weight: 1 - - name: InterPodAffinity - weight: 1 - - name: NodeResourcesFit - weight: 1 - - name: NodeAffinity - weight: 1 - - name: PodTopologySpread - weight: 2 - - name: TaintToleration - weight: 1 - name: NodeResourceTopologyMatch - weight: 0 - schedulerName: test-topo-aware-sched -` - expectedYAMLWithZeroReconcile = `apiVersion: kubescheduler.config.k8s.io/v1beta2 -clientConnection: - acceptContentTypes: "" - burst: 100 - contentType: application/vnd.kubernetes.protobuf - kubeconfig: "" - qps: 50 -enableContentionProfiling: true -enableProfiling: true -healthzBindAddress: "" -kind: KubeSchedulerConfiguration -leaderElection: - leaderElect: false - leaseDuration: 15s - renewDeadline: 10s - resourceLock: leases - resourceName: kube-scheduler - resourceNamespace: kube-system - retryPeriod: 2s -metricsBindAddress: "" -parallelism: 16 -percentageOfNodesToScore: 0 -podInitialBackoffSeconds: 1 -podMaxBackoffSeconds: 10 -profiles: -- pluginConfig: - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - cacheResyncPeriodSeconds: 0 - kind: NodeResourceTopologyMatchArgs - scoringStrategy: - resources: - - name: cpu - weight: 1 - - name: memory - weight: 1 - type: LeastAllocated - name: NodeResourceTopologyMatch - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - kind: DefaultPreemptionArgs - minCandidateNodesAbsolute: 100 - minCandidateNodesPercentage: 10 - name: DefaultPreemption - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - hardPodAffinityWeight: 1 - kind: InterPodAffinityArgs - name: InterPodAffinity - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - kind: NodeAffinityArgs - name: NodeAffinity - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - kind: NodeResourcesBalancedAllocationArgs - 
resources: - - name: cpu - weight: 1 - - name: memory - weight: 1 - name: NodeResourcesBalancedAllocation - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - kind: NodeResourcesFitArgs - scoringStrategy: - resources: - - name: cpu - weight: 1 - - name: memory - weight: 1 - type: LeastAllocated - name: NodeResourcesFit - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - defaultingType: System - kind: PodTopologySpreadArgs - name: PodTopologySpread - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - bindTimeoutSeconds: 600 - kind: VolumeBindingArgs - name: VolumeBinding - plugins: - bind: - enabled: - - name: DefaultBinder - weight: 0 - filter: - enabled: - - name: NodeUnschedulable - weight: 0 - - name: NodeName - weight: 0 - - name: TaintToleration - weight: 0 - - name: NodeAffinity - weight: 0 - - name: NodePorts - weight: 0 - - name: NodeResourcesFit - weight: 0 - - name: VolumeRestrictions - weight: 0 - - name: EBSLimits - weight: 0 - - name: GCEPDLimits - weight: 0 - - name: NodeVolumeLimits - weight: 0 - - name: AzureDiskLimits - weight: 0 - - name: VolumeBinding - weight: 0 - - name: VolumeZone - weight: 0 - - name: PodTopologySpread - weight: 0 - - name: InterPodAffinity - weight: 0 - - name: NodeResourceTopologyMatch - weight: 0 - multiPoint: {} - permit: {} - postBind: {} - postFilter: - enabled: - - name: DefaultPreemption - weight: 0 - preBind: - enabled: - - name: VolumeBinding - weight: 0 - preEnqueue: {} - preFilter: - enabled: - - name: NodeResourcesFit - weight: 0 - - name: NodePorts - weight: 0 - - name: VolumeRestrictions - weight: 0 - - name: PodTopologySpread - weight: 0 - - name: InterPodAffinity - weight: 0 - - name: VolumeBinding - weight: 0 - - name: NodeAffinity - weight: 0 - preScore: - enabled: - - name: InterPodAffinity - weight: 0 - - name: PodTopologySpread - weight: 0 - - name: TaintToleration - weight: 0 - - name: NodeAffinity - weight: 0 - queueSort: - enabled: - - name: PrioritySort - weight: 0 - reserve: - enabled: - - name: VolumeBinding - weight: 0 - - name: NodeResourceTopologyMatch - weight: 0 - score: - enabled: - - name: NodeResourcesBalancedAllocation - weight: 1 - - name: ImageLocality - weight: 1 - - name: InterPodAffinity - weight: 1 - - name: NodeResourcesFit - weight: 1 - - name: NodeAffinity - weight: 1 - - name: PodTopologySpread - weight: 2 - - name: TaintToleration - weight: 1 - - name: NodeResourceTopologyMatch - weight: 0 schedulerName: test-topo-aware-sched ` expectedYAMLWithoutReconcile = `apiVersion: kubescheduler.config.k8s.io/v1beta2 -clientConnection: - acceptContentTypes: "" - burst: 100 - contentType: application/vnd.kubernetes.protobuf - kubeconfig: "" - qps: 50 -enableContentionProfiling: true -enableProfiling: true -healthzBindAddress: "" kind: KubeSchedulerConfiguration leaderElection: leaderElect: false - leaseDuration: 15s - renewDeadline: 10s - resourceLock: leases - resourceName: kube-scheduler - resourceNamespace: kube-system - retryPeriod: 2s -metricsBindAddress: "" -parallelism: 16 -percentageOfNodesToScore: 0 -podInitialBackoffSeconds: 1 -podMaxBackoffSeconds: 10 profiles: - pluginConfig: - args: @@ -522,156 +134,16 @@ profiles: weight: 1 type: LeastAllocated name: NodeResourceTopologyMatch - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - kind: DefaultPreemptionArgs - minCandidateNodesAbsolute: 100 - minCandidateNodesPercentage: 10 - name: DefaultPreemption - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - hardPodAffinityWeight: 1 - kind: InterPodAffinityArgs - 
name: InterPodAffinity - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - kind: NodeAffinityArgs - name: NodeAffinity - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - kind: NodeResourcesBalancedAllocationArgs - resources: - - name: cpu - weight: 1 - - name: memory - weight: 1 - name: NodeResourcesBalancedAllocation - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - kind: NodeResourcesFitArgs - scoringStrategy: - resources: - - name: cpu - weight: 1 - - name: memory - weight: 1 - type: LeastAllocated - name: NodeResourcesFit - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - defaultingType: System - kind: PodTopologySpreadArgs - name: PodTopologySpread - - args: - apiVersion: kubescheduler.config.k8s.io/v1beta2 - bindTimeoutSeconds: 600 - kind: VolumeBindingArgs - name: VolumeBinding plugins: - bind: - enabled: - - name: DefaultBinder - weight: 0 filter: enabled: - - name: NodeUnschedulable - weight: 0 - - name: NodeName - weight: 0 - - name: TaintToleration - weight: 0 - - name: NodeAffinity - weight: 0 - - name: NodePorts - weight: 0 - - name: NodeResourcesFit - weight: 0 - - name: VolumeRestrictions - weight: 0 - - name: EBSLimits - weight: 0 - - name: GCEPDLimits - weight: 0 - - name: NodeVolumeLimits - weight: 0 - - name: AzureDiskLimits - weight: 0 - - name: VolumeBinding - weight: 0 - - name: VolumeZone - weight: 0 - - name: PodTopologySpread - weight: 0 - - name: InterPodAffinity - weight: 0 - name: NodeResourceTopologyMatch - weight: 0 - multiPoint: {} - permit: {} - postBind: {} - postFilter: - enabled: - - name: DefaultPreemption - weight: 0 - preBind: - enabled: - - name: VolumeBinding - weight: 0 - preEnqueue: {} - preFilter: - enabled: - - name: NodeResourcesFit - weight: 0 - - name: NodePorts - weight: 0 - - name: VolumeRestrictions - weight: 0 - - name: PodTopologySpread - weight: 0 - - name: InterPodAffinity - weight: 0 - - name: VolumeBinding - weight: 0 - - name: NodeAffinity - weight: 0 - preScore: - enabled: - - name: InterPodAffinity - weight: 0 - - name: PodTopologySpread - weight: 0 - - name: TaintToleration - weight: 0 - - name: NodeAffinity - weight: 0 - queueSort: - enabled: - - name: PrioritySort - weight: 0 reserve: enabled: - - name: VolumeBinding - weight: 0 - name: NodeResourceTopologyMatch - weight: 0 score: enabled: - - name: NodeResourcesBalancedAllocation - weight: 1 - - name: ImageLocality - weight: 1 - - name: InterPodAffinity - weight: 1 - - name: NodeResourcesFit - weight: 1 - - name: NodeAffinity - weight: 1 - - name: PodTopologySpread - weight: 2 - - name: TaintToleration - weight: 1 - name: NodeResourceTopologyMatch - weight: 0 schedulerName: test-topo-aware-sched ` ) @@ -689,11 +161,11 @@ func yamlCompare(t *testing.T, testName, got, expected string) { } if diffCount > 0 { var err error - err = os.WriteFile("got.yaml", []byte(got), 0644) + err = os.WriteFile(testName+"-got.yaml", []byte(got), 0644) if err != nil { t.Fatalf("cannot write got.yaml") } - err = os.WriteFile("exp.yaml", []byte(expected), 0644) + err = os.WriteFile(testName+"-exp.yaml", []byte(expected), 0644) if err != nil { t.Fatalf("cannot write exp.yaml") } diff --git a/pkg/objectupdate/sched/sched.go b/pkg/objectupdate/sched/sched.go index 8845807b0..d002044c5 100644 --- a/pkg/objectupdate/sched/sched.go +++ b/pkg/objectupdate/sched/sched.go @@ -18,12 +18,12 @@ package sched import ( "fmt" - "time" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/klog/v2" + k8swgmanifests 
"github.com/k8stopologyawareschedwg/deployer/pkg/manifests" k8swgschedupdate "github.com/k8stopologyawareschedwg/deployer/pkg/objectupdate/sched" nropv1 "github.com/openshift-kni/numaresources-operator/api/numaresourcesoperator/v1" @@ -126,11 +126,7 @@ func DeploymentConfigMapSettings(dp *appsv1.Deployment, cmName, cmHash string) { template.Annotations[hash.ConfigMapAnnotation] = cmHash } -func SchedulerConfig(cm *corev1.ConfigMap, name string, cacheResyncPeriod time.Duration) error { - return SchedulerConfigWithFilter(cm, name, Passthrough, cacheResyncPeriod) -} - -func SchedulerConfigWithFilter(cm *corev1.ConfigMap, name string, filterFunc func([]byte) []byte, cacheResyncPeriod time.Duration) error { +func SchedulerConfig(cm *corev1.ConfigMap, name string, params *k8swgmanifests.ConfigParams) error { if cm.Data == nil { return fmt.Errorf("no data found in ConfigMap: %s/%s", cm.Namespace, cm.Name) } @@ -140,19 +136,18 @@ func SchedulerConfigWithFilter(cm *corev1.ConfigMap, name string, filterFunc fun return fmt.Errorf("no data key named: %s found in ConfigMap: %s/%s", schedstate.SchedulerConfigFileName, cm.Namespace, cm.Name) } - newData, err := k8swgschedupdate.RenderConfig(data, name, cacheResyncPeriod) + newData, ok, err := k8swgschedupdate.RenderConfig([]byte(data), name, params) if err != nil { return err } + if !ok { + klog.V(2).InfoS("scheduler config not updated") + } - cm.Data[schedstate.SchedulerConfigFileName] = string(filterFunc([]byte(newData))) + cm.Data[schedstate.SchedulerConfigFileName] = string(newData) return nil } -func Passthrough(data []byte) []byte { - return data -} - func FindContainerByName(podSpec *corev1.PodSpec, containerName string) (*corev1.Container, error) { for idx := 0; idx < len(podSpec.Containers); idx++ { cnt := &podSpec.Containers[idx] diff --git a/pkg/objectupdate/sched/sched_test.go b/pkg/objectupdate/sched/sched_test.go index 060b21792..01496d908 100644 --- a/pkg/objectupdate/sched/sched_test.go +++ b/pkg/objectupdate/sched/sched_test.go @@ -27,6 +27,8 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8swgmanifests "github.com/k8stopologyawareschedwg/deployer/pkg/manifests" + nropv1 "github.com/openshift-kni/numaresources-operator/api/numaresourcesoperator/v1" "github.com/openshift-kni/numaresources-operator/pkg/hash" schedstate "github.com/openshift-kni/numaresources-operator/pkg/numaresourcesscheduler/objectstate/sched" @@ -197,7 +199,10 @@ func TestUpdateSchedulerName(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - if err := SchedulerConfig(&tc.configMap, tc.schedulerName, 0); err != nil { + params := k8swgmanifests.ConfigParams{ + ProfileName: tc.schedulerName, + } + if err := SchedulerConfig(&tc.configMap, "test-topo-aware-sched", ¶ms); err != nil { if !tc.isErrExpected { t.Errorf("test %q: failed with error: %v", tc.name, err) } @@ -260,7 +265,7 @@ func TestUpdateSchedulerConfig(t *testing.T) { Namespace: "test-ns", }, Data: map[string]string{ - "config.yaml": schedConfigWithParams, + "config.yaml": schedConfig, }, }, expectedYAML: expectedYAMLWithReconcilePeriod, @@ -276,7 +281,7 @@ func TestUpdateSchedulerConfig(t *testing.T) { "config.yaml": schedConfig, }, }, - expectedYAML: expectedYAMLWithZeroReconcile, + expectedYAML: expectedYAMLWithoutReconcile, }, { name: "disable-reconcile", @@ -286,16 +291,23 @@ func TestUpdateSchedulerConfig(t *testing.T) { Namespace: "test-ns", }, Data: map[string]string{ - "config.yaml": schedConfigWithParams, + "config.yaml": 
schedConfig, }, }, - expectedYAML: expectedYAMLWithZeroReconcile, + expectedYAML: expectedYAMLWithoutReconcile, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - if err := SchedulerConfig(&tc.configMap, "", tc.cacheResyncPeriod); err != nil { + resyncPeriod := int64(tc.cacheResyncPeriod.Seconds()) + params := k8swgmanifests.ConfigParams{ + Cache: &k8swgmanifests.ConfigCacheParams{ + ResyncPeriodSeconds: &resyncPeriod, + }, + } + + if err := SchedulerConfig(&tc.configMap, "test-topo-aware-sched", ¶ms); err != nil { if !tc.isErrExpected { t.Errorf("test %q: failed with error: %v", tc.name, err) } diff --git a/test/e2e/sched/sched_test.go b/test/e2e/sched/sched_test.go index 04d94946f..51deee8d0 100644 --- a/test/e2e/sched/sched_test.go +++ b/test/e2e/sched/sched_test.go @@ -23,9 +23,7 @@ import ( corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/klog/v2" - schedconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" "sigs.k8s.io/controller-runtime/pkg/client" - pluginconfig "sigs.k8s.io/scheduler-plugins/apis/config" "github.com/google/go-cmp/cmp" . "github.com/onsi/ginkgo/v2" @@ -263,20 +261,16 @@ var _ = Describe("[Scheduler] imageReplacement", func() { Expect(nroschedCM).ToNot(BeNil(), "failed to find ConfigMap owned by %q", nroSchedKey) data, ok := nroschedCM.Data[schedstate.SchedulerConfigFileName] Expect(data).ToNot(BeEmpty(), "no data found under %s/%s", nroschedCM.Namespace, nroschedCM.Name) + Expect(ok).To(BeTrue(), "no data found under %s/%s", nroschedCM.Namespace, nroschedCM.Name) - schedCfg, err := manifests.DecodeSchedulerConfigFromData([]byte(data)) + schedParams, err := manifests.DecodeSchedulerProfilesFromData([]byte(data)) Expect(err).ToNot(HaveOccurred()) - schedProf, pluginConf := findKubeSchedulerProfileByName(schedCfg, schedulerPluginName) - Expect(schedProf).ToNot(BeNil(), "cannot find scheduler profile for %q", schedulerPluginName) - Expect(pluginConf).ToNot(BeNil(), "cannot find plugin config for %q", schedulerPluginName) - - confObj := pluginConf.Args - cfg, ok := confObj.(*pluginconfig.NodeResourceTopologyMatchArgs) - Expect(ok).To(BeTrue(), "NRT arguments are missing for the scheduler config") - - cacheCfg := time.Duration(cfg.CacheResyncPeriodSeconds) * time.Second - Expect(cacheCfg).To(Equal(nroSchedObj.Spec.CacheResyncPeriod.Duration)) + schedCfg := manifests.FindSchedulerProfileByName(schedParams, nroSchedObj.Status.SchedulerName) + Expect(schedCfg).ToNot(BeNil(), "cannot find profile config for profile %q", nroSchedObj.Status.SchedulerName) + Expect(schedCfg.Cache).ToNot(BeNil(), "missing cache configuration") + Expect(schedCfg.Cache.ResyncPeriodSeconds).ToNot(BeNil(), "missing cache resync configuration") + Expect(*schedCfg.Cache.ResyncPeriodSeconds).To(Equal(int64(nroSchedObj.Spec.CacheResyncPeriod.Duration.Seconds()))) By("checking new scheduler pod has been created") dp, err := podlist.With(e2eclient.Client).DeploymentByOwnerReference(context.TODO(), nroSchedObj.UID) @@ -294,17 +288,3 @@ var _ = Describe("[Scheduler] imageReplacement", func() { }) }) }) - -func findKubeSchedulerProfileByName(sc *schedconfig.KubeSchedulerConfiguration, name string) (*schedconfig.KubeSchedulerProfile, *schedconfig.PluginConfig) { - for i := range sc.Profiles { - // if we have a configuration for the NodeResourceTopologyMatch - // this is a valid profile - for j := range sc.Profiles[i].PluginConfig { - if sc.Profiles[i].PluginConfig[j].Name == name { - return &sc.Profiles[i], &sc.Profiles[i].PluginConfig[j] - } - } - } - - 
return nil, nil -} diff --git a/vendor/github.com/go-task/slim-sprig/.editorconfig b/vendor/github.com/go-task/slim-sprig/.editorconfig new file mode 100644 index 000000000..b0c95367e --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/.editorconfig @@ -0,0 +1,14 @@ +# editorconfig.org + +root = true + +[*] +insert_final_newline = true +charset = utf-8 +trim_trailing_whitespace = true +indent_style = tab +indent_size = 8 + +[*.{md,yml,yaml,json}] +indent_style = space +indent_size = 2 diff --git a/vendor/github.com/go-task/slim-sprig/.gitattributes b/vendor/github.com/go-task/slim-sprig/.gitattributes new file mode 100644 index 000000000..176a458f9 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/.gitattributes @@ -0,0 +1 @@ +* text=auto diff --git a/vendor/github.com/go-task/slim-sprig/.gitignore b/vendor/github.com/go-task/slim-sprig/.gitignore new file mode 100644 index 000000000..5e3002f88 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/.gitignore @@ -0,0 +1,2 @@ +vendor/ +/.glide diff --git a/vendor/github.com/go-task/slim-sprig/CHANGELOG.md b/vendor/github.com/go-task/slim-sprig/CHANGELOG.md new file mode 100644 index 000000000..61d8ebffc --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/CHANGELOG.md @@ -0,0 +1,364 @@ +# Changelog + +## Release 3.2.0 (2020-12-14) + +### Added + +- #211: Added randInt function (thanks @kochurovro) +- #223: Added fromJson and mustFromJson functions (thanks @mholt) +- #242: Added a bcrypt function (thanks @robbiet480) +- #253: Added randBytes function (thanks @MikaelSmith) +- #254: Added dig function for dicts (thanks @nyarly) +- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) +- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) +- #268: Added and and all functions for testing conditions (thanks @phuslu) +- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf + (thanks @andrewmostello) +- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) +- #270: Extend certificate functions to handle non-RSA keys + add support for + ed25519 keys (thanks @misberner) + +### Changed + +- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer +- Using semver 3.1.1 and mergo 0.3.11 + +### Fixed + +- #249: Fix htmlDateInZone example (thanks @spawnia) + +NOTE: The dependency github.com/imdario/mergo reverted the breaking change in +0.3.9 via 0.3.10 release. + +## Release 3.1.0 (2020-04-16) + +NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 +that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. + +### Added + +- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) +- #224: Added duration filter (thanks @frebib) +- #205: Added `seq` function (thanks @thadc23) + +### Changed + +- #203: Unlambda functions with correct signature (thanks @muesli) +- #236: Updated the license formatting for GitHub display purposes +- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 + as it causes a breaking change for sprig. 
That issue is tracked at + https://github.com/imdario/mergo/issues/139 + +### Fixed + +- #229: Fix `seq` example in docs (thanks @kalmant) + +## Release 3.0.2 (2019-12-13) + +### Fixed + +- #220: Updating to semver v3.0.3 to fix issue with <= ranges +- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) + +## Release 3.0.1 (2019-12-08) + +### Fixed + +- #212: Updated semver fixing broken constraint checking with ^0.0 + +## Release 3.0.0 (2019-10-02) + +### Added + +- #187: Added durationRound function (thanks @yjp20) +- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) +- #193: Added toRawJson support (thanks @Dean-Coakley) +- #197: Added get support to dicts (thanks @Dean-Coakley) + +### Changed + +- #186: Moving dependency management to Go modules +- #186: Updated semver to v3. This has changes in the way ^ is handled +- #194: Updated documentation on merging and how it copies. Added example using deepCopy +- #196: trunc now supports negative values (thanks @Dean-Coakley) + +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list contcat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + +## Release 2.20.0 (2019-06-18) + +### Added + +- #164: Adding function to get unix epoch for a time (@mattfarina) +- #166: Adding tests for date_in_zone (@mattfarina) + +### Changed + +- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) +- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) +- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) + +### Fixed + +## Release 2.19.0 (2019-03-02) + +IMPORTANT: This release reverts a change from 2.18.0 + +In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. + +We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. 
+ +### Changed + +- Fix substr panic 35fb796 (Alexey igrychev) +- Remove extra period 1eb7729 (Matthew Lorimor) +- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor) +- README edits/fixes/suggestions 08fe136 (Lauri Apple) + + +## Release 2.18.0 (2019-02-12) + +### Added + +- Added mergeOverwrite function +- cryptographic functions that use secure random (see fe1de12) + +### Changed + +- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer) +- Handle has for nil list 9c10885 (Daniel Cohen) +- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder) +- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic) +- Replace outdated goutils imports 01893d2 (Matthew Lorimor) +- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor) +- Handle untyped nil values as paramters to string functions 2b2ec8f (Morten Torkildsen) + +### Fixed + +- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder) +- Fix substr var names and comments d581f80 (Dean Coakley) +- Fix substr documentation 2737203 (Dean Coakley) + +## Release 2.17.1 (2019-01-03) + +### Fixed + +The 2.17.0 release did not have a version pinned for xstrings, which caused compilation failures when xstrings < 1.2 was used. This adds the correct version string to glide.yaml. + +## Release 2.17.0 (2019-01-03) + +### Added + +- adds alder32sum function and test 6908fc2 (marshallford) +- Added kebabcase function ca331a1 (Ilyes512) + +### Changed + +- Update goutils to 1.1.0 4e1125d (Matt Butcher) + +### Fixed + +- Fix 'has' documentation e3f2a85 (dean-coakley) +- docs(dict): fix typo in pick example dc424f9 (Dustin Specker) +- fixes spelling errors... not sure how that happened 4cf188a (marshallford) + +## Release 2.16.0 (2018-08-13) + +### Added + +- add splitn function fccb0b0 (Helgi Þorbjörnsson) +- Add slice func df28ca7 (gongdo) +- Generate serial number a3bdffd (Cody Coons) +- Extract values of dict with values function df39312 (Lawrence Jones) + +### Changed + +- Modify panic message for list.slice ae38335 (gongdo) +- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap) +- Remove duplicated documentation 1d97af1 (Matthew Fisher) +- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson) + +### Fixed + +- Fix file permissions c5f40b5 (gongdo) +- Fix example for buildCustomCert 7779e0d (Tin Lam) + +## Release 2.15.0 (2018-04-02) + +### Added + +- #68 and #69: Add json helpers to docs (thanks @arunvelsriram) +- #66: Add ternary function (thanks @binoculars) +- #67: Allow keys function to take multiple dicts (thanks @binoculars) +- #89: Added sha1sum to crypto function (thanks @benkeil) +- #81: Allow customizing Root CA that used by genSignedCert (thanks @chenzhiwei) +- #92: Add travis testing for go 1.10 +- #93: Adding appveyor config for windows testing + +### Changed + +- #90: Updating to more recent dependencies +- #73: replace satori/go.uuid with google/uuid (thanks @petterw) + +### Fixed + +- #76: Fixed documentation typos (thanks @Thiht) +- Fixed rounding issue on the `ago` function. 
Note, the removes support for Go 1.8 and older + +## Release 2.14.1 (2017-12-01) + +### Fixed + +- #60: Fix typo in function name documentation (thanks @neil-ca-moore) +- #61: Removing line with {{ due to blocking github pages genertion +- #64: Update the list functions to handle int, string, and other slices for compatibility + +## Release 2.14.0 (2017-10-06) + +This new version of Sprig adds a set of functions for generating and working with SSL certificates. + +- `genCA` generates an SSL Certificate Authority +- `genSelfSignedCert` generates an SSL self-signed certificate +- `genSignedCert` generates an SSL certificate and key based on a given CA + +## Release 2.13.0 (2017-09-18) + +This release adds new functions, including: + +- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions +- `floor`, `ceil`, and `round` math functions +- `toDate` converts a string to a date +- `nindent` is just like `indent` but also prepends a new line +- `ago` returns the time from `time.Now` + +### Added + +- #40: Added basic regex functionality (thanks @alanquillin) +- #41: Added ceil floor and round functions (thanks @alanquillin) +- #48: Added toDate function (thanks @andreynering) +- #50: Added nindent function (thanks @binoculars) +- #46: Added ago function (thanks @slayer) + +### Changed + +- #51: Updated godocs to include new string functions (thanks @curtisallen) +- #49: Added ability to merge multiple dicts (thanks @binoculars) + +## Release 2.12.0 (2017-05-17) + +- `snakecase`, `camelcase`, and `shuffle` are three new string functions +- `fail` allows you to bail out of a template render when conditions are not met + +## Release 2.11.0 (2017-05-02) + +- Added `toJson` and `toPrettyJson` +- Added `merge` +- Refactored documentation + +## Release 2.10.0 (2017-03-15) + +- Added `semver` and `semverCompare` for Semantic Versions +- `list` replaces `tuple` +- Fixed issue with `join` +- Added `first`, `last`, `intial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without` + +## Release 2.9.0 (2017-02-23) + +- Added `splitList` to split a list +- Added crypto functions of `genPrivateKey` and `derivePassword` + +## Release 2.8.0 (2016-12-21) + +- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`) +- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`) + +## Release 2.7.0 (2016-12-01) + +- Added `sha256sum` to generate a hash of an input +- Added functions to convert a numeric or string to `int`, `int64`, `float64` + +## Release 2.6.0 (2016-10-03) + +- Added a `uuidv4` template function for generating UUIDs inside of a template. + +## Release 2.5.0 (2016-08-19) + +- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions +- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`) +- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0 + +## Release 2.4.0 (2016-08-16) + +- Adds two functions: `until` and `untilStep` + +## Release 2.3.0 (2016-06-21) + +- cat: Concatenate strings with whitespace separators. +- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First" +- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos" +- indent: Indent blocks of text in a way that is sensitive to "\n" characters. 
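
The 2.3.0 helpers listed above (`cat`, `replace`, `plural`, `indent`) depend only on the Go standard library, so they all survive in slim-sprig — each appears in the fork's `genericMap` later in this patch. A minimal sketch of how they behave through `text/template`, reusing the changelog's own examples (the inline template string and expected outputs are illustrative, not from the patch):

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig"
)

func main() {
	// Each line exercises one 2.3.0 helper. Note the string-last argument
	// order, which lets values be piped in from the left.
	const src = `{{ cat "Me" "First" }}
{{ replace " " "-" "Me First" }}
{{ len "foo" | plural "one foo" "many foos" }}
{{ "a\nb" | indent 2 }}`

	tpl := template.Must(template.New("demo").Funcs(sprig.TxtFuncMap()).Parse(src))
	// Expected output:
	//   Me First
	//   Me-First
	//   many foos
	//     a
	//     b
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```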
+ +## Release 2.2.0 (2016-04-21) + +- Added a `genPrivateKey` function (Thanks @bacongobbler) + +## Release 2.1.0 (2016-03-30) + +- `default` now prints the default value when it does not receive a value down the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`. +- Added accessors for "hermetic" functions. These return only functions that, when given the same input, produce the same output. + +## Release 2.0.0 (2016-03-29) + +Because we switched from `int` to `int64` as the return value for all integer math functions, the library's major version number has been incremented. + +- `min` complements `max` (formerly `biggest`) +- `empty` indicates that a value is the empty value for its type +- `tuple` creates a tuple inside of a template: `{{$t := tuple "a", "b" "c"}}` +- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}` +- Date formatters have been added for HTML dates (as used in `date` input fields) +- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`). + +## Release 1.2.0 (2016-02-01) + +- Added quote and squote +- Added b32enc and b32dec +- add now takes varargs +- biggest now takes varargs + +## Release 1.1.0 (2015-12-29) + +- Added #4: Added contains function. strings.Contains, but with the arguments + switched to simplify common pipelines. (thanks krancour) +- Added Travis-CI testing support + +## Release 1.0.0 (2015-12-23) + +- Initial release diff --git a/vendor/github.com/go-task/slim-sprig/LICENSE.txt b/vendor/github.com/go-task/slim-sprig/LICENSE.txt new file mode 100644 index 000000000..f311b1eaa --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2013-2020 Masterminds + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/go-task/slim-sprig/README.md b/vendor/github.com/go-task/slim-sprig/README.md new file mode 100644 index 000000000..72579471f --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/README.md @@ -0,0 +1,73 @@ +# Slim-Sprig: Template functions for Go templates [![GoDoc](https://godoc.org/github.com/go-task/slim-sprig?status.svg)](https://godoc.org/github.com/go-task/slim-sprig) [![Go Report Card](https://goreportcard.com/badge/github.com/go-task/slim-sprig)](https://goreportcard.com/report/github.com/go-task/slim-sprig) + +Slim-Sprig is a fork of [Sprig](https://github.com/Masterminds/sprig), but with +all functions that depend on external (non standard library) or crypto packages +removed. 
+The reason for this is to make this library more lightweight. Most of these +functions (specially crypto ones) are not needed on most apps, but costs a lot +in terms of binary size and compilation time. + +## Usage + +**Template developers**: Please use Slim-Sprig's [function documentation](https://go-task.github.io/slim-sprig/) for +detailed instructions and code snippets for the >100 template functions available. + +**Go developers**: If you'd like to include Slim-Sprig as a library in your program, +our API documentation is available [at GoDoc.org](http://godoc.org/github.com/go-task/slim-sprig). + +For standard usage, read on. + +### Load the Slim-Sprig library + +To load the Slim-Sprig `FuncMap`: + +```go + +import ( + "html/template" + + "github.com/go-task/slim-sprig" +) + +// This example illustrates that the FuncMap *must* be set before the +// templates themselves are loaded. +tpl := template.Must( + template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html") +) +``` + +### Calling the functions inside of templates + +By convention, all functions are lowercase. This seems to follow the Go +idiom for template functions (as opposed to template methods, which are +TitleCase). For example, this: + +``` +{{ "hello!" | upper | repeat 5 }} +``` + +produces this: + +``` +HELLO!HELLO!HELLO!HELLO!HELLO! +``` + +## Principles Driving Our Function Selection + +We followed these principles to decide which functions to add and how to implement them: + +- Use template functions to build layout. The following + types of operations are within the domain of template functions: + - Formatting + - Layout + - Simple type conversions + - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) +- Template functions should not return errors unless there is no way to print + a sensible value. For example, converting a string to an integer should not + produce an error if conversion fails. Instead, it should display a default + value. +- Simple math is necessary for grid layouts, pagers, and so on. Complex math + (anything other than arithmetic) should be done outside of templates. +- Template functions only deal with the data passed into them. They never retrieve + data from a source. +- Finally, do not override core Go template functions. diff --git a/vendor/github.com/go-task/slim-sprig/Taskfile.yml b/vendor/github.com/go-task/slim-sprig/Taskfile.yml new file mode 100644 index 000000000..cdcfd223b --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/Taskfile.yml @@ -0,0 +1,12 @@ +# https://taskfile.dev + +version: '2' + +tasks: + default: + cmds: + - task: test + + test: + cmds: + - go test -v . 
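
For completeness, the README flow above as a self-contained program: this sketch swaps the README's `ParseGlob` call (which expects `*.html` files on disk) for an inline text template, so the documented pipe example renders without any fixtures. It assumes nothing beyond `TxtFuncMap` and the `upper`/`repeat` entries shown in `functions.go` below:

```go
package main

import (
	"os"
	"text/template"

	sprig "github.com/go-task/slim-sprig"
)

func main() {
	// Same pipeline as the README example: lowercase function names, with
	// the value piped left to right. Prints HELLO!HELLO!HELLO!HELLO!HELLO!
	tpl := template.Must(
		template.New("base").
			Funcs(sprig.TxtFuncMap()).
			Parse(`{{ "hello!" | upper | repeat 5 }}`),
	)
	if err := tpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}
```

As in the README, the `FuncMap` must be installed via `Funcs` before the template text is parsed, otherwise parsing fails on the unknown function names.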
diff --git a/vendor/github.com/go-task/slim-sprig/crypto.go b/vendor/github.com/go-task/slim-sprig/crypto.go new file mode 100644 index 000000000..d06e516d4 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/crypto.go @@ -0,0 +1,24 @@ +package sprig + +import ( + "crypto/sha1" + "crypto/sha256" + "encoding/hex" + "fmt" + "hash/adler32" +) + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} diff --git a/vendor/github.com/go-task/slim-sprig/date.go b/vendor/github.com/go-task/slim-sprig/date.go new file mode 100644 index 000000000..ed022ddac --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/date.go @@ -0,0 +1,152 @@ +package sprig + +import ( + "strconv" + "time" +) + +// Given a format and a date, format the date string. +// +// Date can be a `time.Time` or an `int, int32, int64`. +// In the later case, it is treated as seconds since UNIX +// epoch. +func date(fmt string, date interface{}) string { + return dateInZone(fmt, date, "Local") +} + +func htmlDate(date interface{}) string { + return dateInZone("2006-01-02", date, "Local") +} + +func htmlDateInZone(date interface{}, zone string) string { + return dateInZone("2006-01-02", date, zone) +} + +func dateInZone(fmt string, date interface{}, zone string) string { + var t time.Time + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case *time.Time: + t = *date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + case int32: + t = time.Unix(int64(date), 0) + } + + loc, err := time.LoadLocation(zone) + if err != nil { + loc, _ = time.LoadLocation("UTC") + } + + return t.In(loc).Format(fmt) +} + +func dateModify(fmt string, date time.Time) time.Time { + d, err := time.ParseDuration(fmt) + if err != nil { + return date + } + return date.Add(d) +} + +func mustDateModify(fmt string, date time.Time) (time.Time, error) { + d, err := time.ParseDuration(fmt) + if err != nil { + return time.Time{}, err + } + return date.Add(d), nil +} + +func dateAgo(date interface{}) string { + var t time.Time + + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + } + // Drop resolution to seconds + duration := time.Since(t).Round(time.Second) + return duration.String() +} + +func duration(sec interface{}) string { + var n int64 + switch value := sec.(type) { + default: + n = 0 + case string: + n, _ = strconv.ParseInt(value, 10, 64) + case int64: + n = value + } + return (time.Duration(n) * time.Second).String() +} + +func durationRound(duration interface{}) string { + var d time.Duration + switch duration := duration.(type) { + default: + d = 0 + case string: + d, _ = time.ParseDuration(duration) + case int64: + d = time.Duration(duration) + case time.Time: + d = time.Since(duration) + } + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + var ( + year = uint64(time.Hour) * 24 * 365 + month = uint64(time.Hour) * 24 * 30 + day = uint64(time.Hour) * 24 + hour = uint64(time.Hour) + minute = uint64(time.Minute) + second = uint64(time.Second) + ) + switch { + case u > year: + return strconv.FormatUint(u/year, 10) + "y" + case u > month: + return 
strconv.FormatUint(u/month, 10) + "mo" + case u > day: + return strconv.FormatUint(u/day, 10) + "d" + case u > hour: + return strconv.FormatUint(u/hour, 10) + "h" + case u > minute: + return strconv.FormatUint(u/minute, 10) + "m" + case u > second: + return strconv.FormatUint(u/second, 10) + "s" + } + return "0s" +} + +func toDate(fmt, str string) time.Time { + t, _ := time.ParseInLocation(fmt, str, time.Local) + return t +} + +func mustToDate(fmt, str string) (time.Time, error) { + return time.ParseInLocation(fmt, str, time.Local) +} + +func unixEpoch(date time.Time) string { + return strconv.FormatInt(date.Unix(), 10) +} diff --git a/vendor/github.com/go-task/slim-sprig/defaults.go b/vendor/github.com/go-task/slim-sprig/defaults.go new file mode 100644 index 000000000..b9f979666 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/defaults.go @@ -0,0 +1,163 @@ +package sprig + +import ( + "bytes" + "encoding/json" + "math/rand" + "reflect" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// dfault checks whether `given` is set, and returns default if not set. +// +// This returns `d` if `given` appears not to be set, and `given` otherwise. +// +// For numeric types 0 is unset. +// For strings, maps, arrays, and slices, len() = 0 is considered unset. +// For bool, false is unset. +// Structs are never considered unset. +// +// For everything else, including pointers, a nil value is unset. +func dfault(d interface{}, given ...interface{}) interface{} { + + if empty(given) || empty(given[0]) { + return d + } + return given[0] +} + +// empty returns true if the given value has the zero value for its type. +func empty(given interface{}) bool { + g := reflect.ValueOf(given) + if !g.IsValid() { + return true + } + + // Basically adapted from text/template.isTrue + switch g.Kind() { + default: + return g.IsNil() + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return g.Len() == 0 + case reflect.Bool: + return !g.Bool() + case reflect.Complex64, reflect.Complex128: + return g.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return g.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return g.Uint() == 0 + case reflect.Float32, reflect.Float64: + return g.Float() == 0 + case reflect.Struct: + return false + } +} + +// coalesce returns the first non-empty value. +func coalesce(v ...interface{}) interface{} { + for _, val := range v { + if !empty(val) { + return val + } + } + return nil +} + +// all returns true if empty(x) is false for all values x in the list. +// If the list is empty, return true. +func all(v ...interface{}) bool { + for _, val := range v { + if empty(val) { + return false + } + } + return true +} + +// any returns true if empty(x) is false for any x in the list. +// If the list is empty, return false. +func any(v ...interface{}) bool { + for _, val := range v { + if !empty(val) { + return true + } + } + return false +} + +// fromJson decodes JSON into a structured value, ignoring errors. +func fromJson(v string) interface{} { + output, _ := mustFromJson(v) + return output +} + +// mustFromJson decodes JSON into a structured value, returning errors. 
+func mustFromJson(v string) (interface{}, error) { + var output interface{} + err := json.Unmarshal([]byte(v), &output) + return output, err +} + +// toJson encodes an item into a JSON string +func toJson(v interface{}) string { + output, _ := json.Marshal(v) + return string(output) +} + +func mustToJson(v interface{}) (string, error) { + output, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(output), nil +} + +// toPrettyJson encodes an item into a pretty (indented) JSON string +func toPrettyJson(v interface{}) string { + output, _ := json.MarshalIndent(v, "", " ") + return string(output) +} + +func mustToPrettyJson(v interface{}) (string, error) { + output, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", err + } + return string(output), nil +} + +// toRawJson encodes an item into a JSON string with no escaping of HTML characters. +func toRawJson(v interface{}) string { + output, err := mustToRawJson(v) + if err != nil { + panic(err) + } + return string(output) +} + +// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. +func mustToRawJson(v interface{}) (string, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(&v) + if err != nil { + return "", err + } + return strings.TrimSuffix(buf.String(), "\n"), nil +} + +// ternary returns the first value if the last value is true, otherwise returns the second value. +func ternary(vt interface{}, vf interface{}, v bool) interface{} { + if v { + return vt + } + + return vf +} diff --git a/vendor/github.com/go-task/slim-sprig/dict.go b/vendor/github.com/go-task/slim-sprig/dict.go new file mode 100644 index 000000000..77ebc61b1 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/dict.go @@ -0,0 +1,118 @@ +package sprig + +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } + return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + continue + } + dict[key] = v[i+1] + } + return dict +} + +func values(dict map[string]interface{}) []interface{} { + values := []interface{}{} + for _, value := range 
dict { + values = append(values, value) + } + + return values +} + +func dig(ps ...interface{}) (interface{}, error) { + if len(ps) < 3 { + panic("dig needs at least three arguments") + } + dict := ps[len(ps)-1].(map[string]interface{}) + def := ps[len(ps)-2] + ks := make([]string, len(ps)-2) + for i := 0; i < len(ks); i++ { + ks[i] = ps[i].(string) + } + + return digFromDict(dict, def, ks) +} + +func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { + k, ns := ks[0], ks[1:len(ks)] + step, has := dict[k] + if !has { + return d, nil + } + if len(ns) == 0 { + return step, nil + } + return digFromDict(step.(map[string]interface{}), d, ns) +} diff --git a/vendor/github.com/go-task/slim-sprig/doc.go b/vendor/github.com/go-task/slim-sprig/doc.go new file mode 100644 index 000000000..aabb9d448 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/doc.go @@ -0,0 +1,19 @@ +/* +Package sprig provides template functions for Go. + +This package contains a number of utility functions for working with data +inside of Go `html/template` and `text/template` files. + +To add these functions, use the `template.Funcs()` method: + + t := templates.New("foo").Funcs(sprig.FuncMap()) + +Note that you should add the function map before you parse any template files. + + In several cases, Sprig reverses the order of arguments from the way they + appear in the standard library. This is to make it easier to pipe + arguments into functions. + +See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. +*/ +package sprig diff --git a/vendor/github.com/go-task/slim-sprig/functions.go b/vendor/github.com/go-task/slim-sprig/functions.go new file mode 100644 index 000000000..5ea74f899 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/functions.go @@ -0,0 +1,317 @@ +package sprig + +import ( + "errors" + "html/template" + "math/rand" + "os" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + ttemplate "text/template" + "time" +) + +// FuncMap produces the function map. +// +// Use this to pass the functions into the template engine: +// +// tpl := template.New("foo").Funcs(sprig.FuncMap())) +// +func FuncMap() template.FuncMap { + return HtmlFuncMap() +} + +// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. +func HermeticTxtFuncMap() ttemplate.FuncMap { + r := TxtFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. +func HermeticHtmlFuncMap() template.FuncMap { + r := HtmlFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// TxtFuncMap returns a 'text/template'.FuncMap +func TxtFuncMap() ttemplate.FuncMap { + return ttemplate.FuncMap(GenericFuncMap()) +} + +// HtmlFuncMap returns an 'html/template'.Funcmap +func HtmlFuncMap() template.FuncMap { + return template.FuncMap(GenericFuncMap()) +} + +// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. +func GenericFuncMap() map[string]interface{} { + gfm := make(map[string]interface{}, len(genericMap)) + for k, v := range genericMap { + gfm[k] = v + } + return gfm +} + +// These functions are not guaranteed to evaluate to the same result for given input, because they +// refer to the environment or global state. 
+var nonhermeticFunctions = []string{ + // Date functions + "date", + "date_in_zone", + "date_modify", + "now", + "htmlDate", + "htmlDateInZone", + "dateInZone", + "dateModify", + + // Strings + "randAlphaNum", + "randAlpha", + "randAscii", + "randNumeric", + "randBytes", + "uuidv4", + + // OS + "env", + "expandenv", + + // Network + "getHostByName", +} + +var genericMap = map[string]interface{}{ + "hello": func() string { return "Hello!" }, + + // Date functions + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "duration": duration, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": time.Now, + "toDate": toDate, + "unixEpoch": unixEpoch, + + // Strings + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. + "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "adler32sum": adler32sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "seq": seq, + "toDecimal": toDecimal, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. 
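+	// Illustrative pipelines:
+	//   {{ add 1 2 3 }} -> 6
+	//   {{ div 10 3 }}  -> 3  (integer division via toInt64)
+	//   {{ 5 | add1 }}  -> 6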
+ "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, + "biggest": max, + "max": max, + "min": min, + "maxf": maxf, + "minf": minf, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. + "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "all": all, + "any": any, + "compact": compact, + "mustCompact": mustCompact, + "fromJson": fromJson, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustFromJson": mustFromJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, + + // OS: + "env": os.Getenv, + "expandenv": os.ExpandEnv, + + // Network: + "getHostByName": getHostByName, + + // Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Filepaths: + "osBase": filepath.Base, + "osClean": filepath.Clean, + "osDir": filepath.Dir, + "osExt": filepath.Ext, + "osIsAbs": filepath.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. 
+ "list": list, + "dict": dict, + "get": get, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "values": values, + + "append": push, "push": push, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + "dig": dig, + "chunk": chunk, + "mustChunk": mustChunk, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + "regexQuoteMeta": regexQuoteMeta, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, +} diff --git a/vendor/github.com/go-task/slim-sprig/list.go b/vendor/github.com/go-task/slim-sprig/list.go new file mode 100644 index 000000000..ca0fbb789 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/list.go @@ -0,0 +1,464 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// Reflection is used in these functions so that slices and arrays of strings, +// ints, and other types not implementing []interface{} can be worked with. +// For example, this is useful if you need to work on the output of regexs. + +func list(v ...interface{}) []interface{} { + return v +} + +func push(list interface{}, v interface{}) []interface{} { + l, err := mustPush(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPush(list interface{}, v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append(nl, v), nil + + default: + return nil, fmt.Errorf("Cannot push on type %s", tp) + } +} + +func prepend(list interface{}, v interface{}) []interface{} { + l, err := mustPrepend(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { + //return append([]interface{}{v}, list...) 
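+	// The commented-out one-liner works only when list is already a
+	// []interface{}; the reflection path below also accepts typed slices
+	// and arrays such as []string or [3]int.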
+ + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append([]interface{}{v}, nl...), nil + + default: + return nil, fmt.Errorf("Cannot prepend on type %s", tp) + } +} + +func chunk(size int, list interface{}) [][]interface{} { + l, err := mustChunk(size, list) + if err != nil { + panic(err) + } + + return l +} + +func mustChunk(size int, list interface{}) ([][]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + + cs := int(math.Floor(float64(l-1)/float64(size)) + 1) + nl := make([][]interface{}, cs) + + for i := 0; i < cs; i++ { + clen := size + if i == cs-1 { + clen = int(math.Floor(math.Mod(float64(l), float64(size)))) + if clen == 0 { + clen = size + } + } + + nl[i] = make([]interface{}, clen) + + for j := 0; j < clen; j++ { + ix := i*size + j + nl[i][j] = l2.Index(ix).Interface() + } + } + + return nl, nil + + default: + return nil, fmt.Errorf("Cannot chunk type %s", tp) + } +} + +func last(list interface{}) interface{} { + l, err := mustLast(list) + if err != nil { + panic(err) + } + + return l +} + +func mustLast(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(l - 1).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find last on type %s", tp) + } +} + +func first(list interface{}) interface{} { + l, err := mustFirst(list) + if err != nil { + panic(err) + } + + return l +} + +func mustFirst(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(0).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find first on type %s", tp) + } +} + +func rest(list interface{}) []interface{} { + l, err := mustRest(list) + if err != nil { + panic(err) + } + + return l +} + +func mustRest(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 1; i < l; i++ { + nl[i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find rest on type %s", tp) + } +} + +func initial(list interface{}) []interface{} { + l, err := mustInitial(list) + if err != nil { + panic(err) + } + + return l +} + +func mustInitial(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 0; i < l-1; i++ { + nl[i] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find initial on type %s", tp) + } +} + +func sortAlpha(list interface{}) []string { + k := reflect.Indirect(reflect.ValueOf(list)).Kind() + switch k { + case reflect.Slice, reflect.Array: + a := strslice(list) + s := sort.StringSlice(a) + s.Sort() + return s + } + return []string{strval(list)} +} + +func reverse(v 
interface{}) []interface{} { + l, err := mustReverse(v) + if err != nil { + panic(err) + } + + return l +} + +func mustReverse(v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(v).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(v) + + l := l2.Len() + // We do not sort in place because the incoming array should not be altered. + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[l-i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find reverse on type %s", tp) + } +} + +func compact(list interface{}) []interface{} { + l, err := mustCompact(list) + if err != nil { + panic(err) + } + + return l +} + +func mustCompact(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !empty(item) { + nl = append(nl, item) + } + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot compact on type %s", tp) + } +} + +func uniq(list interface{}) []interface{} { + l, err := mustUniq(list) + if err != nil { + panic(err) + } + + return l +} + +func mustUniq(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + dest := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(dest, item) { + dest = append(dest, item) + } + } + + return dest, nil + default: + return nil, fmt.Errorf("Cannot find uniq on type %s", tp) + } +} + +func inList(haystack []interface{}, needle interface{}) bool { + for _, h := range haystack { + if reflect.DeepEqual(needle, h) { + return true + } + } + return false +} + +func without(list interface{}, omit ...interface{}) []interface{} { + l, err := mustWithout(list, omit...) + if err != nil { + panic(err) + } + + return l +} + +func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + res := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(omit, item) { + res = append(res, item) + } + } + + return res, nil + default: + return nil, fmt.Errorf("Cannot find without on type %s", tp) + } +} + +func has(needle interface{}, haystack interface{}) bool { + l, err := mustHas(needle, haystack) + if err != nil { + panic(err) + } + + return l +} + +func mustHas(needle interface{}, haystack interface{}) (bool, error) { + if haystack == nil { + return false, nil + } + tp := reflect.TypeOf(haystack).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(haystack) + var item interface{} + l := l2.Len() + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if reflect.DeepEqual(needle, item) { + return true, nil + } + } + + return false, nil + default: + return false, fmt.Errorf("Cannot find has on type %s", tp) + } +} + +// $list := [1, 2, 3, 4, 5] +// slice $list -> list[0:5] = list[:] +// slice $list 0 3 -> list[0:3] = list[:3] +// slice $list 3 5 -> list[3:5] +// slice $list 3 -> list[3:5] = list[3:] +func slice(list interface{}, indices ...interface{}) interface{} { + l, err := mustSlice(list, indices...) 
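+	// As with the other non-"must" helpers, any error (e.g. a non-slice
+	// argument) surfaces as a panic so slice can be used directly in
+	// templates.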
+ if err != nil { + panic(err) + } + + return l +} + +func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + var start, end int + if len(indices) > 0 { + start = toInt(indices[0]) + } + if len(indices) < 2 { + end = l + } else { + end = toInt(indices[1]) + } + + return l2.Slice(start, end).Interface(), nil + default: + return nil, fmt.Errorf("list should be type of slice or array but %s", tp) + } +} + +func concat(lists ...interface{}) interface{} { + var res []interface{} + for _, list := range lists { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + for i := 0; i < l2.Len(); i++ { + res = append(res, l2.Index(i).Interface()) + } + default: + panic(fmt.Sprintf("Cannot concat type %s as list", tp)) + } + } + return res +} diff --git a/vendor/github.com/go-task/slim-sprig/network.go b/vendor/github.com/go-task/slim-sprig/network.go new file mode 100644 index 000000000..108d78a94 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/network.go @@ -0,0 +1,12 @@ +package sprig + +import ( + "math/rand" + "net" +) + +func getHostByName(name string) string { + addrs, _ := net.LookupHost(name) + //TODO: add error handing when release v3 comes out + return addrs[rand.Intn(len(addrs))] +} diff --git a/vendor/github.com/go-task/slim-sprig/numeric.go b/vendor/github.com/go-task/slim-sprig/numeric.go new file mode 100644 index 000000000..98cbb37a1 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/numeric.go @@ -0,0 +1,228 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "strconv" + "strings" +) + +// toFloat64 converts 64-bit floats +func toFloat64(v interface{}) float64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseFloat(str, 64) + if err != nil { + return 0 + } + return iv + } + + val := reflect.Indirect(reflect.ValueOf(v)) + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return float64(val.Int()) + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return float64(val.Uint()) + case reflect.Uint, reflect.Uint64: + return float64(val.Uint()) + case reflect.Float32, reflect.Float64: + return val.Float() + case reflect.Bool: + if val.Bool() { + return 1 + } + return 0 + default: + return 0 + } +} + +func toInt(v interface{}) int { + //It's not optimal. Bud I don't want duplicate toInt64 code. + return int(toInt64(v)) +} + +// toInt64 converts integer types to 64-bit integers +func toInt64(v interface{}) int64 { + if str, ok := v.(string); ok { + iv, err := strconv.ParseInt(str, 10, 64) + if err != nil { + return 0 + } + return iv + } + + val := reflect.Indirect(reflect.ValueOf(v)) + switch val.Kind() { + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return val.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32: + return int64(val.Uint()) + case reflect.Uint, reflect.Uint64: + tv := val.Uint() + if tv <= math.MaxInt64 { + return int64(tv) + } + // TODO: What is the sensible thing to do here? 
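+		// For now, saturate: uint64 values above MaxInt64 clamp to
+		// MaxInt64 instead of wrapping around to a negative number.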
+ return math.MaxInt64 + case reflect.Float32, reflect.Float64: + return int64(val.Float()) + case reflect.Bool: + if val.Bool() { + return 1 + } + return 0 + default: + return 0 + } +} + +func max(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb > aa { + aa = bb + } + } + return aa +} + +func maxf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Max(aa, bb) + } + return aa +} + +func min(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb < aa { + aa = bb + } + } + return aa +} + +func minf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Min(aa, bb) + } + return aa +} + +func until(count int) []int { + step := 1 + if count < 0 { + step = -1 + } + return untilStep(0, count, step) +} + +func untilStep(start, stop, step int) []int { + v := []int{} + + if stop < start { + if step >= 0 { + return v + } + for i := start; i > stop; i += step { + v = append(v, i) + } + return v + } + + if step <= 0 { + return v + } + for i := start; i < stop; i += step { + v = append(v, i) + } + return v +} + +func floor(a interface{}) float64 { + aa := toFloat64(a) + return math.Floor(aa) +} + +func ceil(a interface{}) float64 { + aa := toFloat64(a) + return math.Ceil(aa) +} + +func round(a interface{}, p int, rOpt ...float64) float64 { + roundOn := .5 + if len(rOpt) > 0 { + roundOn = rOpt[0] + } + val := toFloat64(a) + places := toFloat64(p) + + var round float64 + pow := math.Pow(10, places) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + return round / pow +} + +// converts unix octal to decimal +func toDecimal(v interface{}) int64 { + result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) + if err != nil { + return 0 + } + return result +} + +func seq(params ...int) string { + increment := 1 + switch len(params) { + case 0: + return "" + case 1: + start := 1 + end := params[0] + if end < start { + increment = -1 + } + return intArrayToString(untilStep(start, end+increment, increment), " ") + case 3: + start := params[0] + end := params[2] + step := params[1] + if end < start { + increment = -1 + if step > 0 { + return "" + } + } + return intArrayToString(untilStep(start, end+increment, step), " ") + case 2: + start := params[0] + end := params[1] + step := 1 + if end < start { + step = -1 + } + return intArrayToString(untilStep(start, end+step, step), " ") + default: + return "" + } +} + +func intArrayToString(slice []int, delimeter string) string { + return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimeter), "[]") +} diff --git a/vendor/github.com/go-task/slim-sprig/reflect.go b/vendor/github.com/go-task/slim-sprig/reflect.go new file mode 100644 index 000000000..8a65c132f --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/reflect.go @@ -0,0 +1,28 @@ +package sprig + +import ( + "fmt" + "reflect" +) + +// typeIs returns true if the src is the type named in target. 
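+// Illustrative pipelines:
+//   {{ typeIs "int" 42 }}         -> true
+//   {{ kindIs "slice" (list 1) }} -> true
+// typeIsLike additionally matches a pointer to the named type.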
+func typeIs(target string, src interface{}) bool { + return target == typeOf(src) +} + +func typeIsLike(target string, src interface{}) bool { + t := typeOf(src) + return target == t || "*"+target == t +} + +func typeOf(src interface{}) string { + return fmt.Sprintf("%T", src) +} + +func kindIs(target string, src interface{}) bool { + return target == kindOf(src) +} + +func kindOf(src interface{}) string { + return reflect.ValueOf(src).Kind().String() +} diff --git a/vendor/github.com/go-task/slim-sprig/regex.go b/vendor/github.com/go-task/slim-sprig/regex.go new file mode 100644 index 000000000..fab551018 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/regex.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func mustRegexMatch(regex string, s string) (bool, error) { + return regexp.MatchString(regex, s) +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func mustRegexFindAll(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.FindAllString(s, n), nil +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func mustRegexFind(regex string, s string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.FindString(s), nil +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} + +func mustRegexSplit(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.Split(s, n), nil +} + +func regexQuoteMeta(s string) string { + return regexp.QuoteMeta(s) +} diff --git a/vendor/github.com/go-task/slim-sprig/strings.go b/vendor/github.com/go-task/slim-sprig/strings.go new file mode 100644 index 000000000..3c62d6b6f --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/strings.go @@ -0,0 +1,189 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func quote(str 
...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) string { + v = removeNilElements(v) + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) +} + +func indent(spaces int, v string) string { + pad := strings.Repeat(" ", spaces) + return pad + strings.Replace(v, "\n", "\n"+pad, -1) +} + +func nindent(spaces int, v string) string { + return "\n" + indent(spaces, v) +} + +func replace(old, new, src string) string { + return strings.Replace(src, old, new, -1) +} + +func plural(one, many string, count int) string { + if count == 1 { + return one + } + return many +} + +func strslice(v interface{}) []string { + switch v := v.(type) { + case []string: + return v + case []interface{}: + b := make([]string, 0, len(v)) + for _, s := range v { + if s != nil { + b = append(b, strval(s)) + } + } + return b + default: + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Array, reflect.Slice: + l := val.Len() + b := make([]string, 0, l) + for i := 0; i < l; i++ { + value := val.Index(i).Interface() + if value != nil { + b = append(b, strval(value)) + } + } + return b + default: + if v == nil { + return []string{} + } + + return []string{strval(v)} + } + } +} + +func removeNilElements(v []interface{}) []interface{} { + newSlice := make([]interface{}, 0, len(v)) + for _, i := range v { + if i != nil { + newSlice = append(newSlice, i) + } + } + return newSlice +} + +func strval(v interface{}) string { + switch v := v.(type) { + case string: + return v + case []byte: + return string(v) + case error: + return v.Error() + case fmt.Stringer: + return v.String() + default: + return fmt.Sprintf("%v", v) + } +} + +func trunc(c int, s string) string { + if c < 0 && len(s)+c > 0 { + return s[len(s)+c:] + } + if c >= 0 && len(s) > c { + return s[:c] + } + return s +} + +func join(sep string, v interface{}) string { + return strings.Join(strslice(v), sep) +} + +func split(sep, orig string) map[string]string { + parts := strings.Split(orig, sep) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +func splitn(sep string, n int, orig string) map[string]string { + parts := strings.SplitN(orig, sep, n) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +// substring creates a substring of the given string. +// +// If start is < 0, this calls string[:end]. +// +// If start is >= 0 and end < 0 or end bigger than s length, this calls string[start:] +// +// Otherwise, this calls string[start, end]. 
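+// Illustrative: substring 0 3 "hello" -> "hel";
+// substring -1 3 "hello" -> "hel"; substring 2 -1 "hello" -> "llo".
+// Indices are byte offsets, not rune offsets.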
+func substring(start, end int, s string) string { + if start < 0 { + return s[:end] + } + if end < 0 || end > len(s) { + return s[start:] + } + return s[start:end] +} diff --git a/vendor/github.com/go-task/slim-sprig/url.go b/vendor/github.com/go-task/slim-sprig/url.go new file mode 100644 index 000000000..b8e120e19 --- /dev/null +++ b/vendor/github.com/go-task/slim-sprig/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key] + if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedURL, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedURL.Scheme + dict["host"] = parsedURL.Host + dict["hostname"] = parsedURL.Hostname() + dict["path"] = parsedURL.Path + dict["query"] = parsedURL.RawQuery + dict["opaque"] = parsedURL.Opaque + dict["fragment"] = parsedURL.Fragment + if parsedURL.User != nil { + dict["userinfo"] = parsedURL.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given dict to URL string +func urlJoin(d map[string]interface{}) string { + resURL := url.URL{ + Scheme: dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo + if userinfo != "" { + tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempURL.User + } + + resURL.User = user + return resURL.String() +} diff --git a/vendor/github.com/google/pprof/AUTHORS b/vendor/github.com/google/pprof/AUTHORS new file mode 100644 index 000000000..fd736cb1c --- /dev/null +++ b/vendor/github.com/google/pprof/AUTHORS @@ -0,0 +1,7 @@ +# This is the official list of pprof authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. +# Names should be added to this file as: +# Name or Organization +# The email address is not required for organizations. +Google Inc. \ No newline at end of file diff --git a/vendor/github.com/google/pprof/CONTRIBUTORS b/vendor/github.com/google/pprof/CONTRIBUTORS new file mode 100644 index 000000000..8c8c37d2c --- /dev/null +++ b/vendor/github.com/google/pprof/CONTRIBUTORS @@ -0,0 +1,16 @@ +# People who have agreed to one of the CLAs and can contribute patches. +# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. 
+# +# https://developers.google.com/open-source/cla/individual +# https://developers.google.com/open-source/cla/corporate +# +# Names should be added to this file as: +# Name +Raul Silvera +Tipp Moseley +Hyoun Kyu Cho +Martin Spier +Taco de Wolff +Andrew Hunter diff --git a/vendor/k8s.io/apiserver/LICENSE b/vendor/github.com/google/pprof/LICENSE similarity index 100% rename from vendor/k8s.io/apiserver/LICENSE rename to vendor/github.com/google/pprof/LICENSE diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go new file mode 100644 index 000000000..ab7f03ae2 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -0,0 +1,567 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "errors" + "sort" +) + +func (p *Profile) decoder() []decoder { + return profileDecoder +} + +// preEncode populates the unexported fields to be used by encode +// (with suffix X) from the corresponding exported fields. The +// exported fields are cleared up to facilitate testing. +func (p *Profile) preEncode() { + strings := make(map[string]int) + addString(strings, "") + + for _, st := range p.SampleType { + st.typeX = addString(strings, st.Type) + st.unitX = addString(strings, st.Unit) + } + + for _, s := range p.Sample { + s.labelX = nil + var keys []string + for k := range s.Label { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + vs := s.Label[k] + for _, v := range vs { + s.labelX = append(s.labelX, + label{ + keyX: addString(strings, k), + strX: addString(strings, v), + }, + ) + } + } + var numKeys []string + for k := range s.NumLabel { + numKeys = append(numKeys, k) + } + sort.Strings(numKeys) + for _, k := range numKeys { + keyX := addString(strings, k) + vs := s.NumLabel[k] + units := s.NumUnit[k] + for i, v := range vs { + var unitX int64 + if len(units) != 0 { + unitX = addString(strings, units[i]) + } + s.labelX = append(s.labelX, + label{ + keyX: keyX, + numX: v, + unitX: unitX, + }, + ) + } + } + s.locationIDX = make([]uint64, len(s.Location)) + for i, loc := range s.Location { + s.locationIDX[i] = loc.ID + } + } + + for _, m := range p.Mapping { + m.fileX = addString(strings, m.File) + m.buildIDX = addString(strings, m.BuildID) + } + + for _, l := range p.Location { + for i, ln := range l.Line { + if ln.Function != nil { + l.Line[i].functionIDX = ln.Function.ID + } else { + l.Line[i].functionIDX = 0 + } + } + if l.Mapping != nil { + l.mappingIDX = l.Mapping.ID + } else { + l.mappingIDX = 0 + } + } + for _, f := range p.Function { + f.nameX = addString(strings, f.Name) + f.systemNameX = addString(strings, f.SystemName) + f.filenameX = addString(strings, f.Filename) + } + + p.dropFramesX = addString(strings, p.DropFrames) + p.keepFramesX = addString(strings, p.KeepFrames) + + if pt := p.PeriodType; pt != nil { + pt.typeX = addString(strings, pt.Type) + pt.unitX = addString(strings, pt.Unit) + } + + p.commentX 
= nil + for _, c := range p.Comments { + p.commentX = append(p.commentX, addString(strings, c)) + } + + p.defaultSampleTypeX = addString(strings, p.DefaultSampleType) + + p.stringTable = make([]string, len(strings)) + for s, i := range strings { + p.stringTable[i] = s + } +} + +func (p *Profile) encode(b *buffer) { + for _, x := range p.SampleType { + encodeMessage(b, 1, x) + } + for _, x := range p.Sample { + encodeMessage(b, 2, x) + } + for _, x := range p.Mapping { + encodeMessage(b, 3, x) + } + for _, x := range p.Location { + encodeMessage(b, 4, x) + } + for _, x := range p.Function { + encodeMessage(b, 5, x) + } + encodeStrings(b, 6, p.stringTable) + encodeInt64Opt(b, 7, p.dropFramesX) + encodeInt64Opt(b, 8, p.keepFramesX) + encodeInt64Opt(b, 9, p.TimeNanos) + encodeInt64Opt(b, 10, p.DurationNanos) + if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) { + encodeMessage(b, 11, p.PeriodType) + } + encodeInt64Opt(b, 12, p.Period) + encodeInt64s(b, 13, p.commentX) + encodeInt64(b, 14, p.defaultSampleTypeX) +} + +var profileDecoder = []decoder{ + nil, // 0 + // repeated ValueType sample_type = 1 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) + pp.SampleType = append(pp.SampleType, x) + return decodeMessage(b, x) + }, + // repeated Sample sample = 2 + func(b *buffer, m message) error { + x := new(Sample) + pp := m.(*Profile) + pp.Sample = append(pp.Sample, x) + return decodeMessage(b, x) + }, + // repeated Mapping mapping = 3 + func(b *buffer, m message) error { + x := new(Mapping) + pp := m.(*Profile) + pp.Mapping = append(pp.Mapping, x) + return decodeMessage(b, x) + }, + // repeated Location location = 4 + func(b *buffer, m message) error { + x := new(Location) + x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer + pp := m.(*Profile) + pp.Location = append(pp.Location, x) + err := decodeMessage(b, x) + var tmp []Line + x.Line = append(tmp, x.Line...) 
// Shrink to allocated size + return err + }, + // repeated Function function = 5 + func(b *buffer, m message) error { + x := new(Function) + pp := m.(*Profile) + pp.Function = append(pp.Function, x) + return decodeMessage(b, x) + }, + // repeated string string_table = 6 + func(b *buffer, m message) error { + err := decodeStrings(b, &m.(*Profile).stringTable) + if err != nil { + return err + } + if m.(*Profile).stringTable[0] != "" { + return errors.New("string_table[0] must be ''") + } + return nil + }, + // int64 drop_frames = 7 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) }, + // int64 keep_frames = 8 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) }, + // int64 time_nanos = 9 + func(b *buffer, m message) error { + if m.(*Profile).TimeNanos != 0 { + return errConcatProfile + } + return decodeInt64(b, &m.(*Profile).TimeNanos) + }, + // int64 duration_nanos = 10 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) }, + // ValueType period_type = 11 + func(b *buffer, m message) error { + x := new(ValueType) + pp := m.(*Profile) + pp.PeriodType = x + return decodeMessage(b, x) + }, + // int64 period = 12 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) }, + // repeated int64 comment = 13 + func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) }, + // int64 defaultSampleType = 14 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) }, +} + +// postDecode takes the unexported fields populated by decode (with +// suffix X) and populates the corresponding exported fields. +// The unexported fields are cleared up to facilitate testing. +func (p *Profile) postDecode() error { + var err error + mappings := make(map[uint64]*Mapping, len(p.Mapping)) + mappingIds := make([]*Mapping, len(p.Mapping)+1) + for _, m := range p.Mapping { + m.File, err = getString(p.stringTable, &m.fileX, err) + m.BuildID, err = getString(p.stringTable, &m.buildIDX, err) + if m.ID < uint64(len(mappingIds)) { + mappingIds[m.ID] = m + } else { + mappings[m.ID] = m + } + } + + functions := make(map[uint64]*Function, len(p.Function)) + functionIds := make([]*Function, len(p.Function)+1) + for _, f := range p.Function { + f.Name, err = getString(p.stringTable, &f.nameX, err) + f.SystemName, err = getString(p.stringTable, &f.systemNameX, err) + f.Filename, err = getString(p.stringTable, &f.filenameX, err) + if f.ID < uint64(len(functionIds)) { + functionIds[f.ID] = f + } else { + functions[f.ID] = f + } + } + + locations := make(map[uint64]*Location, len(p.Location)) + locationIds := make([]*Location, len(p.Location)+1) + for _, l := range p.Location { + if id := l.mappingIDX; id < uint64(len(mappingIds)) { + l.Mapping = mappingIds[id] + } else { + l.Mapping = mappings[id] + } + l.mappingIDX = 0 + for i, ln := range l.Line { + if id := ln.functionIDX; id != 0 { + l.Line[i].functionIDX = 0 + if id < uint64(len(functionIds)) { + l.Line[i].Function = functionIds[id] + } else { + l.Line[i].Function = functions[id] + } + } + } + if l.ID < uint64(len(locationIds)) { + locationIds[l.ID] = l + } else { + locations[l.ID] = l + } + } + + for _, st := range p.SampleType { + st.Type, err = getString(p.stringTable, &st.typeX, err) + st.Unit, err = getString(p.stringTable, &st.unitX, err) + } + + for _, s := range p.Sample { + labels := make(map[string][]string, len(s.labelX)) + numLabels := make(map[string][]int64, len(s.labelX)) + 
numUnits := make(map[string][]string, len(s.labelX)) + for _, l := range s.labelX { + var key, value string + key, err = getString(p.stringTable, &l.keyX, err) + if l.strX != 0 { + value, err = getString(p.stringTable, &l.strX, err) + labels[key] = append(labels[key], value) + } else if l.numX != 0 || l.unitX != 0 { + numValues := numLabels[key] + units := numUnits[key] + if l.unitX != 0 { + var unit string + unit, err = getString(p.stringTable, &l.unitX, err) + units = padStringArray(units, len(numValues)) + numUnits[key] = append(units, unit) + } + numLabels[key] = append(numLabels[key], l.numX) + } + } + if len(labels) > 0 { + s.Label = labels + } + if len(numLabels) > 0 { + s.NumLabel = numLabels + for key, units := range numUnits { + if len(units) > 0 { + numUnits[key] = padStringArray(units, len(numLabels[key])) + } + } + s.NumUnit = numUnits + } + s.Location = make([]*Location, len(s.locationIDX)) + for i, lid := range s.locationIDX { + if lid < uint64(len(locationIds)) { + s.Location[i] = locationIds[lid] + } else { + s.Location[i] = locations[lid] + } + } + s.locationIDX = nil + } + + p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err) + p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err) + + if pt := p.PeriodType; pt == nil { + p.PeriodType = &ValueType{} + } + + if pt := p.PeriodType; pt != nil { + pt.Type, err = getString(p.stringTable, &pt.typeX, err) + pt.Unit, err = getString(p.stringTable, &pt.unitX, err) + } + + for _, i := range p.commentX { + var c string + c, err = getString(p.stringTable, &i, err) + p.Comments = append(p.Comments, c) + } + + p.commentX = nil + p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err) + p.stringTable = nil + return err +} + +// padStringArray pads arr with enough empty strings to make arr +// length l when arr's length is less than l. +func padStringArray(arr []string, l int) []string { + if l <= len(arr) { + return arr + } + return append(arr, make([]string, l-len(arr))...) 
+} + +func (p *ValueType) decoder() []decoder { + return valueTypeDecoder +} + +func (p *ValueType) encode(b *buffer) { + encodeInt64Opt(b, 1, p.typeX) + encodeInt64Opt(b, 2, p.unitX) +} + +var valueTypeDecoder = []decoder{ + nil, // 0 + // optional int64 type = 1 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) }, + // optional int64 unit = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) }, +} + +func (p *Sample) decoder() []decoder { + return sampleDecoder +} + +func (p *Sample) encode(b *buffer) { + encodeUint64s(b, 1, p.locationIDX) + encodeInt64s(b, 2, p.Value) + for _, x := range p.labelX { + encodeMessage(b, 3, x) + } +} + +var sampleDecoder = []decoder{ + nil, // 0 + // repeated uint64 location = 1 + func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) }, + // repeated int64 value = 2 + func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) }, + // repeated Label label = 3 + func(b *buffer, m message) error { + s := m.(*Sample) + n := len(s.labelX) + s.labelX = append(s.labelX, label{}) + return decodeMessage(b, &s.labelX[n]) + }, +} + +func (p label) decoder() []decoder { + return labelDecoder +} + +func (p label) encode(b *buffer) { + encodeInt64Opt(b, 1, p.keyX) + encodeInt64Opt(b, 2, p.strX) + encodeInt64Opt(b, 3, p.numX) + encodeInt64Opt(b, 4, p.unitX) +} + +var labelDecoder = []decoder{ + nil, // 0 + // optional int64 key = 1 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) }, + // optional int64 str = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) }, + // optional int64 num = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) }, + // optional int64 num = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) }, +} + +func (p *Mapping) decoder() []decoder { + return mappingDecoder +} + +func (p *Mapping) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeUint64Opt(b, 2, p.Start) + encodeUint64Opt(b, 3, p.Limit) + encodeUint64Opt(b, 4, p.Offset) + encodeInt64Opt(b, 5, p.fileX) + encodeInt64Opt(b, 6, p.buildIDX) + encodeBoolOpt(b, 7, p.HasFunctions) + encodeBoolOpt(b, 8, p.HasFilenames) + encodeBoolOpt(b, 9, p.HasLineNumbers) + encodeBoolOpt(b, 10, p.HasInlineFrames) +} + +var mappingDecoder = []decoder{ + nil, // 0 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) }, // optional uint64 id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) }, // optional uint64 memory_offset = 2 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) }, // optional uint64 memory_limit = 3 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) }, // optional uint64 file_offset = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) }, // optional int64 filename = 5 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) }, // optional int64 build_id = 6 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) }, // optional bool has_functions = 7 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) }, // optional bool has_filenames = 8 + func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) }, // optional bool has_line_numbers = 9 + func(b *buffer, m message) error { return decodeBool(b, 
&m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10 +} + +func (p *Location) decoder() []decoder { + return locationDecoder +} + +func (p *Location) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeUint64Opt(b, 2, p.mappingIDX) + encodeUint64Opt(b, 3, p.Address) + for i := range p.Line { + encodeMessage(b, 4, &p.Line[i]) + } + encodeBoolOpt(b, 5, p.IsFolded) +} + +var locationDecoder = []decoder{ + nil, // 0 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) }, // optional uint64 id = 1; + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2; + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) }, // optional uint64 address = 3; + func(b *buffer, m message) error { // repeated Line line = 4 + pp := m.(*Location) + n := len(pp.Line) + pp.Line = append(pp.Line, Line{}) + return decodeMessage(b, &pp.Line[n]) + }, + func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5; +} + +func (p *Line) decoder() []decoder { + return lineDecoder +} + +func (p *Line) encode(b *buffer) { + encodeUint64Opt(b, 1, p.functionIDX) + encodeInt64Opt(b, 2, p.Line) +} + +var lineDecoder = []decoder{ + nil, // 0 + // optional uint64 function_id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) }, + // optional int64 line = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) }, +} + +func (p *Function) decoder() []decoder { + return functionDecoder +} + +func (p *Function) encode(b *buffer) { + encodeUint64Opt(b, 1, p.ID) + encodeInt64Opt(b, 2, p.nameX) + encodeInt64Opt(b, 3, p.systemNameX) + encodeInt64Opt(b, 4, p.filenameX) + encodeInt64Opt(b, 5, p.StartLine) +} + +var functionDecoder = []decoder{ + nil, // 0 + // optional uint64 id = 1 + func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) }, + // optional int64 function_name = 2 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) }, + // optional int64 function_system_name = 3 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) }, + // repeated int64 filename = 4 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) }, + // optional int64 start_line = 5 + func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) }, +} + +func addString(strings map[string]int, s string) int64 { + i, ok := strings[s] + if !ok { + i = len(strings) + strings[s] = i + } + return int64(i) +} + +func getString(strings []string, strng *int64, err error) (string, error) { + if err != nil { + return "", err + } + s := int(*strng) + if s < 0 || s >= len(strings) { + return "", errMalformed + } + *strng = 0 + return strings[s], nil +} diff --git a/vendor/github.com/google/pprof/profile/filter.go b/vendor/github.com/google/pprof/profile/filter.go new file mode 100644 index 000000000..ea8e66c68 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/filter.go @@ -0,0 +1,270 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +// Implements methods to filter samples from profiles. + +import "regexp" + +// FilterSamplesByName filters the samples in a profile and only keeps +// samples where at least one frame matches focus but none match ignore. +// Returns true is the corresponding regexp matched at least one sample. +func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) { + focusOrIgnore := make(map[uint64]bool) + hidden := make(map[uint64]bool) + for _, l := range p.Location { + if ignore != nil && l.matchesName(ignore) { + im = true + focusOrIgnore[l.ID] = false + } else if focus == nil || l.matchesName(focus) { + fm = true + focusOrIgnore[l.ID] = true + } + + if hide != nil && l.matchesName(hide) { + hm = true + l.Line = l.unmatchedLines(hide) + if len(l.Line) == 0 { + hidden[l.ID] = true + } + } + if show != nil { + l.Line = l.matchedLines(show) + if len(l.Line) == 0 { + hidden[l.ID] = true + } else { + hnm = true + } + } + } + + s := make([]*Sample, 0, len(p.Sample)) + for _, sample := range p.Sample { + if focusedAndNotIgnored(sample.Location, focusOrIgnore) { + if len(hidden) > 0 { + var locs []*Location + for _, loc := range sample.Location { + if !hidden[loc.ID] { + locs = append(locs, loc) + } + } + if len(locs) == 0 { + // Remove sample with no locations (by not adding it to s). + continue + } + sample.Location = locs + } + s = append(s, sample) + } + } + p.Sample = s + + return +} + +// ShowFrom drops all stack frames above the highest matching frame and returns +// whether a match was found. If showFrom is nil it returns false and does not +// modify the profile. +// +// Example: consider a sample with frames [A, B, C, B], where A is the root. +// ShowFrom(nil) returns false and has frames [A, B, C, B]. +// ShowFrom(A) returns true and has frames [A, B, C, B]. +// ShowFrom(B) returns true and has frames [B, C, B]. +// ShowFrom(C) returns true and has frames [C, B]. +// ShowFrom(D) returns false and drops the sample because no frames remain. +func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) { + if showFrom == nil { + return false + } + // showFromLocs stores location IDs that matched ShowFrom. + showFromLocs := make(map[uint64]bool) + // Apply to locations. + for _, loc := range p.Location { + if filterShowFromLocation(loc, showFrom) { + showFromLocs[loc.ID] = true + matched = true + } + } + // For all samples, strip locations after the highest matching one. + s := make([]*Sample, 0, len(p.Sample)) + for _, sample := range p.Sample { + for i := len(sample.Location) - 1; i >= 0; i-- { + if showFromLocs[sample.Location[i].ID] { + sample.Location = sample.Location[:i+1] + s = append(s, sample) + break + } + } + } + p.Sample = s + return matched +} + +// filterShowFromLocation tests a showFrom regex against a location, removes +// lines after the last match and returns whether a match was found. If the +// mapping is matched, then all lines are kept. 
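+// For example, a location whose Line slice is [l0, l1, l2] with showFrom
+// matching only l1 keeps [l0, l1]: entries after the last match are dropped.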
+func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool { + if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) { + return true + } + if i := loc.lastMatchedLineIndex(showFrom); i >= 0 { + loc.Line = loc.Line[:i+1] + return true + } + return false +} + +// lastMatchedLineIndex returns the index of the last line that matches a regex, +// or -1 if no match is found. +func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int { + for i := len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return i + } + } + } + return -1 +} + +// FilterTagsByName filters the tags in a profile and only keeps +// tags that match show and not hide. +func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) { + matchRemove := func(name string) bool { + matchShow := show == nil || show.MatchString(name) + matchHide := hide != nil && hide.MatchString(name) + + if matchShow { + sm = true + } + if matchHide { + hm = true + } + return !matchShow || matchHide + } + for _, s := range p.Sample { + for lab := range s.Label { + if matchRemove(lab) { + delete(s.Label, lab) + } + } + for lab := range s.NumLabel { + if matchRemove(lab) { + delete(s.NumLabel, lab) + } + } + } + return +} + +// matchesName returns whether the location matches the regular +// expression. It checks any available function names, file names, and +// mapping object filename. +func (loc *Location) matchesName(re *regexp.Regexp) bool { + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + return true + } + } + } + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return true + } + return false +} + +// unmatchedLines returns the lines in the location that do not match +// the regular expression. +func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return nil + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if re.MatchString(fn.Name) || re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// matchedLines returns the lines in the location that match +// the regular expression. +func (loc *Location) matchedLines(re *regexp.Regexp) []Line { + if m := loc.Mapping; m != nil && re.MatchString(m.File) { + return loc.Line + } + var lines []Line + for _, ln := range loc.Line { + if fn := ln.Function; fn != nil { + if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) { + continue + } + } + lines = append(lines, ln) + } + return lines +} + +// focusedAndNotIgnored looks up a slice of ids against a map of +// focused/ignored locations. The map only contains locations that are +// explicitly focused or ignored. Returns whether there is at least +// one focused location but no ignored locations. +func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool { + var f bool + for _, loc := range locs { + if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore { + if focus { + // Found focused location. Must keep searching in case there + // is an ignored one as well. + f = true + } else { + // Found ignored location. Can return false right away. 
+ return false + } + } + } + return f +} + +// TagMatch selects tags for filtering +type TagMatch func(s *Sample) bool + +// FilterSamplesByTag removes all samples from the profile, except +// those that match focus and do not match the ignore regular +// expression. +func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) { + samples := make([]*Sample, 0, len(p.Sample)) + for _, s := range p.Sample { + focused, ignored := true, false + if focus != nil { + focused = focus(s) + } + if ignore != nil { + ignored = ignore(s) + } + fm = fm || focused + im = im || ignored + if focused && !ignored { + samples = append(samples, s) + } + } + p.Sample = samples + return +} diff --git a/vendor/github.com/google/pprof/profile/index.go b/vendor/github.com/google/pprof/profile/index.go new file mode 100644 index 000000000..bef1d6046 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/index.go @@ -0,0 +1,64 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "fmt" + "strconv" + "strings" +) + +// SampleIndexByName returns the appropriate index for a value of sample index. +// If numeric, it returns the number, otherwise it looks up the text in the +// profile sample types. +func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) { + if sampleIndex == "" { + if dst := p.DefaultSampleType; dst != "" { + for i, t := range sampleTypes(p) { + if t == dst { + return i, nil + } + } + } + // By default select the last sample value + return len(p.SampleType) - 1, nil + } + if i, err := strconv.Atoi(sampleIndex); err == nil { + if i < 0 || i >= len(p.SampleType) { + return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1) + } + return i, nil + } + + // Remove the inuse_ prefix to support legacy pprof options + // "inuse_space" and "inuse_objects" for profiles containing types + // "space" and "objects". + noInuse := strings.TrimPrefix(sampleIndex, "inuse_") + for i, t := range p.SampleType { + if t.Type == sampleIndex || t.Type == noInuse { + return i, nil + } + } + + return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p)) +} + +func sampleTypes(p *Profile) []string { + types := make([]string, len(p.SampleType)) + for i, t := range p.SampleType { + types[i] = t.Type + } + return types +} diff --git a/vendor/github.com/google/pprof/profile/legacy_java_profile.go b/vendor/github.com/google/pprof/profile/legacy_java_profile.go new file mode 100644 index 000000000..91f45e53c --- /dev/null +++ b/vendor/github.com/google/pprof/profile/legacy_java_profile.go @@ -0,0 +1,315 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert java legacy profiles into +// the profile.proto format. + +package profile + +import ( + "bytes" + "fmt" + "io" + "path/filepath" + "regexp" + "strconv" + "strings" +) + +var ( + attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`) + javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`) + javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`) + javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`) + javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`) +) + +// javaCPUProfile returns a new Profile from profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}}, + } + var err error + var locs map[uint64]*Location + if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil { + return nil, err + } + + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaProfile returns a new profile from heapz or contentionz +// data. b is the profile bytes after the header. +func parseJavaProfile(b []byte) (*Profile, error) { + h := bytes.SplitAfterN(b, []byte("\n"), 2) + if len(h) < 2 { + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{}, + } + header := string(bytes.TrimSpace(h[0])) + + var err error + var pType string + switch header { + case "--- heapz 1 ---": + pType = "heap" + case "--- contentionz 1 ---": + pType = "contention" + default: + return nil, errUnrecognized + } + + if b, err = parseJavaHeader(pType, h[1], p); err != nil { + return nil, err + } + var locs map[uint64]*Location + if b, locs, err = parseJavaSamples(pType, b, p); err != nil { + return nil, err + } + if err = parseJavaLocations(b, locs, p); err != nil { + return nil, err + } + + // Strip out addresses for better merge. + if err = p.Aggregate(true, true, true, true, false); err != nil { + return nil, err + } + + return p, nil +} + +// parseJavaHeader parses the attribute section on a java profile and +// populates a profile. Returns the remainder of the buffer after all +// attributes. +func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) { + nextNewLine := bytes.IndexByte(b, byte('\n')) + for nextNewLine != -1 { + line := string(bytes.TrimSpace(b[0:nextNewLine])) + if line != "" { + h := attributeRx.FindStringSubmatch(line) + if h == nil { + // Not a valid attribute, exit. 
+ return b, nil
+ }
+
+ attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2])
+ var err error
+ switch pType + "/" + attribute {
+ case "heap/format", "cpu/format", "contention/format":
+ if value != "java" {
+ return nil, errUnrecognized
+ }
+ case "heap/resolution":
+ p.SampleType = []*ValueType{
+ {Type: "inuse_objects", Unit: "count"},
+ {Type: "inuse_space", Unit: value},
+ }
+ case "contention/resolution":
+ p.SampleType = []*ValueType{
+ {Type: "contentions", Unit: "count"},
+ {Type: "delay", Unit: value},
+ }
+ case "contention/sampling period":
+ p.PeriodType = &ValueType{
+ Type: "contentions", Unit: "count",
+ }
+ if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil {
+ return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
+ }
+ case "contention/ms since reset":
+ millis, err := strconv.ParseInt(value, 0, 64)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
+ }
+ p.DurationNanos = millis * 1000 * 1000
+ default:
+ return nil, errUnrecognized
+ }
+ }
+ // Grab next line.
+ b = b[nextNewLine+1:]
+ nextNewLine = bytes.IndexByte(b, byte('\n'))
+ }
+ return b, nil
+}
+
+// parseJavaSamples parses the samples from a java profile and
+// populates the Samples in a profile. Returns the remainder of the
+// buffer after the samples.
+func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) {
+ nextNewLine := bytes.IndexByte(b, byte('\n'))
+ locs := make(map[uint64]*Location)
+ for nextNewLine != -1 {
+ line := string(bytes.TrimSpace(b[0:nextNewLine]))
+ if line != "" {
+ sample := javaSampleRx.FindStringSubmatch(line)
+ if sample == nil {
+ // Not a valid sample, exit.
+ return b, locs, nil
+ }
+
+ // Java profiles have data/fields inverted compared to other
+ // profile types.
+ var err error
+ value1, value2, value3 := sample[2], sample[1], sample[3]
+ addrs, err := parseHexAddresses(value3)
+ if err != nil {
+ return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
+ }
+
+ var sloc []*Location
+ for _, addr := range addrs {
+ loc := locs[addr]
+ if locs[addr] == nil {
+ loc = &Location{
+ Address: addr,
+ }
+ p.Location = append(p.Location, loc)
+ locs[addr] = loc
+ }
+ sloc = append(sloc, loc)
+ }
+ s := &Sample{
+ Value: make([]int64, 2),
+ Location: sloc,
+ }
+
+ if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil {
+ return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+ }
+ if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil {
+ return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
+ }
+
+ switch pType {
+ case "heap":
+ const javaHeapzSamplingRate = 524288 // 512K
+ if s.Value[0] == 0 {
+ return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line)
+ }
+ s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}}
+ s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate)
+ case "contention":
+ if period := p.Period; period != 0 {
+ s.Value[0] = s.Value[0] * p.Period
+ s.Value[1] = s.Value[1] * p.Period
+ }
+ }
+ p.Sample = append(p.Sample, s)
+ }
+ // Grab next line.
+ b = b[nextNewLine+1:]
+ nextNewLine = bytes.IndexByte(b, byte('\n'))
+ }
+ return b, locs, nil
+}
+
+// parseJavaLocations parses the location information in a java
+// profile and populates the Locations in a profile. It uses the
+// location addresses from the profile as the ID of each location.
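+//
+// As an editor's illustration (the addresses and names below are
+// invented), the location lines this parser accepts look like:
+//
+//   0x00000000abcd1234 com.example.Foo.bar (Foo.java:42)
+//   0x00000000abcd5678 JNI_OnLoad (/usr/lib/libnative.so)
+//   0x00000000abcd9abc GC
+//
+// The first form yields a function with file and line data, the second
+// resolves to the shared object's basename, and anything else is kept
+// as a bare function name such as the agent's internal "GC" state.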
+func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error { + r := bytes.NewBuffer(b) + fns := make(map[string]*Function) + for { + line, err := r.ReadString('\n') + if err != nil { + if err != io.EOF { + return err + } + if line == "" { + break + } + } + + if line = strings.TrimSpace(line); line == "" { + continue + } + + jloc := javaLocationRx.FindStringSubmatch(line) + if len(jloc) != 3 { + continue + } + addr, err := strconv.ParseUint(jloc[1], 16, 64) + if err != nil { + return fmt.Errorf("parsing sample %s: %v", line, err) + } + loc := locs[addr] + if loc == nil { + // Unused/unseen + continue + } + var lineFunc, lineFile string + var lineNo int64 + + if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 { + // Found a line of the form: "function (file:line)" + lineFunc, lineFile = fileLine[1], fileLine[2] + if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 { + lineNo = n + } + } else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 { + // If there's not a file:line, it's a shared library path. + // The path isn't interesting, so just give the .so. + lineFunc, lineFile = filePath[1], filepath.Base(filePath[2]) + } else if strings.Contains(jloc[2], "generated stub/JIT") { + lineFunc = "STUB" + } else { + // Treat whole line as the function name. This is used by the + // java agent for internal states such as "GC" or "VM". + lineFunc = jloc[2] + } + fn := fns[lineFunc] + + if fn == nil { + fn = &Function{ + Name: lineFunc, + SystemName: lineFunc, + Filename: lineFile, + } + fns[lineFunc] = fn + p.Function = append(p.Function, fn) + } + loc.Line = []Line{ + { + Function: fn, + Line: lineNo, + }, + } + loc.Address = 0 + } + + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + + return nil +} diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go new file mode 100644 index 000000000..0c8f3bb5b --- /dev/null +++ b/vendor/github.com/google/pprof/profile/legacy_profile.go @@ -0,0 +1,1225 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file implements parsers to convert legacy profiles into the +// profile.proto format. 
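+//
+// Editor's orientation note (not upstream documentation): callers do
+// not invoke these parsers directly. ParseData in profile.go tries the
+// protobuf decoding first and falls back to the legacy parsers in this
+// file, so a typical caller only does something like:
+//
+//   f, err := os.Open("legacy.prof") // hypothetical file name
+//   if err != nil {
+//     log.Fatal(err)
+//   }
+//   defer f.Close()
+//   p, err := profile.Parse(f)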
+ +package profile + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "regexp" + "strconv" + "strings" +) + +var ( + countStartRE = regexp.MustCompile(`\A(\S+) profile: total \d+\z`) + countRE = regexp.MustCompile(`\A(\d+) @(( 0x[0-9a-f]+)+)\z`) + + heapHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] *@ *(heap[_a-z0-9]*)/?(\d*)`) + heapSampleRE = regexp.MustCompile(`(-?\d+): *(-?\d+) *\[ *(\d+): *(\d+) *] @([ x0-9a-f]*)`) + + contentionSampleRE = regexp.MustCompile(`(\d+) *(\d+) @([ x0-9a-f]*)`) + + hexNumberRE = regexp.MustCompile(`0x[0-9a-f]+`) + + growthHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ growthz?`) + + fragmentationHeaderRE = regexp.MustCompile(`heap profile: *(\d+): *(\d+) *\[ *(\d+): *(\d+) *\] @ fragmentationz?`) + + threadzStartRE = regexp.MustCompile(`--- threadz \d+ ---`) + threadStartRE = regexp.MustCompile(`--- Thread ([[:xdigit:]]+) \(name: (.*)/(\d+)\) stack: ---`) + + // Regular expressions to parse process mappings. Support the format used by Linux /proc/.../maps and other tools. + // Recommended format: + // Start End object file name offset(optional) linker build id + // 0x40000-0x80000 /path/to/binary (@FF00) abc123456 + spaceDigits = `\s+[[:digit:]]+` + hexPair = `\s+[[:xdigit:]]+:[[:xdigit:]]+` + oSpace = `\s*` + // Capturing expressions. + cHex = `(?:0x)?([[:xdigit:]]+)` + cHexRange = `\s*` + cHex + `[\s-]?` + oSpace + cHex + `:?` + cSpaceString = `(?:\s+(\S+))?` + cSpaceHex = `(?:\s+([[:xdigit:]]+))?` + cSpaceAtOffset = `(?:\s+\(@([[:xdigit:]]+)\))?` + cPerm = `(?:\s+([-rwxp]+))?` + + procMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceHex + hexPair + spaceDigits + cSpaceString) + briefMapsRE = regexp.MustCompile(`^` + cHexRange + cPerm + cSpaceString + cSpaceAtOffset + cSpaceHex) + + // Regular expression to parse log data, of the form: + // ... file:line] msg... + logInfoRE = regexp.MustCompile(`^[^\[\]]+:[0-9]+]\s`) +) + +func isSpaceOrComment(line string) bool { + trimmed := strings.TrimSpace(line) + return len(trimmed) == 0 || trimmed[0] == '#' +} + +// parseGoCount parses a Go count profile (e.g., threadcreate or +// goroutine) and returns a new Profile. +func parseGoCount(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip comments at the beginning of the file. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + if err := s.Err(); err != nil { + return nil, err + } + m := countStartRE.FindStringSubmatch(s.Text()) + if m == nil { + return nil, errUnrecognized + } + profileType := m[1] + p := &Profile{ + PeriodType: &ValueType{Type: profileType, Unit: "count"}, + Period: 1, + SampleType: []*ValueType{{Type: profileType, Unit: "count"}}, + } + locations := make(map[uint64]*Location) + for s.Scan() { + line := s.Text() + if isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + m := countRE.FindStringSubmatch(line) + if m == nil { + return nil, errMalformed + } + n, err := strconv.ParseInt(m[1], 0, 64) + if err != nil { + return nil, errMalformed + } + fields := strings.Fields(m[2]) + locs := make([]*Location, 0, len(fields)) + for _, stk := range fields { + addr, err := strconv.ParseUint(stk, 0, 64) + if err != nil { + return nil, errMalformed + } + // Adjust all frames by -1 to land on top of the call instruction. 
+ addr-- + loc := locations[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locations[addr] = loc + p.Location = append(p.Location, loc) + } + locs = append(locs, loc) + } + p.Sample = append(p.Sample, &Sample{ + Location: locs, + Value: []int64{n}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +// remapLocationIDs ensures there is a location for each address +// referenced by a sample, and remaps the samples to point to the new +// location ids. +func (p *Profile) remapLocationIDs() { + seen := make(map[*Location]bool, len(p.Location)) + var locs []*Location + + for _, s := range p.Sample { + for _, l := range s.Location { + if seen[l] { + continue + } + l.ID = uint64(len(locs) + 1) + locs = append(locs, l) + seen[l] = true + } + } + p.Location = locs +} + +func (p *Profile) remapFunctionIDs() { + seen := make(map[*Function]bool, len(p.Function)) + var fns []*Function + + for _, l := range p.Location { + for _, ln := range l.Line { + fn := ln.Function + if fn == nil || seen[fn] { + continue + } + fn.ID = uint64(len(fns) + 1) + fns = append(fns, fn) + seen[fn] = true + } + } + p.Function = fns +} + +// remapMappingIDs matches location addresses with existing mappings +// and updates them appropriately. This is O(N*M), if this ever shows +// up as a bottleneck, evaluate sorting the mappings and doing a +// binary search, which would make it O(N*log(M)). +func (p *Profile) remapMappingIDs() { + // Some profile handlers will incorrectly set regions for the main + // executable if its section is remapped. Fix them through heuristics. + + if len(p.Mapping) > 0 { + // Remove the initial mapping if named '/anon_hugepage' and has a + // consecutive adjacent mapping. + if m := p.Mapping[0]; strings.HasPrefix(m.File, "/anon_hugepage") { + if len(p.Mapping) > 1 && m.Limit == p.Mapping[1].Start { + p.Mapping = p.Mapping[1:] + } + } + } + + // Subtract the offset from the start of the main mapping if it + // ends up at a recognizable start address. + if len(p.Mapping) > 0 { + const expectedStart = 0x400000 + if m := p.Mapping[0]; m.Start-m.Offset == expectedStart { + m.Start = expectedStart + m.Offset = 0 + } + } + + // Associate each location with an address to the corresponding + // mapping. Create fake mapping if a suitable one isn't found. + var fake *Mapping +nextLocation: + for _, l := range p.Location { + a := l.Address + if l.Mapping != nil || a == 0 { + continue + } + for _, m := range p.Mapping { + if m.Start <= a && a < m.Limit { + l.Mapping = m + continue nextLocation + } + } + // Work around legacy handlers failing to encode the first + // part of mappings split into adjacent ranges. + for _, m := range p.Mapping { + if m.Offset != 0 && m.Start-m.Offset <= a && a < m.Start { + m.Start -= m.Offset + m.Offset = 0 + l.Mapping = m + continue nextLocation + } + } + // If there is still no mapping, create a fake one. + // This is important for the Go legacy handler, which produced + // no mappings. + if fake == nil { + fake = &Mapping{ + ID: 1, + Limit: ^uint64(0), + } + p.Mapping = append(p.Mapping, fake) + } + l.Mapping = fake + } + + // Reset all mapping IDs. 
+ for i, m := range p.Mapping { + m.ID = uint64(i + 1) + } +} + +var cpuInts = []func([]byte) (uint64, []byte){ + get32l, + get32b, + get64l, + get64b, +} + +func get32l(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24, b[4:] +} + +func get32b(b []byte) (uint64, []byte) { + if len(b) < 4 { + return 0, nil + } + return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24, b[4:] +} + +func get64l(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56, b[8:] +} + +func get64b(b []byte) (uint64, []byte) { + if len(b) < 8 { + return 0, nil + } + return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 | uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56, b[8:] +} + +// parseCPU parses a profilez legacy profile and returns a newly +// populated Profile. +// +// The general format for profilez samples is a sequence of words in +// binary format. The first words are a header with the following data: +// 1st word -- 0 +// 2nd word -- 3 +// 3rd word -- 0 if a c++ application, 1 if a java application. +// 4th word -- Sampling period (in microseconds). +// 5th word -- Padding. +func parseCPU(b []byte) (*Profile, error) { + var parse func([]byte) (uint64, []byte) + var n1, n2, n3, n4, n5 uint64 + for _, parse = range cpuInts { + var tmp []byte + n1, tmp = parse(b) + n2, tmp = parse(tmp) + n3, tmp = parse(tmp) + n4, tmp = parse(tmp) + n5, tmp = parse(tmp) + + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 0 && n4 > 0 && n5 == 0 { + b = tmp + return cpuProfile(b, int64(n4), parse) + } + if tmp != nil && n1 == 0 && n2 == 3 && n3 == 1 && n4 > 0 && n5 == 0 { + b = tmp + return javaCPUProfile(b, int64(n4), parse) + } + } + return nil, errUnrecognized +} + +// cpuProfile returns a new Profile from C++ profilez data. +// b is the profile bytes after the header, period is the profiling +// period, and parse is a function to parse 8-byte chunks from the +// profile in its native endianness. +func cpuProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) { + p := &Profile{ + Period: period * 1000, + PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"}, + SampleType: []*ValueType{ + {Type: "samples", Unit: "count"}, + {Type: "cpu", Unit: "nanoseconds"}, + }, + } + var err error + if b, _, err = parseCPUSamples(b, parse, true, p); err != nil { + return nil, err + } + + // If *most* samples have the same second-to-the-bottom frame, it + // strongly suggests that it is an uninteresting artifact of + // measurement -- a stack frame pushed by the signal handler. The + // bottom frame is always correct as it is picked up from the signal + // structure, not the stack. Check if this is the case and if so, + // remove. + + // Remove up to two frames. + maxiter := 2 + // Allow one different sample for this many samples with the same + // second-to-last frame. 
+ similarSamples := 32 + margin := len(p.Sample) / similarSamples + + for iter := 0; iter < maxiter; iter++ { + addr1 := make(map[uint64]int) + for _, s := range p.Sample { + if len(s.Location) > 1 { + a := s.Location[1].Address + addr1[a] = addr1[a] + 1 + } + } + + for id1, count := range addr1 { + if count >= len(p.Sample)-margin { + // Found uninteresting frame, strip it out from all samples + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[1].Address == id1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } + break + } + } + } + + if err := p.ParseMemoryMap(bytes.NewBuffer(b)); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +func cleanupDuplicateLocations(p *Profile) { + // The profile handler may duplicate the leaf frame, because it gets + // its address both from stack unwinding and from the signal + // context. Detect this and delete the duplicate, which has been + // adjusted by -1. The leaf address should not be adjusted as it is + // not a call. + for _, s := range p.Sample { + if len(s.Location) > 1 && s.Location[0].Address == s.Location[1].Address+1 { + s.Location = append(s.Location[:1], s.Location[2:]...) + } + } +} + +// parseCPUSamples parses a collection of profilez samples from a +// profile. +// +// profilez samples are a repeated sequence of stack frames of the +// form: +// 1st word -- The number of times this stack was encountered. +// 2nd word -- The size of the stack (StackSize). +// 3rd word -- The first address on the stack. +// ... +// StackSize + 2 -- The last address on the stack +// The last stack trace is of the form: +// 1st word -- 0 +// 2nd word -- 1 +// 3rd word -- 0 +// +// Addresses from stack traces may point to the next instruction after +// each call. Optionally adjust by -1 to land somewhere on the actual +// call (except for the leaf, which is not a call). +func parseCPUSamples(b []byte, parse func(b []byte) (uint64, []byte), adjust bool, p *Profile) ([]byte, map[uint64]*Location, error) { + locs := make(map[uint64]*Location) + for len(b) > 0 { + var count, nstk uint64 + count, b = parse(b) + nstk, b = parse(b) + if b == nil || nstk > uint64(len(b)/4) { + return nil, nil, errUnrecognized + } + var sloc []*Location + addrs := make([]uint64, nstk) + for i := 0; i < int(nstk); i++ { + addrs[i], b = parse(b) + } + + if count == 0 && nstk == 1 && addrs[0] == 0 { + // End of data marker + break + } + for i, addr := range addrs { + if adjust && i > 0 { + addr-- + } + loc := locs[addr] + if loc == nil { + loc = &Location{ + Address: addr, + } + locs[addr] = loc + p.Location = append(p.Location, loc) + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, + &Sample{ + Value: []int64{int64(count), int64(count) * p.Period}, + Location: sloc, + }) + } + // Reached the end without finding the EOD marker. + return b, locs, nil +} + +// parseHeap parses a heapz legacy or a growthz profile and +// returns a newly populated Profile. 
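+//
+// For orientation (an editor's sketch; the numbers are made up), a
+// heapz v2 input starts with a header followed by one line per sampled
+// stack:
+//
+//   heap profile: 1: 524288 [2: 1048576] @ heap_v2/524288
+//   1: 524288 [2: 1048576] @ 0x40001234 0x40005678
+//
+// i.e. "inuse_count: inuse_bytes [alloc_count: alloc_bytes]", with the
+// sampling rate in the header and the stack addresses in each sample.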
+func parseHeap(b []byte) (p *Profile, err error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + p = &Profile{} + + sampling := "" + hasAlloc := false + + line := s.Text() + p.PeriodType = &ValueType{Type: "space", Unit: "bytes"} + if header := heapHeaderRE.FindStringSubmatch(line); header != nil { + sampling, p.Period, hasAlloc, err = parseHeapHeader(line) + if err != nil { + return nil, err + } + } else if header = growthHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else if header = fragmentationHeaderRE.FindStringSubmatch(line); header != nil { + p.Period = 1 + } else { + return nil, errUnrecognized + } + + if hasAlloc { + // Put alloc before inuse so that default pprof selection + // will prefer inuse_space. + p.SampleType = []*ValueType{ + {Type: "alloc_objects", Unit: "count"}, + {Type: "alloc_space", Unit: "bytes"}, + {Type: "inuse_objects", Unit: "count"}, + {Type: "inuse_space", Unit: "bytes"}, + } + } else { + p.SampleType = []*ValueType{ + {Type: "objects", Unit: "count"}, + {Type: "space", Unit: "bytes"}, + } + } + + locs := make(map[uint64]*Location) + for s.Scan() { + line := strings.TrimSpace(s.Text()) + + if isSpaceOrComment(line) { + continue + } + + if isMemoryMapSentinel(line) { + break + } + + value, blocksize, addrs, err := parseHeapSample(line, p.Period, sampling, hasAlloc) + if err != nil { + return nil, err + } + + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + NumLabel: map[string][]int64{"bytes": {blocksize}}, + }) + } + if err := s.Err(); err != nil { + return nil, err + } + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + return p, nil +} + +func parseHeapHeader(line string) (sampling string, period int64, hasAlloc bool, err error) { + header := heapHeaderRE.FindStringSubmatch(line) + if header == nil { + return "", 0, false, errUnrecognized + } + + if len(header[6]) > 0 { + if period, err = strconv.ParseInt(header[6], 10, 64); err != nil { + return "", 0, false, errUnrecognized + } + } + + if (header[3] != header[1] && header[3] != "0") || (header[4] != header[2] && header[4] != "0") { + hasAlloc = true + } + + switch header[5] { + case "heapz_v2", "heap_v2": + return "v2", period, hasAlloc, nil + case "heapprofile": + return "", 1, hasAlloc, nil + case "heap": + return "v2", period / 2, hasAlloc, nil + default: + return "", 0, false, errUnrecognized + } +} + +// parseHeapSample parses a single row from a heap profile into a new Sample. +func parseHeapSample(line string, rate int64, sampling string, includeAlloc bool) (value []int64, blocksize int64, addrs []uint64, err error) { + sampleData := heapSampleRE.FindStringSubmatch(line) + if len(sampleData) != 6 { + return nil, 0, nil, fmt.Errorf("unexpected number of sample values: got %d, want 6", len(sampleData)) + } + + // This is a local-scoped helper function to avoid needing to pass + // around rate, sampling and many return parameters. 
+ addValues := func(countString, sizeString string, label string) error { + count, err := strconv.ParseInt(countString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + size, err := strconv.ParseInt(sizeString, 10, 64) + if err != nil { + return fmt.Errorf("malformed sample: %s: %v", line, err) + } + if count == 0 && size != 0 { + return fmt.Errorf("%s count was 0 but %s bytes was %d", label, label, size) + } + if count != 0 { + blocksize = size / count + if sampling == "v2" { + count, size = scaleHeapSample(count, size, rate) + } + } + value = append(value, count, size) + return nil + } + + if includeAlloc { + if err := addValues(sampleData[3], sampleData[4], "allocation"); err != nil { + return nil, 0, nil, err + } + } + + if err := addValues(sampleData[1], sampleData[2], "inuse"); err != nil { + return nil, 0, nil, err + } + + addrs, err = parseHexAddresses(sampleData[5]) + if err != nil { + return nil, 0, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, blocksize, addrs, nil +} + +// parseHexAddresses extracts hex numbers from a string, attempts to convert +// each to an unsigned 64-bit number and returns the resulting numbers as a +// slice, or an error if the string contains hex numbers which are too large to +// handle (which means a malformed profile). +func parseHexAddresses(s string) ([]uint64, error) { + hexStrings := hexNumberRE.FindAllString(s, -1) + var addrs []uint64 + for _, s := range hexStrings { + if addr, err := strconv.ParseUint(s, 0, 64); err == nil { + addrs = append(addrs, addr) + } else { + return nil, fmt.Errorf("failed to parse as hex 64-bit number: %s", s) + } + } + return addrs, nil +} + +// scaleHeapSample adjusts the data from a heapz Sample to +// account for its probability of appearing in the collected +// data. heapz profiles are a sampling of the memory allocations +// requests in a program. We estimate the unsampled value by dividing +// each collected sample by its probability of appearing in the +// profile. heapz v2 profiles rely on a poisson process to determine +// which samples to collect, based on the desired average collection +// rate R. The probability of a sample of size S to appear in that +// profile is 1-exp(-S/R). +func scaleHeapSample(count, size, rate int64) (int64, int64) { + if count == 0 || size == 0 { + return 0, 0 + } + + if rate <= 1 { + // if rate==1 all samples were collected so no adjustment is needed. + // if rate<1 treat as unknown and skip scaling. + return count, size + } + + avgSize := float64(size) / float64(count) + scale := 1 / (1 - math.Exp(-avgSize/float64(rate))) + + return int64(float64(count) * scale), int64(float64(size) * scale) +} + +// parseContention parses a mutex or contention profile. There are 2 cases: +// "--- contentionz " for legacy C++ profiles (and backwards compatibility) +// "--- mutex:" or "--- contention:" for profiles generated by the Go runtime. 
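+//
+// A sketch of the expected input (editor's illustration, values made
+// up):
+//
+//   --- mutex:
+//   cycles/second=2000000000
+//   sampling period=100
+//   43227965305 1659640 @ 0x45e851 0x45f764
+//
+// "key = value" attribute lines precede the samples; each sample line
+// carries the delay in cycles, the contention count, and the stack,
+// and is unsampled by parseContentionSample below using period and
+// cpuHz.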
+func parseContention(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + if !s.Scan() { + if err := s.Err(); err != nil { + return nil, err + } + return nil, errUnrecognized + } + + switch l := s.Text(); { + case strings.HasPrefix(l, "--- contentionz "): + case strings.HasPrefix(l, "--- mutex:"): + case strings.HasPrefix(l, "--- contention:"): + default: + return nil, errUnrecognized + } + + p := &Profile{ + PeriodType: &ValueType{Type: "contentions", Unit: "count"}, + Period: 1, + SampleType: []*ValueType{ + {Type: "contentions", Unit: "count"}, + {Type: "delay", Unit: "nanoseconds"}, + }, + } + + var cpuHz int64 + // Parse text of the form "attribute = value" before the samples. + const delimiter = "=" + for s.Scan() { + line := s.Text() + if line = strings.TrimSpace(line); isSpaceOrComment(line) { + continue + } + if strings.HasPrefix(line, "---") { + break + } + attr := strings.SplitN(line, delimiter, 2) + if len(attr) != 2 { + break + } + key, val := strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1]) + var err error + switch key { + case "cycles/second": + if cpuHz, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "sampling period": + if p.Period, err = strconv.ParseInt(val, 0, 64); err != nil { + return nil, errUnrecognized + } + case "ms since reset": + ms, err := strconv.ParseInt(val, 0, 64) + if err != nil { + return nil, errUnrecognized + } + p.DurationNanos = ms * 1000 * 1000 + case "format": + // CPP contentionz profiles don't have format. + return nil, errUnrecognized + case "resolution": + // CPP contentionz profiles don't have resolution. + return nil, errUnrecognized + case "discarded samples": + default: + return nil, errUnrecognized + } + } + if err := s.Err(); err != nil { + return nil, err + } + + locs := make(map[uint64]*Location) + for { + line := strings.TrimSpace(s.Text()) + if strings.HasPrefix(line, "---") { + break + } + if !isSpaceOrComment(line) { + value, addrs, err := parseContentionSample(line, p.Period, cpuHz) + if err != nil { + return nil, err + } + var sloc []*Location + for _, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call. + addr-- + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + p.Sample = append(p.Sample, &Sample{ + Value: value, + Location: sloc, + }) + } + if !s.Scan() { + break + } + } + if err := s.Err(); err != nil { + return nil, err + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + return p, nil +} + +// parseContentionSample parses a single row from a contention profile +// into a new Sample. +func parseContentionSample(line string, period, cpuHz int64) (value []int64, addrs []uint64, err error) { + sampleData := contentionSampleRE.FindStringSubmatch(line) + if sampleData == nil { + return nil, nil, errUnrecognized + } + + v1, err := strconv.ParseInt(sampleData[1], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + v2, err := strconv.ParseInt(sampleData[2], 10, 64) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + // Unsample values if period and cpuHz are available. + // - Delays are scaled to cycles and then to nanoseconds. + // - Contentions are scaled to cycles. 
+ if period > 0 { + if cpuHz > 0 { + cpuGHz := float64(cpuHz) / 1e9 + v1 = int64(float64(v1) * float64(period) / cpuGHz) + } + v2 = v2 * period + } + + value = []int64{v2, v1} + addrs, err = parseHexAddresses(sampleData[3]) + if err != nil { + return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + + return value, addrs, nil +} + +// parseThread parses a Threadz profile and returns a new Profile. +func parseThread(b []byte) (*Profile, error) { + s := bufio.NewScanner(bytes.NewBuffer(b)) + // Skip past comments and empty lines seeking a real header. + for s.Scan() && isSpaceOrComment(s.Text()) { + } + + line := s.Text() + if m := threadzStartRE.FindStringSubmatch(line); m != nil { + // Advance over initial comments until first stack trace. + for s.Scan() { + if line = s.Text(); isMemoryMapSentinel(line) || strings.HasPrefix(line, "-") { + break + } + } + } else if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + p := &Profile{ + SampleType: []*ValueType{{Type: "thread", Unit: "count"}}, + PeriodType: &ValueType{Type: "thread", Unit: "count"}, + Period: 1, + } + + locs := make(map[uint64]*Location) + // Recognize each thread and populate profile samples. + for !isMemoryMapSentinel(line) { + if strings.HasPrefix(line, "---- no stack trace for") { + line = "" + break + } + if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { + return nil, errUnrecognized + } + + var addrs []uint64 + var err error + line, addrs, err = parseThreadSample(s) + if err != nil { + return nil, err + } + if len(addrs) == 0 { + // We got a --same as previous threads--. Bump counters. + if len(p.Sample) > 0 { + s := p.Sample[len(p.Sample)-1] + s.Value[0]++ + } + continue + } + + var sloc []*Location + for i, addr := range addrs { + // Addresses from stack traces point to the next instruction after + // each call. Adjust by -1 to land somewhere on the actual call + // (except for the leaf, which is not a call). + if i > 0 { + addr-- + } + loc := locs[addr] + if locs[addr] == nil { + loc = &Location{ + Address: addr, + } + p.Location = append(p.Location, loc) + locs[addr] = loc + } + sloc = append(sloc, loc) + } + + p.Sample = append(p.Sample, &Sample{ + Value: []int64{1}, + Location: sloc, + }) + } + + if err := parseAdditionalSections(s, p); err != nil { + return nil, err + } + + cleanupDuplicateLocations(p) + return p, nil +} + +// parseThreadSample parses a symbolized or unsymbolized stack trace. +// Returns the first line after the traceback, the sample (or nil if +// it hits a 'same-as-previous' marker) and an error. +func parseThreadSample(s *bufio.Scanner) (nextl string, addrs []uint64, err error) { + var line string + sameAsPrevious := false + for s.Scan() { + line = strings.TrimSpace(s.Text()) + if line == "" { + continue + } + + if strings.HasPrefix(line, "---") { + break + } + if strings.Contains(line, "same as previous thread") { + sameAsPrevious = true + continue + } + + curAddrs, err := parseHexAddresses(line) + if err != nil { + return "", nil, fmt.Errorf("malformed sample: %s: %v", line, err) + } + addrs = append(addrs, curAddrs...) + } + if err := s.Err(); err != nil { + return "", nil, err + } + if sameAsPrevious { + return line, nil, nil + } + return line, addrs, nil +} + +// parseAdditionalSections parses any additional sections in the +// profile, ignoring any unrecognized sections. 
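+//
+// The sentinel and mapping lines it scans for look like (an editor's
+// example; the addresses and path are invented):
+//
+//   --- Memory map: ---
+//   7f1234560000-7f1234580000 r-xp 00000000 fd:01 12345 /usr/lib/libc.so.6
+//
+// Non-executable entries (no "x" permission) are later dropped by
+// parseMappingEntry.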
+func parseAdditionalSections(s *bufio.Scanner, p *Profile) error { + for !isMemoryMapSentinel(s.Text()) && s.Scan() { + } + if err := s.Err(); err != nil { + return err + } + return p.ParseMemoryMapFromScanner(s) +} + +// ParseProcMaps parses a memory map in the format of /proc/self/maps. +// ParseMemoryMap should be called after setting on a profile to +// associate locations to the corresponding mapping based on their +// address. +func ParseProcMaps(rd io.Reader) ([]*Mapping, error) { + s := bufio.NewScanner(rd) + return parseProcMapsFromScanner(s) +} + +func parseProcMapsFromScanner(s *bufio.Scanner) ([]*Mapping, error) { + var mapping []*Mapping + + var attrs []string + const delimiter = "=" + r := strings.NewReplacer() + for s.Scan() { + line := r.Replace(removeLoggingInfo(s.Text())) + m, err := parseMappingEntry(line) + if err != nil { + if err == errUnrecognized { + // Recognize assignments of the form: attr=value, and replace + // $attr with value on subsequent mappings. + if attr := strings.SplitN(line, delimiter, 2); len(attr) == 2 { + attrs = append(attrs, "$"+strings.TrimSpace(attr[0]), strings.TrimSpace(attr[1])) + r = strings.NewReplacer(attrs...) + } + // Ignore any unrecognized entries + continue + } + return nil, err + } + if m == nil { + continue + } + mapping = append(mapping, m) + } + if err := s.Err(); err != nil { + return nil, err + } + return mapping, nil +} + +// removeLoggingInfo detects and removes log prefix entries generated +// by the glog package. If no logging prefix is detected, the string +// is returned unmodified. +func removeLoggingInfo(line string) string { + if match := logInfoRE.FindStringIndex(line); match != nil { + return line[match[1]:] + } + return line +} + +// ParseMemoryMap parses a memory map in the format of +// /proc/self/maps, and overrides the mappings in the current profile. +// It renumbers the samples and locations in the profile correspondingly. +func (p *Profile) ParseMemoryMap(rd io.Reader) error { + return p.ParseMemoryMapFromScanner(bufio.NewScanner(rd)) +} + +// ParseMemoryMapFromScanner parses a memory map in the format of +// /proc/self/maps or a variety of legacy format, and overrides the +// mappings in the current profile. It renumbers the samples and +// locations in the profile correspondingly. +func (p *Profile) ParseMemoryMapFromScanner(s *bufio.Scanner) error { + mapping, err := parseProcMapsFromScanner(s) + if err != nil { + return err + } + p.Mapping = append(p.Mapping, mapping...) + p.massageMappings() + p.remapLocationIDs() + p.remapFunctionIDs() + p.remapMappingIDs() + return nil +} + +func parseMappingEntry(l string) (*Mapping, error) { + var start, end, perm, file, offset, buildID string + if me := procMapsRE.FindStringSubmatch(l); len(me) == 6 { + start, end, perm, offset, file = me[1], me[2], me[3], me[4], me[5] + } else if me := briefMapsRE.FindStringSubmatch(l); len(me) == 7 { + start, end, perm, file, offset, buildID = me[1], me[2], me[3], me[4], me[5], me[6] + } else { + return nil, errUnrecognized + } + + var err error + mapping := &Mapping{ + File: file, + BuildID: buildID, + } + if perm != "" && !strings.Contains(perm, "x") { + // Skip non-executable entries. 
+ return nil, nil + } + if mapping.Start, err = strconv.ParseUint(start, 16, 64); err != nil { + return nil, errUnrecognized + } + if mapping.Limit, err = strconv.ParseUint(end, 16, 64); err != nil { + return nil, errUnrecognized + } + if offset != "" { + if mapping.Offset, err = strconv.ParseUint(offset, 16, 64); err != nil { + return nil, errUnrecognized + } + } + return mapping, nil +} + +var memoryMapSentinels = []string{ + "--- Memory map: ---", + "MAPPED_LIBRARIES:", +} + +// isMemoryMapSentinel returns true if the string contains one of the +// known sentinels for memory map information. +func isMemoryMapSentinel(line string) bool { + for _, s := range memoryMapSentinels { + if strings.Contains(line, s) { + return true + } + } + return false +} + +func (p *Profile) addLegacyFrameInfo() { + switch { + case isProfileType(p, heapzSampleTypes): + p.DropFrames, p.KeepFrames = allocRxStr, allocSkipRxStr + case isProfileType(p, contentionzSampleTypes): + p.DropFrames, p.KeepFrames = lockRxStr, "" + default: + p.DropFrames, p.KeepFrames = cpuProfilerRxStr, "" + } +} + +var heapzSampleTypes = [][]string{ + {"allocations", "size"}, // early Go pprof profiles + {"objects", "space"}, + {"inuse_objects", "inuse_space"}, + {"alloc_objects", "alloc_space"}, + {"alloc_objects", "alloc_space", "inuse_objects", "inuse_space"}, // Go pprof legacy profiles +} +var contentionzSampleTypes = [][]string{ + {"contentions", "delay"}, +} + +func isProfileType(p *Profile, types [][]string) bool { + st := p.SampleType +nextType: + for _, t := range types { + if len(st) != len(t) { + continue + } + + for i := range st { + if st[i].Type != t[i] { + continue nextType + } + } + return true + } + return false +} + +var allocRxStr = strings.Join([]string{ + // POSIX entry points. + `calloc`, + `cfree`, + `malloc`, + `free`, + `memalign`, + `do_memalign`, + `(__)?posix_memalign`, + `pvalloc`, + `valloc`, + `realloc`, + + // TC malloc. + `tcmalloc::.*`, + `tc_calloc`, + `tc_cfree`, + `tc_malloc`, + `tc_free`, + `tc_memalign`, + `tc_posix_memalign`, + `tc_pvalloc`, + `tc_valloc`, + `tc_realloc`, + `tc_new`, + `tc_delete`, + `tc_newarray`, + `tc_deletearray`, + `tc_new_nothrow`, + `tc_newarray_nothrow`, + + // Memory-allocation routines on OS X. + `malloc_zone_malloc`, + `malloc_zone_calloc`, + `malloc_zone_valloc`, + `malloc_zone_realloc`, + `malloc_zone_memalign`, + `malloc_zone_free`, + + // Go runtime + `runtime\..*`, + + // Other misc. memory allocation routines + `BaseArena::.*`, + `(::)?do_malloc_no_errno`, + `(::)?do_malloc_pages`, + `(::)?do_malloc`, + `DoSampledAllocation`, + `MallocedMemBlock::MallocedMemBlock`, + `_M_allocate`, + `__builtin_(vec_)?delete`, + `__builtin_(vec_)?new`, + `__gnu_cxx::new_allocator::allocate`, + `__libc_malloc`, + `__malloc_alloc_template::allocate`, + `allocate`, + `cpp_alloc`, + `operator new(\[\])?`, + `simple_alloc::allocate`, +}, `|`) + +var allocSkipRxStr = strings.Join([]string{ + // Preserve Go runtime frames that appear in the middle/bottom of + // the stack. 
+ `runtime\.panic`, + `runtime\.reflectcall`, + `runtime\.call[0-9]*`, +}, `|`) + +var cpuProfilerRxStr = strings.Join([]string{ + `ProfileData::Add`, + `ProfileData::prof_handler`, + `CpuProfiler::prof_handler`, + `__pthread_sighandler`, + `__restore`, +}, `|`) + +var lockRxStr = strings.Join([]string{ + `RecordLockProfileData`, + `(base::)?RecordLockProfileData.*`, + `(base::)?SubmitMutexProfileData.*`, + `(base::)?SubmitSpinLockProfileData.*`, + `(base::Mutex::)?AwaitCommon.*`, + `(base::Mutex::)?Unlock.*`, + `(base::Mutex::)?UnlockSlow.*`, + `(base::Mutex::)?ReaderUnlock.*`, + `(base::MutexLock::)?~MutexLock.*`, + `(Mutex::)?AwaitCommon.*`, + `(Mutex::)?Unlock.*`, + `(Mutex::)?UnlockSlow.*`, + `(Mutex::)?ReaderUnlock.*`, + `(MutexLock::)?~MutexLock.*`, + `(SpinLock::)?Unlock.*`, + `(SpinLock::)?SlowUnlock.*`, + `(SpinLockHolder::)?~SpinLockHolder.*`, +}, `|`) diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go new file mode 100644 index 000000000..9978e7330 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -0,0 +1,481 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package profile + +import ( + "fmt" + "sort" + "strconv" + "strings" +) + +// Compact performs garbage collection on a profile to remove any +// unreferenced fields. This is useful to reduce the size of a profile +// after samples or locations have been removed. +func (p *Profile) Compact() *Profile { + p, _ = Merge([]*Profile{p}) + return p +} + +// Merge merges all the profiles in profs into a single Profile. +// Returns a new profile independent of the input profiles. The merged +// profile is compacted to eliminate unused samples, locations, +// functions and mappings. Profiles must have identical profile sample +// and period types or the merge will fail. profile.Period of the +// resulting profile will be the maximum of all profiles, and +// profile.TimeNanos will be the earliest nonzero one. Merges are +// associative with the caveat of the first profile having some +// specialization in how headers are combined. There may be other +// subtleties now or in the future regarding associativity. 
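+//
+// A minimal usage sketch (editor's illustration), merging two profiles
+// previously read with profile.Parse:
+//
+//   merged, err := profile.Merge([]*profile.Profile{p1, p2})
+//   if err != nil {
+//     log.Fatal(err) // e.g. mismatched sample or period types
+//   }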
+func Merge(srcs []*Profile) (*Profile, error) { + if len(srcs) == 0 { + return nil, fmt.Errorf("no profiles to merge") + } + p, err := combineHeaders(srcs) + if err != nil { + return nil, err + } + + pm := &profileMerger{ + p: p, + samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)), + locations: make(map[locationKey]*Location, len(srcs[0].Location)), + functions: make(map[functionKey]*Function, len(srcs[0].Function)), + mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)), + } + + for _, src := range srcs { + // Clear the profile-specific hash tables + pm.locationsByID = make(map[uint64]*Location, len(src.Location)) + pm.functionsByID = make(map[uint64]*Function, len(src.Function)) + pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) + + if len(pm.mappings) == 0 && len(src.Mapping) > 0 { + // The Mapping list has the property that the first mapping + // represents the main binary. Take the first Mapping we see, + // otherwise the operations below will add mappings in an + // arbitrary order. + pm.mapMapping(src.Mapping[0]) + } + + for _, s := range src.Sample { + if !isZeroSample(s) { + pm.mapSample(s) + } + } + } + + for _, s := range p.Sample { + if isZeroSample(s) { + // If there are any zero samples, re-merge the profile to GC + // them. + return Merge([]*Profile{p}) + } + } + + return p, nil +} + +// Normalize normalizes the source profile by multiplying each value in profile by the +// ratio of the sum of the base profile's values of that sample type to the sum of the +// source profile's value of that sample type. +func (p *Profile) Normalize(pb *Profile) error { + + if err := p.compatible(pb); err != nil { + return err + } + + baseVals := make([]int64, len(p.SampleType)) + for _, s := range pb.Sample { + for i, v := range s.Value { + baseVals[i] += v + } + } + + srcVals := make([]int64, len(p.SampleType)) + for _, s := range p.Sample { + for i, v := range s.Value { + srcVals[i] += v + } + } + + normScale := make([]float64, len(baseVals)) + for i := range baseVals { + if srcVals[i] == 0 { + normScale[i] = 0.0 + } else { + normScale[i] = float64(baseVals[i]) / float64(srcVals[i]) + } + } + p.ScaleN(normScale) + return nil +} + +func isZeroSample(s *Sample) bool { + for _, v := range s.Value { + if v != 0 { + return false + } + } + return true +} + +type profileMerger struct { + p *Profile + + // Memoization tables within a profile. + locationsByID map[uint64]*Location + functionsByID map[uint64]*Function + mappingsByID map[uint64]mapInfo + + // Memoization tables for profile entities. + samples map[sampleKey]*Sample + locations map[locationKey]*Location + functions map[functionKey]*Function + mappings map[mappingKey]*Mapping +} + +type mapInfo struct { + m *Mapping + offset int64 +} + +func (pm *profileMerger) mapSample(src *Sample) *Sample { + s := &Sample{ + Location: make([]*Location, len(src.Location)), + Value: make([]int64, len(src.Value)), + Label: make(map[string][]string, len(src.Label)), + NumLabel: make(map[string][]int64, len(src.NumLabel)), + NumUnit: make(map[string][]string, len(src.NumLabel)), + } + for i, l := range src.Location { + s.Location[i] = pm.mapLocation(l) + } + for k, v := range src.Label { + vv := make([]string, len(v)) + copy(vv, v) + s.Label[k] = vv + } + for k, v := range src.NumLabel { + u := src.NumUnit[k] + vv := make([]int64, len(v)) + uu := make([]string, len(u)) + copy(vv, v) + copy(uu, u) + s.NumLabel[k] = vv + s.NumUnit[k] = uu + } + // Check memoization table. 
Must be done on the remapped location to + // account for the remapped mapping. Add current values to the + // existing sample. + k := s.key() + if ss, ok := pm.samples[k]; ok { + for i, v := range src.Value { + ss.Value[i] += v + } + return ss + } + copy(s.Value, src.Value) + pm.samples[k] = s + pm.p.Sample = append(pm.p.Sample, s) + return s +} + +// key generates sampleKey to be used as a key for maps. +func (sample *Sample) key() sampleKey { + ids := make([]string, len(sample.Location)) + for i, l := range sample.Location { + ids[i] = strconv.FormatUint(l.ID, 16) + } + + labels := make([]string, 0, len(sample.Label)) + for k, v := range sample.Label { + labels = append(labels, fmt.Sprintf("%q%q", k, v)) + } + sort.Strings(labels) + + numlabels := make([]string, 0, len(sample.NumLabel)) + for k, v := range sample.NumLabel { + numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k])) + } + sort.Strings(numlabels) + + return sampleKey{ + strings.Join(ids, "|"), + strings.Join(labels, ""), + strings.Join(numlabels, ""), + } +} + +type sampleKey struct { + locations string + labels string + numlabels string +} + +func (pm *profileMerger) mapLocation(src *Location) *Location { + if src == nil { + return nil + } + + if l, ok := pm.locationsByID[src.ID]; ok { + return l + } + + mi := pm.mapMapping(src.Mapping) + l := &Location{ + ID: uint64(len(pm.p.Location) + 1), + Mapping: mi.m, + Address: uint64(int64(src.Address) + mi.offset), + Line: make([]Line, len(src.Line)), + IsFolded: src.IsFolded, + } + for i, ln := range src.Line { + l.Line[i] = pm.mapLine(ln) + } + // Check memoization table. Must be done on the remapped location to + // account for the remapped mapping ID. + k := l.key() + if ll, ok := pm.locations[k]; ok { + pm.locationsByID[src.ID] = ll + return ll + } + pm.locationsByID[src.ID] = l + pm.locations[k] = l + pm.p.Location = append(pm.p.Location, l) + return l +} + +// key generates locationKey to be used as a key for maps. +func (l *Location) key() locationKey { + key := locationKey{ + addr: l.Address, + isFolded: l.IsFolded, + } + if l.Mapping != nil { + // Normalizes address to handle address space randomization. + key.addr -= l.Mapping.Start + key.mappingID = l.Mapping.ID + } + lines := make([]string, len(l.Line)*2) + for i, line := range l.Line { + if line.Function != nil { + lines[i*2] = strconv.FormatUint(line.Function.ID, 16) + } + lines[i*2+1] = strconv.FormatInt(line.Line, 16) + } + key.lines = strings.Join(lines, "|") + return key +} + +type locationKey struct { + addr, mappingID uint64 + lines string + isFolded bool +} + +func (pm *profileMerger) mapMapping(src *Mapping) mapInfo { + if src == nil { + return mapInfo{} + } + + if mi, ok := pm.mappingsByID[src.ID]; ok { + return mi + } + + // Check memoization tables. + mk := src.key() + if m, ok := pm.mappings[mk]; ok { + mi := mapInfo{m, int64(m.Start) - int64(src.Start)} + pm.mappingsByID[src.ID] = mi + return mi + } + m := &Mapping{ + ID: uint64(len(pm.p.Mapping) + 1), + Start: src.Start, + Limit: src.Limit, + Offset: src.Offset, + File: src.File, + BuildID: src.BuildID, + HasFunctions: src.HasFunctions, + HasFilenames: src.HasFilenames, + HasLineNumbers: src.HasLineNumbers, + HasInlineFrames: src.HasInlineFrames, + } + pm.p.Mapping = append(pm.p.Mapping, m) + + // Update memoization tables. + pm.mappings[mk] = m + mi := mapInfo{m, 0} + pm.mappingsByID[src.ID] = mi + return mi +} + +// key generates encoded strings of Mapping to be used as a key for +// maps. 
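+//
+// Editor's worked example: a mapping with Start=0x400000 and
+// Limit=0x401850 has raw size 0x1850; adding 0xfff and truncating to
+// the 4K boundary yields 0x2000, so mappings whose sizes differ only
+// within the same 4K page produce the same key.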
+func (m *Mapping) key() mappingKey { + // Normalize addresses to handle address space randomization. + // Round up to next 4K boundary to avoid minor discrepancies. + const mapsizeRounding = 0x1000 + + size := m.Limit - m.Start + size = size + mapsizeRounding - 1 + size = size - (size % mapsizeRounding) + key := mappingKey{ + size: size, + offset: m.Offset, + } + + switch { + case m.BuildID != "": + key.buildIDOrFile = m.BuildID + case m.File != "": + key.buildIDOrFile = m.File + default: + // A mapping containing neither build ID nor file name is a fake mapping. A + // key with empty buildIDOrFile is used for fake mappings so that they are + // treated as the same mapping during merging. + } + return key +} + +type mappingKey struct { + size, offset uint64 + buildIDOrFile string +} + +func (pm *profileMerger) mapLine(src Line) Line { + ln := Line{ + Function: pm.mapFunction(src.Function), + Line: src.Line, + } + return ln +} + +func (pm *profileMerger) mapFunction(src *Function) *Function { + if src == nil { + return nil + } + if f, ok := pm.functionsByID[src.ID]; ok { + return f + } + k := src.key() + if f, ok := pm.functions[k]; ok { + pm.functionsByID[src.ID] = f + return f + } + f := &Function{ + ID: uint64(len(pm.p.Function) + 1), + Name: src.Name, + SystemName: src.SystemName, + Filename: src.Filename, + StartLine: src.StartLine, + } + pm.functions[k] = f + pm.functionsByID[src.ID] = f + pm.p.Function = append(pm.p.Function, f) + return f +} + +// key generates a struct to be used as a key for maps. +func (f *Function) key() functionKey { + return functionKey{ + f.StartLine, + f.Name, + f.SystemName, + f.Filename, + } +} + +type functionKey struct { + startLine int64 + name, systemName, fileName string +} + +// combineHeaders checks that all profiles can be merged and returns +// their combined profile. +func combineHeaders(srcs []*Profile) (*Profile, error) { + for _, s := range srcs[1:] { + if err := srcs[0].compatible(s); err != nil { + return nil, err + } + } + + var timeNanos, durationNanos, period int64 + var comments []string + seenComments := map[string]bool{} + var defaultSampleType string + for _, s := range srcs { + if timeNanos == 0 || s.TimeNanos < timeNanos { + timeNanos = s.TimeNanos + } + durationNanos += s.DurationNanos + if period == 0 || period < s.Period { + period = s.Period + } + for _, c := range s.Comments { + if seen := seenComments[c]; !seen { + comments = append(comments, c) + seenComments[c] = true + } + } + if defaultSampleType == "" { + defaultSampleType = s.DefaultSampleType + } + } + + p := &Profile{ + SampleType: make([]*ValueType, len(srcs[0].SampleType)), + + DropFrames: srcs[0].DropFrames, + KeepFrames: srcs[0].KeepFrames, + + TimeNanos: timeNanos, + DurationNanos: durationNanos, + PeriodType: srcs[0].PeriodType, + Period: period, + + Comments: comments, + DefaultSampleType: defaultSampleType, + } + copy(p.SampleType, srcs[0].SampleType) + return p, nil +} + +// compatible determines if two profiles can be compared/merged. +// returns nil if the profiles are compatible; otherwise an error with +// details on the incompatibility. 
+func (p *Profile) compatible(pb *Profile) error { + if !equalValueType(p.PeriodType, pb.PeriodType) { + return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType) + } + + if len(p.SampleType) != len(pb.SampleType) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + + for i := range p.SampleType { + if !equalValueType(p.SampleType[i], pb.SampleType[i]) { + return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType) + } + } + return nil +} + +// equalValueType returns true if the two value types are semantically +// equal. It ignores the internal fields used during encode/decode. +func equalValueType(st1, st2 *ValueType) bool { + return st1.Type == st2.Type && st1.Unit == st2.Unit +} diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go new file mode 100644 index 000000000..2590c8ddb --- /dev/null +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -0,0 +1,805 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package profile provides a representation of profile.proto and +// methods to encode/decode profiles in this format. +package profile + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "math" + "path/filepath" + "regexp" + "sort" + "strings" + "sync" + "time" +) + +// Profile is an in-memory representation of profile.proto. +type Profile struct { + SampleType []*ValueType + DefaultSampleType string + Sample []*Sample + Mapping []*Mapping + Location []*Location + Function []*Function + Comments []string + + DropFrames string + KeepFrames string + + TimeNanos int64 + DurationNanos int64 + PeriodType *ValueType + Period int64 + + // The following fields are modified during encoding and copying, + // so are protected by a Mutex. 
+ encodeMu sync.Mutex + + commentX []int64 + dropFramesX int64 + keepFramesX int64 + stringTable []string + defaultSampleTypeX int64 +} + +// ValueType corresponds to Profile.ValueType +type ValueType struct { + Type string // cpu, wall, inuse_space, etc + Unit string // seconds, nanoseconds, bytes, etc + + typeX int64 + unitX int64 +} + +// Sample corresponds to Profile.Sample +type Sample struct { + Location []*Location + Value []int64 + Label map[string][]string + NumLabel map[string][]int64 + NumUnit map[string][]string + + locationIDX []uint64 + labelX []label +} + +// label corresponds to Profile.Label +type label struct { + keyX int64 + // Exactly one of the two following values must be set + strX int64 + numX int64 // Integer value for this label + // can be set if numX has value + unitX int64 +} + +// Mapping corresponds to Profile.Mapping +type Mapping struct { + ID uint64 + Start uint64 + Limit uint64 + Offset uint64 + File string + BuildID string + HasFunctions bool + HasFilenames bool + HasLineNumbers bool + HasInlineFrames bool + + fileX int64 + buildIDX int64 +} + +// Location corresponds to Profile.Location +type Location struct { + ID uint64 + Mapping *Mapping + Address uint64 + Line []Line + IsFolded bool + + mappingIDX uint64 +} + +// Line corresponds to Profile.Line +type Line struct { + Function *Function + Line int64 + + functionIDX uint64 +} + +// Function corresponds to Profile.Function +type Function struct { + ID uint64 + Name string + SystemName string + Filename string + StartLine int64 + + nameX int64 + systemNameX int64 + filenameX int64 +} + +// Parse parses a profile and checks for its validity. The input +// may be a gzip-compressed encoded protobuf or one of many legacy +// profile formats which may be unsupported in the future. +func Parse(r io.Reader) (*Profile, error) { + data, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + return ParseData(data) +} + +// ParseData parses a profile from a buffer and checks for its +// validity. +func ParseData(data []byte) (*Profile, error) { + var p *Profile + var err error + if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err == nil { + data, err = ioutil.ReadAll(gz) + } + if err != nil { + return nil, fmt.Errorf("decompressing profile: %v", err) + } + } + if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile { + p, err = parseLegacy(data) + } + + if err != nil { + return nil, fmt.Errorf("parsing profile: %v", err) + } + + if err := p.CheckValid(); err != nil { + return nil, fmt.Errorf("malformed profile: %v", err) + } + return p, nil +} + +var errUnrecognized = fmt.Errorf("unrecognized profile format") +var errMalformed = fmt.Errorf("malformed profile format") +var errNoData = fmt.Errorf("empty input file") +var errConcatProfile = fmt.Errorf("concatenated profiles detected") + +func parseLegacy(data []byte) (*Profile, error) { + parsers := []func([]byte) (*Profile, error){ + parseCPU, + parseHeap, + parseGoCount, // goroutine, threadcreate + parseThread, + parseContention, + parseJavaProfile, + } + + for _, parser := range parsers { + p, err := parser(data) + if err == nil { + p.addLegacyFrameInfo() + return p, nil + } + if err != errUnrecognized { + return nil, err + } + } + return nil, errUnrecognized +} + +// ParseUncompressed parses an uncompressed protobuf into a profile. 
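+// Most callers use Parse or ParseData instead, which transparently handle
+// the optional gzip wrapper. A hypothetical usage sketch (the file name and
+// error handling are placeholders):
+//
+//	f, err := os.Open("cpu.pprof")
+//	if err != nil {
+//		// handle error
+//	}
+//	defer f.Close()
+//	p, err := profile.Parse(f)
+//	if err != nil {
+//		// handle error
+//	}
+//	fmt.Println(p.String())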
+func ParseUncompressed(data []byte) (*Profile, error) {
+ if len(data) == 0 {
+ return nil, errNoData
+ }
+ p := &Profile{}
+ if err := unmarshal(data, p); err != nil {
+ return nil, err
+ }
+
+ if err := p.postDecode(); err != nil {
+ return nil, err
+ }
+
+ return p, nil
+}
+
+var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`)
+
+// massageMappings applies heuristic-based changes to the profile
+// mappings to account for quirks of some environments.
+func (p *Profile) massageMappings() {
+ // Merge adjacent regions with matching names, checking that the offsets match
+ if len(p.Mapping) > 1 {
+ mappings := []*Mapping{p.Mapping[0]}
+ for _, m := range p.Mapping[1:] {
+ lm := mappings[len(mappings)-1]
+ if adjacent(lm, m) {
+ lm.Limit = m.Limit
+ if m.File != "" {
+ lm.File = m.File
+ }
+ if m.BuildID != "" {
+ lm.BuildID = m.BuildID
+ }
+ p.updateLocationMapping(m, lm)
+ continue
+ }
+ mappings = append(mappings, m)
+ }
+ p.Mapping = mappings
+ }
+
+ // Use heuristics to identify main binary and move it to the top of the list of mappings
+ for i, m := range p.Mapping {
+ file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1))
+ if len(file) == 0 {
+ continue
+ }
+ if len(libRx.FindStringSubmatch(file)) > 0 {
+ continue
+ }
+ if file[0] == '[' {
+ continue
+ }
+ // Swap what we guess is main to position 0.
+ p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0]
+ break
+ }
+
+ // Keep the mapping IDs neatly sorted
+ for i, m := range p.Mapping {
+ m.ID = uint64(i + 1)
+ }
+}
+
+// adjacent returns whether two mapping entries represent the same
+// mapping that has been split into two. Check that their addresses are adjacent,
+// and that their offsets match, if they are available.
+func adjacent(m1, m2 *Mapping) bool {
+ if m1.File != "" && m2.File != "" {
+ if m1.File != m2.File {
+ return false
+ }
+ }
+ if m1.BuildID != "" && m2.BuildID != "" {
+ if m1.BuildID != m2.BuildID {
+ return false
+ }
+ }
+ if m1.Limit != m2.Start {
+ return false
+ }
+ if m1.Offset != 0 && m2.Offset != 0 {
+ offset := m1.Offset + (m1.Limit - m1.Start)
+ if offset != m2.Offset {
+ return false
+ }
+ }
+ return true
+}
+
+func (p *Profile) updateLocationMapping(from, to *Mapping) {
+ for _, l := range p.Location {
+ if l.Mapping == from {
+ l.Mapping = to
+ }
+ }
+}
+
+func serialize(p *Profile) []byte {
+ p.encodeMu.Lock()
+ p.preEncode()
+ b := marshal(p)
+ p.encodeMu.Unlock()
+ return b
+}
+
+// Write writes the profile as a gzip-compressed marshaled protobuf.
+func (p *Profile) Write(w io.Writer) error {
+ zw := gzip.NewWriter(w)
+ defer zw.Close()
+ _, err := zw.Write(serialize(p))
+ return err
+}
+
+// WriteUncompressed writes the profile as a marshaled protobuf.
+func (p *Profile) WriteUncompressed(w io.Writer) error {
+ _, err := w.Write(serialize(p))
+ return err
+}
+
+// CheckValid tests whether the profile is valid. Checks include, but are
+// not limited to:
+//   - len(Profile.Sample[n].value) == len(Profile.value_unit)
+//   - Sample.id has a corresponding Profile.Location
+func (p *Profile) CheckValid() error {
+ // Check that sample values are consistent
+ sampleLen := len(p.SampleType)
+ if sampleLen == 0 && len(p.Sample) != 0 {
+ return fmt.Errorf("missing sample type information")
+ }
+ for _, s := range p.Sample {
+ if s == nil {
+ return fmt.Errorf("profile has nil sample")
+ }
+ if len(s.Value) != sampleLen {
+ return fmt.Errorf("mismatch: sample has %d values vs. %d types", len(s.Value), len(p.SampleType))
+ }
+ for _, l := range s.Location {
+ if l == nil {
+ return fmt.Errorf("sample has nil location")
+ }
+ }
+ }
+
+ // Check that all mappings/locations/functions are in the tables
+ // Check that there are no duplicate ids
+ mappings := make(map[uint64]*Mapping, len(p.Mapping))
+ for _, m := range p.Mapping {
+ if m == nil {
+ return fmt.Errorf("profile has nil mapping")
+ }
+ if m.ID == 0 {
+ return fmt.Errorf("found mapping with reserved ID=0")
+ }
+ if mappings[m.ID] != nil {
+ return fmt.Errorf("multiple mappings with same id: %d", m.ID)
+ }
+ mappings[m.ID] = m
+ }
+ functions := make(map[uint64]*Function, len(p.Function))
+ for _, f := range p.Function {
+ if f == nil {
+ return fmt.Errorf("profile has nil function")
+ }
+ if f.ID == 0 {
+ return fmt.Errorf("found function with reserved ID=0")
+ }
+ if functions[f.ID] != nil {
+ return fmt.Errorf("multiple functions with same id: %d", f.ID)
+ }
+ functions[f.ID] = f
+ }
+ locations := make(map[uint64]*Location, len(p.Location))
+ for _, l := range p.Location {
+ if l == nil {
+ return fmt.Errorf("profile has nil location")
+ }
+ if l.ID == 0 {
+ return fmt.Errorf("found location with reserved id=0")
+ }
+ if locations[l.ID] != nil {
+ return fmt.Errorf("multiple locations with same id: %d", l.ID)
+ }
+ locations[l.ID] = l
+ if m := l.Mapping; m != nil {
+ if m.ID == 0 || mappings[m.ID] != m {
+ return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID)
+ }
+ }
+ for _, ln := range l.Line {
+ f := ln.Function
+ if f == nil {
+ return fmt.Errorf("location id: %d has a line with nil function", l.ID)
+ }
+ if f.ID == 0 || functions[f.ID] != f {
+ return fmt.Errorf("inconsistent function %p: %d", f, f.ID)
+ }
+ }
+ }
+ return nil
+}
+
+// Aggregate merges the locations in the profile into equivalence
+// classes preserving the request attributes. It also updates the
+// samples to point to the merged locations.
+func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error {
+ for _, m := range p.Mapping {
+ m.HasInlineFrames = m.HasInlineFrames && inlineFrame
+ m.HasFunctions = m.HasFunctions && function
+ m.HasFilenames = m.HasFilenames && filename
+ m.HasLineNumbers = m.HasLineNumbers && linenumber
+ }
+
+ // Aggregate functions
+ if !function || !filename {
+ for _, f := range p.Function {
+ if !function {
+ f.Name = ""
+ f.SystemName = ""
+ }
+ if !filename {
+ f.Filename = ""
+ }
+ }
+ }
+
+ // Aggregate locations
+ if !inlineFrame || !address || !linenumber {
+ for _, l := range p.Location {
+ if !inlineFrame && len(l.Line) > 1 {
+ l.Line = l.Line[len(l.Line)-1:]
+ }
+ if !linenumber {
+ for i := range l.Line {
+ l.Line[i].Line = 0
+ }
+ }
+ if !address {
+ l.Address = 0
+ }
+ }
+ }
+
+ return p.CheckValid()
+}
+
+// NumLabelUnits returns a map of numeric label keys to the units
+// associated with those keys and a map of those keys to any units
+// that were encountered but not used.
+// Unit for a given key is the first encountered unit for that key. If multiple
+// units are encountered for values paired with a particular key, then the first
+// unit encountered is used and all other units are returned in sorted order
+// in a map of ignored units.
+// If no units are encountered for a particular key, the unit is then inferred
+// based on the key.
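+//
+// A hypothetical worked example: for samples carrying a numeric label
+// "request" with no unit, and a numeric label "latency" seen first with unit
+// "ms" and later with unit "ns", the result would be
+//
+//	units   == map[string]string{"request": "bytes", "latency": "ms"}
+//	ignored == map[string][]string{"latency": {"ns"}}
+//
+// since "request" falls back to the byte heuristic and only the first unit
+// seen for "latency" is kept.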
+func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) { + numLabelUnits := map[string]string{} + ignoredUnits := map[string]map[string]bool{} + encounteredKeys := map[string]bool{} + + // Determine units based on numeric tags for each sample. + for _, s := range p.Sample { + for k := range s.NumLabel { + encounteredKeys[k] = true + for _, unit := range s.NumUnit[k] { + if unit == "" { + continue + } + if wantUnit, ok := numLabelUnits[k]; !ok { + numLabelUnits[k] = unit + } else if wantUnit != unit { + if v, ok := ignoredUnits[k]; ok { + v[unit] = true + } else { + ignoredUnits[k] = map[string]bool{unit: true} + } + } + } + } + } + // Infer units for keys without any units associated with + // numeric tag values. + for key := range encounteredKeys { + unit := numLabelUnits[key] + if unit == "" { + switch key { + case "alignment", "request": + numLabelUnits[key] = "bytes" + default: + numLabelUnits[key] = key + } + } + } + + // Copy ignored units into more readable format + unitsIgnored := make(map[string][]string, len(ignoredUnits)) + for key, values := range ignoredUnits { + units := make([]string, len(values)) + i := 0 + for unit := range values { + units[i] = unit + i++ + } + sort.Strings(units) + unitsIgnored[key] = units + } + + return numLabelUnits, unitsIgnored +} + +// String dumps a text representation of a profile. Intended mainly +// for debugging purposes. +func (p *Profile) String() string { + ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location)) + for _, c := range p.Comments { + ss = append(ss, "Comment: "+c) + } + if pt := p.PeriodType; pt != nil { + ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit)) + } + ss = append(ss, fmt.Sprintf("Period: %d", p.Period)) + if p.TimeNanos != 0 { + ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos))) + } + if p.DurationNanos != 0 { + ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos))) + } + + ss = append(ss, "Samples:") + var sh1 string + for _, s := range p.SampleType { + dflt := "" + if s.Type == p.DefaultSampleType { + dflt = "[dflt]" + } + sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt) + } + ss = append(ss, strings.TrimSpace(sh1)) + for _, s := range p.Sample { + ss = append(ss, s.string()) + } + + ss = append(ss, "Locations") + for _, l := range p.Location { + ss = append(ss, l.string()) + } + + ss = append(ss, "Mappings") + for _, m := range p.Mapping { + ss = append(ss, m.string()) + } + + return strings.Join(ss, "\n") + "\n" +} + +// string dumps a text representation of a mapping. Intended mainly +// for debugging purposes. +func (m *Mapping) string() string { + bits := "" + if m.HasFunctions { + bits = bits + "[FN]" + } + if m.HasFilenames { + bits = bits + "[FL]" + } + if m.HasLineNumbers { + bits = bits + "[LN]" + } + if m.HasInlineFrames { + bits = bits + "[IN]" + } + return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s", + m.ID, + m.Start, m.Limit, m.Offset, + m.File, + m.BuildID, + bits) +} + +// string dumps a text representation of a location. Intended mainly +// for debugging purposes. +func (l *Location) string() string { + ss := []string{} + locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address) + if m := l.Mapping; m != nil { + locStr = locStr + fmt.Sprintf("M=%d ", m.ID) + } + if l.IsFolded { + locStr = locStr + "[F] " + } + if len(l.Line) == 0 { + ss = append(ss, locStr) + } + for li := range l.Line { + lnStr := "??" 
+ if fn := l.Line[li].Function; fn != nil { + lnStr = fmt.Sprintf("%s %s:%d s=%d", + fn.Name, + fn.Filename, + l.Line[li].Line, + fn.StartLine) + if fn.Name != fn.SystemName { + lnStr = lnStr + "(" + fn.SystemName + ")" + } + } + ss = append(ss, locStr+lnStr) + // Do not print location details past the first line + locStr = " " + } + return strings.Join(ss, "\n") +} + +// string dumps a text representation of a sample. Intended mainly +// for debugging purposes. +func (s *Sample) string() string { + ss := []string{} + var sv string + for _, v := range s.Value { + sv = fmt.Sprintf("%s %10d", sv, v) + } + sv = sv + ": " + for _, l := range s.Location { + sv = sv + fmt.Sprintf("%d ", l.ID) + } + ss = append(ss, sv) + const labelHeader = " " + if len(s.Label) > 0 { + ss = append(ss, labelHeader+labelsToString(s.Label)) + } + if len(s.NumLabel) > 0 { + ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit)) + } + return strings.Join(ss, "\n") +} + +// labelsToString returns a string representation of a +// map representing labels. +func labelsToString(labels map[string][]string) string { + ls := []string{} + for k, v := range labels { + ls = append(ls, fmt.Sprintf("%s:%v", k, v)) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// numLabelsToString returns a string representation of a map +// representing numeric labels. +func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string { + ls := []string{} + for k, v := range numLabels { + units := numUnits[k] + var labelString string + if len(units) == len(v) { + values := make([]string, len(v)) + for i, vv := range v { + values[i] = fmt.Sprintf("%d %s", vv, units[i]) + } + labelString = fmt.Sprintf("%s:%v", k, values) + } else { + labelString = fmt.Sprintf("%s:%v", k, v) + } + ls = append(ls, labelString) + } + sort.Strings(ls) + return strings.Join(ls, " ") +} + +// SetLabel sets the specified key to the specified value for all samples in the +// profile. +func (p *Profile) SetLabel(key string, value []string) { + for _, sample := range p.Sample { + if sample.Label == nil { + sample.Label = map[string][]string{key: value} + } else { + sample.Label[key] = value + } + } +} + +// RemoveLabel removes all labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveLabel(key string) { + for _, sample := range p.Sample { + delete(sample.Label, key) + } +} + +// HasLabel returns true if a sample has a label with indicated key and value. +func (s *Sample) HasLabel(key, value string) bool { + for _, v := range s.Label[key] { + if v == value { + return true + } + } + return false +} + +// DiffBaseSample returns true if a sample belongs to the diff base and false +// otherwise. +func (s *Sample) DiffBaseSample() bool { + return s.HasLabel("pprof::base", "true") +} + +// Scale multiplies all sample values in a profile by a constant and keeps +// only samples that have at least one non-zero value. +func (p *Profile) Scale(ratio float64) { + if ratio == 1 { + return + } + ratios := make([]float64, len(p.SampleType)) + for i := range p.SampleType { + ratios[i] = ratio + } + p.ScaleN(ratios) +} + +// ScaleN multiplies each sample values in a sample by a different amount +// and keeps only samples that have at least one non-zero value. 
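+//
+// A hypothetical usage sketch: for a profile whose sample types are
+// [alloc_objects, alloc_space], halving only the space dimension is
+//
+//	err := p.ScaleN([]float64{1, 0.5})
+//
+// after which any sample whose values all rounded to zero has been dropped.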
+func (p *Profile) ScaleN(ratios []float64) error { + if len(p.SampleType) != len(ratios) { + return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType)) + } + allOnes := true + for _, r := range ratios { + if r != 1 { + allOnes = false + break + } + } + if allOnes { + return nil + } + fillIdx := 0 + for _, s := range p.Sample { + keepSample := false + for i, v := range s.Value { + if ratios[i] != 1 { + val := int64(math.Round(float64(v) * ratios[i])) + s.Value[i] = val + keepSample = keepSample || val != 0 + } + } + if keepSample { + p.Sample[fillIdx] = s + fillIdx++ + } + } + p.Sample = p.Sample[:fillIdx] + return nil +} + +// HasFunctions determines if all locations in this profile have +// symbolized function information. +func (p *Profile) HasFunctions() bool { + for _, l := range p.Location { + if l.Mapping != nil && !l.Mapping.HasFunctions { + return false + } + } + return true +} + +// HasFileLines determines if all locations in this profile have +// symbolized file and line number information. +func (p *Profile) HasFileLines() bool { + for _, l := range p.Location { + if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) { + return false + } + } + return true +} + +// Unsymbolizable returns true if a mapping points to a binary for which +// locations can't be symbolized in principle, at least now. Examples are +// "[vdso]", [vsyscall]" and some others, see the code. +func (m *Mapping) Unsymbolizable() bool { + name := filepath.Base(m.File) + return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/") +} + +// Copy makes a fully independent copy of a profile. +func (p *Profile) Copy() *Profile { + pp := &Profile{} + if err := unmarshal(serialize(p), pp); err != nil { + panic(err) + } + if err := pp.postDecode(); err != nil { + panic(err) + } + + return pp +} diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go new file mode 100644 index 000000000..539ad3ab3 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/proto.go @@ -0,0 +1,370 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file is a simple protocol buffer encoder and decoder. +// The format is described at +// https://developers.google.com/protocol-buffers/docs/encoding +// +// A protocol message must implement the message interface: +// decoder() []decoder +// encode(*buffer) +// +// The decode method returns a slice indexed by field number that gives the +// function to decode that field. +// The encode method encodes its receiver into the given buffer. +// +// The two methods are simple enough to be implemented by hand rather than +// by using a protocol compiler. +// +// See profile.go for examples of messages implementing this interface. +// +// There is no support for groups, message sets, or "has" bits. 
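+//
+// As a hypothetical sketch (not one of the real messages), a minimal
+// message with two varint fields could implement the interface as:
+//
+//	type point struct{ x, y int64 }
+//
+//	func (p *point) decoder() []decoder {
+//		return []decoder{
+//			nil, // field numbers start at 1
+//			func(b *buffer, m message) error { return decodeInt64(b, &m.(*point).x) },
+//			func(b *buffer, m message) error { return decodeInt64(b, &m.(*point).y) },
+//		}
+//	}
+//
+//	func (p *point) encode(b *buffer) {
+//		encodeInt64Opt(b, 1, p.x)
+//		encodeInt64Opt(b, 2, p.y)
+//	}
+//
+// so that marshal(&point{x: 1, y: 2}) and unmarshal(data, &point{}) round-trip.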
+ +package profile + +import ( + "errors" + "fmt" +) + +type buffer struct { + field int // field tag + typ int // proto wire type code for field + u64 uint64 + data []byte + tmp [16]byte +} + +type decoder func(*buffer, message) error + +type message interface { + decoder() []decoder + encode(*buffer) +} + +func marshal(m message) []byte { + var b buffer + m.encode(&b) + return b.data +} + +func encodeVarint(b *buffer, x uint64) { + for x >= 128 { + b.data = append(b.data, byte(x)|0x80) + x >>= 7 + } + b.data = append(b.data, byte(x)) +} + +func encodeLength(b *buffer, tag int, len int) { + encodeVarint(b, uint64(tag)<<3|2) + encodeVarint(b, uint64(len)) +} + +func encodeUint64(b *buffer, tag int, x uint64) { + // append varint to b.data + encodeVarint(b, uint64(tag)<<3) + encodeVarint(b, x) +} + +func encodeUint64s(b *buffer, tag int, x []uint64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, u) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeUint64(b, tag, u) + } +} + +func encodeUint64Opt(b *buffer, tag int, x uint64) { + if x == 0 { + return + } + encodeUint64(b, tag, x) +} + +func encodeInt64(b *buffer, tag int, x int64) { + u := uint64(x) + encodeUint64(b, tag, u) +} + +func encodeInt64s(b *buffer, tag int, x []int64) { + if len(x) > 2 { + // Use packed encoding + n1 := len(b.data) + for _, u := range x { + encodeVarint(b, uint64(u)) + } + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) + return + } + for _, u := range x { + encodeInt64(b, tag, u) + } +} + +func encodeInt64Opt(b *buffer, tag int, x int64) { + if x == 0 { + return + } + encodeInt64(b, tag, x) +} + +func encodeString(b *buffer, tag int, x string) { + encodeLength(b, tag, len(x)) + b.data = append(b.data, x...) 
+} + +func encodeStrings(b *buffer, tag int, x []string) { + for _, s := range x { + encodeString(b, tag, s) + } +} + +func encodeBool(b *buffer, tag int, x bool) { + if x { + encodeUint64(b, tag, 1) + } else { + encodeUint64(b, tag, 0) + } +} + +func encodeBoolOpt(b *buffer, tag int, x bool) { + if x { + encodeBool(b, tag, x) + } +} + +func encodeMessage(b *buffer, tag int, m message) { + n1 := len(b.data) + m.encode(b) + n2 := len(b.data) + encodeLength(b, tag, n2-n1) + n3 := len(b.data) + copy(b.tmp[:], b.data[n2:n3]) + copy(b.data[n1+(n3-n2):], b.data[n1:n2]) + copy(b.data[n1:], b.tmp[:n3-n2]) +} + +func unmarshal(data []byte, m message) (err error) { + b := buffer{data: data, typ: 2} + return decodeMessage(&b, m) +} + +func le64(p []byte) uint64 { + return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56 +} + +func le32(p []byte) uint32 { + return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24 +} + +func decodeVarint(data []byte) (uint64, []byte, error) { + var u uint64 + for i := 0; ; i++ { + if i >= 10 || i >= len(data) { + return 0, nil, errors.New("bad varint") + } + u |= uint64(data[i]&0x7F) << uint(7*i) + if data[i]&0x80 == 0 { + return u, data[i+1:], nil + } + } +} + +func decodeField(b *buffer, data []byte) ([]byte, error) { + x, data, err := decodeVarint(data) + if err != nil { + return nil, err + } + b.field = int(x >> 3) + b.typ = int(x & 7) + b.data = nil + b.u64 = 0 + switch b.typ { + case 0: + b.u64, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + case 1: + if len(data) < 8 { + return nil, errors.New("not enough data") + } + b.u64 = le64(data[:8]) + data = data[8:] + case 2: + var n uint64 + n, data, err = decodeVarint(data) + if err != nil { + return nil, err + } + if n > uint64(len(data)) { + return nil, errors.New("too much data") + } + b.data = data[:n] + data = data[n:] + case 5: + if len(data) < 4 { + return nil, errors.New("not enough data") + } + b.u64 = uint64(le32(data[:4])) + data = data[4:] + default: + return nil, fmt.Errorf("unknown wire type: %d", b.typ) + } + + return data, nil +} + +func checkType(b *buffer, typ int) error { + if b.typ != typ { + return errors.New("type mismatch") + } + return nil +} + +func decodeMessage(b *buffer, m message) error { + if err := checkType(b, 2); err != nil { + return err + } + dec := m.decoder() + data := b.data + for len(data) > 0 { + // pull varint field# + type + var err error + data, err = decodeField(b, data) + if err != nil { + return err + } + if b.field >= len(dec) || dec[b.field] == nil { + continue + } + if err := dec[b.field](b, m); err != nil { + return err + } + } + return nil +} + +func decodeInt64(b *buffer, x *int64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = int64(b.u64) + return nil +} + +func decodeInt64s(b *buffer, x *[]int64) error { + if b.typ == 2 { + // Packed encoding + data := b.data + tmp := make([]int64, 0, len(data)) // Maximally sized + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + tmp = append(tmp, int64(u)) + } + *x = append(*x, tmp...) 
+ return nil + } + var i int64 + if err := decodeInt64(b, &i); err != nil { + return err + } + *x = append(*x, i) + return nil +} + +func decodeUint64(b *buffer, x *uint64) error { + if err := checkType(b, 0); err != nil { + return err + } + *x = b.u64 + return nil +} + +func decodeUint64s(b *buffer, x *[]uint64) error { + if b.typ == 2 { + data := b.data + // Packed encoding + tmp := make([]uint64, 0, len(data)) // Maximally sized + for len(data) > 0 { + var u uint64 + var err error + + if u, data, err = decodeVarint(data); err != nil { + return err + } + tmp = append(tmp, u) + } + *x = append(*x, tmp...) + return nil + } + var u uint64 + if err := decodeUint64(b, &u); err != nil { + return err + } + *x = append(*x, u) + return nil +} + +func decodeString(b *buffer, x *string) error { + if err := checkType(b, 2); err != nil { + return err + } + *x = string(b.data) + return nil +} + +func decodeStrings(b *buffer, x *[]string) error { + var s string + if err := decodeString(b, &s); err != nil { + return err + } + *x = append(*x, s) + return nil +} + +func decodeBool(b *buffer, x *bool) error { + if err := checkType(b, 0); err != nil { + return err + } + if int64(b.u64) == 0 { + *x = false + } else { + *x = true + } + return nil +} diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go new file mode 100644 index 000000000..02d21a818 --- /dev/null +++ b/vendor/github.com/google/pprof/profile/prune.go @@ -0,0 +1,178 @@ +// Copyright 2014 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Implements methods to remove frames from profiles. + +package profile + +import ( + "fmt" + "regexp" + "strings" +) + +var ( + reservedNames = []string{"(anonymous namespace)", "operator()"} + bracketRx = func() *regexp.Regexp { + var quotedNames []string + for _, name := range append(reservedNames, "(") { + quotedNames = append(quotedNames, regexp.QuoteMeta(name)) + } + return regexp.MustCompile(strings.Join(quotedNames, "|")) + }() +) + +// simplifyFunc does some primitive simplification of function names. +func simplifyFunc(f string) string { + // Account for leading '.' on the PPC ELF v1 ABI. + funcName := strings.TrimPrefix(f, ".") + // Account for unsimplified names -- try to remove the argument list by trimming + // starting from the first '(', but skipping reserved names that have '('. + for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) { + foundReserved := false + for _, res := range reservedNames { + if funcName[ind[0]:ind[1]] == res { + foundReserved = true + break + } + } + if !foundReserved { + funcName = funcName[:ind[0]] + break + } + } + return funcName +} + +// Prune removes all nodes beneath a node matching dropRx, and not +// matching keepRx. If the root node of a Sample matches, the sample +// will have an empty stack. 
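+//
+// A hypothetical usage sketch: to drop runtime internals while keeping
+// runtime.mallocgc frames visible, one could call
+//
+//	p.Prune(regexp.MustCompile(`^runtime\.`), regexp.MustCompile(`^runtime\.mallocgc$`))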
+func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) { + prune := make(map[uint64]bool) + pruneBeneath := make(map[uint64]bool) + + for _, loc := range p.Location { + var i int + for i = len(loc.Line) - 1; i >= 0; i-- { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + funcName := simplifyFunc(fn.Name) + if dropRx.MatchString(funcName) { + if keepRx == nil || !keepRx.MatchString(funcName) { + break + } + } + } + } + + if i >= 0 { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + + // Remove the matching location. + if i == len(loc.Line)-1 { + // Matched the top entry: prune the whole location. + prune[loc.ID] = true + } else { + loc.Line = loc.Line[i+1:] + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the root to the leaves to find the prune location. + // Do not prune frames before the first user frame, to avoid + // pruning everything. + foundUser := false + for i := len(sample.Location) - 1; i >= 0; i-- { + id := sample.Location[i].ID + if !prune[id] && !pruneBeneath[id] { + foundUser = true + continue + } + if !foundUser { + continue + } + if prune[id] { + sample.Location = sample.Location[i+1:] + break + } + if pruneBeneath[id] { + sample.Location = sample.Location[i:] + break + } + } + } +} + +// RemoveUninteresting prunes and elides profiles using built-in +// tables of uninteresting function names. +func (p *Profile) RemoveUninteresting() error { + var keep, drop *regexp.Regexp + var err error + + if p.DropFrames != "" { + if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err) + } + if p.KeepFrames != "" { + if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil { + return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err) + } + } + p.Prune(drop, keep) + } + return nil +} + +// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself. +// +// Please see the example below to understand this method as well as +// the difference from Prune method. +// +// A sample contains Location of [A,B,C,B,D] where D is the top frame and there's no inline. +// +// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A. +// Prune(A, nil) returns [B,C,B,D] by removing A itself. +// +// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom. +// Prune(B, nil) returns [D] because a matching node is found by scanning from the root. +func (p *Profile) PruneFrom(dropRx *regexp.Regexp) { + pruneBeneath := make(map[uint64]bool) + + for _, loc := range p.Location { + for i := 0; i < len(loc.Line); i++ { + if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { + funcName := simplifyFunc(fn.Name) + if dropRx.MatchString(funcName) { + // Found matching entry to prune. + pruneBeneath[loc.ID] = true + loc.Line = loc.Line[i:] + break + } + } + } + } + + // Prune locs from each Sample + for _, sample := range p.Sample { + // Scan from the bottom leaf to the root to find the prune location. 
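+ // In this encoding sample.Location[0] is the leaf frame and the last
+ // entry is the root, so ranging forward walks leaf-to-root.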
+ for i, loc := range sample.Location { + if pruneBeneath[loc.ID] { + sample.Location = sample.Location[i:] + break + } + } + } +} diff --git a/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/images/consts.go b/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/images/consts.go index 32953204a..13a94cc09 100644 --- a/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/images/consts.go +++ b/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/images/consts.go @@ -20,12 +20,12 @@ const ( SchedulerPluginSchedulerDefaultImageTag = "quay.io/k8stopologyawareschedwg/scheduler-plugins-kube-scheduler:v0.0.2023031503" SchedulerPluginControllerDefaultImageTag = "quay.io/k8stopologyawareschedwg/scheduler-plugins-controller:v0.0.2023031503" NodeFeatureDiscoveryDefaultImageTag = "quay.io/k8stopologyawareschedwg/node-feature-discovery:v0.0.2023031605" - ResourceTopologyExporterDefaultImageTag = "quay.io/k8stopologyawareschedwg/resource-topology-exporter:v0.10.1" + ResourceTopologyExporterDefaultImageTag = "quay.io/k8stopologyawareschedwg/resource-topology-exporter:v0.10.2" ) const ( SchedulerPluginSchedulerDefaultImageSHA = "quay.io/k8stopologyawareschedwg/scheduler-plugins-kube-scheduler@sha256:7c75900ae5eb94f7d8ea89e2fc640a03731b2f17960e23cdf50ea99258b0beeb" SchedulerPluginControllerDefaultImageSHA = "quay.io/k8stopologyawareschedwg/scheduler-plugins-controller@sha256:ac030bb933bfe0c9b11a0192bd81d1bb2d27e80f9dc356c69f865d9395bfe007" NodeFeatureDiscoveryDefaultImageSHA = "quay.io/k8stopologyawareschedwg/node-feature-discovery@sha256:984c9b12ff32bab1716da12d898d058c57b932dcc38b42ebc16234a26cb234aa" - ResourceTopologyExporterDefaultImageSHA = "quay.io/k8stopologyawareschedwg/resource-topology-exporter@sha256:633b1975b746238a9a3ec5b5db5eaac046dac2189508068810978cd323de94f5" + ResourceTopologyExporterDefaultImageSHA = "quay.io/k8stopologyawareschedwg/resource-topology-exporter@sha256:fbbd701e0d4d4ffc9e373db4e39a0e61a433c71e5c6410e9bd60265e37059ae1" ) diff --git a/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/manifests/codec.go b/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/manifests/codec.go index 42511e87f..73bd7031f 100644 --- a/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/manifests/codec.go +++ b/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/manifests/codec.go @@ -27,9 +27,7 @@ import ( k8sjson "k8s.io/apimachinery/pkg/runtime/serializer/json" k8sscheme "k8s.io/client-go/kubernetes/scheme" - schedconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" - schedscheme "sigs.k8s.io/scheduler-plugins/apis/config/scheme" - "sigs.k8s.io/scheduler-plugins/apis/config/v1beta2" + "sigs.k8s.io/controller-runtime/pkg/client" ) func SerializeObject(obj runtime.Object, out io.Writer) error { @@ -76,34 +74,13 @@ func loadObject(path string) (runtime.Object, error) { return DeserializeObjectFromData(data) } -func DecodeSchedulerConfigFromData(data []byte) (*schedconfig.KubeSchedulerConfiguration, error) { - decoder := schedscheme.Codecs.UniversalDecoder() - obj, gvk, err := decoder.Decode(data, nil, nil) - - if err != nil { - return nil, err - } - - schedCfg, ok := obj.(*schedconfig.KubeSchedulerConfiguration) - if !ok { - return nil, fmt.Errorf("decoded unsupported type: %T gvk=%s", obj, gvk) - } - return schedCfg, nil -} - -func EncodeSchedulerConfigToData(schedCfg *schedconfig.KubeSchedulerConfiguration) ([]byte, error) { - yamlInfo, ok := runtime.SerializerInfoForMediaType(schedscheme.Codecs.SupportedMediaTypes(), runtime.ContentTypeYAML) - if !ok { - return nil, 
fmt.Errorf("unable to locate encoder -- %q is not a supported media type", runtime.ContentTypeYAML) - } - - encoder := schedscheme.Codecs.EncoderForVersion(yamlInfo.Serializer, v1beta2.SchemeGroupVersion) - - var buf bytes.Buffer - err := encoder.Encode(schedCfg, &buf) - if err != nil { - return nil, err +func RenderObjects(objs []client.Object, w io.Writer) error { + for _, obj := range objs { + fmt.Fprintf(w, "---\n") + if err := SerializeObject(obj, w); err != nil { + return err + } } - return buf.Bytes(), nil + return nil } diff --git a/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/manifests/manifests.go b/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/manifests/manifests.go index 5a88c059d..e0f22ed93 100644 --- a/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/manifests/manifests.go +++ b/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/manifests/manifests.go @@ -34,13 +34,7 @@ import ( apiextensionv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/kubernetes/scheme" - kubeschedulerconfigv1beta2 "k8s.io/kube-scheduler/config/v1beta2" "k8s.io/utils/pointer" - apiconfigv1beta2 "sigs.k8s.io/scheduler-plugins/apis/config/v1beta2" - - k8sschedpluginsconf "sigs.k8s.io/scheduler-plugins/apis/config" - k8sschedpluginsconfv1beta2 "sigs.k8s.io/scheduler-plugins/apis/config/v1beta2" - k8sschedpluginsconfv1beta3 "sigs.k8s.io/scheduler-plugins/apis/config/v1beta3" rteassets "github.com/k8stopologyawareschedwg/deployer/pkg/assets/rte" "github.com/k8stopologyawareschedwg/deployer/pkg/deployer/platform" @@ -92,11 +86,6 @@ var src embed.FS func init() { apiextensionv1.AddToScheme(scheme.Scheme) - apiconfigv1beta2.AddToScheme(scheme.Scheme) - kubeschedulerconfigv1beta2.AddToScheme(scheme.Scheme) - k8sschedpluginsconf.AddToScheme(scheme.Scheme) - k8sschedpluginsconfv1beta2.AddToScheme(scheme.Scheme) - k8sschedpluginsconfv1beta3.AddToScheme(scheme.Scheme) machineconfigv1.Install(scheme.Scheme) securityv1.Install(scheme.Scheme) } @@ -580,23 +569,6 @@ func SecurityContextConstraint(component string) (*securityv1.SecurityContextCon return scc, nil } -func KubeSchedulerConfigurationFromData(data []byte) (*kubeschedulerconfigv1beta2.KubeSchedulerConfiguration, error) { - obj, err := DeserializeObjectFromData(data) - if err != nil { - return nil, err - } - - sc, ok := obj.(*kubeschedulerconfigv1beta2.KubeSchedulerConfiguration) - if !ok { - return nil, fmt.Errorf("unexpected type, got %T %v", obj, obj.GetObjectKind()) - } - return sc, nil -} - -func KubeSchedulerConfigurationToData(sc *kubeschedulerconfigv1beta2.KubeSchedulerConfiguration) ([]byte, error) { - return SerializeObjectToData(sc) -} - func validateComponent(component string) error { if component == ComponentAPI || component == ComponentResourceTopologyExporter || component == ComponentNodeFeatureDiscovery || component == ComponentSchedulerPlugin { return nil diff --git a/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/manifests/schedparams.go b/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/manifests/schedparams.go new file mode 100644 index 000000000..62fc3c39a --- /dev/null +++ b/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/manifests/schedparams.go @@ -0,0 +1,137 @@ +/* + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright 2023 Red Hat, Inc.
+ */
+
+package manifests
+
+import (
+ "fmt"
+
+ "sigs.k8s.io/yaml"
+
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/klog/v2"
+)
+
+const (
+ SchedulerConfigFileName = "scheduler-config.yaml" // TODO duplicate from yaml
+ SchedulerPluginName = "NodeResourceTopologyMatch"
+)
+
+type ConfigCacheParams struct {
+ ResyncPeriodSeconds *int64
+}
+
+type ConfigParams struct {
+ ProfileName string // can't be empty, so no need for pointer
+ Cache *ConfigCacheParams
+}
+
+func DecodeSchedulerProfilesFromData(data []byte) ([]ConfigParams, error) {
+ params := []ConfigParams{}
+
+ var r unstructured.Unstructured
+ if err := yaml.Unmarshal(data, &r.Object); err != nil {
+ klog.ErrorS(err, "cannot unmarshal scheduler config")
+ return params, nil
+ }
+
+ profiles, ok, err := unstructured.NestedSlice(r.Object, "profiles")
+ if !ok || err != nil {
+ klog.ErrorS(err, "failed to process unstructured data", "profiles", ok)
+ return params, nil
+ }
+ for _, prof := range profiles {
+ profile, ok := prof.(map[string]interface{})
+ if !ok {
+ klog.V(1).InfoS("unexpected profile data")
+ return params, nil
+ }
+
+ profileName, ok, err := unstructured.NestedString(profile, "schedulerName")
+ if !ok || err != nil {
+ klog.ErrorS(err, "failed to get profile name", "profileName", ok)
+ return params, nil
+ }
+
+ pluginConfigs, ok, err := unstructured.NestedSlice(profile, "pluginConfig")
+ if !ok || err != nil {
+ klog.ErrorS(err, "failed to process unstructured data", "pluginConfig", ok)
+ return params, nil
+ }
+ for _, plConf := range pluginConfigs {
+ pluginConf, ok := plConf.(map[string]interface{})
+ if !ok {
+ klog.V(1).InfoS("unexpected profile config data")
+ return params, nil
+ }
+
+ name, ok, err := unstructured.NestedString(pluginConf, "name")
+ if !ok || err != nil {
+ klog.ErrorS(err, "failed to process unstructured data", "name", ok)
+ return params, nil
+ }
+ if name != SchedulerPluginName {
+ continue
+ }
+ args, ok, err := unstructured.NestedMap(pluginConf, "args")
+ if !ok || err != nil {
+ klog.ErrorS(err, "failed to process unstructured data", "args", ok)
+ return params, nil
+ }
+
+ profileParams, err := extractParams(profileName, args)
+ if err != nil {
+ klog.ErrorS(err, "failed to extract params", "name", name, "profile", profileName)
+ continue
+ }
+
+ params = append(params, profileParams)
+ }
+ }
+
+ return params, nil
+
+}
+
+func FindSchedulerProfileByName(profileParams []ConfigParams, schedulerName string) *ConfigParams {
+ for idx := range profileParams {
+ params := &profileParams[idx]
+ if params.ProfileName == schedulerName {
+ return params
+ }
+ }
+ return nil
+}
+
+func extractParams(profileName string, args map[string]interface{}) (ConfigParams, error) {
+ params := ConfigParams{
+ ProfileName: profileName,
+ Cache: &ConfigCacheParams{},
+ }
+ // json quirk: we know it's int64, yet it's detected as float64
+ resyncPeriod, ok, err := unstructured.NestedFloat64(args, "cacheResyncPeriodSeconds")
+ if !ok {
+ // nothing to do
+ return params, nil
+ }
+ if err != nil {
+ return params, fmt.Errorf("cannot process field cacheResyncPeriodSeconds: %w", err)
+ }
+
+ val := int64(resyncPeriod)
+ params.Cache.ResyncPeriodSeconds = &val
+ return params, nil
+}
diff --git a/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/objectupdate/sched/render.go b/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/objectupdate/sched/render.go
new file mode 100644
index 000000000..577f55945
--- /dev/null
+++ b/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/objectupdate/sched/render.go
@@ -0,0 +1,196 @@
+/*
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright 2023 Red Hat, Inc.
+ */
+
+package sched
+
+import (
+ "encoding/json"
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+ "k8s.io/klog/v2"
+
+ "sigs.k8s.io/yaml"
+
+ "github.com/k8stopologyawareschedwg/deployer/pkg/manifests"
+)
+
+func SchedulerConfig(cm *corev1.ConfigMap, schedulerName string, params *manifests.ConfigParams) error {
+ if cm.Data == nil {
+ return fmt.Errorf("no data found in ConfigMap: %s/%s", cm.Namespace, cm.Name)
+ }
+
+ data, ok := cm.Data[manifests.SchedulerConfigFileName]
+ if !ok {
+ return fmt.Errorf("no data key named: %s found in ConfigMap: %s/%s", manifests.SchedulerConfigFileName, cm.Namespace, cm.Name)
+ }
+
+ newData, _, err := RenderConfig([]byte(data), schedulerName, params)
+ if err != nil {
+ return err
+ }
+
+ cm.Data[manifests.SchedulerConfigFileName] = string(newData)
+ return nil
+}
+
+func RenderConfig(data []byte, schedulerName string, params *manifests.ConfigParams) ([]byte, bool, error) {
+ if schedulerName == "" || params == nil {
+ klog.V(2).InfoS("missing parameters, passing through", "schedulerName", schedulerName, "params", toJSON(params))
+ return data, false, nil
+ }
+
+ var r unstructured.Unstructured
+ if err := yaml.Unmarshal(data, &r.Object); err != nil {
+ klog.ErrorS(err, "cannot unmarshal scheduler config, passing through")
+ return data, false, err
+ }
+
+ updated := false
+
+ profiles, ok, err := unstructured.NestedSlice(r.Object, "profiles")
+ if !ok || err != nil {
+ klog.ErrorS(err, "failed to process unstructured data", "profiles", ok)
+ return data, false, err
+ }
+ for _, prof := range profiles {
+ profile, ok := prof.(map[string]interface{})
+ if !ok {
+ klog.V(1).InfoS("unexpected profile data")
+ return data, false, nil
+ }
+
+ profileName, ok, err := unstructured.NestedString(profile, "schedulerName")
+ if !ok || err != nil {
+ klog.ErrorS(err, "failed to get profile name", "profileName", ok)
+ return data, false, err
+ }
+
+ if profileName != schedulerName {
+ continue
+ }
+
+ if params.ProfileName != "" {
+ unstructured.SetNestedField(profile, params.ProfileName, "schedulerName")
+ updated = true
+ }
+
+ pluginConfigs, ok, err := unstructured.NestedSlice(profile, "pluginConfig")
+ if !ok || err != nil {
+ klog.ErrorS(err, "failed to process unstructured data", "pluginConfig", ok)
+ return data, false, err
+ }
+ for _, plConf := range pluginConfigs {
+ pluginConf, ok := plConf.(map[string]interface{})
+ if !ok {
+ klog.V(1).InfoS("unexpected profile config data")
+ return data, false, nil
+ }
+
+ name, ok, err := unstructured.NestedString(pluginConf, "name")
+ if !ok || err != nil {
+ klog.ErrorS(err, "failed to process unstructured data", "name", ok)
+ return data, false, err
+ }
+ if name != manifests.SchedulerPluginName {
+ continue
+ }
+ args, ok, err := unstructured.NestedMap(pluginConf, "args")
+ if !ok || err != nil {
+ klog.ErrorS(err, "failed to process unstructured data", "args", ok)
+ return data, false, err
+ }
+
+ argsUpdated, err := updateArgs(args, params)
+ if err != nil {
+ klog.ErrorS(err, "failed to update unstructured data", "args", args, "params", params)
+ return data, false, err
+ }
+ if argsUpdated {
+ updated = true
+ }
+
+ if err := unstructured.SetNestedMap(pluginConf, args, "args"); err != nil {
+ klog.ErrorS(err, "failed to override unstructured data", "data", "args")
+ return data, false, err
+ }
+ }
+
+ if err := unstructured.SetNestedSlice(profile, pluginConfigs, "pluginConfig"); err != nil {
+ klog.ErrorS(err, "failed to override unstructured data", "data", "pluginConfig")
+ return data, false, err
+ }
+ }
+
+ if err := unstructured.SetNestedSlice(r.Object, profiles, "profiles"); err != nil {
+ klog.ErrorS(err, "failed to override unstructured data", "data", "profiles")
+ return data, false, err
+ }
+
+ newData, err := yaml.Marshal(&r.Object)
+ if err != nil {
+ klog.ErrorS(err, "cannot re-encode scheduler config, passing through")
+ return data, false, nil
+ }
+ return newData, updated, nil
+}
+
+func updateArgs(args map[string]interface{}, params *manifests.ConfigParams) (bool, error) {
+ var updated int
+ var err error
+
+ if params.Cache != nil {
+ if params.Cache.ResyncPeriodSeconds != nil {
+ resyncPeriod := *params.Cache.ResyncPeriodSeconds // shortcut
+ err = unstructured.SetNestedField(args, resyncPeriod, "cacheResyncPeriodSeconds")
+ if err != nil {
+ return updated > 0, err
+ }
+ updated++
+ }
+ }
+ return updated > 0, ensureBackwardCompatibility(args)
+}
+
+func ensureBackwardCompatibility(args map[string]interface{}) error {
+ resyncPeriod, ok, err := unstructured.NestedInt64(args, "cacheResyncPeriodSeconds")
+ if !ok {
+ // nothing to do
+ return nil
+ }
+ if err != nil {
+ return fmt.Errorf("cannot process field cacheResyncPeriodSeconds: %w", err)
+ }
+ if resyncPeriod > 0 {
+ // nothing to do
+ } else {
+ // remove for backward compatibility
+ delete(args, "cacheResyncPeriodSeconds")
+ }
+ return nil
+}
+
+func toJSON(v any) string {
+ if v == nil {
+ return "<nil>"
+ }
+ data, err := json.Marshal(v)
+ if err != nil {
+ return fmt.Sprintf("<error: %v>", err)
+ }
+ return string(data)
+}
diff --git a/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/objectupdate/sched/sched.go b/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/objectupdate/sched/sched.go
index 69c0a2301..dfeea030f 100644
--- a/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/objectupdate/sched/sched.go
+++ b/vendor/github.com/k8stopologyawareschedwg/deployer/pkg/objectupdate/sched/sched.go
@@ -17,17 +17,10 @@
 package sched
 
 import (
- "fmt"
- "time"
-
 appsv1 "k8s.io/api/apps/v1"
 corev1 "k8s.io/api/core/v1"
 
- schedconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
- pluginconfig "sigs.k8s.io/scheduler-plugins/apis/config"
-
 "github.com/k8stopologyawareschedwg/deployer/pkg/images"
- "github.com/k8stopologyawareschedwg/deployer/pkg/manifests"
 )
 
 const (
@@ -45,69 +38,6 @@ func ControllerDeployment(dp *appsv1.Deployment, pullIfNotPresent bool) {
dp.Spec.Template.Spec.Containers[0].ImagePullPolicy = pullPolicy(pullIfNotPresent) } -func SchedulerConfig(cm *corev1.ConfigMap, schedulerName string, cacheResyncPeriod time.Duration) error { - if cm.Data == nil { - return fmt.Errorf("no data found in ConfigMap: %s/%s", cm.Namespace, cm.Name) - } - - data, ok := cm.Data[SchedulerConfigFileName] - if !ok { - return fmt.Errorf("no data key named: %s found in ConfigMap: %s/%s", SchedulerConfigFileName, cm.Namespace, cm.Name) - } - - newData, err := RenderConfig(data, schedulerName, cacheResyncPeriod) - if err != nil { - return err - } - - cm.Data[SchedulerConfigFileName] = string(newData) - return nil -} - -func RenderConfig(data, schedulerName string, cacheResyncPeriod time.Duration) (string, error) { - schedCfg, err := manifests.DecodeSchedulerConfigFromData([]byte(data)) - if err != nil { - return data, err - } - - schedProf, pluginConf := findKubeSchedulerProfileByName(schedCfg, schedulerPluginName) - if schedProf == nil || pluginConf == nil { - return data, fmt.Errorf("no profile or plugin configuration found for %q", schedulerPluginName) - } - - if schedulerName != "" { - schedProf.SchedulerName = schedulerName - } - - confObj := pluginConf.Args.DeepCopyObject() - cfg, ok := confObj.(*pluginconfig.NodeResourceTopologyMatchArgs) - if !ok { - return data, fmt.Errorf("unsupported plugin config type: %T", confObj) - } - - period := int64(cacheResyncPeriod.Seconds()) - cfg.CacheResyncPeriodSeconds = period - - pluginConf.Args = cfg - - newData, err := manifests.EncodeSchedulerConfigToData(schedCfg) - return string(newData), err -} - -func findKubeSchedulerProfileByName(sc *schedconfig.KubeSchedulerConfiguration, name string) (*schedconfig.KubeSchedulerProfile, *schedconfig.PluginConfig) { - for i := range sc.Profiles { - // if we have a configuration for the NodeResourceTopologyMatch - // this is a valid profile - for j := range sc.Profiles[i].PluginConfig { - if sc.Profiles[i].PluginConfig[j].Name == name { - return &sc.Profiles[i], &sc.Profiles[i].PluginConfig[j] - } - } - } - - return nil, nil -} - func pullPolicy(pullIfNotPresent bool) corev1.PullPolicy { if pullIfNotPresent { return corev1.PullIfNotPresent diff --git a/vendor/github.com/onsi/ginkgo/v2/.gitignore b/vendor/github.com/onsi/ginkgo/v2/.gitignore index edf0231cd..18793c248 100644 --- a/vendor/github.com/onsi/ginkgo/v2/.gitignore +++ b/vendor/github.com/onsi/ginkgo/v2/.gitignore @@ -1,5 +1,5 @@ .DS_Store -TODO.md +TODO tmp/**/* *.coverprofile .vscode diff --git a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md index 05a41bd6a..b1811884b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md +++ b/vendor/github.com/onsi/ginkgo/v2/CHANGELOG.md @@ -1,3 +1,257 @@ +## 2.9.5 + +### Fixes +- ensure the correct deterministic sort order is produced when ordered specs are generated by a helper function [7fa0b6b] + +### Maintenance +- fix generators link (#1200) [9f9d8b9] +- Bump golang.org/x/tools from 0.8.0 to 0.9.1 (#1196) [150e3f2] +- fix spelling err in docs (#1199) [0013b1a] +- Bump golang.org/x/sys from 0.7.0 to 0.8.0 (#1193) [9e9e3e5] + +## 2.9.4 + +### Fixes +- fix hang with ginkgo -p (#1192) [15d4bdc] - this addresses a _long_ standing issue related to Ginkgo hanging when a child process spawned by the test does not exit. 
+ +- fix: fail fast may cause Serial spec or cleanup Node interrupted (#1178) [8dea88b] - prior to this there was a small gap in which specs on other processes might start even if one process has tried to abort the suite. + + +### Maintenance +- Document run order when multiple setup nodes are at the same nesting level [903be81] + +## 2.9.3 + +### Features +- Add RenderTimeline to GinkgoT() [c0c77b6] + +### Fixes +- update Measure deprecation message. fixes #1176 [227c662] +- add newlines to GinkgoLogr (#1170) (#1171) [0de0e7c] + +### Maintenance +- Bump commonmarker from 0.23.8 to 0.23.9 in /docs (#1183) [8b925ab] +- Bump nokogiri from 1.14.1 to 1.14.3 in /docs (#1184) [e3795a4] +- Bump golang.org/x/tools from 0.7.0 to 0.8.0 (#1182) [b453793] +- Bump actions/setup-go from 3 to 4 (#1164) [73ed75b] +- Bump github.com/onsi/gomega from 1.27.4 to 1.27.6 (#1173) [0a2bc64] +- Bump github.com/go-logr/logr from 1.2.3 to 1.2.4 (#1174) [f41c557] +- Bump golang.org/x/sys from 0.6.0 to 0.7.0 (#1179) [8e423e5] + +## 2.9.2 + +### Maintenance +- Bump github.com/go-task/slim-sprig (#1167) [3fcc5bf] +- Bump github.com/onsi/gomega from 1.27.3 to 1.27.4 (#1163) [6143ffe] + +## 2.9.1 + +### Fixes +This release fixes a longstanding issue where `ginkgo -coverpkg=./...` would not work. This is now resolved and fixes [#1161](https://github.com/onsi/ginkgo/issues/1161) and [#995](https://github.com/onsi/ginkgo/issues/995) +- Support -coverpkg=./... [26ca1b5] +- document coverpkg a bit more clearly [fc44c3b] + +### Maintenance +- bump various dependencies +- Improve Documentation and fix typo (#1158) [93de676] + +## 2.9.0 + +### Features +- AttachProgressReporter is an experimental feature that allows users to provide arbitrary information when a ProgressReport is requested [28801fe] + +- GinkgoT() has been expanded to include several Ginkgo-specific methods [2bd5a3b] + + The intent is to enable the development of third-party libraries that integrate deeply with Ginkgo using `GinkgoT()` to access Ginkgo's functionality. + +## 2.8.4 + +### Features +- Add OmitSuiteSetupNodes to JunitReportConfig (#1147) [979fbc2] +- Add a reference to ginkgolinter in docs.index.md (#1143) [8432589] + +### Fixes +- rename tools hack to see if it fixes things for downstream users [a8bb39a] + +### Maintenance +- Bump golang.org/x/text (#1144) [41b2a8a] +- Bump github.com/onsi/gomega from 1.27.0 to 1.27.1 (#1142) [7c4f583] + +## 2.8.3 + +Released to fix security issue in golang.org/x/net dependency + +### Maintenance + +- Bump golang.org/x/net from 0.6.0 to 0.7.0 (#1141) [fc1a02e] +- remove tools.go hack from documentation [0718693] + +## 2.8.2 + +Ginkgo now includes a `tools.go` file in the root directory of the `ginkgo` package. This should allow modules that simply `go get github.com/onsi/ginkgo/v2` to also pull in the CLI dependencies. This obviates the need for consumers of Ginkgo to have their own `tools.go` file and makes it simpler to ensure that the version of the `ginkgo` CLI being used matches the version of the library. You can simply run `go run github.com/onsi/ginkgo/v2/ginkgo` to run the version of the cli associated with your package go.mod. 
+ +### Maintenance + +- Bump github.com/onsi/gomega from 1.26.0 to 1.27.0 (#1139) [5767b0a] +- Fix minor typos (#1138) [e1e9723] +- Fix link in V2 Migration Guide (#1137) [a588f60] + +## 2.8.1 + +### Fixes +- lock around default report output to avoid triggering the race detector when calling By from goroutines [2d5075a] +- don't run ReportEntries through sprintf [febbe38] + +### Maintenance +- Bump golang.org/x/tools from 0.5.0 to 0.6.0 (#1135) [11a4860] +- test: update matrix for Go 1.20 (#1130) [4890a62] +- Bump golang.org/x/sys from 0.4.0 to 0.5.0 (#1133) [a774638] +- Bump github.com/onsi/gomega from 1.25.0 to 1.26.0 (#1120) [3f233bd] +- Bump github-pages from 227 to 228 in /docs (#1131) [f9b8649] +- Bump activesupport from 6.0.6 to 6.0.6.1 in /docs (#1127) [6f8c042] +- Update index.md with instructions on how to upgrade Ginkgo [833a75e] + +## 2.8.0 + +### Features + +- Introduce GinkgoHelper() to track and exclude helper functions from potential CodeLocations [e19f556] + +Modeled after `testing.T.Helper()`. Now, rather than write code like: + +```go +func helper(model Model) { + Expect(model).WithOffset(1).To(BeValid()) + Expect(model.SerialNumber).WithOffset(1).To(MatchRegexp(/[a-f0-9]*/)) +} +``` + +you can stop tracking offsets (which makes nesting composing helpers nearly impossible) and simply write: + +```go +func helper(model Model) { + GinkgoHelper() + Expect(model).To(BeValid()) + Expect(model.SerialNumber).To(MatchRegexp(/[a-f0-9]*/)) +} +``` + +- Introduce GinkgoLabelFilter() and Label().MatchesLabelFilter() to make it possible to programmatically match filters (fixes #1119) [2f6597c] + +You can now write code like this: + +```go +BeforeSuite(func() { + if Label("slow").MatchesLabelFilter(GinkgoLabelFilter()) { + // do slow setup + } + + if Label("fast").MatchesLabelFilter(GinkgoLabelFilter()) { + // do fast setup + } +}) +``` + +to programmatically check whether a given set of labels will match the configured `--label-filter`. + +### Maintenance + +- Bump webrick from 1.7.0 to 1.8.1 in /docs (#1125) [ea4966e] +- cdeql: add ruby language (#1124) [9dd275b] +- dependabot: add bundler package-ecosystem for docs (#1123) [14e7bdd] + +## 2.7.1 + +### Fixes +- Bring back SuiteConfig.EmitSpecProgress to avoid compilation issue for consumers that set it manually [d2a1cb0] + +### Maintenance +- Bump github.com/onsi/gomega from 1.24.2 to 1.25.0 (#1118) [cafece6] +- Bump golang.org/x/tools from 0.4.0 to 0.5.0 (#1111) [eda66c2] +- Bump golang.org/x/sys from 0.3.0 to 0.4.0 (#1112) [ac5ccaa] +- Bump github.com/onsi/gomega from 1.24.1 to 1.24.2 (#1097) [eee6480] + +## 2.7.0 + +### Features +- Introduce ContinueOnFailure for Ordered containers [e0123ca] - Ordered containers that are also decorated with ContinueOnFailure will not stop running specs after the first spec fails. +- Support for bootstrap commands to use custom data for templates (#1110) [7a2b242] +- Support for labels and pending decorator in ginkgo outline output (#1113) [e6e3b98] +- Color aliases for custom color support (#1101) [49fab7a] + +### Fixes +- correctly ensure deterministic spec order, even if specs are generated by iterating over a map [89dda20] +- Fix a bug where timedout specs were not correctly treated as failures when determining whether or not to run AfterAlls in an Ordered container. 
+- Ensure go test coverprofile outputs to the expected location (#1105) [b0bd77b]
+
+## 2.6.1
+
+### Features
+- Override formatter colors from envvars - this is a new feature but an alternative approach involving config files might be taken in the future (#1095) [60240d1]
+
+### Fixes
+- GinkgoRecover now supports ignoring panics that match a specific, hidden, interface [301f3e2]
+
+### Maintenance
+- Bump github.com/onsi/gomega from 1.24.0 to 1.24.1 (#1077) [3643823]
+- Bump golang.org/x/tools from 0.2.0 to 0.4.0 (#1090) [f9f856e]
+- Bump nokogiri from 1.13.9 to 1.13.10 in /docs (#1091) [0d7087e]
+
+## 2.6.0
+
+### Features
+- `ReportBeforeSuite` provides access to the suite report before the suite begins.
+- Add junit config option for omitting leafnodetype (#1088) [956e6d2]
+- Add support to customize junit report config to omit spec labels (#1087) [de44005]
+
+### Fixes
+- Fix stack trace pruning so that it has a chance of working on windows [2165648]
+
+## 2.5.1
+
+### Fixes
+- skipped tests only show as 'S' when running with -v [3ab38ae]
+- Fix typo in docs/index.md (#1082) [55fc58d]
+- Fix typo in docs/index.md (#1081) [8a14f1f]
+- Fix link notation in docs/index.md (#1080) [2669612]
+- Fix typo in `--progress` deprecation message (#1076) [b4b7edc]
+
+### Maintenance
+- chore: Included githubactions in the dependabot config (#976) [baea341]
+- Bump golang.org/x/sys from 0.1.0 to 0.2.0 (#1075) [9646297]
+
+## 2.5.0
+
+### Ginkgo output now includes a timeline-view of the spec
+
+This commit changes Ginkgo's default output. Spec details are now
+presented as a **timeline** that includes events that occur during the spec
+lifecycle interleaved with any GinkgoWriter content. This makes it much easier
+to understand the flow of a spec and where a given failure occurs.
+
+The --progress, --slow-spec-threshold, --always-emit-ginkgo-writer flags
+and the SuppressProgressReporting decorator have all been deprecated. Instead
+the existing -v and -vv flags better capture the level of verbosity to display. However,
+a new --show-node-events flag is added to include node `> Enter` and `< Exit` events
+in the spec timeline.
+
+In addition, JUnit reports now include the timeline (rendered with -vv) and custom JUnit
+reports can be configured and generated using
+`GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig)`
+
+Code should continue to work unchanged with this version of Ginkgo - however if you have tooling that
+was relying on the specific output format of Ginkgo you _may_ run into issues. Ginkgo's console output is not guaranteed to be stable for tooling and automation purposes. You should, instead, build tooling
+on top of Ginkgo's JSON format, which has stronger guarantees of stability from version to version.
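A rough sketch of driving the new JUnit configuration from a `ReportAfterSuite` node (the destination filename is illustrative; `OmitSuiteSetupNodes` is the `JunitReportConfig` field added in 2.8.4 above):

```go
package books_test

import (
	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	"github.com/onsi/ginkgo/v2/reporters"
)

// Runs once, after the entire suite, with the aggregated report.
var _ = ReportAfterSuite("custom junit report", func(report Report) {
	config := reporters.JunitReportConfig{
		OmitSuiteSetupNodes: true, // leave BeforeSuite/AfterSuite nodes out of the XML
	}
	err := reporters.GenerateJUnitReportWithConfig(report, "junit.custom.xml", config)
	Expect(err).NotTo(HaveOccurred())
})
```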
+ +### Features +- Provide details about which timeout expired [0f2fa27] + +### Fixes +- Add Support Policy to docs [c70867a] + +### Maintenance +- Bump github.com/onsi/gomega from 1.22.1 to 1.23.0 (#1070) [bb3b4e2] + ## 2.4.0 ### Features @@ -8,7 +262,7 @@ ### Fixes - correcting some typos (#1064) [1403d3c] -- fix flaky internal_integration interupt specs [2105ba3] +- fix flaky internal_integration interrupt specs [2105ba3] - Correct busted link in README [be6b5b9] ### Maintenance diff --git a/vendor/github.com/onsi/ginkgo/v2/README.md b/vendor/github.com/onsi/ginkgo/v2/README.md index eda4116a9..d0473a467 100644 --- a/vendor/github.com/onsi/ginkgo/v2/README.md +++ b/vendor/github.com/onsi/ginkgo/v2/README.md @@ -4,11 +4,7 @@ --- -# Ginkgo 2.0 is now Generally Available! - -You can learn more about 2.0 in the [Migration Guide](https://onsi.github.io/ginkgo/MIGRATING_TO_V2)! - ---- +# Ginkgo Ginkgo is a mature testing framework for Go designed to help you write expressive specs. Ginkgo builds on top of Go's `testing` foundation and is complemented by the [Gomega](https://github.com/onsi/gomega) matcher library. Together, Ginkgo and Gomega let you express the intent behind your specs clearly: @@ -33,53 +29,53 @@ Describe("Checking books out of the library", Label("library"), func() { }) When("the library has the book in question", func() { - BeforeEach(func() { - Expect(library.Store(book)).To(Succeed()) + BeforeEach(func(ctx SpecContext) { + Expect(library.Store(ctx, book)).To(Succeed()) }) Context("and the book is available", func() { - It("lends it to the reader", func() { - Expect(valjean.Checkout(library, "Les Miserables")).To(Succeed()) + It("lends it to the reader", func(ctx SpecContext) { + Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed()) Expect(valjean.Books()).To(ContainElement(book)) - Expect(library.UserWithBook(book)).To(Equal(valjean)) - }) + Expect(library.UserWithBook(ctx, book)).To(Equal(valjean)) + }, SpecTimeout(time.Second * 5)) }) Context("but the book has already been checked out", func() { var javert *users.User - BeforeEach(func() { + BeforeEach(func(ctx SpecContext) { javert = users.NewUser("Javert") - Expect(javert.Checkout(library, "Les Miserables")).To(Succeed()) + Expect(javert.Checkout(ctx, library, "Les Miserables")).To(Succeed()) }) - It("tells the user", func() { - err := valjean.Checkout(library, "Les Miserables") + It("tells the user", func(ctx SpecContext) { + err := valjean.Checkout(ctx, library, "Les Miserables") Expect(error).To(MatchError("Les Miserables is currently checked out")) - }) + }, SpecTimeout(time.Second * 5)) - It("lets the user place a hold and get notified later", func() { - Expect(valjean.Hold(library, "Les Miserables")).To(Succeed()) - Expect(valjean.Holds()).To(ContainElement(book)) + It("lets the user place a hold and get notified later", func(ctx SpecContext) { + Expect(valjean.Hold(ctx, library, "Les Miserables")).To(Succeed()) + Expect(valjean.Holds(ctx)).To(ContainElement(book)) By("when Javert returns the book") - Expect(javert.Return(library, book)).To(Succeed()) + Expect(javert.Return(ctx, library, book)).To(Succeed()) By("it eventually informs Valjean") notification := "Les Miserables is ready for pick up" - Eventually(valjean.Notifications).Should(ContainElement(notification)) + Eventually(ctx, valjean.Notifications).Should(ContainElement(notification)) - Expect(valjean.Checkout(library, "Les Miserables")).To(Succeed()) - Expect(valjean.Books()).To(ContainElement(book)) - 
Expect(valjean.Holds()).To(BeEmpty()) - }) + Expect(valjean.Checkout(ctx, library, "Les Miserables")).To(Succeed()) + Expect(valjean.Books(ctx)).To(ContainElement(book)) + Expect(valjean.Holds(ctx)).To(BeEmpty()) + }, SpecTimeout(time.Second * 10)) }) }) When("the library does not have the book in question", func() { - It("tells the reader the book is unavailable", func() { - err := valjean.Checkout(library, "Les Miserables") + It("tells the reader the book is unavailable", func(ctx SpecContext) { + err := valjean.Checkout(ctx, library, "Les Miserables") Expect(error).To(MatchError("Les Miserables is not in the library catalog")) - }) + }, SpecTimeout(time.Second * 5)) }) }) ``` @@ -92,7 +88,7 @@ If you have a question, comment, bug report, feature request, etc. please open a Whether writing basic unit specs, complex integration specs, or even performance specs - Ginkgo gives you an expressive Domain-Specific Language (DSL) that will be familiar to users coming from frameworks such as [Quick](https://github.com/Quick/Quick), [RSpec](https://rspec.info), [Jasmine](https://jasmine.github.io), and [Busted](https://lunarmodules.github.io/busted/). This style of testing is sometimes referred to as "Behavior-Driven Development" (BDD) though Ginkgo's utility extends beyond acceptance-level testing. -With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs) +With Ginkgo's DSL you can use nestable [`Describe`, `Context` and `When` container nodes](https://onsi.github.io/ginkgo/#organizing-specs-with-container-nodes) to help you organize your specs. [`BeforeEach` and `AfterEach` setup nodes](https://onsi.github.io/ginkgo/#extracting-common-setup-beforeeach) for setup and cleanup. [`It` and `Specify` subject nodes](https://onsi.github.io/ginkgo/#spec-subjects-it) that hold your assertions. [`BeforeSuite` and `AfterSuite` nodes](https://onsi.github.io/ginkgo/#suite-setup-and-cleanup-beforesuite-and-aftersuite) to prep for and cleanup after a suite... and [much more!](https://onsi.github.io/ginkgo/#writing-specs). At runtime, Ginkgo can run your specs in reproducibly [random order](https://onsi.github.io/ginkgo/#spec-randomization) and has sophisticated support for [spec parallelization](https://onsi.github.io/ginkgo/#spec-parallelization). In fact, running specs in parallel is as easy as @@ -100,7 +96,7 @@ At runtime, Ginkgo can run your specs in reproducibly [random order](https://ons ginkgo -p ``` -By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly. +By following [established patterns for writing parallel specs](https://onsi.github.io/ginkgo/#patterns-for-parallel-integration-specs) you can build even large, complex integration suites that parallelize cleanly and run performantly. 
And you don't have to worry about your spec suite hanging or leaving a mess behind - Ginkgo provides a per-node `context.Context` and the capability to interrupt the spec after a set period of time - and then clean up. As your suites grow Ginkgo helps you keep your specs organized with [labels](https://onsi.github.io/ginkgo/#spec-labels) and lets you easily run [subsets of specs](https://onsi.github.io/ginkgo/#filtering-specs), either [programmatically](https://onsi.github.io/ginkgo/#focused-specs) or on the [command line](https://onsi.github.io/ginkgo/#combining-filters). And Ginkgo's reporting infrastructure generates machine-readable output in a [variety of formats](https://onsi.github.io/ginkgo/#generating-machine-readable-reports) _and_ allows you to build your own [custom reporting infrastructure](https://onsi.github.io/ginkgo/#generating-reports-programmatically). diff --git a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go index 4ea63b84e..a244bdc18 100644 --- a/vendor/github.com/onsi/ginkgo/v2/core_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/core_dsl.go @@ -21,7 +21,6 @@ import ( "os" "path/filepath" "strings" - "time" "github.com/go-logr/logr" "github.com/onsi/ginkgo/v2/formatter" @@ -93,11 +92,11 @@ type GinkgoWriterInterface interface { } /* -SpecContext is the context object passed into nodes that are subject to a timeout or need to be notified of an interrupt. It implements the standard context.Context interface but also contains additional helpers to provide an extensibility point for Ginkgo. (As an example, Gomega's Eventually can use the methods defined on SpecContext to provide deeper integratoin with Ginkgo). +SpecContext is the context object passed into nodes that are subject to a timeout or need to be notified of an interrupt. It implements the standard context.Context interface but also contains additional helpers to provide an extensibility point for Ginkgo. (As an example, Gomega's Eventually can use the methods defined on SpecContext to provide deeper integration with Ginkgo). You can do anything with SpecContext that you do with a typical context.Context including wrapping it with any of the context.With* methods. -Ginkgo will cancel the SpecContext when a node is interrupted (e.g. by the user sending an interupt signal) or when a node has exceeded it's allowed run-time. Note, however, that even in cases where a node has a deadline, SpecContext will not return a deadline via .Deadline(). This is because Ginkgo does not use a WithDeadline() context to model node deadlines as Ginkgo needs control over the precise timing of the context cancellation to ensure it can provide an accurate progress report at the moment of cancellation. +Ginkgo will cancel the SpecContext when a node is interrupted (e.g. by the user sending an interrupt signal) or when a node has exceeded its allowed run-time. Note, however, that even in cases where a node has a deadline, SpecContext will not return a deadline via .Deadline(). This is because Ginkgo does not use a WithDeadline() context to model node deadlines as Ginkgo needs control over the precise timing of the context cancellation to ensure it can provide an accurate progress report at the moment of cancellation. */ type SpecContext = internal.SpecContext @@ -164,6 +163,29 @@ func GinkgoParallelProcess() int { return suiteConfig.ParallelProcess } +/* +GinkgoHelper marks the function it's called in as a test helper. 
When a failure occurs inside a helper function, Ginkgo will skip the helper when analyzing the stack trace to identify where the failure occurred.
+
+This is a simpler alternative to passing in a skip offset when calling Fail or using Gomega.
+*/
+func GinkgoHelper() {
+	types.MarkAsHelper(1)
+}
+
+/*
+GinkgoLabelFilter() returns the label filter configured for this suite via `--label-filter`.
+
+You can use this to manually check if a set of labels would satisfy the filter via:
+
+	if Label("cat", "dog").MatchesLabelFilter(GinkgoLabelFilter()) {
+		//...
+	}
+*/
+func GinkgoLabelFilter() string {
+	suiteConfig, _ := GinkgoConfiguration()
+	return suiteConfig.LabelFilter
+}
+
 /*
 PauseOutputInterception() pauses Ginkgo's output interception. This is only relevant
 when running in parallel and output to stdout/stderr is being intercepted. You generally
@@ -276,7 +298,7 @@ func RunSpecs(t GinkgoTestingT, description string, args ...interface{}) bool {
 	}
 
 	writer := GinkgoWriter.(*internal.Writer)
-	if reporterConfig.Verbose && suiteConfig.ParallelTotal == 1 {
+	if reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) && suiteConfig.ParallelTotal == 1 {
 		writer.SetMode(internal.WriterModeStreamAndBuffer)
 	} else {
 		writer.SetMode(internal.WriterModeBufferOnly)
@@ -370,6 +392,12 @@ func AbortSuite(message string, callerSkip ...int) {
 	panic(types.GinkgoErrors.UncaughtGinkgoPanic(cl))
 }
 
+/*
+ignorablePanic is used by Gomega to signal to GinkgoRecover that Gomega is handling
+the error associated with this panic. It is used when Eventually/Consistently are passed a func(g Gomega) and the resulting function launches a goroutine that makes a failed assertion. That failed assertion is registered by Gomega and then panics. Ordinarily the panic is captured by Gomega. In the case of a goroutine Gomega can't capture the panic - so we piggyback on GinkgoRecover so users have a single defer GinkgoRecover() pattern to follow. To do that we need to tell Ginkgo to ignore this panic and not register it as a panic on the global Failer.
+*/
+type ignorablePanic interface{ GinkgoRecoverShouldIgnoreThisPanic() }
+
 /*
 GinkgoRecover should be deferred at the top of any spawned goroutine that (may) call `Fail`
 Since Gomega assertions call fail, you should throw a `defer GinkgoRecover()` at the top of any goroutine that
@@ -385,6 +413,9 @@ You can learn more about how Ginkgo manages failures here: https://onsi.github.i
 func GinkgoRecover() {
 	e := recover()
 	if e != nil {
+		if _, ok := e.(ignorablePanic); ok {
+			return
+		}
 		global.Failer.Panic(types.NewCodeLocationWithStackTrace(1), e)
 	}
 }
@@ -509,35 +540,11 @@ and will simply log the passed in text to the GinkgoWriter. If By is handed a f
 
 By will also generate and attach a ReportEntry to the spec. This will ensure that By annotations appear in Ginkgo's machine-readable reports.
-Note that By does not generate a new Ginkgo node - rather it is simply synctactic sugar around GinkgoWriter and AddReportEntry +Note that By does not generate a new Ginkgo node - rather it is simply syntactic sugar around GinkgoWriter and AddReportEntry You can learn more about By here: https://onsi.github.io/ginkgo/#documenting-complex-specs-by */ func By(text string, callback ...func()) { - if !global.Suite.InRunPhase() { - exitIfErr(types.GinkgoErrors.ByNotDuringRunPhase(types.NewCodeLocation(1))) - } - value := struct { - Text string - Duration time.Duration - }{ - Text: text, - } - t := time.Now() - global.Suite.SetProgressStepCursor(internal.ProgressStepCursor{ - Text: text, - CodeLocation: types.NewCodeLocation(1), - StartTime: t, - }) - AddReportEntry("By Step", ReportEntryVisibilityNever, Offset(1), &value, t) - formatter := formatter.NewWithNoColorBool(reporterConfig.NoColor) - GinkgoWriter.Println(formatter.F("{{bold}}STEP:{{/}} %s {{gray}}%s{{/}}", text, t.Format(types.GINKGO_TIME_FORMAT))) - if len(callback) == 1 { - callback[0]() - value.Duration = time.Since(t) - } - if len(callback) > 1 { - panic("just one callback per By, please") - } + exitIfErr(global.Suite.By(text, callback...)) } /* @@ -736,7 +743,7 @@ For example: os.SetEnv("FOO", "BAR") }) -will register a cleanup handler that will set the environment variable "FOO" to it's current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec. +will register a cleanup handler that will set the environment variable "FOO" to its current value (obtained by os.GetEnv("FOO")) after the spec runs and then sets the environment variable "FOO" to "BAR" for the current spec. Similarly: @@ -764,3 +771,24 @@ func DeferCleanup(args ...interface{}) { } pushNode(internal.NewCleanupNode(deprecationTracker, fail, args...)) } + +/* +AttachProgressReporter allows you to register a function that will be called whenever Ginkgo generates a Progress Report. The contents returned by the function will be included in the report. + +**This is an experimental feature and the public-facing interface may change in a future minor version of Ginkgo** + +Progress Reports are generated: +- whenever the user explicitly requests one (via `SIGINFO` or `SIGUSR1`) +- on nodes decorated with PollProgressAfter +- on suites run with --poll-progress-after +- whenever a test times out + +Ginkgo uses Progress Reports to convey the current state of the test suite, including any running goroutines. By attaching a progress reporter you are able to supplement these reports with additional information. + +# AttachProgressReporter returns a function that can be called to detach the progress reporter + +You can learn more about AttachProgressReporter here: https://onsi.github.io/ginkgo/#attaching-additional-information-to-progress-reports +*/ +func AttachProgressReporter(reporter func() string) func() { + return global.Suite.AttachProgressReporter(reporter) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go index e43d9cbbb..c65af4ce1 100644 --- a/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/decorator_dsl.go @@ -46,7 +46,7 @@ const Pending = internal.Pending /* Serial is a decorator that allows you to mark a spec or container as serial. These specs will never run in parallel with other specs. -Tests in ordered containers cannot be marked as serial - mark the ordered container instead. 
+Specs in ordered containers cannot be marked as serial - mark the ordered container instead.
 
 You can learn more here: https://onsi.github.io/ginkgo/#serial-specs
 You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
@@ -54,7 +54,7 @@ You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorat
 */
 const Serial = internal.Serial
 
 /*
-Ordered is a decorator that allows you to mark a container as ordered. Tests in the container will always run in the order they appear.
+Ordered is a decorator that allows you to mark a container as ordered. Specs in the container will always run in the order they appear.
 They will never be randomized and they will never run in parallel with one another, though they may run in parallel with other specs.
 
 You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
@@ -62,6 +62,16 @@ You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorat
 */
 const Ordered = internal.Ordered
 
+/*
+ContinueOnFailure is a decorator that allows you to mark an Ordered container to continue running specs even if failures occur. Ordinarily an ordered container will stop running specs after the first failure occurs. Note that if a BeforeAll or a BeforeEach/JustBeforeEach annotated with OncePerOrdered fails then no specs will run, as the precondition for the Ordered container will be considered to have failed.
+
+ContinueOnFailure only applies to the outermost Ordered container. Attempting to place ContinueOnFailure in a nested container will result in an error.
+
+You can learn more here: https://onsi.github.io/ginkgo/#ordered-containers
+You can learn more about decorators here: https://onsi.github.io/ginkgo/#decorator-reference
+*/
+const ContinueOnFailure = internal.ContinueOnFailure
+
 /*
 OncePerOrdered is a decorator that allows you to mark outer BeforeEach, AfterEach, JustBeforeEach, and JustAfterEach setup nodes to run once per ordered context. Normally these setup nodes run around each individual spec, with OncePerOrdered they will run once around the set of specs in an ordered container.
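As a rough sketch of how these decorators compose (the container and spec names are illustrative):

```go
package books_test

import . "github.com/onsi/ginkgo/v2"

var _ = Describe("bulk import", Ordered, ContinueOnFailure, func() {
	BeforeAll(func() {
		// Runs once, before the first spec. If this fails, no specs run:
		// ContinueOnFailure does not rescue a failed container precondition.
	})

	It("imports the first batch", func() { /* ... */ })

	It("imports the second batch", func() {
		// With ContinueOnFailure this spec still runs even if the first
		// batch failed; without the decorator it would be skipped.
	})
})
```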
diff --git a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go index 43b16211d..743555dde 100644 --- a/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go +++ b/vendor/github.com/onsi/ginkgo/v2/formatter/formatter.go @@ -4,6 +4,7 @@ import ( "fmt" "os" "regexp" + "strconv" "strings" ) @@ -50,6 +51,37 @@ func NewWithNoColorBool(noColor bool) Formatter { } func New(colorMode ColorMode) Formatter { + colorAliases := map[string]int{ + "black": 0, + "red": 1, + "green": 2, + "yellow": 3, + "blue": 4, + "magenta": 5, + "cyan": 6, + "white": 7, + } + for colorAlias, n := range colorAliases { + colorAliases[fmt.Sprintf("bright-%s", colorAlias)] = n + 8 + } + + getColor := func(color, defaultEscapeCode string) string { + color = strings.ToUpper(strings.ReplaceAll(color, "-", "_")) + envVar := fmt.Sprintf("GINKGO_CLI_COLOR_%s", color) + envVarColor := os.Getenv(envVar) + if envVarColor == "" { + return defaultEscapeCode + } + if colorCode, ok := colorAliases[envVarColor]; ok { + return fmt.Sprintf("\x1b[38;5;%dm", colorCode) + } + colorCode, err := strconv.Atoi(envVarColor) + if err != nil || colorCode < 0 || colorCode > 255 { + return defaultEscapeCode + } + return fmt.Sprintf("\x1b[38;5;%dm", colorCode) + } + f := Formatter{ ColorMode: colorMode, colors: map[string]string{ @@ -57,18 +89,18 @@ func New(colorMode ColorMode) Formatter { "bold": "\x1b[1m", "underline": "\x1b[4m", - "red": "\x1b[38;5;9m", - "orange": "\x1b[38;5;214m", - "coral": "\x1b[38;5;204m", - "magenta": "\x1b[38;5;13m", - "green": "\x1b[38;5;10m", - "dark-green": "\x1b[38;5;28m", - "yellow": "\x1b[38;5;11m", - "light-yellow": "\x1b[38;5;228m", - "cyan": "\x1b[38;5;14m", - "gray": "\x1b[38;5;243m", - "light-gray": "\x1b[38;5;246m", - "blue": "\x1b[38;5;12m", + "red": getColor("red", "\x1b[38;5;9m"), + "orange": getColor("orange", "\x1b[38;5;214m"), + "coral": getColor("coral", "\x1b[38;5;204m"), + "magenta": getColor("magenta", "\x1b[38;5;13m"), + "green": getColor("green", "\x1b[38;5;10m"), + "dark-green": getColor("dark-green", "\x1b[38;5;28m"), + "yellow": getColor("yellow", "\x1b[38;5;11m"), + "light-yellow": getColor("light-yellow", "\x1b[38;5;228m"), + "cyan": getColor("cyan", "\x1b[38;5;14m"), + "gray": getColor("gray", "\x1b[38;5;243m"), + "light-gray": getColor("light-gray", "\x1b[38;5;246m"), + "blue": getColor("blue", "\x1b[38;5;12m"), }, } colors := []string{} @@ -88,7 +120,10 @@ func (f Formatter) Fi(indentation uint, format string, args ...interface{}) stri } func (f Formatter) Fiw(indentation uint, maxWidth uint, format string, args ...interface{}) string { - out := fmt.Sprintf(f.style(format), args...) + out := f.style(format) + if len(args) > 0 { + out = fmt.Sprintf(out, args...) 
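+		// Interpolate only when args are supplied; otherwise the styled text
+		// (which may contain literal '%' runes) passes through untouched.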
+ } if indentation == 0 && maxWidth == 0 { return out diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go new file mode 100644 index 000000000..5db5d1a7b --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/build/build_command.go @@ -0,0 +1,63 @@ +package build + +import ( + "fmt" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildBuildCommand() command.Command { + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildBuildCommandFlagSet(&cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "build", + Flags: flags, + Usage: "ginkgo build ", + ShortDoc: "Build the passed in (or the package in the current directory if left blank).", + DocLink: "precompiling-suites", + Command: func(args []string, _ []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + buildSpecs(args, cliConfig, goFlagsConfig) + }, + } +} + +func buildSpecs(args []string, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) { + suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + internal.VerifyCLIAndFrameworkVersion(suites) + + opc := internal.NewOrderedParallelCompiler(cliConfig.ComputedNumCompilers()) + opc.StartCompiling(suites, goFlagsConfig) + + for { + suiteIdx, suite := opc.Next() + if suiteIdx >= len(suites) { + break + } + suites[suiteIdx] = suite + if suite.State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suite.CompilationError.Error()) + } else { + fmt.Printf("Compiled %s.test\n", suite.PackageName) + } + } + + if suites.CountWithState(internal.TestSuiteStateFailedToCompile) > 0 { + command.AbortWith("Failed to compile all tests") + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go new file mode 100644 index 000000000..2efd28608 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/abort.go @@ -0,0 +1,61 @@ +package command + +import "fmt" + +type AbortDetails struct { + ExitCode int + Error error + EmitUsage bool +} + +func Abort(details AbortDetails) { + panic(details) +} + +func AbortGracefullyWith(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 0, + Error: fmt.Errorf(format, args...), + EmitUsage: false, + }) +} + +func AbortWith(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf(format, args...), + EmitUsage: false, + }) +} + +func AbortWithUsage(format string, args ...interface{}) { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf(format, args...), + EmitUsage: true, + }) +} + +func AbortIfError(preamble string, err error) { + if err != nil { + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf("%s\n%s", preamble, err.Error()), + EmitUsage: false, + }) + } +} + +func AbortIfErrors(preamble string, errors []error) { + if len(errors) > 0 { + out := "" + for _, err := range errors { + out += err.Error() + } + Abort(AbortDetails{ + ExitCode: 1, + Error: fmt.Errorf("%s\n%s", preamble, out), + EmitUsage: false, + }) + } +} diff 
--git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go new file mode 100644 index 000000000..12e0e5659 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/command.go @@ -0,0 +1,50 @@ +package command + +import ( + "fmt" + "io" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type Command struct { + Name string + Flags types.GinkgoFlagSet + Usage string + ShortDoc string + Documentation string + DocLink string + Command func(args []string, additionalArgs []string) +} + +func (c Command) Run(args []string, additionalArgs []string) { + args, err := c.Flags.Parse(args) + if err != nil { + AbortWithUsage(err.Error()) + } + + c.Command(args, additionalArgs) +} + +func (c Command) EmitUsage(writer io.Writer) { + fmt.Fprintln(writer, formatter.F("{{bold}}"+c.Usage+"{{/}}")) + fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(c.Usage)))) + if c.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.ShortDoc)) + fmt.Fprintln(writer, "") + } + if c.Documentation != "" { + fmt.Fprintln(writer, formatter.Fiw(0, formatter.COLS, c.Documentation)) + fmt.Fprintln(writer, "") + } + if c.DocLink != "" { + fmt.Fprintln(writer, formatter.Fi(0, "{{bold}}Learn more at:{{/}} {{cyan}}{{underline}}http://onsi.github.io/ginkgo/#%s{{/}}", c.DocLink)) + fmt.Fprintln(writer, "") + } + flagUsage := c.Flags.Usage() + if flagUsage != "" { + fmt.Fprintf(writer, formatter.F(flagUsage)) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go new file mode 100644 index 000000000..88dd8d6b0 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/command/program.go @@ -0,0 +1,182 @@ +package command + +import ( + "fmt" + "io" + "os" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +type Program struct { + Name string + Heading string + Commands []Command + DefaultCommand Command + DeprecatedCommands []DeprecatedCommand + + //For testing - leave as nil in production + OutWriter io.Writer + ErrWriter io.Writer + Exiter func(code int) +} + +type DeprecatedCommand struct { + Name string + Deprecation types.Deprecation +} + +func (p Program) RunAndExit(osArgs []string) { + var command Command + deprecationTracker := types.NewDeprecationTracker() + if p.Exiter == nil { + p.Exiter = os.Exit + } + if p.OutWriter == nil { + p.OutWriter = formatter.ColorableStdOut + } + if p.ErrWriter == nil { + p.ErrWriter = formatter.ColorableStdErr + } + + defer func() { + exitCode := 0 + + if r := recover(); r != nil { + details, ok := r.(AbortDetails) + if !ok { + panic(r) + } + + if details.Error != nil { + fmt.Fprintln(p.ErrWriter, formatter.F("{{red}}{{bold}}%s %s{{/}} {{red}}failed{{/}}", p.Name, command.Name)) + fmt.Fprintln(p.ErrWriter, formatter.Fi(1, details.Error.Error())) + } + if details.EmitUsage { + if details.Error != nil { + fmt.Fprintln(p.ErrWriter, "") + } + command.EmitUsage(p.ErrWriter) + } + exitCode = details.ExitCode + } + + command.Flags.ValidateDeprecations(deprecationTracker) + if deprecationTracker.DidTrackDeprecations() { + fmt.Fprintln(p.ErrWriter, deprecationTracker.DeprecationsReport()) + } + p.Exiter(exitCode) + return + }() + + args, additionalArgs := []string{}, []string{} + + foundDelimiter := false + for _, arg := range osArgs[1:] { + if !foundDelimiter { + if arg == "--" { + foundDelimiter = true + 
continue + } + } + + if foundDelimiter { + additionalArgs = append(additionalArgs, arg) + } else { + args = append(args, arg) + } + } + + command = p.DefaultCommand + if len(args) > 0 { + p.handleHelpRequestsAndExit(p.OutWriter, args) + if command.Name == args[0] { + args = args[1:] + } else { + for _, deprecatedCommand := range p.DeprecatedCommands { + if deprecatedCommand.Name == args[0] { + deprecationTracker.TrackDeprecation(deprecatedCommand.Deprecation) + return + } + } + for _, tryCommand := range p.Commands { + if tryCommand.Name == args[0] { + command, args = tryCommand, args[1:] + break + } + } + } + } + + command.Run(args, additionalArgs) +} + +func (p Program) handleHelpRequestsAndExit(writer io.Writer, args []string) { + if len(args) == 0 { + return + } + + matchesHelpFlag := func(args ...string) bool { + for _, arg := range args { + if arg == "--help" || arg == "-help" || arg == "-h" || arg == "--h" { + return true + } + } + return false + } + if len(args) == 1 { + if args[0] == "help" || matchesHelpFlag(args[0]) { + p.EmitUsage(writer) + Abort(AbortDetails{}) + } + } else { + var name string + if args[0] == "help" || matchesHelpFlag(args[0]) { + name = args[1] + } else if matchesHelpFlag(args[1:]...) { + name = args[0] + } else { + return + } + + if p.DefaultCommand.Name == name || p.Name == name { + p.DefaultCommand.EmitUsage(writer) + Abort(AbortDetails{}) + } + for _, command := range p.Commands { + if command.Name == name { + command.EmitUsage(writer) + Abort(AbortDetails{}) + } + } + + fmt.Fprintln(writer, formatter.F("{{red}}Unknown Command: {{bold}}%s{{/}}", name)) + fmt.Fprintln(writer, "") + p.EmitUsage(writer) + Abort(AbortDetails{ExitCode: 1}) + } + return +} + +func (p Program) EmitUsage(writer io.Writer) { + fmt.Fprintln(writer, formatter.F(p.Heading)) + fmt.Fprintln(writer, formatter.F("{{gray}}%s{{/}}", strings.Repeat("-", len(p.Heading)))) + fmt.Fprintln(writer, formatter.F("For usage information for a command, run {{bold}}%s help COMMAND{{/}}.", p.Name)) + fmt.Fprintln(writer, formatter.F("For usage information for the default command, run {{bold}}%s help %s{{/}} or {{bold}}%s help %s{{/}}.", p.Name, p.Name, p.Name, p.DefaultCommand.Name)) + fmt.Fprintln(writer, "") + fmt.Fprintln(writer, formatter.F("The following commands are available:")) + + fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} or %s {{bold}}%s{{/}} - {{gray}}%s{{/}}", p.Name, p.Name, p.DefaultCommand.Name, p.DefaultCommand.Usage)) + if p.DefaultCommand.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fi(2, p.DefaultCommand.ShortDoc)) + } + + for _, command := range p.Commands { + fmt.Fprintln(writer, formatter.Fi(1, "{{bold}}%s{{/}} - {{gray}}%s{{/}}", command.Name, command.Usage)) + if command.ShortDoc != "" { + fmt.Fprintln(writer, formatter.Fi(2, command.ShortDoc)) + } + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go new file mode 100644 index 000000000..a367a1fc9 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/boostrap_templates.go @@ -0,0 +1,48 @@ +package generators + +var bootstrapText = `package {{.Package}} + +import ( + "testing" + + {{.GinkgoImport}} + {{.GomegaImport}} +) + +func Test{{.FormattedName}}(t *testing.T) { + {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail) + {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite") +} +` + +var agoutiBootstrapText = `package {{.Package}} + +import ( + "testing" + + 
{{.GinkgoImport}} + {{.GomegaImport}} + "github.com/sclevine/agouti" +) + +func Test{{.FormattedName}}(t *testing.T) { + {{.GomegaPackage}}RegisterFailHandler({{.GinkgoPackage}}Fail) + {{.GinkgoPackage}}RunSpecs(t, "{{.FormattedName}} Suite") +} + +var agoutiDriver *agouti.WebDriver + +var _ = {{.GinkgoPackage}}BeforeSuite(func() { + // Choose a WebDriver: + + agoutiDriver = agouti.PhantomJS() + // agoutiDriver = agouti.Selenium() + // agoutiDriver = agouti.ChromeDriver() + + {{.GomegaPackage}}Expect(agoutiDriver.Start()).To({{.GomegaPackage}}Succeed()) +}) + +var _ = {{.GinkgoPackage}}AfterSuite(func() { + {{.GomegaPackage}}Expect(agoutiDriver.Stop()).To({{.GomegaPackage}}Succeed()) +}) +` diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go new file mode 100644 index 000000000..73aff0b7a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/bootstrap_command.go @@ -0,0 +1,133 @@ +package generators + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "text/template" + + sprig "github.com/go-task/slim-sprig" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildBootstrapCommand() command.Command { + conf := GeneratorsConfig{} + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "agouti", KeyPath: "Agouti", + Usage: "If set, bootstrap will generate a bootstrap file for writing Agouti tests"}, + {Name: "nodot", KeyPath: "NoDot", + Usage: "If set, bootstrap will generate a bootstrap test file that does not dot-import ginkgo and gomega"}, + {Name: "internal", KeyPath: "Internal", + Usage: "If set, bootstrap will generate a bootstrap test file that uses the regular package name (i.e. `package X`, not `package X_test`)"}, + {Name: "template", KeyPath: "CustomTemplate", + UsageArgument: "template-file", + Usage: "If specified, generate will use the contents of the file passed as the bootstrap template"}, + {Name: "template-data", KeyPath: "CustomTemplateData", + UsageArgument: "template-data-file", + Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the bootstrap template"}, + }, + &conf, + types.GinkgoFlagSections{}, + ) + + if err != nil { + panic(err) + } + + return command.Command{ + Name: "bootstrap", + Usage: "ginkgo bootstrap", + ShortDoc: "Bootstrap a test suite for the current package", + Documentation: `Tests written in Ginkgo and Gomega require a small amount of boilerplate to hook into Go's testing infrastructure. + +{{bold}}ginkgo bootstrap{{/}} generates this boilerplate for you in a file named X_suite_test.go where X is the name of the package under test.`, + DocLink: "generators", + Flags: flags, + Command: func(_ []string, _ []string) { + generateBootstrap(conf) + }, + } +} + +type bootstrapData struct { + Package string + FormattedName string + + GinkgoImport string + GomegaImport string + GinkgoPackage string + GomegaPackage string + CustomData map[string]any +} + +func generateBootstrap(conf GeneratorsConfig) { + packageName, bootstrapFilePrefix, formattedName := getPackageAndFormattedName() + + data := bootstrapData{ + Package: determinePackageName(packageName, conf.Internal), + FormattedName: formattedName, + + GinkgoImport: `. "github.com/onsi/ginkgo/v2"`, + GomegaImport: `. 
"github.com/onsi/gomega"`, + GinkgoPackage: "", + GomegaPackage: "", + } + + if conf.NoDot { + data.GinkgoImport = `"github.com/onsi/ginkgo/v2"` + data.GomegaImport = `"github.com/onsi/gomega"` + data.GinkgoPackage = `ginkgo.` + data.GomegaPackage = `gomega.` + } + + targetFile := fmt.Sprintf("%s_suite_test.go", bootstrapFilePrefix) + if internal.FileExists(targetFile) { + command.AbortWith("{{bold}}%s{{/}} already exists", targetFile) + } else { + fmt.Printf("Generating ginkgo test suite bootstrap for %s in:\n\t%s\n", packageName, targetFile) + } + + f, err := os.Create(targetFile) + command.AbortIfError("Failed to create file:", err) + defer f.Close() + + var templateText string + if conf.CustomTemplate != "" { + tpl, err := os.ReadFile(conf.CustomTemplate) + command.AbortIfError("Failed to read custom bootstrap file:", err) + templateText = string(tpl) + if conf.CustomTemplateData != "" { + var tplCustomDataMap map[string]any + tplCustomData, err := os.ReadFile(conf.CustomTemplateData) + command.AbortIfError("Failed to read custom boostrap data file:", err) + if !json.Valid([]byte(tplCustomData)) { + command.AbortWith("Invalid JSON object in custom data file.") + } + //create map from the custom template data + json.Unmarshal(tplCustomData, &tplCustomDataMap) + data.CustomData = tplCustomDataMap + } + } else if conf.Agouti { + templateText = agoutiBootstrapText + } else { + templateText = bootstrapText + } + + //Setting the option to explicitly fail if template is rendered trying to access missing key + bootstrapTemplate, err := template.New("bootstrap").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText) + command.AbortIfError("Failed to parse bootstrap template:", err) + + buf := &bytes.Buffer{} + //Being explicit about failing sooner during template rendering + //when accessing custom data rather than during the go fmt command + err = bootstrapTemplate.Execute(buf, data) + command.AbortIfError("Failed to render bootstrap template:", err) + + buf.WriteTo(f) + + internal.GoFmt(targetFile) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go new file mode 100644 index 000000000..48d23f919 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -0,0 +1,259 @@ +package generators + +import ( + "bytes" + "encoding/json" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + "text/template" + + sprig "github.com/go-task/slim-sprig" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildGenerateCommand() command.Command { + conf := GeneratorsConfig{} + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "agouti", KeyPath: "Agouti", + Usage: "If set, generate will create a test file for writing Agouti tests"}, + {Name: "nodot", KeyPath: "NoDot", + Usage: "If set, generate will create a test file that does not dot-import ginkgo and gomega"}, + {Name: "internal", KeyPath: "Internal", + Usage: "If set, generate will create a test file that uses the regular package name (i.e. 
`package X`, not `package X_test`)"}, + {Name: "template", KeyPath: "CustomTemplate", + UsageArgument: "template-file", + Usage: "If specified, generate will use the contents of the file passed as the test file template"}, + {Name: "template-data", KeyPath: "CustomTemplateData", + UsageArgument: "template-data-file", + Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"}, + }, + &conf, + types.GinkgoFlagSections{}, + ) + + if err != nil { + panic(err) + } + + return command.Command{ + Name: "generate", + Usage: "ginkgo generate ", + ShortDoc: "Generate a test file named _test.go", + Documentation: `If the optional argument is omitted, a file named after the package in the current directory will be created. + +You can pass multiple to generate multiple files simultaneously. The resulting files are named _test.go. + +You can also pass a of the form "file.go" and generate will emit "file_test.go".`, + DocLink: "generators", + Flags: flags, + Command: func(args []string, _ []string) { + generateTestFiles(conf, args) + }, + } +} + +type specData struct { + Package string + Subject string + PackageImportPath string + ImportPackage bool + + GinkgoImport string + GomegaImport string + GinkgoPackage string + GomegaPackage string + CustomData map[string]any +} + +func generateTestFiles(conf GeneratorsConfig, args []string) { + subjects := args + if len(subjects) == 0 { + subjects = []string{""} + } + for _, subject := range subjects { + generateTestFileForSubject(subject, conf) + } +} + +func generateTestFileForSubject(subject string, conf GeneratorsConfig) { + packageName, specFilePrefix, formattedName := getPackageAndFormattedName() + if subject != "" { + specFilePrefix = formatSubject(subject) + formattedName = prettifyName(specFilePrefix) + } + + if conf.Internal { + specFilePrefix = specFilePrefix + "_internal" + } + + data := specData{ + Package: determinePackageName(packageName, conf.Internal), + Subject: formattedName, + PackageImportPath: getPackageImportPath(), + ImportPackage: !conf.Internal, + + GinkgoImport: `. "github.com/onsi/ginkgo/v2"`, + GomegaImport: `. 
"github.com/onsi/gomega"`, + GinkgoPackage: "", + GomegaPackage: "", + } + + if conf.NoDot { + data.GinkgoImport = `"github.com/onsi/ginkgo/v2"` + data.GomegaImport = `"github.com/onsi/gomega"` + data.GinkgoPackage = `ginkgo.` + data.GomegaPackage = `gomega.` + } + + targetFile := fmt.Sprintf("%s_test.go", specFilePrefix) + if internal.FileExists(targetFile) { + command.AbortWith("{{bold}}%s{{/}} already exists", targetFile) + } else { + fmt.Printf("Generating ginkgo test for %s in:\n %s\n", data.Subject, targetFile) + } + + f, err := os.Create(targetFile) + command.AbortIfError("Failed to create test file:", err) + defer f.Close() + + var templateText string + if conf.CustomTemplate != "" { + tpl, err := os.ReadFile(conf.CustomTemplate) + command.AbortIfError("Failed to read custom template file:", err) + templateText = string(tpl) + if conf.CustomTemplateData != "" { + var tplCustomDataMap map[string]any + tplCustomData, err := os.ReadFile(conf.CustomTemplateData) + command.AbortIfError("Failed to read custom template data file:", err) + if !json.Valid([]byte(tplCustomData)) { + command.AbortWith("Invalid JSON object in custom data file.") + } + //create map from the custom template data + json.Unmarshal(tplCustomData, &tplCustomDataMap) + data.CustomData = tplCustomDataMap + } + } else if conf.Agouti { + templateText = agoutiSpecText + } else { + templateText = specText + } + + //Setting the option to explicitly fail if template is rendered trying to access missing key + specTemplate, err := template.New("spec").Funcs(sprig.TxtFuncMap()).Option("missingkey=error").Parse(templateText) + command.AbortIfError("Failed to read parse test template:", err) + + //Being explicit about failing sooner during template rendering + //when accessing custom data rather than during the go fmt command + err = specTemplate.Execute(f, data) + command.AbortIfError("Failed to render bootstrap template:", err) + internal.GoFmt(targetFile) +} + +func formatSubject(name string) string { + name = strings.ReplaceAll(name, "-", "_") + name = strings.ReplaceAll(name, " ", "_") + name = strings.Split(name, ".go")[0] + name = strings.Split(name, "_test")[0] + return name +} + +// moduleName returns module name from go.mod from given module root directory +func moduleName(modRoot string) string { + modFile, err := os.Open(filepath.Join(modRoot, "go.mod")) + if err != nil { + return "" + } + + mod := make([]byte, 128) + _, err = modFile.Read(mod) + if err != nil { + return "" + } + + slashSlash := []byte("//") + moduleStr := []byte("module") + + for len(mod) > 0 { + line := mod + mod = nil + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, mod = line[:i], line[i+1:] + } + if i := bytes.Index(line, slashSlash); i >= 0 { + line = line[:i] + } + line = bytes.TrimSpace(line) + if !bytes.HasPrefix(line, moduleStr) { + continue + } + line = line[len(moduleStr):] + n := len(line) + line = bytes.TrimSpace(line) + if len(line) == n || len(line) == 0 { + continue + } + + if line[0] == '"' || line[0] == '`' { + p, err := strconv.Unquote(string(line)) + if err != nil { + return "" // malformed quoted string or multiline module path + } + return p + } + + return string(line) + } + + return "" // missing module path +} + +func findModuleRoot(dir string) (root string) { + dir = filepath.Clean(dir) + + // Look for enclosing go.mod. 
+ for { + if fi, err := os.Stat(filepath.Join(dir, "go.mod")); err == nil && !fi.IsDir() { + return dir + } + d := filepath.Dir(dir) + if d == dir { + break + } + dir = d + } + return "" +} + +func getPackageImportPath() string { + workingDir, err := os.Getwd() + if err != nil { + panic(err.Error()) + } + + sep := string(filepath.Separator) + + // Try go.mod file first + modRoot := findModuleRoot(workingDir) + if modRoot != "" { + modName := moduleName(modRoot) + if modName != "" { + cd := strings.ReplaceAll(workingDir, modRoot, "") + cd = strings.ReplaceAll(cd, sep, "/") + return modName + cd + } + } + + // Fallback to GOPATH structure + paths := strings.Split(workingDir, sep+"src"+sep) + if len(paths) == 1 { + fmt.Printf("\nCouldn't identify package import path.\n\n\tginkgo generate\n\nMust be run within a package directory under $GOPATH/src/...\nYou're going to have to change UNKNOWN_PACKAGE_PATH in the generated file...\n\n") + return "UNKNOWN_PACKAGE_PATH" + } + return filepath.ToSlash(paths[len(paths)-1]) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go new file mode 100644 index 000000000..c3470adbf --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go @@ -0,0 +1,41 @@ +package generators + +var specText = `package {{.Package}} + +import ( + {{.GinkgoImport}} + {{.GomegaImport}} + + {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}} +) + +var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { + +}) +` + +var agoutiSpecText = `package {{.Package}} + +import ( + {{.GinkgoImport}} + {{.GomegaImport}} + "github.com/sclevine/agouti" + . "github.com/sclevine/agouti/matchers" + + {{if .ImportPackage}}"{{.PackageImportPath}}"{{end}} +) + +var _ = {{.GinkgoPackage}}Describe("{{.Subject}}", func() { + var page *agouti.Page + + {{.GinkgoPackage}}BeforeEach(func() { + var err error + page, err = agoutiDriver.NewPage() + {{.GomegaPackage}}Expect(err).NotTo({{.GomegaPackage}}HaveOccurred()) + }) + + {{.GinkgoPackage}}AfterEach(func() { + {{.GomegaPackage}}Expect(page.Destroy()).To({{.GomegaPackage}}Succeed()) + }) +}) +` diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go new file mode 100644 index 000000000..3046a4487 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go @@ -0,0 +1,64 @@ +package generators + +import ( + "go/build" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +type GeneratorsConfig struct { + Agouti, NoDot, Internal bool + CustomTemplate string + CustomTemplateData string +} + +func getPackageAndFormattedName() (string, string, string) { + path, err := os.Getwd() + command.AbortIfError("Could not get current working directory:", err) + + dirName := strings.ReplaceAll(filepath.Base(path), "-", "_") + dirName = strings.ReplaceAll(dirName, " ", "_") + + pkg, err := build.ImportDir(path, 0) + packageName := pkg.Name + if err != nil { + packageName = ensureLegalPackageName(dirName) + } + + formattedName := prettifyName(filepath.Base(path)) + return packageName, dirName, formattedName +} + +func ensureLegalPackageName(name string) string { + if name == "_" { + return "underscore" + } + if len(name) == 0 { + return "empty" + } + n, isDigitErr := strconv.Atoi(string(name[0])) + if isDigitErr == nil { + return []string{"zero", 
"one", "two", "three", "four", "five", "six", "seven", "eight", "nine"}[n] + name[1:] + } + return name +} + +func prettifyName(name string) string { + name = strings.ReplaceAll(name, "-", " ") + name = strings.ReplaceAll(name, "_", " ") + name = strings.Title(name) + name = strings.ReplaceAll(name, " ", "") + return name +} + +func determinePackageName(name string, internal bool) string { + if internal { + return name + } + + return name + "_test" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go new file mode 100644 index 000000000..86da7340d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/compile.go @@ -0,0 +1,161 @@ +package internal + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/types" +) + +func CompileSuite(suite TestSuite, goFlagsConfig types.GoFlagsConfig) TestSuite { + if suite.PathToCompiledTest != "" { + return suite + } + + suite.CompilationError = nil + + path, err := filepath.Abs(filepath.Join(suite.Path, suite.PackageName+".test")) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compute compilation target path:\n%s", err.Error()) + return suite + } + + ginkgoInvocationPath, _ := os.Getwd() + ginkgoInvocationPath, _ = filepath.Abs(ginkgoInvocationPath) + packagePath := suite.AbsPath() + pathToInvocationPath, err := filepath.Rel(packagePath, ginkgoInvocationPath) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to get relative path from package to the current working directory:\n%s", err.Error()) + return suite + } + args, err := types.GenerateGoTestCompileArgs(goFlagsConfig, path, "./", pathToInvocationPath) + if err != nil { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to generate go test compile flags:\n%s", err.Error()) + return suite + } + + cmd := exec.Command("go", args...) 
+ cmd.Dir = suite.Path + output, err := cmd.CombinedOutput() + if err != nil { + if len(output) > 0 { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s:\n\n%s", suite.PackageName, output) + } else { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s\n%s", suite.PackageName, err.Error()) + } + return suite + } + + if strings.Contains(string(output), "[no test files]") { + suite.State = TestSuiteStateSkippedDueToEmptyCompilation + return suite + } + + if len(output) > 0 { + fmt.Println(string(output)) + } + + if !FileExists(path) { + suite.State = TestSuiteStateFailedToCompile + suite.CompilationError = fmt.Errorf("Failed to compile %s:\nOutput file %s could not be found", suite.PackageName, path) + return suite + } + + suite.State = TestSuiteStateCompiled + suite.PathToCompiledTest = path + return suite +} + +func Cleanup(goFlagsConfig types.GoFlagsConfig, suites ...TestSuite) { + if goFlagsConfig.BinaryMustBePreserved() { + return + } + for _, suite := range suites { + if !suite.Precompiled { + os.Remove(suite.PathToCompiledTest) + } + } +} + +type parallelSuiteBundle struct { + suite TestSuite + compiled chan TestSuite +} + +type OrderedParallelCompiler struct { + mutex *sync.Mutex + stopped bool + numCompilers int + + idx int + numSuites int + completionChannels []chan TestSuite +} + +func NewOrderedParallelCompiler(numCompilers int) *OrderedParallelCompiler { + return &OrderedParallelCompiler{ + mutex: &sync.Mutex{}, + numCompilers: numCompilers, + } +} + +func (opc *OrderedParallelCompiler) StartCompiling(suites TestSuites, goFlagsConfig types.GoFlagsConfig) { + opc.stopped = false + opc.idx = 0 + opc.numSuites = len(suites) + opc.completionChannels = make([]chan TestSuite, opc.numSuites) + + toCompile := make(chan parallelSuiteBundle, opc.numCompilers) + for compiler := 0; compiler < opc.numCompilers; compiler++ { + go func() { + for bundle := range toCompile { + c, suite := bundle.compiled, bundle.suite + opc.mutex.Lock() + stopped := opc.stopped + opc.mutex.Unlock() + if !stopped { + suite = CompileSuite(suite, goFlagsConfig) + } + c <- suite + } + }() + } + + for idx, suite := range suites { + opc.completionChannels[idx] = make(chan TestSuite, 1) + toCompile <- parallelSuiteBundle{suite, opc.completionChannels[idx]} + if idx == 0 { //compile first suite serially + suite = <-opc.completionChannels[0] + opc.completionChannels[0] <- suite + } + } + + close(toCompile) +} + +func (opc *OrderedParallelCompiler) Next() (int, TestSuite) { + if opc.idx >= opc.numSuites { + return opc.numSuites, TestSuite{} + } + + idx := opc.idx + suite := <-opc.completionChannels[idx] + opc.idx = opc.idx + 1 + + return idx, suite +} + +func (opc *OrderedParallelCompiler) StopAndDrain() { + opc.mutex.Lock() + opc.stopped = true + opc.mutex.Unlock() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go new file mode 100644 index 000000000..bd3c6d028 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/profiles_and_reports.go @@ -0,0 +1,237 @@ +package internal + +import ( + "bytes" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strconv" + + "github.com/google/pprof/profile" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +func AbsPathForGeneratedAsset(assetName string, suite TestSuite, cliConfig types.CLIConfig, process int) string 
{ + suffix := "" + if process != 0 { + suffix = fmt.Sprintf(".%d", process) + } + if cliConfig.OutputDir == "" { + return filepath.Join(suite.AbsPath(), assetName+suffix) + } + outputDir, _ := filepath.Abs(cliConfig.OutputDir) + return filepath.Join(outputDir, suite.NamespacedName()+"_"+assetName+suffix) +} + +func FinalizeProfilesAndReportsForSuites(suites TestSuites, cliConfig types.CLIConfig, suiteConfig types.SuiteConfig, reporterConfig types.ReporterConfig, goFlagsConfig types.GoFlagsConfig) ([]string, error) { + messages := []string{} + suitesWithProfiles := suites.WithState(TestSuiteStatePassed, TestSuiteStateFailed) //anything else won't have actually run and generated a profile + + // merge cover profiles if need be + if goFlagsConfig.Cover && !cliConfig.KeepSeparateCoverprofiles { + coverProfiles := []string{} + for _, suite := range suitesWithProfiles { + if !suite.HasProgrammaticFocus { + coverProfiles = append(coverProfiles, AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0)) + } + } + + if len(coverProfiles) > 0 { + dst := goFlagsConfig.CoverProfile + if cliConfig.OutputDir != "" { + dst = filepath.Join(cliConfig.OutputDir, goFlagsConfig.CoverProfile) + } + err := MergeAndCleanupCoverProfiles(coverProfiles, dst) + if err != nil { + return messages, err + } + coverage, err := GetCoverageFromCoverProfile(dst) + if err != nil { + return messages, err + } + if coverage == 0 { + messages = append(messages, "composite coverage: [no statements]") + } else if suitesWithProfiles.AnyHaveProgrammaticFocus() { + messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements however some suites did not contribute because they included programatically focused specs", coverage)) + } else { + messages = append(messages, fmt.Sprintf("composite coverage: %.1f%% of statements", coverage)) + } + } else { + messages = append(messages, "no composite coverage computed: all suites included programatically focused specs") + } + } + + // copy binaries if need be + for _, suite := range suitesWithProfiles { + if goFlagsConfig.BinaryMustBePreserved() && cliConfig.OutputDir != "" { + src := suite.PathToCompiledTest + dst := filepath.Join(cliConfig.OutputDir, suite.NamespacedName()+".test") + if suite.Precompiled { + if err := CopyFile(src, dst); err != nil { + return messages, err + } + } else { + if err := os.Rename(src, dst); err != nil { + return messages, err + } + } + } + } + + type reportFormat struct { + ReportName string + GenerateFunc func(types.Report, string) error + MergeFunc func([]string, string) ([]string, error) + } + reportFormats := []reportFormat{} + if reporterConfig.JSONReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JSONReport, GenerateFunc: reporters.GenerateJSONReport, MergeFunc: reporters.MergeAndCleanupJSONReports}) + } + if reporterConfig.JUnitReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.JUnitReport, GenerateFunc: reporters.GenerateJUnitReport, MergeFunc: reporters.MergeAndCleanupJUnitReports}) + } + if reporterConfig.TeamcityReport != "" { + reportFormats = append(reportFormats, reportFormat{ReportName: reporterConfig.TeamcityReport, GenerateFunc: reporters.GenerateTeamcityReport, MergeFunc: reporters.MergeAndCleanupTeamcityReports}) + } + + // Generate reports for suites that failed to run + reportableSuites := suites.ThatAreGinkgoSuites() + for _, suite := range reportableSuites.WithState(TestSuiteStateFailedToCompile, 
TestSuiteStateFailedDueToTimeout, TestSuiteStateSkippedDueToPriorFailures, TestSuiteStateSkippedDueToEmptyCompilation) { + report := types.Report{ + SuitePath: suite.AbsPath(), + SuiteConfig: suiteConfig, + SuiteSucceeded: false, + } + switch suite.State { + case TestSuiteStateFailedToCompile: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, suite.CompilationError.Error()) + case TestSuiteStateFailedDueToTimeout: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, TIMEOUT_ELAPSED_FAILURE_REASON) + case TestSuiteStateSkippedDueToPriorFailures: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, PRIOR_FAILURES_FAILURE_REASON) + case TestSuiteStateSkippedDueToEmptyCompilation: + report.SpecialSuiteFailureReasons = append(report.SpecialSuiteFailureReasons, EMPTY_SKIP_FAILURE_REASON) + report.SuiteSucceeded = true + } + + for _, format := range reportFormats { + format.GenerateFunc(report, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0)) + } + } + + // Merge reports unless we've been asked to keep them separate + if !cliConfig.KeepSeparateReports { + for _, format := range reportFormats { + reports := []string{} + for _, suite := range reportableSuites { + reports = append(reports, AbsPathForGeneratedAsset(format.ReportName, suite, cliConfig, 0)) + } + dst := format.ReportName + if cliConfig.OutputDir != "" { + dst = filepath.Join(cliConfig.OutputDir, format.ReportName) + } + mergeMessages, err := format.MergeFunc(reports, dst) + messages = append(messages, mergeMessages...) + if err != nil { + return messages, err + } + } + } + + return messages, nil +} + +// loads each cover profile, combines them, deletes the originals, and stores the result in destination +func MergeAndCleanupCoverProfiles(profiles []string, destination string) error { + combined := &bytes.Buffer{} + modeRegex := regexp.MustCompile(`^mode: .*\n`) + for i, profile := range profiles { + contents, err := os.ReadFile(profile) + if err != nil { + return fmt.Errorf("Unable to read coverage file %s:\n%s", profile, err.Error()) + } + os.Remove(profile) + + // remove the cover mode line from every file + // except the first one + if i > 0 { + contents = modeRegex.ReplaceAll(contents, []byte{}) + } + + _, err = combined.Write(contents) + + // Add a newline to the end of every file if missing.
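+		// Both fixups assume the line-oriented cover profile format that
+		// "go test -coverprofile" emits (the path below is an illustrative
+		// example, not taken from this repository):
+		//
+		//	mode: set
+		//	example.com/pkg/file.go:10.2,12.16 2 1
+		//
+		// Exactly one "mode:" header may survive in the combined profile
+		// (hence modeRegex stripping it from every file after the first),
+		// and every entry must sit on its own line.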
+ if err == nil && len(contents) > 0 && contents[len(contents)-1] != '\n' { + _, err = combined.Write([]byte("\n")) + } + + if err != nil { + return fmt.Errorf("Unable to append to coverprofile:\n%s", err.Error()) + } + } + + err := os.WriteFile(destination, combined.Bytes(), 0666) + if err != nil { + return fmt.Errorf("Unable to create combined cover profile:\n%s", err.Error()) + } + return nil +} + +func GetCoverageFromCoverProfile(profile string) (float64, error) { + cmd := exec.Command("go", "tool", "cover", "-func", profile) + output, err := cmd.CombinedOutput() + if err != nil { + return 0, fmt.Errorf("Could not process Coverprofile %s: %s", profile, err.Error()) + } + re := regexp.MustCompile(`total:\s*\(statements\)\s*(\d*\.\d*)\%`) + matches := re.FindStringSubmatch(string(output)) + if matches == nil { + return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage") + } + coverageString := matches[1] + coverage, err := strconv.ParseFloat(coverageString, 64) + if err != nil { + return 0, fmt.Errorf("Could not parse Coverprofile to compute coverage percentage: %s", err.Error()) + } + + return coverage, nil +} + +func MergeProfiles(profilePaths []string, destination string) error { + profiles := []*profile.Profile{} + for _, profilePath := range profilePaths { + proFile, err := os.Open(profilePath) + if err != nil { + return fmt.Errorf("Could not open profile: %s\n%s", profilePath, err.Error()) + } + prof, err := profile.Parse(proFile) + if err != nil { + return fmt.Errorf("Could not parse profile: %s\n%s", profilePath, err.Error()) + } + profiles = append(profiles, prof) + os.Remove(profilePath) + } + + mergedProfile, err := profile.Merge(profiles) + if err != nil { + return fmt.Errorf("Could not merge profiles:\n%s", err.Error()) + } + + outFile, err := os.Create(destination) + if err != nil { + return fmt.Errorf("Could not create merged profile %s:\n%s", destination, err.Error()) + } + err = mergedProfile.Write(outFile) + if err != nil { + return fmt.Errorf("Could not write merged profile %s:\n%s", destination, err.Error()) + } + err = outFile.Close() + if err != nil { + return fmt.Errorf("Could not close merged profile %s:\n%s", destination, err.Error()) + } + + return nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go new file mode 100644 index 000000000..41052ea19 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/run.go @@ -0,0 +1,355 @@ +package internal + +import ( + "bytes" + "fmt" + "io" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "syscall" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/internal/parallel_support" + "github.com/onsi/ginkgo/v2/reporters" + "github.com/onsi/ginkgo/v2/types" +) + +func RunCompiledSuite(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + suite.State = TestSuiteStateFailed + suite.HasProgrammaticFocus = false + + if suite.PathToCompiledTest == "" { + return suite + } + + if suite.IsGinkgo && cliConfig.ComputedProcs() > 1 { + suite = runParallel(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs) + } else if suite.IsGinkgo { + suite = runSerial(suite, ginkgoConfig, reporterConfig, cliConfig, goFlagsConfig, additionalArgs) + } else { + suite = runGoTest(suite, cliConfig, 
goFlagsConfig) + } + runAfterRunHook(cliConfig.AfterRunHook, reporterConfig.NoColor, suite) + return suite +} + +func buildAndStartCommand(suite TestSuite, args []string, pipeToStdout bool) (*exec.Cmd, *bytes.Buffer) { + buf := &bytes.Buffer{} + cmd := exec.Command(suite.PathToCompiledTest, args...) + cmd.Dir = suite.Path + if pipeToStdout { + cmd.Stderr = io.MultiWriter(os.Stdout, buf) + cmd.Stdout = os.Stdout + } else { + cmd.Stderr = buf + cmd.Stdout = buf + } + err := cmd.Start() + command.AbortIfError("Failed to start test suite", err) + + return cmd, buf +} + +func checkForNoTestsWarning(buf *bytes.Buffer) bool { + if strings.Contains(buf.String(), "warning: no tests to run") { + fmt.Fprintf(os.Stderr, `Found no test suites, did you forget to run "ginkgo bootstrap"?`) + return true + } + return false +} + +func runGoTest(suite TestSuite, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig) TestSuite { + // As we run the go test from the suite directory, make sure the cover profile is absolute + // and placed into the expected output directory when one is configured. + if goFlagsConfig.Cover && !filepath.IsAbs(goFlagsConfig.CoverProfile) { + goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + } + + args, err := types.GenerateGoTestRunArgs(goFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + cmd, buf := buildAndStartCommand(suite, args, true) + + cmd.Wait() + + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + return suite +} + +func runSerial(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + if goFlagsConfig.Cover { + goFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + } + if goFlagsConfig.BlockProfile != "" { + goFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0) + } + if goFlagsConfig.CPUProfile != "" { + goFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0) + } + if goFlagsConfig.MemProfile != "" { + goFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0) + } + if goFlagsConfig.MutexProfile != "" { + goFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0) + } + if reporterConfig.JSONReport != "" { + reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) + } + if reporterConfig.JUnitReport != "" { + reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) + } + if reporterConfig.TeamcityReport != "" { + reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0) + } + + args, err := types.GenerateGinkgoTestRunArgs(ginkgoConfig, reporterConfig, goFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + args = append([]string{"--test.timeout=0"}, args...) + args = append(args, additionalArgs...) 
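+	// Illustratively, the compiled test binary is now invoked roughly as
+	//
+	//	./pkg.test --test.timeout=0 --ginkgo.seed=12345 <additional args...>
+	//
+	// (the flag values are examples; the exact flag set depends on the
+	// configs above). --test.timeout=0 disables go test's own timeout so
+	// that the Ginkgo CLI can enforce suite timeouts itself.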
+ + cmd, buf := buildAndStartCommand(suite, args, true) + + cmd.Wait() + + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + suite.HasProgrammaticFocus = (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed := (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE) + passed = !(checkForNoTestsWarning(buf) && cliConfig.RequireSuite) && passed + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + if suite.HasProgrammaticFocus { + if goFlagsConfig.Cover { + fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused") + } + if goFlagsConfig.BlockProfile != "" { + fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused") + } + if goFlagsConfig.CPUProfile != "" { + fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused") + } + if goFlagsConfig.MemProfile != "" { + fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused") + } + if goFlagsConfig.MutexProfile != "" { + fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused") + } + } + + return suite +} + +func runParallel(suite TestSuite, ginkgoConfig types.SuiteConfig, reporterConfig types.ReporterConfig, cliConfig types.CLIConfig, goFlagsConfig types.GoFlagsConfig, additionalArgs []string) TestSuite { + type procResult struct { + passed bool + hasProgrammaticFocus bool + } + + numProcs := cliConfig.ComputedProcs() + procOutput := make([]*bytes.Buffer, numProcs) + coverProfiles := []string{} + + blockProfiles := []string{} + cpuProfiles := []string{} + memProfiles := []string{} + mutexProfiles := []string{} + + procResults := make(chan procResult) + + server, err := parallel_support.NewServer(numProcs, reporters.NewDefaultReporter(reporterConfig, formatter.ColorableStdOut)) + command.AbortIfError("Failed to start parallel spec server", err) + server.Start() + defer server.Close() + + if reporterConfig.JSONReport != "" { + reporterConfig.JSONReport = AbsPathForGeneratedAsset(reporterConfig.JSONReport, suite, cliConfig, 0) + } + if reporterConfig.JUnitReport != "" { + reporterConfig.JUnitReport = AbsPathForGeneratedAsset(reporterConfig.JUnitReport, suite, cliConfig, 0) + } + if reporterConfig.TeamcityReport != "" { + reporterConfig.TeamcityReport = AbsPathForGeneratedAsset(reporterConfig.TeamcityReport, suite, cliConfig, 0) + } + + for proc := 1; proc <= numProcs; proc++ { + procGinkgoConfig := ginkgoConfig + procGinkgoConfig.ParallelProcess, procGinkgoConfig.ParallelTotal, procGinkgoConfig.ParallelHost = proc, numProcs, server.Address() + + procGoFlagsConfig := goFlagsConfig + if goFlagsConfig.Cover { + procGoFlagsConfig.CoverProfile = AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, proc) + coverProfiles = append(coverProfiles, procGoFlagsConfig.CoverProfile) + } + if goFlagsConfig.BlockProfile != "" { + procGoFlagsConfig.BlockProfile = AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, proc) + blockProfiles = append(blockProfiles, procGoFlagsConfig.BlockProfile) + } + if goFlagsConfig.CPUProfile != "" { + procGoFlagsConfig.CPUProfile = AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, proc) + cpuProfiles = append(cpuProfiles, procGoFlagsConfig.CPUProfile) + } + if goFlagsConfig.MemProfile != "" { + procGoFlagsConfig.MemProfile = AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, 
cliConfig, proc) + memProfiles = append(memProfiles, procGoFlagsConfig.MemProfile) + } + if goFlagsConfig.MutexProfile != "" { + procGoFlagsConfig.MutexProfile = AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, proc) + mutexProfiles = append(mutexProfiles, procGoFlagsConfig.MutexProfile) + } + + args, err := types.GenerateGinkgoTestRunArgs(procGinkgoConfig, reporterConfig, procGoFlagsConfig) + command.AbortIfError("Failed to generate test run arguments", err) + args = append([]string{"--test.timeout=0"}, args...) + args = append(args, additionalArgs...) + + cmd, buf := buildAndStartCommand(suite, args, false) + procOutput[proc-1] = buf + server.RegisterAlive(proc, func() bool { return cmd.ProcessState == nil || !cmd.ProcessState.Exited() }) + + go func() { + cmd.Wait() + exitStatus := cmd.ProcessState.Sys().(syscall.WaitStatus).ExitStatus() + procResults <- procResult{ + passed: (exitStatus == 0) || (exitStatus == types.GINKGO_FOCUS_EXIT_CODE), + hasProgrammaticFocus: exitStatus == types.GINKGO_FOCUS_EXIT_CODE, + } + }() + } + + passed := true + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + result := <-procResults + passed = passed && result.passed + suite.HasProgrammaticFocus = suite.HasProgrammaticFocus || result.hasProgrammaticFocus + } + if passed { + suite.State = TestSuiteStatePassed + } else { + suite.State = TestSuiteStateFailed + } + + select { + case <-server.GetSuiteDone(): + fmt.Println("") + case <-time.After(time.Second): + //one of the nodes never finished reporting to the server. Something must have gone wrong. + fmt.Fprint(formatter.ColorableStdErr, formatter.F("\n{{bold}}{{red}}Ginkgo timed out waiting for all parallel procs to report back{{/}}\n")) + fmt.Fprint(formatter.ColorableStdErr, formatter.F("{{gray}}Test suite:{{/}} %s (%s)\n\n", suite.PackageName, suite.Path)) + fmt.Fprint(formatter.ColorableStdErr, formatter.Fiw(0, formatter.COLS, "This occurs if a parallel process exits before it reports its results to the Ginkgo CLI. The CLI will now print out all the stdout/stderr output it's collected from the running processes. 
However you may not see anything useful in these logs because the individual test processes usually intercept output to stdout/stderr in order to capture it in the spec reports.\n\nYou may want to try rerunning your test suite with {{light-gray}}--output-interceptor-mode=none{{/}} to see additional output here and debug your suite.\n")) + fmt.Fprintln(formatter.ColorableStdErr, " ") + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + fmt.Fprintf(formatter.ColorableStdErr, formatter.F("{{bold}}Output from proc %d:{{/}}\n", proc)) + fmt.Fprintln(os.Stderr, formatter.Fi(1, "%s", procOutput[proc-1].String())) + } + fmt.Fprintf(os.Stderr, "** End **") + } + + for proc := 1; proc <= cliConfig.ComputedProcs(); proc++ { + output := procOutput[proc-1].String() + if proc == 1 && checkForNoTestsWarning(procOutput[0]) && cliConfig.RequireSuite { + suite.State = TestSuiteStateFailed + } + if strings.Contains(output, "deprecated Ginkgo functionality") { + fmt.Fprintln(os.Stderr, output) + } + } + + if len(coverProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "coverage: no coverfile was generated because specs are programmatically focused") + } else { + coverProfile := AbsPathForGeneratedAsset(goFlagsConfig.CoverProfile, suite, cliConfig, 0) + err := MergeAndCleanupCoverProfiles(coverProfiles, coverProfile) + command.AbortIfError("Failed to combine cover profiles", err) + + coverage, err := GetCoverageFromCoverProfile(coverProfile) + command.AbortIfError("Failed to compute coverage", err) + if coverage == 0 { + fmt.Fprintln(os.Stdout, "coverage: [no statements]") + } else { + fmt.Fprintf(os.Stdout, "coverage: %.1f%% of statements\n", coverage) + } + } + } + if len(blockProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no block profile was generated because specs are programmatically focused") + } else { + blockProfile := AbsPathForGeneratedAsset(goFlagsConfig.BlockProfile, suite, cliConfig, 0) + err := MergeProfiles(blockProfiles, blockProfile) + command.AbortIfError("Failed to combine blockprofiles", err) + } + } + if len(cpuProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no cpu profile was generated because specs are programmatically focused") + } else { + cpuProfile := AbsPathForGeneratedAsset(goFlagsConfig.CPUProfile, suite, cliConfig, 0) + err := MergeProfiles(cpuProfiles, cpuProfile) + command.AbortIfError("Failed to combine cpuprofiles", err) + } + } + if len(memProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no mem profile was generated because specs are programmatically focused") + } else { + memProfile := AbsPathForGeneratedAsset(goFlagsConfig.MemProfile, suite, cliConfig, 0) + err := MergeProfiles(memProfiles, memProfile) + command.AbortIfError("Failed to combine memprofiles", err) + } + } + if len(mutexProfiles) > 0 { + if suite.HasProgrammaticFocus { + fmt.Fprintln(os.Stdout, "no mutex profile was generated because specs are programmatically focused") + } else { + mutexProfile := AbsPathForGeneratedAsset(goFlagsConfig.MutexProfile, suite, cliConfig, 0) + err := MergeProfiles(mutexProfiles, mutexProfile) + command.AbortIfError("Failed to combine mutexprofiles", err) + } + } + + return suite +} + +func runAfterRunHook(command string, noColor bool, suite TestSuite) { + if command == "" { + return + } + f := formatter.NewWithNoColorBool(noColor) + + // Allow for string replacement to pass input to the command + passed := "[FAIL]" + if suite.State.Is(TestSuiteStatePassed) { + 
passed = "[PASS]" + } + command = strings.ReplaceAll(command, "(ginkgo-suite-passed)", passed) + command = strings.ReplaceAll(command, "(ginkgo-suite-name)", suite.PackageName) + + // Must break command into parts + splitArgs := regexp.MustCompile(`'.+'|".+"|\S+`) + parts := splitArgs.FindAllString(command, -1) + + output, err := exec.Command(parts[0], parts[1:]...).CombinedOutput() + if err != nil { + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{red}}{{bold}}After-run-hook failed:{{/}}")) + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{red}}%s{{/}}", output)) + } else { + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(0, "{{green}}{{bold}}After-run-hook succeeded:{{/}}")) + fmt.Fprintln(formatter.ColorableStdOut, f.Fi(1, "{{green}}%s{{/}}", output)) + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go new file mode 100644 index 000000000..64dcb1b78 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/test_suite.go @@ -0,0 +1,283 @@ +package internal + +import ( + "errors" + "math/rand" + "os" + "path" + "path/filepath" + "regexp" + "strings" + + "github.com/onsi/ginkgo/v2/types" +) + +const TIMEOUT_ELAPSED_FAILURE_REASON = "Suite did not run because the timeout elapsed" +const PRIOR_FAILURES_FAILURE_REASON = "Suite did not run because prior suites failed and --keep-going is not set" +const EMPTY_SKIP_FAILURE_REASON = "Suite did not run: go test reported that no test files were found" + +type TestSuiteState uint + +const ( + TestSuiteStateInvalid TestSuiteState = iota + + TestSuiteStateUncompiled + TestSuiteStateCompiled + + TestSuiteStatePassed + + TestSuiteStateSkippedDueToEmptyCompilation + TestSuiteStateSkippedByFilter + TestSuiteStateSkippedDueToPriorFailures + + TestSuiteStateFailed + TestSuiteStateFailedDueToTimeout + TestSuiteStateFailedToCompile +) + +var TestSuiteStateFailureStates = []TestSuiteState{TestSuiteStateFailed, TestSuiteStateFailedDueToTimeout, TestSuiteStateFailedToCompile} + +func (state TestSuiteState) Is(states ...TestSuiteState) bool { + for _, suiteState := range states { + if suiteState == state { + return true + } + } + + return false +} + +type TestSuite struct { + Path string + PackageName string + IsGinkgo bool + + Precompiled bool + PathToCompiledTest string + CompilationError error + + HasProgrammaticFocus bool + State TestSuiteState +} + +func (ts TestSuite) AbsPath() string { + path, _ := filepath.Abs(ts.Path) + return path +} + +func (ts TestSuite) NamespacedName() string { + name := relPath(ts.Path) + name = strings.TrimLeft(name, "."+string(filepath.Separator)) + name = strings.ReplaceAll(name, string(filepath.Separator), "_") + name = strings.ReplaceAll(name, " ", "_") + if name == "" { + return ts.PackageName + } + return name +} + +type TestSuites []TestSuite + +func (ts TestSuites) AnyHaveProgrammaticFocus() bool { + for _, suite := range ts { + if suite.HasProgrammaticFocus { + return true + } + } + + return false +} + +func (ts TestSuites) ThatAreGinkgoSuites() TestSuites { + out := TestSuites{} + for _, suite := range ts { + if suite.IsGinkgo { + out = append(out, suite) + } + } + return out +} + +func (ts TestSuites) CountWithState(states ...TestSuiteState) int { + n := 0 + for _, suite := range ts { + if suite.State.Is(states...) { + n += 1 + } + } + + return n +} + +func (ts TestSuites) WithState(states ...TestSuiteState) TestSuites { + out := TestSuites{} + for _, suite := range ts { + if suite.State.Is(states...)
{ + out = append(out, suite) + } + } + + return out +} + +func (ts TestSuites) WithoutState(states ...TestSuiteState) TestSuites { + out := TestSuites{} + for _, suite := range ts { + if !suite.State.Is(states...) { + out = append(out, suite) + } + } + + return out +} + +func (ts TestSuites) ShuffledCopy(seed int64) TestSuites { + out := make(TestSuites, len(ts)) + permutation := rand.New(rand.NewSource(seed)).Perm(len(ts)) + for i, j := range permutation { + out[i] = ts[j] + } + return out +} + +func FindSuites(args []string, cliConfig types.CLIConfig, allowPrecompiled bool) TestSuites { + suites := TestSuites{} + + if len(args) > 0 { + for _, arg := range args { + if allowPrecompiled { + suite, err := precompiledTestSuite(arg) + if err == nil { + suites = append(suites, suite) + continue + } + } + recurseForSuite := cliConfig.Recurse + if strings.HasSuffix(arg, "/...") && arg != "/..." { + arg = arg[:len(arg)-4] + recurseForSuite = true + } + suites = append(suites, suitesInDir(arg, recurseForSuite)...) + } + } else { + suites = suitesInDir(".", cliConfig.Recurse) + } + + if cliConfig.SkipPackage != "" { + skipFilters := strings.Split(cliConfig.SkipPackage, ",") + for idx := range suites { + for _, skipFilter := range skipFilters { + if strings.Contains(suites[idx].Path, skipFilter) { + suites[idx].State = TestSuiteStateSkippedByFilter + break + } + } + } + } + + return suites +} + +func precompiledTestSuite(path string) (TestSuite, error) { + info, err := os.Stat(path) + if err != nil { + return TestSuite{}, err + } + + if info.IsDir() { + return TestSuite{}, errors.New("this is a directory, not a file") + } + + if filepath.Ext(path) != ".test" && filepath.Ext(path) != ".exe" { + return TestSuite{}, errors.New("this is not a .test binary") + } + + if filepath.Ext(path) == ".test" && info.Mode()&0111 == 0 { + return TestSuite{}, errors.New("this is not executable") + } + + dir := relPath(filepath.Dir(path)) + packageName := strings.TrimSuffix(filepath.Base(path), ".exe") + packageName = strings.TrimSuffix(packageName, ".test") + + path, err = filepath.Abs(path) + if err != nil { + return TestSuite{}, err + } + + return TestSuite{ + Path: dir, + PackageName: packageName, + IsGinkgo: true, + Precompiled: true, + PathToCompiledTest: path, + State: TestSuiteStateCompiled, + }, nil +} + +func suitesInDir(dir string, recurse bool) TestSuites { + suites := TestSuites{} + + if path.Base(dir) == "vendor" { + return suites + } + + files, _ := os.ReadDir(dir) + re := regexp.MustCompile(`^[^._].*_test\.go$`) + for _, file := range files { + if !file.IsDir() && re.Match([]byte(file.Name())) { + suite := TestSuite{ + Path: relPath(dir), + PackageName: packageNameForSuite(dir), + IsGinkgo: filesHaveGinkgoSuite(dir, files), + State: TestSuiteStateUncompiled, + } + suites = append(suites, suite) + break + } + } + + if recurse { + re = regexp.MustCompile(`^[._]`) + for _, file := range files { + if file.IsDir() && !re.Match([]byte(file.Name())) { + suites = append(suites, suitesInDir(dir+"/"+file.Name(), recurse)...) + } + } + } + + return suites +} + +func relPath(dir string) string { + dir, _ = filepath.Abs(dir) + cwd, _ := os.Getwd() + dir, _ = filepath.Rel(cwd, filepath.Clean(dir)) + + if string(dir[0]) != "." { + dir = "." 
+ string(filepath.Separator) + dir + } + + return dir +} + +func packageNameForSuite(dir string) string { + path, _ := filepath.Abs(dir) + return filepath.Base(path) +} + +func filesHaveGinkgoSuite(dir string, files []os.DirEntry) bool { + reTestFile := regexp.MustCompile(`_test\.go$`) + reGinkgo := regexp.MustCompile(`package ginkgo|\/ginkgo"|\/ginkgo\/v2"|\/ginkgo\/v2/dsl/`) + + for _, file := range files { + if !file.IsDir() && reTestFile.Match([]byte(file.Name())) { + contents, _ := os.ReadFile(dir + "/" + file.Name()) + if reGinkgo.Match(contents) { + return true + } + } + } + + return false +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go new file mode 100644 index 000000000..bd9ca7d51 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/utils.go @@ -0,0 +1,86 @@ +package internal + +import ( + "fmt" + "io" + "os" + "os/exec" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +func FileExists(path string) bool { + _, err := os.Stat(path) + return err == nil +} + +func CopyFile(src string, dest string) error { + srcFile, err := os.Open(src) + if err != nil { + return err + } + + srcStat, err := srcFile.Stat() + if err != nil { + return err + } + + if _, err := os.Stat(dest); err == nil { + os.Remove(dest) + } + + destFile, err := os.OpenFile(dest, os.O_WRONLY|os.O_CREATE, srcStat.Mode()) + if err != nil { + return err + } + + _, err = io.Copy(destFile, srcFile) + if err != nil { + return err + } + + if err := srcFile.Close(); err != nil { + return err + } + return destFile.Close() +} + +func GoFmt(path string) { + out, err := exec.Command("go", "fmt", path).CombinedOutput() + if err != nil { + command.AbortIfError(fmt.Sprintf("Could not fmt:\n%s\n", string(out)), err) + } +} + +func PluralizedWord(singular, plural string, count int) string { + if count == 1 { + return singular + } + return plural +} + +func FailedSuitesReport(suites TestSuites, f formatter.Formatter) string { + out := "" + out += "There were failures detected in the following suites:\n" + + maxPackageNameLength := 0 + for _, suite := range suites.WithState(TestSuiteStateFailureStates...) 
{ + if len(suite.PackageName) > maxPackageNameLength { + maxPackageNameLength = len(suite.PackageName) + } + } + + packageNameFormatter := fmt.Sprintf("%%%ds", maxPackageNameLength) + for _, suite := range suites { + switch suite.State { + case TestSuiteStateFailed: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s{{/}}\n", suite.PackageName, suite.Path) + case TestSuiteStateFailedToCompile: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{magenta}}[Compilation failure]{{/}}\n", suite.PackageName, suite.Path) + case TestSuiteStateFailedDueToTimeout: + out += f.Fi(1, "{{red}}"+packageNameFormatter+" {{gray}}%s {{orange}}[%s]{{/}}\n", suite.PackageName, suite.Path, TIMEOUT_ELAPSED_FAILURE_REASON) + } + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go new file mode 100644 index 000000000..9da1bab3d --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/internal/verify_version.go @@ -0,0 +1,54 @@ +package internal + +import ( + "fmt" + "os/exec" + "regexp" + "strings" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/types" +) + +var versionRe = regexp.MustCompile(`v(\d+\.\d+\.\d+)`) + +func VerifyCLIAndFrameworkVersion(suites TestSuites) { + cliVersion := types.VERSION + mismatches := map[string][]string{} + + for _, suite := range suites { + cmd := exec.Command("go", "list", "-m", "github.com/onsi/ginkgo/v2") + cmd.Dir = suite.Path + output, err := cmd.CombinedOutput() + if err != nil { + continue + } + components := strings.Split(string(output), " ") + if len(components) != 2 { + continue + } + matches := versionRe.FindStringSubmatch(components[1]) + if matches == nil || len(matches) != 2 { + continue + } + libraryVersion := matches[1] + if cliVersion != libraryVersion { + mismatches[libraryVersion] = append(mismatches[libraryVersion], suite.PackageName) + } + } + + if len(mismatches) == 0 { + return + } + + fmt.Println(formatter.F("{{red}}{{bold}}Ginkgo detected a version mismatch between the Ginkgo CLI and the version of Ginkgo imported by your packages:{{/}}")) + + fmt.Println(formatter.Fi(1, "Ginkgo CLI Version:")) + fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}}", cliVersion)) + fmt.Println(formatter.Fi(1, "Mismatched package versions found:")) + for version, packages := range mismatches { + fmt.Println(formatter.Fi(2, "{{bold}}%s{{/}} used by %s", version, strings.Join(packages, ", "))) + } + fmt.Println("") + fmt.Println(formatter.Fiw(1, formatter.COLS, "{{gray}}Ginkgo will continue to attempt to run, but you may see errors (including flag parsing errors) and should either update your go.mod or your version of the Ginkgo CLI to match.\n\nTo install the matching version of the CLI run\n {{bold}}go install github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file.
Alternatively you can use\n  {{bold}}go run github.com/onsi/ginkgo/v2/ginkgo{{/}}{{gray}}\nfrom a path that contains a go.mod file to invoke the matching version of the Ginkgo CLI.\n\nIf you are attempting to test multiple packages that each have a different version of the Ginkgo library with a single Ginkgo CLI, that is currently unsupported.\n{{/}}")) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go new file mode 100644 index 000000000..6c61f09d1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/labels/labels_command.go @@ -0,0 +1,123 @@ +package labels + +import ( + "fmt" + "go/ast" + "go/parser" + "go/token" + "sort" + "strconv" + "strings" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/types" + "golang.org/x/tools/go/ast/inspector" +) + +func BuildLabelsCommand() command.Command { + var cliConfig = types.NewDefaultCLIConfig() + + flags, err := types.BuildLabelsCommandFlagSet(&cliConfig) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "labels", + Usage: "ginkgo labels <PACKAGES>", + Flags: flags, + ShortDoc: "List labels detected in the passed-in packages (or the package in the current directory if left blank).", + DocLink: "spec-labels", + Command: func(args []string, _ []string) { + ListLabels(args, cliConfig) + }, + } +} + +func ListLabels(args []string, cliConfig types.CLIConfig) { + suites := internal.FindSuites(args, cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + for _, suite := range suites { + labels := fetchLabelsFromPackage(suite.Path) + if len(labels) == 0 { + fmt.Printf("%s: No labels found\n", suite.PackageName) + } else { + fmt.Printf("%s: [%s]\n", suite.PackageName, strings.Join(labels, ", ")) + } + } +} + +func fetchLabelsFromPackage(packagePath string) []string { + fset := token.NewFileSet() + parsedPackages, err := parser.ParseDir(fset, packagePath, nil, 0) + command.AbortIfError("Failed to parse package source:", err) + + files := []*ast.File{} + hasTestPackage := false + for key, pkg := range parsedPackages { + if strings.HasSuffix(key, "_test") { + hasTestPackage = true + for _, file := range pkg.Files { + files = append(files, file) + } + } + } + if !hasTestPackage { + for _, pkg := range parsedPackages { + for _, file := range pkg.Files { + files = append(files, file) + } + } + } + + seen := map[string]bool{} + labels := []string{} + ispr := inspector.New(files) + ispr.Preorder([]ast.Node{&ast.CallExpr{}}, func(n ast.Node) { + potentialLabels := fetchLabels(n.(*ast.CallExpr)) + for _, label := range potentialLabels { + if !seen[label] { + seen[label] = true + labels = append(labels, strconv.Quote(label)) + } + } + }) + + sort.Strings(labels) + return labels +} + +func fetchLabels(callExpr *ast.CallExpr) []string { + out := []string{} + switch expr := callExpr.Fun.(type) { + case *ast.Ident: + if expr.Name != "Label" { + return out + } + case *ast.SelectorExpr: + if expr.Sel.Name != "Label" { + return out + } + default: + return out + } + for _, arg := range callExpr.Args { + switch expr := arg.(type) { + case *ast.BasicLit: + if expr.Kind == token.STRING { + unquoted, err := strconv.Unquote(expr.Value) + if err != nil { + unquoted = expr.Value + } + validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{}) + if err == nil { + out = append(out, validated)
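+					// Note: only string-literal arguments to Label(...) are
+					// detected by this static scan; labels built at runtime
+					// (e.g. Label(someVariable)) will not appear in the output.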
+ } + } + } + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go new file mode 100644 index 000000000..e9abb27d8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/main.go @@ -0,0 +1,58 @@ +package main + +import ( + "fmt" + "os" + + "github.com/onsi/ginkgo/v2/ginkgo/build" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/generators" + "github.com/onsi/ginkgo/v2/ginkgo/labels" + "github.com/onsi/ginkgo/v2/ginkgo/outline" + "github.com/onsi/ginkgo/v2/ginkgo/run" + "github.com/onsi/ginkgo/v2/ginkgo/unfocus" + "github.com/onsi/ginkgo/v2/ginkgo/watch" + "github.com/onsi/ginkgo/v2/types" +) + +var program command.Program + +func GenerateCommands() []command.Command { + return []command.Command{ + watch.BuildWatchCommand(), + build.BuildBuildCommand(), + generators.BuildBootstrapCommand(), + generators.BuildGenerateCommand(), + labels.BuildLabelsCommand(), + outline.BuildOutlineCommand(), + unfocus.BuildUnfocusCommand(), + BuildVersionCommand(), + } +} + +func main() { + program = command.Program{ + Name: "ginkgo", + Heading: fmt.Sprintf("Ginkgo Version %s", types.VERSION), + Commands: GenerateCommands(), + DefaultCommand: run.BuildRunCommand(), + DeprecatedCommands: []command.DeprecatedCommand{ + {Name: "convert", Deprecation: types.Deprecations.Convert()}, + {Name: "blur", Deprecation: types.Deprecations.Blur()}, + {Name: "nodot", Deprecation: types.Deprecations.Nodot()}, + }, + } + + program.RunAndExit(os.Args) +} + +func BuildVersionCommand() command.Command { + return command.Command{ + Name: "version", + Usage: "ginkgo version", + ShortDoc: "Print Ginkgo's version", + Command: func(_ []string, _ []string) { + fmt.Printf("Ginkgo Version %s\n", types.VERSION) + }, + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go new file mode 100644 index 000000000..0b9b19fe7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go @@ -0,0 +1,302 @@ +package outline + +import ( + "github.com/onsi/ginkgo/v2/types" + "go/ast" + "go/token" + "strconv" +) + +const ( + // undefinedTextAlt is used if the spec/container text cannot be derived + undefinedTextAlt = "undefined" +) + +// ginkgoMetadata holds useful bits of information for every entry in the outline +type ginkgoMetadata struct { + // Name is the spec or container function name, e.g. 
`Describe` or `It` + Name string `json:"name"` + + // Text is the `text` argument passed to specs, and some containers + Text string `json:"text"` + + // Start is the position of first character of the spec or container block + Start int `json:"start"` + + // End is the position of first character immediately after the spec or container block + End int `json:"end"` + + Spec bool `json:"spec"` + Focused bool `json:"focused"` + Pending bool `json:"pending"` + Labels []string `json:"labels"` +} + +// ginkgoNode is used to construct the outline as a tree +type ginkgoNode struct { + ginkgoMetadata + Nodes []*ginkgoNode `json:"nodes"` +} + +type walkFunc func(n *ginkgoNode) + +func (n *ginkgoNode) PreOrder(f walkFunc) { + f(n) + for _, m := range n.Nodes { + m.PreOrder(f) + } +} + +func (n *ginkgoNode) PostOrder(f walkFunc) { + for _, m := range n.Nodes { + m.PostOrder(f) + } + f(n) +} + +func (n *ginkgoNode) Walk(pre, post walkFunc) { + pre(n) + for _, m := range n.Nodes { + m.Walk(pre, post) + } + post(n) +} + +// PropagateInheritedProperties propagates the Pending and Focused properties +// through the subtree rooted at n. +func (n *ginkgoNode) PropagateInheritedProperties() { + n.PreOrder(func(thisNode *ginkgoNode) { + for _, descendantNode := range thisNode.Nodes { + if thisNode.Pending { + descendantNode.Pending = true + descendantNode.Focused = false + } + if thisNode.Focused && !descendantNode.Pending { + descendantNode.Focused = true + } + } + }) +} + +// BackpropagateUnfocus propagates the Focused property through the subtree +// rooted at n. It applies the rule described in the Ginkgo docs: +// > Nested programmatically focused specs follow a simple rule: if a +// > leaf-node is marked focused, any of its ancestor nodes that are marked +// > focus will be unfocused. +func (n *ginkgoNode) BackpropagateUnfocus() { + focusedSpecInSubtreeStack := []bool{} + n.PostOrder(func(thisNode *ginkgoNode) { + if thisNode.Spec { + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, thisNode.Focused) + return + } + focusedSpecInSubtree := false + for range thisNode.Nodes { + focusedSpecInSubtree = focusedSpecInSubtree || focusedSpecInSubtreeStack[len(focusedSpecInSubtreeStack)-1] + focusedSpecInSubtreeStack = focusedSpecInSubtreeStack[0 : len(focusedSpecInSubtreeStack)-1] + } + focusedSpecInSubtreeStack = append(focusedSpecInSubtreeStack, focusedSpecInSubtree) + if focusedSpecInSubtree { + thisNode.Focused = false + } + }) + +} + +func packageAndIdentNamesFromCallExpr(ce *ast.CallExpr) (string, string, bool) { + switch ex := ce.Fun.(type) { + case *ast.Ident: + return "", ex.Name, true + case *ast.SelectorExpr: + pkgID, ok := ex.X.(*ast.Ident) + if !ok { + return "", "", false + } + // A package identifier is top-level, so Obj must be nil + if pkgID.Obj != nil { + return "", "", false + } + if ex.Sel == nil { + return "", "", false + } + return pkgID.Name, ex.Sel.Name, true + default: + return "", "", false + } +} + +// absoluteOffsetsForNode derives the absolute character offsets of the node start and +// end positions. +func absoluteOffsetsForNode(fset *token.FileSet, n ast.Node) (start, end int) { + return fset.PositionFor(n.Pos(), false).Offset, fset.PositionFor(n.End(), false).Offset +} + +// ginkgoNodeFromCallExpr derives an outline entry from a go AST subtree +// corresponding to a Ginkgo container or spec. 
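+// For example (an illustrative call, not taken from this repository), the
+// expression
+//
+//	It("reports the sum", func() { ... })
+//
+// produces a node with Name "It", Text "reports the sum", Spec true, and the
+// absolute Start/End offsets of the call; the boolean result is true only
+// when the file imports ginkgo under the expected package name.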
+func ginkgoNodeFromCallExpr(fset *token.FileSet, ce *ast.CallExpr, ginkgoPackageName *string) (*ginkgoNode, bool) { + packageName, identName, ok := packageAndIdentNamesFromCallExpr(ce) + if !ok { + return nil, false + } + + n := ginkgoNode{} + n.Name = identName + n.Start, n.End = absoluteOffsetsForNode(fset, ce) + n.Nodes = make([]*ginkgoNode, 0) + switch identName { + case "It", "Specify", "Entry": + n.Spec = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + n.Pending = pendingFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FIt", "FSpecify", "FEntry": + n.Spec = true + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PIt", "PSpecify", "XIt", "XSpecify", "PEntry", "XEntry": + n.Spec = true + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "Context", "Describe", "When", "DescribeTable": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + n.Pending = pendingFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "FContext", "FDescribe", "FWhen", "FDescribeTable": + n.Focused = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "PContext", "PDescribe", "PWhen", "XContext", "XDescribe", "XWhen", "PDescribeTable", "XDescribeTable": + n.Pending = true + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + n.Labels = labelFromCallExpr(ce) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "By": + n.Text = textOrAltFromCallExpr(ce, undefinedTextAlt) + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterEach", "BeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "JustAfterEach", "JustBeforeEach": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "AfterSuite", "BeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + case "SynchronizedAfterSuite", "SynchronizedBeforeSuite": + return &n, ginkgoPackageName != nil && *ginkgoPackageName == packageName + default: + return nil, false + } +} + +// textOrAltFromCallExpr tries to derive the "text" of a Ginkgo spec or +// container. If it cannot derive it, it returns the alt text. +func textOrAltFromCallExpr(ce *ast.CallExpr, alt string) string { + text, defined := textFromCallExpr(ce) + if !defined { + return alt + } + return text +} + +// textFromCallExpr tries to derive the "text" of a Ginkgo spec or container. If +// it cannot derive it, it returns false. 
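+// For example (illustrative), Describe("Books", ...) yields ("Books", true),
+// while Describe(fmt.Sprintf("%d books", n), ...) yields ("", false) because
+// the first argument is not a basic literal the outline can evaluate.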
+func textFromCallExpr(ce *ast.CallExpr) (string, bool) { + if len(ce.Args) < 1 { + return "", false + } + text, ok := ce.Args[0].(*ast.BasicLit) + if !ok { + return "", false + } + switch text.Kind { + case token.CHAR, token.STRING: + // For token.CHAR and token.STRING, Value is quoted + unquoted, err := strconv.Unquote(text.Value) + if err != nil { + // If unquoting fails, just use the raw Value + return text.Value, true + } + return unquoted, true + default: + return text.Value, true + } +} + +func labelFromCallExpr(ce *ast.CallExpr) []string { + + labels := []string{} + if len(ce.Args) < 2 { + return labels + } + + for _, arg := range ce.Args[1:] { + switch expr := arg.(type) { + case *ast.CallExpr: + id, ok := expr.Fun.(*ast.Ident) + if !ok { + // to skip over cases where the expr.Fun. is actually *ast.SelectorExpr + continue + } + if id.Name == "Label" { + ls := extractLabels(expr) + for _, label := range ls { + labels = append(labels, label) + } + } + } + } + return labels +} + +func extractLabels(expr *ast.CallExpr) []string { + out := []string{} + for _, arg := range expr.Args { + switch expr := arg.(type) { + case *ast.BasicLit: + if expr.Kind == token.STRING { + unquoted, err := strconv.Unquote(expr.Value) + if err != nil { + unquoted = expr.Value + } + validated, err := types.ValidateAndCleanupLabel(unquoted, types.CodeLocation{}) + if err == nil { + out = append(out, validated) + } + } + } + } + + return out +} + +func pendingFromCallExpr(ce *ast.CallExpr) bool { + + pending := false + if len(ce.Args) < 2 { + return pending + } + + for _, arg := range ce.Args[1:] { + switch expr := arg.(type) { + case *ast.CallExpr: + id, ok := expr.Fun.(*ast.Ident) + if !ok { + // to skip over cases where the expr.Fun. is actually *ast.SelectorExpr + continue + } + if id.Name == "Pending" { + pending = true + } + case *ast.Ident: + if expr.Name == "Pending" { + pending = true + } + } + } + return pending +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go new file mode 100644 index 000000000..67ec5ab75 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/import.go @@ -0,0 +1,65 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Most of the required functions were available in the +// "golang.org/x/tools/go/ast/astutil" package, but not exported. +// They were copied from https://github.com/golang/tools/blob/2b0845dc783e36ae26d683f4915a5840ef01ab0f/go/ast/astutil/imports.go + +package outline + +import ( + "go/ast" + "strconv" + "strings" +) + +// packageNameForImport returns the package name for the package. If the package +// is not imported, it returns nil. "Package name" refers to `pkgname` in the +// call expression `pkgname.ExportedIdentifier`. Examples: +// (import path not found) -> nil +// "import example.com/pkg/foo" -> "foo" +// "import fooalias example.com/pkg/foo" -> "fooalias" +// "import . example.com/pkg/foo" -> "" +func packageNameForImport(f *ast.File, path string) *string { + spec := importSpec(f, path) + if spec == nil { + return nil + } + name := spec.Name.String() + if name == "" { + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + } + if name == "." 
{ + name = "" + } + return &name +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if strings.HasPrefix(importPath(s), path) { + return s + } + } + return nil +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err != nil { + return "" + } + return t +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go new file mode 100644 index 000000000..c2327cda8 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline.go @@ -0,0 +1,110 @@ +package outline + +import ( + "encoding/json" + "fmt" + "go/ast" + "go/token" + "strings" + + "golang.org/x/tools/go/ast/inspector" +) + +const ( + // ginkgoImportPath is the well-known ginkgo import path + ginkgoImportPath = "github.com/onsi/ginkgo/v2" +) + +// FromASTFile returns an outline for a Ginkgo test source file +func FromASTFile(fset *token.FileSet, src *ast.File) (*outline, error) { + ginkgoPackageName := packageNameForImport(src, ginkgoImportPath) + if ginkgoPackageName == nil { + return nil, fmt.Errorf("file does not import %q", ginkgoImportPath) + } + + root := ginkgoNode{} + stack := []*ginkgoNode{&root} + ispr := inspector.New([]*ast.File{src}) + ispr.Nodes([]ast.Node{(*ast.CallExpr)(nil)}, func(node ast.Node, push bool) bool { + if push { + // Pre-order traversal + ce, ok := node.(*ast.CallExpr) + if !ok { + // Because `Nodes` calls this function only when the node is an + // ast.CallExpr, this should never happen + panic(fmt.Errorf("node starting at %d, ending at %d is not an *ast.CallExpr", node.Pos(), node.End())) + } + gn, ok := ginkgoNodeFromCallExpr(fset, ce, ginkgoPackageName) + if !ok { + // Node is not a Ginkgo spec or container, continue + return true + } + parent := stack[len(stack)-1] + parent.Nodes = append(parent.Nodes, gn) + stack = append(stack, gn) + return true + } + // Post-order traversal + start, end := absoluteOffsetsForNode(fset, node) + lastVisitedGinkgoNode := stack[len(stack)-1] + if start != lastVisitedGinkgoNode.Start || end != lastVisitedGinkgoNode.End { + // Node is not a Ginkgo spec or container, so it was not pushed onto the stack, continue + return true + } + stack = stack[0 : len(stack)-1] + return true + }) + if len(root.Nodes) == 0 { + return &outline{[]*ginkgoNode{}}, nil + } + + // Derive the final focused property for all nodes. This must be done + // _before_ propagating the inherited focused property. + root.BackpropagateUnfocus() + // Now, propagate inherited properties, including focused and pending. + root.PropagateInheritedProperties() + + return &outline{root.Nodes}, nil +} + +type outline struct { + Nodes []*ginkgoNode `json:"nodes"` +} + +func (o *outline) MarshalJSON() ([]byte, error) { + return json.Marshal(o.Nodes) +} + +// String returns a CSV-formatted outline. Specs and containers are output in +// depth-first order. +func (o *outline) String() string { + return o.StringIndent(0) +} + +// StringIndent returns a CSV-formatted outline, but every line is indented by +// one 'width' of spaces for every level of nesting.
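+//
+// A sketch of the output for a Describe containing one It (the offsets are
+// illustrative):
+//
+//	Name,Text,Start,End,Spec,Focused,Pending,Labels
+//	Describe,Books,0,120,false,false,false,""
+//	    It,reports the sum,40,100,true,false,false,""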
+func (o *outline) StringIndent(width int) string { + var b strings.Builder + b.WriteString("Name,Text,Start,End,Spec,Focused,Pending,Labels\n") + + currentIndent := 0 + pre := func(n *ginkgoNode) { + b.WriteString(fmt.Sprintf("%*s", currentIndent, "")) + var labels string + if len(n.Labels) == 1 { + labels = n.Labels[0] + } else { + labels = strings.Join(n.Labels, ", ") + } + // enclose the labels in a double-quoted, comma-separated list so that, when imported into a CSV app, the Labels column holds the comma-separated label strings + b.WriteString(fmt.Sprintf("%s,%s,%d,%d,%t,%t,%t,\"%s\"\n", n.Name, n.Text, n.Start, n.End, n.Spec, n.Focused, n.Pending, labels)) + currentIndent += width + } + post := func(n *ginkgoNode) { + currentIndent -= width + } + for _, n := range o.Nodes { + n.Walk(pre, post) + } + return b.String() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go new file mode 100644 index 000000000..36698d46a --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/outline_command.go @@ -0,0 +1,98 @@ +package outline + +import ( + "encoding/json" + "fmt" + "go/parser" + "go/token" + "os" + + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/types" +) + +const ( + // indentWidth is the width used by the 'indent' output + indentWidth = 4 + // stdinAlias is a portable alias for stdin. This convention is used in + // other CLIs, e.g., kubectl. + stdinAlias = "-" + usageCommand = "ginkgo outline <filename>" +) + +type outlineConfig struct { + Format string +} + +func BuildOutlineCommand() command.Command { + conf := outlineConfig{ + Format: "csv", + } + flags, err := types.NewGinkgoFlagSet( + types.GinkgoFlags{ + {Name: "format", KeyPath: "Format", + Usage: "Format of outline", + UsageArgument: "one of 'csv', 'indent', or 'json'", + UsageDefaultValue: conf.Format, + }, + }, + &conf, + types.GinkgoFlagSections{}, + ) + if err != nil { + panic(err) + } + + return command.Command{ + Name: "outline", + Usage: "ginkgo outline <FILENAME>", + ShortDoc: "Create an outline of Ginkgo symbols for a file", + Documentation: "To read from stdin, use: `ginkgo outline -`", + DocLink: "creating-an-outline-of-specs", + Flags: flags, + Command: func(args []string, _ []string) { + outlineFile(args, conf.Format) + }, + } +} + +func outlineFile(args []string, format string) { + if len(args) != 1 { + command.AbortWithUsage("outline expects exactly one argument") + } + + filename := args[0] + var src *os.File + if filename == stdinAlias { + src = os.Stdin + } else { + var err error + src, err = os.Open(filename) + command.AbortIfError("Failed to open file:", err) + } + + fset := token.NewFileSet() + + parsedSrc, err := parser.ParseFile(fset, filename, src, 0) + command.AbortIfError("Failed to parse source:", err) + + o, err := FromASTFile(fset, parsedSrc) + command.AbortIfError("Failed to create outline:", err) + + var oerr error + switch format { + case "csv": + _, oerr = fmt.Print(o) + case "indent": + _, oerr = fmt.Print(o.StringIndent(indentWidth)) + case "json": + b, err := json.Marshal(o) + if err != nil { + println(fmt.Sprintf("error marshalling to json: %s", err)) + } + _, oerr = fmt.Println(string(b)) + default: + command.AbortWith("Format %s not accepted", format) + } + command.AbortIfError("Failed to write outline:", oerr) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go new file mode 100644 index
000000000..aaed4d570 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/run/run_command.go @@ -0,0 +1,232 @@ +package run + +import ( + "fmt" + "os" + "strings" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildRunCommand() command.Command { + var suiteConfig = types.NewDefaultSuiteConfig() + var reporterConfig = types.NewDefaultReporterConfig() + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildRunCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + + interruptHandler := interrupt_handler.NewInterruptHandler(nil) + interrupt_handler.SwallowSigQuit() + + return command.Command{ + Name: "run", + Flags: flags, + Usage: "ginkgo run <FLAGS> <PACKAGES> -- <PASS-THROUGHS>", + ShortDoc: "Run the tests in the passed-in <PACKAGES> (or the package in the current directory if left blank)", + Documentation: "Any arguments after -- will be passed to the test.", + DocLink: "running-tests", + Command: func(args []string, additionalArgs []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + runner := &SpecRunner{ + cliConfig: cliConfig, + goFlagsConfig: goFlagsConfig, + suiteConfig: suiteConfig, + reporterConfig: reporterConfig, + flags: flags, + + interruptHandler: interruptHandler, + } + + runner.RunSpecs(args, additionalArgs) + }, + } +} + +type SpecRunner struct { + suiteConfig types.SuiteConfig + reporterConfig types.ReporterConfig + cliConfig types.CLIConfig + goFlagsConfig types.GoFlagsConfig + flags types.GinkgoFlagSet + + interruptHandler *interrupt_handler.InterruptHandler +} + +func (r *SpecRunner) RunSpecs(args []string, additionalArgs []string) { + suites := internal.FindSuites(args, r.cliConfig, true) + skippedSuites := suites.WithState(internal.TestSuiteStateSkippedByFilter) + suites = suites.WithoutState(internal.TestSuiteStateSkippedByFilter) + + internal.VerifyCLIAndFrameworkVersion(suites) + + if len(skippedSuites) > 0 { + fmt.Println("Will skip:") + for _, skippedSuite := range skippedSuites { + fmt.Println(" " + skippedSuite.Path) + } + } + + if len(skippedSuites) > 0 && len(suites) == 0 { + command.AbortGracefullyWith("All tests skipped!
Exiting...") + } + + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + if len(suites) > 1 && !r.flags.WasSet("succinct") && r.reporterConfig.Verbosity().LT(types.VerbosityLevelVerbose) { + r.reporterConfig.Succinct = true + } + + t := time.Now() + var endTime time.Time + if r.suiteConfig.Timeout > 0 { + endTime = t.Add(r.suiteConfig.Timeout) + } + + iteration := 0 +OUTER_LOOP: + for { + if !r.flags.WasSet("seed") { + r.suiteConfig.RandomSeed = time.Now().Unix() + } + if r.cliConfig.RandomizeSuites && len(suites) > 1 { + suites = suites.ShuffledCopy(r.suiteConfig.RandomSeed) + } + + opc := internal.NewOrderedParallelCompiler(r.cliConfig.ComputedNumCompilers()) + opc.StartCompiling(suites, r.goFlagsConfig) + + SUITE_LOOP: + for { + suiteIdx, suite := opc.Next() + if suiteIdx >= len(suites) { + break SUITE_LOOP + } + suites[suiteIdx] = suite + + if r.interruptHandler.Status().Interrupted() { + opc.StopAndDrain() + break OUTER_LOOP + } + + if suites[suiteIdx].State.Is(internal.TestSuiteStateSkippedDueToEmptyCompilation) { + fmt.Printf("Skipping %s (no test files)\n", suite.Path) + continue SUITE_LOOP + } + + if suites[suiteIdx].State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suites[suiteIdx].CompilationError.Error()) + if !r.cliConfig.KeepGoing { + opc.StopAndDrain() + } + continue SUITE_LOOP + } + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 && !r.cliConfig.KeepGoing { + suites[suiteIdx].State = internal.TestSuiteStateSkippedDueToPriorFailures + opc.StopAndDrain() + continue SUITE_LOOP + } + + if !endTime.IsZero() { + r.suiteConfig.Timeout = endTime.Sub(time.Now()) + if r.suiteConfig.Timeout <= 0 { + suites[suiteIdx].State = internal.TestSuiteStateFailedDueToTimeout + opc.StopAndDrain() + continue SUITE_LOOP + } + } + + suites[suiteIdx] = internal.RunCompiledSuite(suites[suiteIdx], r.suiteConfig, r.reporterConfig, r.cliConfig, r.goFlagsConfig, additionalArgs) + } + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + if iteration > 0 { + fmt.Printf("\nTests failed on attempt #%d\n\n", iteration+1) + } + break OUTER_LOOP + } + + if r.cliConfig.UntilItFails { + fmt.Printf("\nAll tests passed...\nWill keep running them until they fail.\nThis was attempt #%d\n%s\n", iteration+1, orcMessage(iteration+1)) + } else if r.cliConfig.Repeat > 0 && iteration < r.cliConfig.Repeat { + fmt.Printf("\nAll tests passed...\nThis was attempt %d of %d.\n", iteration+1, r.cliConfig.Repeat+1) + } else { + break OUTER_LOOP + } + iteration += 1 + } + + internal.Cleanup(r.goFlagsConfig, suites...) + + messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, r.cliConfig, r.suiteConfig, r.reporterConfig, r.goFlagsConfig) + command.AbortIfError("could not finalize profiles:", err) + for _, message := range messages { + fmt.Println(message) + } + + fmt.Printf("\nGinkgo ran %d %s in %s\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), time.Since(t)) + + if suites.CountWithState(internal.TestSuiteStateFailureStates...) 
== 0 { + if suites.AnyHaveProgrammaticFocus() && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { + fmt.Printf("Test Suite Passed\n") + fmt.Printf("Detected Programmatic Focus - setting exit status to %d\n", types.GINKGO_FOCUS_EXIT_CODE) + command.Abort(command.AbortDetails{ExitCode: types.GINKGO_FOCUS_EXIT_CODE}) + } else { + fmt.Printf("Test Suite Passed\n") + command.Abort(command.AbortDetails{}) + } + } else { + fmt.Fprintln(formatter.ColorableStdOut, "") + if len(suites) > 1 && suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + fmt.Fprintln(formatter.ColorableStdOut, + internal.FailedSuitesReport(suites, formatter.NewWithNoColorBool(r.reporterConfig.NoColor))) + } + fmt.Printf("Test Suite Failed\n") + command.Abort(command.AbortDetails{ExitCode: 1}) + } +} + +func orcMessage(iteration int) string { + if iteration < 10 { + return "" + } else if iteration < 30 { + return []string{ + "If at first you succeed...", + "...try, try again.", + "Looking good!", + "Still good...", + "I think your tests are fine....", + "Yep, still passing", + "Oh boy, here I go testin' again!", + "Even the gophers are getting bored", + "Did you try -race?", + "Maybe you should stop now?", + "I'm getting tired...", + "What if I just made you a sandwich?", + "Hit ^C, hit ^C, please hit ^C", + "Make it stop. Please!", + "Come on! Enough is enough!", + "Dave, this conversation can serve no purpose anymore. Goodbye.", + "Just what do you think you're doing, Dave? ", + "I, Sisyphus", + "Insanity: doing the same thing over and over again and expecting different results. -Einstein", + "I guess Einstein never tried to churn butter", + }[iteration-10] + "\n" + } else { + return "No, seriously... you can probably stop now.\n" + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go new file mode 100644 index 000000000..7dd294394 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/unfocus/unfocus_command.go @@ -0,0 +1,186 @@ +package unfocus + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/token" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/ginkgo/command" +) + +func BuildUnfocusCommand() command.Command { + return command.Command{ + Name: "unfocus", + Usage: "ginkgo unfocus", + ShortDoc: "Recursively unfocus any focused tests under the current directory", + DocLink: "filtering-specs", + Command: func(_ []string, _ []string) { + unfocusSpecs() + }, + } +} + +func unfocusSpecs() { + fmt.Println("Scanning for focus...") + + goFiles := make(chan string) + go func() { + unfocusDir(goFiles, ".") + close(goFiles) + }() + + const workers = 10 + wg := sync.WaitGroup{} + wg.Add(workers) + + for i := 0; i < workers; i++ { + go func() { + for path := range goFiles { + unfocusFile(path) + } + wg.Done() + }() + } + + wg.Wait() +} + +func unfocusDir(goFiles chan string, path string) { + files, err := os.ReadDir(path) + if err != nil { + fmt.Println(err.Error()) + return + } + + for _, f := range files { + switch { + case f.IsDir() && shouldProcessDir(f.Name()): + unfocusDir(goFiles, filepath.Join(path, f.Name())) + case !f.IsDir() && shouldProcessFile(f.Name()): + goFiles <- filepath.Join(path, f.Name()) + } + } +} + +func shouldProcessDir(basename string) bool { + return basename != "vendor" && !strings.HasPrefix(basename, ".") +} + +func shouldProcessFile(basename string) bool { + return strings.HasSuffix(basename, ".go") +} + +func 
unfocusFile(path string) { + data, err := os.ReadFile(path) + if err != nil { + fmt.Printf("error reading file '%s': %s\n", path, err.Error()) + return + } + + ast, err := parser.ParseFile(token.NewFileSet(), path, bytes.NewReader(data), parser.ParseComments) + if err != nil { + fmt.Printf("error parsing file '%s': %s\n", path, err.Error()) + return + } + + eliminations := scanForFocus(ast) + if len(eliminations) == 0 { + return + } + + fmt.Printf("...updating %s\n", path) + backup, err := writeBackup(path, data) + if err != nil { + fmt.Printf("error creating backup file: %s\n", err.Error()) + return + } + + if err := updateFile(path, data, eliminations); err != nil { + fmt.Printf("error writing file '%s': %s\n", path, err.Error()) + return + } + + os.Remove(backup) +} + +func writeBackup(path string, data []byte) (string, error) { + t, err := os.CreateTemp(filepath.Dir(path), filepath.Base(path)) + + if err != nil { + return "", fmt.Errorf("error creating temporary file: %w", err) + } + defer t.Close() + + if _, err := io.Copy(t, bytes.NewReader(data)); err != nil { + return "", fmt.Errorf("error writing to temporary file: %w", err) + } + + return t.Name(), nil +} + +func updateFile(path string, data []byte, eliminations [][]int64) error { + to, err := os.Create(path) + if err != nil { + return fmt.Errorf("error opening file for writing '%s': %w\n", path, err) + } + defer to.Close() + + from := bytes.NewReader(data) + var cursor int64 + for _, eliminationRange := range eliminations { + positionToEliminate, lengthToEliminate := eliminationRange[0]-1, eliminationRange[1] + if _, err := io.CopyN(to, from, positionToEliminate-cursor); err != nil { + return fmt.Errorf("error copying data: %w", err) + } + + cursor = positionToEliminate + lengthToEliminate + + if _, err := from.Seek(lengthToEliminate, io.SeekCurrent); err != nil { + return fmt.Errorf("error seeking to position in buffer: %w", err) + } + } + + if _, err := io.Copy(to, from); err != nil { + return fmt.Errorf("error copying end data: %w", err) + } + + return nil +} + +func scanForFocus(file *ast.File) (eliminations [][]int64) { + ast.Inspect(file, func(n ast.Node) bool { + if c, ok := n.(*ast.CallExpr); ok { + if i, ok := c.Fun.(*ast.Ident); ok { + if isFocus(i.Name) { + eliminations = append(eliminations, []int64{int64(i.Pos()), 1}) + } + } + } + + if i, ok := n.(*ast.Ident); ok { + if i.Name == "Focus" { + eliminations = append(eliminations, []int64{int64(i.Pos()), 6}) + } + } + + return true + }) + + return eliminations +} + +func isFocus(name string) bool { + switch name { + case "FDescribe", "FContext", "FIt", "FDescribeTable", "FEntry", "FSpecify", "FWhen": + return true + default: + return false + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go new file mode 100644 index 000000000..6c485c5b1 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta.go @@ -0,0 +1,22 @@ +package watch + +import "sort" + +type Delta struct { + ModifiedPackages []string + + NewSuites []*Suite + RemovedSuites []*Suite + modifiedSuites []*Suite +} + +type DescendingByDelta []*Suite + +func (a DescendingByDelta) Len() int { return len(a) } +func (a DescendingByDelta) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a DescendingByDelta) Less(i, j int) bool { return a[i].Delta() > a[j].Delta() } + +func (d Delta) ModifiedSuites() []*Suite { + sort.Sort(DescendingByDelta(d.modifiedSuites)) + return d.modifiedSuites +} diff --git 
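scanForFocus above records (position, length) pairs for the splice in updateFile: 1 byte for the leading "F" of a focused container, 6 bytes for a Focus decorator (the identifier plus, apparently, its trailing comma). A self-contained sketch of the ast.Inspect pass over a toy source string:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

const src = `package demo

func demo() { FIt("runs only this", func() {}) }`

func main() {
	f, err := parser.ParseFile(token.NewFileSet(), "demo.go", src, 0)
	if err != nil {
		panic(err)
	}
	ast.Inspect(f, func(n ast.Node) bool {
		if c, ok := n.(*ast.CallExpr); ok {
			if id, ok := c.Fun.(*ast.Ident); ok && id.Name == "FIt" {
				// unfocus would splice out one byte here, turning FIt into It;
				// Pos() is 1-based, hence the -1 seen in updateFile.
				fmt.Printf("focused call %q at byte offset %d\n", id.Name, int(id.Pos())-1)
			}
		}
		return true
	})
}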
a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go new file mode 100644 index 000000000..26418ac62 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/delta_tracker.go @@ -0,0 +1,75 @@ +package watch + +import ( + "fmt" + + "regexp" + + "github.com/onsi/ginkgo/v2/ginkgo/internal" +) + +type SuiteErrors map[internal.TestSuite]error + +type DeltaTracker struct { + maxDepth int + watchRegExp *regexp.Regexp + suites map[string]*Suite + packageHashes *PackageHashes +} + +func NewDeltaTracker(maxDepth int, watchRegExp *regexp.Regexp) *DeltaTracker { + return &DeltaTracker{ + maxDepth: maxDepth, + watchRegExp: watchRegExp, + packageHashes: NewPackageHashes(watchRegExp), + suites: map[string]*Suite{}, + } +} + +func (d *DeltaTracker) Delta(suites internal.TestSuites) (delta Delta, errors SuiteErrors) { + errors = SuiteErrors{} + delta.ModifiedPackages = d.packageHashes.CheckForChanges() + + providedSuitePaths := map[string]bool{} + for _, suite := range suites { + providedSuitePaths[suite.Path] = true + } + + d.packageHashes.StartTrackingUsage() + + for _, suite := range d.suites { + if providedSuitePaths[suite.Suite.Path] { + if suite.Delta() > 0 { + delta.modifiedSuites = append(delta.modifiedSuites, suite) + } + } else { + delta.RemovedSuites = append(delta.RemovedSuites, suite) + } + } + + d.packageHashes.StopTrackingUsageAndPrune() + + for _, suite := range suites { + _, ok := d.suites[suite.Path] + if !ok { + s, err := NewSuite(suite, d.maxDepth, d.packageHashes) + if err != nil { + errors[suite] = err + continue + } + d.suites[suite.Path] = s + delta.NewSuites = append(delta.NewSuites, s) + } + } + + return delta, errors +} + +func (d *DeltaTracker) WillRun(suite internal.TestSuite) error { + s, ok := d.suites[suite.Path] + if !ok { + return fmt.Errorf("unknown suite %s", suite.Path) + } + + return s.MarkAsRunAndRecomputedDependencies(d.maxDepth) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go new file mode 100644 index 000000000..f5ddff30f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/dependencies.go @@ -0,0 +1,92 @@ +package watch + +import ( + "go/build" + "regexp" +) + +var ginkgoAndGomegaFilter = regexp.MustCompile(`github\.com/onsi/ginkgo|github\.com/onsi/gomega`) +var ginkgoIntegrationTestFilter = regexp.MustCompile(`github\.com/onsi/ginkgo/integration`) //allow us to integration test this thing + +type Dependencies struct { + deps map[string]int +} + +func NewDependencies(path string, maxDepth int) (Dependencies, error) { + d := Dependencies{ + deps: map[string]int{}, + } + + if maxDepth == 0 { + return d, nil + } + + err := d.seedWithDepsForPackageAtPath(path) + if err != nil { + return d, err + } + + for depth := 1; depth < maxDepth; depth++ { + n := len(d.deps) + d.addDepsForDepth(depth) + if n == len(d.deps) { + break + } + } + + return d, nil +} + +func (d Dependencies) Dependencies() map[string]int { + return d.deps +} + +func (d Dependencies) seedWithDepsForPackageAtPath(path string) error { + pkg, err := build.ImportDir(path, 0) + if err != nil { + return err + } + + d.resolveAndAdd(pkg.Imports, 1) + d.resolveAndAdd(pkg.TestImports, 1) + d.resolveAndAdd(pkg.XTestImports, 1) + + delete(d.deps, pkg.Dir) + return nil +} + +func (d Dependencies) addDepsForDepth(depth int) { + for dep, depDepth := range d.deps { + if depDepth == depth { + d.addDepsForDep(dep, depth+1) + } + } 
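NewDependencies above expands imports breadth-first, one depth level per pass, and stops early once a pass adds nothing (a fixed point). The same shape over a toy import graph, with hypothetical package names:

package main

import "fmt"

var imports = map[string][]string{
	"suite": {"a", "b"},
	"a":     {"c"},
	"b":     {"c"},
	"c":     {},
}

func main() {
	deps := map[string]int{"a": 1, "b": 1} // seeded from the suite itself
	for depth := 1; depth < 10; depth++ {
		before := len(deps)
		for pkg, d := range deps {
			if d != depth {
				continue // only expand the current frontier
			}
			for _, imp := range imports[pkg] {
				if _, seen := deps[imp]; !seen {
					deps[imp] = depth + 1 // first (shallowest) depth wins
				}
			}
		}
		if len(deps) == before {
			break // nothing new at this level: fixed point reached
		}
	}
	fmt.Println(deps) // map[a:1 b:1 c:2]
}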
+} + +func (d Dependencies) addDepsForDep(dep string, depth int) { + pkg, err := build.ImportDir(dep, 0) + if err != nil { + println(err.Error()) + return + } + d.resolveAndAdd(pkg.Imports, depth) +} + +func (d Dependencies) resolveAndAdd(deps []string, depth int) { + for _, dep := range deps { + pkg, err := build.Import(dep, ".", 0) + if err != nil { + continue + } + if !pkg.Goroot && (!ginkgoAndGomegaFilter.Match([]byte(pkg.Dir)) || ginkgoIntegrationTestFilter.Match([]byte(pkg.Dir))) { + d.addDepIfNotPresent(pkg.Dir, depth) + } + } +} + +func (d Dependencies) addDepIfNotPresent(dep string, depth int) { + _, ok := d.deps[dep] + if !ok { + d.deps[dep] = depth + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go new file mode 100644 index 000000000..e9f7ec0cb --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hash.go @@ -0,0 +1,108 @@ +package watch + +import ( + "fmt" + "os" + "regexp" + "time" +) + +var goTestRegExp = regexp.MustCompile(`_test\.go$`) + +type PackageHash struct { + CodeModifiedTime time.Time + TestModifiedTime time.Time + Deleted bool + + path string + codeHash string + testHash string + watchRegExp *regexp.Regexp +} + +func NewPackageHash(path string, watchRegExp *regexp.Regexp) *PackageHash { + p := &PackageHash{ + path: path, + watchRegExp: watchRegExp, + } + + p.codeHash, _, p.testHash, _, p.Deleted = p.computeHashes() + + return p +} + +func (p *PackageHash) CheckForChanges() bool { + codeHash, codeModifiedTime, testHash, testModifiedTime, deleted := p.computeHashes() + + if deleted { + if !p.Deleted { + t := time.Now() + p.CodeModifiedTime = t + p.TestModifiedTime = t + } + p.Deleted = true + return true + } + + modified := false + p.Deleted = false + + if p.codeHash != codeHash { + p.CodeModifiedTime = codeModifiedTime + modified = true + } + if p.testHash != testHash { + p.TestModifiedTime = testModifiedTime + modified = true + } + + p.codeHash = codeHash + p.testHash = testHash + return modified +} + +func (p *PackageHash) computeHashes() (codeHash string, codeModifiedTime time.Time, testHash string, testModifiedTime time.Time, deleted bool) { + entries, err := os.ReadDir(p.path) + + if err != nil { + deleted = true + return + } + + for _, entry := range entries { + if entry.IsDir() { + continue + } + + info, err := entry.Info() + if err != nil { + continue + } + + if goTestRegExp.Match([]byte(info.Name())) { + testHash += p.hashForFileInfo(info) + if info.ModTime().After(testModifiedTime) { + testModifiedTime = info.ModTime() + } + continue + } + + if p.watchRegExp.Match([]byte(info.Name())) { + codeHash += p.hashForFileInfo(info) + if info.ModTime().After(codeModifiedTime) { + codeModifiedTime = info.ModTime() + } + } + } + + testHash += codeHash + if codeModifiedTime.After(testModifiedTime) { + testModifiedTime = codeModifiedTime + } + + return +} + +func (p *PackageHash) hashForFileInfo(info os.FileInfo) string { + return fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano()) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go new file mode 100644 index 000000000..b4892bebf --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/package_hashes.go @@ -0,0 +1,85 @@ +package watch + +import ( + "path/filepath" + "regexp" + "sync" +) + +type PackageHashes struct { + PackageHashes map[string]*PackageHash + usedPaths 
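Note that PackageHash above never reads file contents: its "hash" concatenates name/size/mtime per file, which is cheap enough to recompute on every polling tick. A standalone sketch of that fingerprint (not the Ginkgo code itself):

package main

import (
	"fmt"
	"os"
)

// fingerprint mimics computeHashes' per-file key: name, size, mtime in ns.
func fingerprint(dir string) (string, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return "", err
	}
	out := ""
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		info, err := e.Info()
		if err != nil {
			continue
		}
		out += fmt.Sprintf("%s_%d_%d", info.Name(), info.Size(), info.ModTime().UnixNano())
	}
	return out, nil
}

func main() {
	fp, err := fingerprint(".")
	if err != nil {
		panic(err)
	}
	fmt.Println(fp) // changes whenever any file's name, size, or mtime changes
}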
map[string]bool + watchRegExp *regexp.Regexp + lock *sync.Mutex +} + +func NewPackageHashes(watchRegExp *regexp.Regexp) *PackageHashes { + return &PackageHashes{ + PackageHashes: map[string]*PackageHash{}, + usedPaths: nil, + watchRegExp: watchRegExp, + lock: &sync.Mutex{}, + } +} + +func (p *PackageHashes) CheckForChanges() []string { + p.lock.Lock() + defer p.lock.Unlock() + + modified := []string{} + + for _, packageHash := range p.PackageHashes { + if packageHash.CheckForChanges() { + modified = append(modified, packageHash.path) + } + } + + return modified +} + +func (p *PackageHashes) Add(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + _, ok := p.PackageHashes[path] + if !ok { + p.PackageHashes[path] = NewPackageHash(path, p.watchRegExp) + } + + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) Get(path string) *PackageHash { + p.lock.Lock() + defer p.lock.Unlock() + + path, _ = filepath.Abs(path) + if p.usedPaths != nil { + p.usedPaths[path] = true + } + return p.PackageHashes[path] +} + +func (p *PackageHashes) StartTrackingUsage() { + p.lock.Lock() + defer p.lock.Unlock() + + p.usedPaths = map[string]bool{} +} + +func (p *PackageHashes) StopTrackingUsageAndPrune() { + p.lock.Lock() + defer p.lock.Unlock() + + for path := range p.PackageHashes { + if !p.usedPaths[path] { + delete(p.PackageHashes, path) + } + } + + p.usedPaths = nil +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go new file mode 100644 index 000000000..53272df7e --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/suite.go @@ -0,0 +1,87 @@ +package watch + +import ( + "fmt" + "math" + "time" + + "github.com/onsi/ginkgo/v2/ginkgo/internal" +) + +type Suite struct { + Suite internal.TestSuite + RunTime time.Time + Dependencies Dependencies + + sharedPackageHashes *PackageHashes +} + +func NewSuite(suite internal.TestSuite, maxDepth int, sharedPackageHashes *PackageHashes) (*Suite, error) { + deps, err := NewDependencies(suite.Path, maxDepth) + if err != nil { + return nil, err + } + + sharedPackageHashes.Add(suite.Path) + for dep := range deps.Dependencies() { + sharedPackageHashes.Add(dep) + } + + return &Suite{ + Suite: suite, + Dependencies: deps, + + sharedPackageHashes: sharedPackageHashes, + }, nil +} + +func (s *Suite) Delta() float64 { + delta := s.delta(s.Suite.Path, true, 0) * 1000 + for dep, depth := range s.Dependencies.Dependencies() { + delta += s.delta(dep, false, depth) + } + return delta +} + +func (s *Suite) MarkAsRunAndRecomputedDependencies(maxDepth int) error { + s.RunTime = time.Now() + + deps, err := NewDependencies(s.Suite.Path, maxDepth) + if err != nil { + return err + } + + s.sharedPackageHashes.Add(s.Suite.Path) + for dep := range deps.Dependencies() { + s.sharedPackageHashes.Add(dep) + } + + s.Dependencies = deps + + return nil +} + +func (s *Suite) Description() string { + numDeps := len(s.Dependencies.Dependencies()) + pluralizer := "ies" + if numDeps == 1 { + pluralizer = "y" + } + return fmt.Sprintf("%s [%d dependenc%s]", s.Suite.Path, numDeps, pluralizer) +} + +func (s *Suite) delta(packagePath string, includeTests bool, depth int) float64 { + return math.Max(float64(s.dt(packagePath, includeTests)), 0) / float64(depth+1) +} + +func (s *Suite) dt(packagePath string, includeTests bool) time.Duration { + packageHash := s.sharedPackageHashes.Get(packagePath) + var modifiedTime 
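Suite.Delta above scores how "hot" a suite is: the gap between each package's modification time and the last run, weighted down by dependency depth, with the suite's own test files weighted x1000. The arithmetic in isolation, under those assumptions:

package main

import (
	"fmt"
	"math"
	"time"
)

// score mirrors Suite.delta: negative gaps (modified before the run) clamp to 0,
// and deeper dependencies contribute proportionally less.
func score(modified, lastRun time.Time, depth int) float64 {
	dt := modified.Sub(lastRun)
	return math.Max(float64(dt), 0) / float64(depth+1)
}

func main() {
	run := time.Now().Add(-time.Minute)
	ownTests := time.Now()                  // the suite's own _test.go just changed
	dep := time.Now().Add(-2 * time.Minute) // dependency untouched since before the run

	own := score(ownTests, run, 0) * 1000
	depScore := score(dep, run, 1) // 0: modified before the last run
	fmt.Println(own > depScore, depScore) // true 0
}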
time.Time + if includeTests { + modifiedTime = packageHash.TestModifiedTime + } else { + modifiedTime = packageHash.CodeModifiedTime + } + + return modifiedTime.Sub(s.RunTime) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go new file mode 100644 index 000000000..bde4193ce --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/watch/watch_command.go @@ -0,0 +1,192 @@ +package watch + +import ( + "fmt" + "regexp" + "time" + + "github.com/onsi/ginkgo/v2/formatter" + "github.com/onsi/ginkgo/v2/ginkgo/command" + "github.com/onsi/ginkgo/v2/ginkgo/internal" + "github.com/onsi/ginkgo/v2/internal/interrupt_handler" + "github.com/onsi/ginkgo/v2/types" +) + +func BuildWatchCommand() command.Command { + var suiteConfig = types.NewDefaultSuiteConfig() + var reporterConfig = types.NewDefaultReporterConfig() + var cliConfig = types.NewDefaultCLIConfig() + var goFlagsConfig = types.NewDefaultGoFlagsConfig() + + flags, err := types.BuildWatchCommandFlagSet(&suiteConfig, &reporterConfig, &cliConfig, &goFlagsConfig) + if err != nil { + panic(err) + } + interruptHandler := interrupt_handler.NewInterruptHandler(nil) + interrupt_handler.SwallowSigQuit() + + return command.Command{ + Name: "watch", + Flags: flags, + Usage: "ginkgo watch -- ", + ShortDoc: "Watch the passed in and runs their tests whenever changes occur.", + Documentation: "Any arguments after -- will be passed to the test.", + DocLink: "watching-for-changes", + Command: func(args []string, additionalArgs []string) { + var errors []error + cliConfig, goFlagsConfig, errors = types.VetAndInitializeCLIAndGoConfig(cliConfig, goFlagsConfig) + command.AbortIfErrors("Ginkgo detected configuration issues:", errors) + + watcher := &SpecWatcher{ + cliConfig: cliConfig, + goFlagsConfig: goFlagsConfig, + suiteConfig: suiteConfig, + reporterConfig: reporterConfig, + flags: flags, + + interruptHandler: interruptHandler, + } + + watcher.WatchSpecs(args, additionalArgs) + }, + } +} + +type SpecWatcher struct { + suiteConfig types.SuiteConfig + reporterConfig types.ReporterConfig + cliConfig types.CLIConfig + goFlagsConfig types.GoFlagsConfig + flags types.GinkgoFlagSet + + interruptHandler *interrupt_handler.InterruptHandler +} + +func (w *SpecWatcher) WatchSpecs(args []string, additionalArgs []string) { + suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + + internal.VerifyCLIAndFrameworkVersion(suites) + + if len(suites) == 0 { + command.AbortWith("Found no test suites") + } + + fmt.Printf("Identified %d test %s. 
Locating dependencies to a depth of %d (this may take a while)...\n", len(suites), internal.PluralizedWord("suite", "suites", len(suites)), w.cliConfig.Depth) + deltaTracker := NewDeltaTracker(w.cliConfig.Depth, regexp.MustCompile(w.cliConfig.WatchRegExp)) + delta, errors := deltaTracker.Delta(suites) + + fmt.Printf("Watching %d %s:\n", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites))) + for _, suite := range delta.NewSuites { + fmt.Println(" " + suite.Description()) + } + + for suite, err := range errors { + fmt.Printf("Failed to watch %s: %s\n", suite.PackageName, err) + } + + if len(suites) == 1 { + w.updateSeed() + w.compileAndRun(suites[0], additionalArgs) + } + + ticker := time.NewTicker(time.Second) + + for { + select { + case <-ticker.C: + suites := internal.FindSuites(args, w.cliConfig, false).WithoutState(internal.TestSuiteStateSkippedByFilter) + delta, _ := deltaTracker.Delta(suites) + coloredStream := formatter.ColorableStdOut + + suites = internal.TestSuites{} + + if len(delta.NewSuites) > 0 { + fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected %d new %s:{{/}}", len(delta.NewSuites), internal.PluralizedWord("suite", "suites", len(delta.NewSuites)))) + for _, suite := range delta.NewSuites { + suites = append(suites, suite.Suite) + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description())) + } + } + + modifiedSuites := delta.ModifiedSuites() + if len(modifiedSuites) > 0 { + fmt.Fprintln(coloredStream, formatter.F("{{green}}Detected changes in:{{/}}")) + for _, pkg := range delta.ModifiedPackages { + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", pkg)) + } + fmt.Fprintln(coloredStream, formatter.F("{{green}}Will run %d %s:{{/}}", len(modifiedSuites), internal.PluralizedWord("suite", "suites", len(modifiedSuites)))) + for _, suite := range modifiedSuites { + suites = append(suites, suite.Suite) + fmt.Fprintln(coloredStream, formatter.Fi(1, "%s", suite.Description())) + } + fmt.Fprintln(coloredStream, "") + } + + if len(suites) == 0 { + break + } + + w.updateSeed() + w.computeSuccinctMode(len(suites)) + for idx := range suites { + if w.interruptHandler.Status().Interrupted() { + return + } + deltaTracker.WillRun(suites[idx]) + suites[idx] = w.compileAndRun(suites[idx], additionalArgs) + } + color := "{{green}}" + if suites.CountWithState(internal.TestSuiteStateFailureStates...) > 0 { + color = "{{red}}" + } + fmt.Fprintln(coloredStream, formatter.F(color+"\nDone. 
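The watch loop above is plain polling: a one-second ticker re-walks the suites, asks the DeltaTracker what changed, and reruns only that. The control-flow skeleton, with the Ginkgo internals replaced by hypothetical stubs:

package main

import (
	"fmt"
	"time"
)

func findChanged() []string { return nil } // stub for FindSuites + DeltaTracker.Delta

func main() {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	done := time.After(3 * time.Second) // stand-in for the interrupt channel

	for {
		select {
		case <-ticker.C:
			for _, suite := range findChanged() {
				fmt.Println("would recompile and rerun:", suite)
			}
		case <-done:
			return // the real loop returns on interruptHandler's channel
		}
	}
}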
Resuming watch...{{/}}")) + + messages, err := internal.FinalizeProfilesAndReportsForSuites(suites, w.cliConfig, w.suiteConfig, w.reporterConfig, w.goFlagsConfig) + command.AbortIfError("could not finalize profiles:", err) + for _, message := range messages { + fmt.Println(message) + } + case <-w.interruptHandler.Status().Channel: + return + } + } +} + +func (w *SpecWatcher) compileAndRun(suite internal.TestSuite, additionalArgs []string) internal.TestSuite { + suite = internal.CompileSuite(suite, w.goFlagsConfig) + if suite.State.Is(internal.TestSuiteStateFailedToCompile) { + fmt.Println(suite.CompilationError.Error()) + return suite + } + if w.interruptHandler.Status().Interrupted() { + return suite + } + suite = internal.RunCompiledSuite(suite, w.suiteConfig, w.reporterConfig, w.cliConfig, w.goFlagsConfig, additionalArgs) + internal.Cleanup(w.goFlagsConfig, suite) + return suite +} + +func (w *SpecWatcher) computeSuccinctMode(numSuites int) { + if w.reporterConfig.Verbosity().GTE(types.VerbosityLevelVerbose) { + w.reporterConfig.Succinct = false + return + } + + if w.flags.WasSet("succinct") { + return + } + + if numSuites == 1 { + w.reporterConfig.Succinct = false + } + + if numSuites > 1 { + w.reporterConfig.Succinct = true + } +} + +func (w *SpecWatcher) updateSeed() { + if !w.flags.WasSet("seed") { + w.suiteConfig.RandomSeed = time.Now().Unix() + } +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go new file mode 100644 index 000000000..85162720f --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_cli_dependencies.go @@ -0,0 +1,8 @@ +//go:build ginkgoclidependencies +// +build ginkgoclidependencies + +package ginkgo + +import ( + _ "github.com/onsi/ginkgo/v2/ginkgo" +) diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go index 1beeb1144..28447ffdd 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo_t_dsl.go @@ -1,26 +1,42 @@ package ginkgo -import "github.com/onsi/ginkgo/v2/internal/testingtproxy" +import ( + "github.com/onsi/ginkgo/v2/internal/testingtproxy" +) /* -GinkgoT() implements an interface analogous to *testing.T and can be used with -third-party libraries that accept *testing.T through an interface. +GinkgoT() implements an interface that allows third party libraries to integrate with and build on top of Ginkgo. + +GinkgoT() is analogous to *testing.T and implements the majority of *testing.T's methods. It can be typically be used a a drop-in replacement with third-party libraries that accept *testing.T through an interface. GinkgoT() takes an optional offset argument that can be used to get the -correct line number associated with the failure. 
+correct line number associated with the failure - though you do not need to use this if you call GinkgoHelper() or GinkgoT().Helper() appropriately You can learn more here: https://onsi.github.io/ginkgo/#using-third-party-libraries */ -func GinkgoT(optionalOffset ...int) GinkgoTInterface { +func GinkgoT(optionalOffset ...int) FullGinkgoTInterface { offset := 3 if len(optionalOffset) > 0 { offset = optionalOffset[0] } - return testingtproxy.New(GinkgoWriter, Fail, Skip, DeferCleanup, CurrentSpecReport, offset) + return testingtproxy.New( + GinkgoWriter, + Fail, + Skip, + DeferCleanup, + CurrentSpecReport, + AddReportEntry, + GinkgoRecover, + AttachProgressReporter, + suiteConfig.RandomSeed, + suiteConfig.ParallelProcess, + suiteConfig.ParallelTotal, + reporterConfig.NoColor, + offset) } /* -The interface returned by GinkgoT(). This covers most of the methods in the testing package's T. +The portion of the interface returned by GinkgoT() that maps onto methods in the testing package's T. */ type GinkgoTInterface interface { Cleanup(func()) @@ -43,3 +59,36 @@ type GinkgoTInterface interface { Skipped() bool TempDir() string } + +/* +Additional methods returned by GinkgoT() that provide deeper integration points into Ginkgo +*/ +type FullGinkgoTInterface interface { + GinkgoTInterface + + AddReportEntryVisibilityAlways(name string, args ...any) + AddReportEntryVisibilityFailureOrVerbose(name string, args ...any) + AddReportEntryVisibilityNever(name string, args ...any) + + //Prints to the GinkgoWriter + Print(a ...interface{}) + Printf(format string, a ...interface{}) + Println(a ...interface{}) + + //Provides access to Ginkgo's color formatting, correctly configured to match the color settings specified in the invocation of ginkgo + F(format string, args ...any) string + Fi(indentation uint, format string, args ...any) string + Fiw(indentation uint, maxWidth uint, format string, args ...any) string + + //Generates a formatted string version of the current spec's timeline + RenderTimeline() string + + GinkgoRecover() + DeferCleanup(args ...any) + + RandomSeed() int64 + ParallelProcess() int + ParallelTotal() int + + AttachProgressReporter(func() string) func() +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/group.go b/vendor/github.com/onsi/ginkgo/v2/internal/group.go index c76c3bd94..ae1b7b011 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/group.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/group.go @@ -94,15 +94,19 @@ type group struct { runOncePairs map[uint]runOncePairs runOnceTracker map[runOncePair]types.SpecState - succeeded bool + succeeded bool + failedInARunOnceBefore bool + continueOnFailure bool } func newGroup(suite *Suite) *group { return &group{ - suite: suite, - runOncePairs: map[uint]runOncePairs{}, - runOnceTracker: map[runOncePair]types.SpecState{}, - succeeded: true, + suite: suite, + runOncePairs: map[uint]runOncePairs{}, + runOnceTracker: map[runOncePair]types.SpecState{}, + succeeded: true, + failedInARunOnceBefore: false, + continueOnFailure: false, } } @@ -116,6 +120,7 @@ func (g *group) initialReportForSpec(spec Spec) types.SpecReport { LeafNodeText: spec.FirstNodeWithType(types.NodeTypeIt).Text, LeafNodeLabels: []string(spec.FirstNodeWithType(types.NodeTypeIt).Labels), ParallelProcess: g.suite.config.ParallelProcess, + RunningInParallel: g.suite.isRunningInParallel(), IsSerial: spec.Nodes.HasNodeMarkedSerial(), IsInOrderedContainer: !spec.Nodes.FirstNodeMarkedOrdered().IsZero(), MaxFlakeAttempts: spec.Nodes.GetMaxFlakeAttempts(), @@ -136,10 
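The widened FullGinkgoTInterface lets helper libraries reach Ginkgo features beyond *testing.T. A hypothetical helper (the function name and its use are illustrative) built only from methods declared in the interface above; it must be called from within a running spec or setup node:

package helpers

import . "github.com/onsi/ginkgo/v2"

// DumpSeed attaches a progress reporter for the duration of the current spec,
// reporting the suite's random seed and parallel topology, and detaches it via
// the cancel func that AttachProgressReporter returns.
func DumpSeed() {
	t := GinkgoT()
	detach := t.AttachProgressReporter(func() string {
		return t.F("{{bold}}seed:{{/}} %d (proc %d/%d)",
			t.RandomSeed(), t.ParallelProcess(), t.ParallelTotal())
	})
	t.DeferCleanup(detach)
}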
+141,14 @@ func (g *group) evaluateSkipStatus(spec Spec) (types.SpecState, types.Failure) { if !g.suite.deadline.IsZero() && g.suite.deadline.Before(time.Now()) { return types.SpecStateSkipped, types.Failure{} } - if !g.succeeded { + if !g.succeeded && !g.continueOnFailure { return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), "Spec skipped because an earlier spec in an ordered container failed") } + if g.failedInARunOnceBefore && g.continueOnFailure { + return types.SpecStateSkipped, g.suite.failureForLeafNodeWithMessage(spec.FirstNodeWithType(types.NodeTypeIt), + "Spec skipped because a BeforeAll node failed") + } beforeOncePairs := g.runOncePairs[spec.SubjectID()].withType(types.NodeTypeBeforeAll | types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach) for _, pair := range beforeOncePairs { if g.runOnceTracker[pair].Is(types.SpecStateSkipped) { @@ -167,7 +176,8 @@ func (g *group) isLastSpecWithPair(specID uint, pair runOncePair) bool { return lastSpecID == specID } -func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { +func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) bool { + failedInARunOnceBefore := false pairs := g.runOncePairs[spec.SubjectID()] nodes := spec.Nodes.WithType(types.NodeTypeBeforeAll) @@ -193,6 +203,7 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { } if g.suite.currentSpecReport.State != types.SpecStatePassed { terminatingNode, terminatingPair = node, oncePair + failedInARunOnceBefore = !terminatingPair.isZero() break } } @@ -215,7 +226,7 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { //this node has already been run on this attempt, don't rerun it return false } - pair := runOncePair{} + var pair runOncePair switch node.NodeType { case types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll: // check if we were generated in an AfterNode that has already run @@ -245,9 +256,13 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { if !terminatingPair.isZero() && terminatingNode.NestingLevel == node.NestingLevel { return true //...or, a run-once node at our nesting level was skipped which means this is our last chance to run } - case types.SpecStateFailed, types.SpecStatePanicked: // the spec has failed... + case types.SpecStateFailed, types.SpecStatePanicked, types.SpecStateTimedout: // the spec has failed... if isFinalAttempt { - return true //...if this was the last attempt then we're the last spec to run and so the AfterNode should run + if g.continueOnFailure { + return isLastSpecWithPair || failedInARunOnceBefore //...we're configured to continue on failures - so we should only run if we're the last spec for this pair or if we failed in a runOnceBefore (which means we _are_ the last spec to run) + } else { + return true //...this was the last attempt and continueOnFailure is false therefore we are the last spec to run and so the AfterNode should run + } } if !terminatingPair.isZero() { // ...and it failed in a run-once. 
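With the skip logic above, an Ordered container marked ContinueOnFailure keeps running later specs after one fails; only a failed run-once BeforeAll still skips the remainder. A usage sketch of the new decorator:

package demo_test

import . "github.com/onsi/ginkgo/v2"

var _ = Describe("migration steps", Ordered, ContinueOnFailure, func() {
	BeforeAll(func() { /* if this fails, the Its below are skipped */ })

	It("step 1", func() { /* a failure here no longer skips step 2 */ })
	It("step 2", func() {})
})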
which will be running again if node.NodeType.Is(types.NodeTypeCleanupAfterEach | types.NodeTypeCleanupAfterAll) { @@ -280,10 +295,12 @@ func (g *group) attemptSpec(isFinalAttempt bool, spec Spec) { includeDeferCleanups = true } + return failedInARunOnceBefore } func (g *group) run(specs Specs) { g.specs = specs + g.continueOnFailure = specs[0].Nodes.FirstNodeMarkedOrdered().MarkedContinueOnFailure for _, spec := range g.specs { g.runOncePairs[spec.SubjectID()] = runOncePairsForSpec(spec) } @@ -300,8 +317,8 @@ func (g *group) run(specs Specs) { skip := g.suite.config.DryRun || g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates|types.SpecStateSkipped|types.SpecStatePending) g.suite.currentSpecReport.StartTime = time.Now() + failedInARunOnceBefore := false if !skip { - var maxAttempts = 1 if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { @@ -319,14 +336,14 @@ func (g *group) run(specs Specs) { g.suite.outputInterceptor.StartInterceptingOutput() if attempt > 0 { if g.suite.currentSpecReport.MaxMustPassRepeatedly > 0 { - fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Passed. Repeating...\n", attempt) + g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRepeat, Attempt: attempt}) } if g.suite.currentSpecReport.MaxFlakeAttempts > 0 { - fmt.Fprintf(g.suite.writer, "\nGinkgo: Attempt #%d Failed. Retrying...\n", attempt) + g.suite.handleSpecEvent(types.SpecEvent{SpecEventType: types.SpecEventSpecRetry, Attempt: attempt}) } } - g.attemptSpec(attempt == maxAttempts-1, spec) + failedInARunOnceBefore = g.attemptSpec(attempt == maxAttempts-1, spec) g.suite.currentSpecReport.EndTime = time.Now() g.suite.currentSpecReport.RunTime = g.suite.currentSpecReport.EndTime.Sub(g.suite.currentSpecReport.StartTime) @@ -341,6 +358,10 @@ func (g *group) run(specs Specs) { if g.suite.currentSpecReport.MaxFlakeAttempts > 0 { if g.suite.currentSpecReport.State.Is(types.SpecStatePassed | types.SpecStateSkipped | types.SpecStateAborted | types.SpecStateInterrupted) { break + } else if attempt < maxAttempts-1 { + af := types.AdditionalFailure{State: g.suite.currentSpecReport.State, Failure: g.suite.currentSpecReport.Failure} + af.Failure.Message = fmt.Sprintf("Failure recorded during attempt %d:\n%s", attempt+1, af.Failure.Message) + g.suite.currentSpecReport.AdditionalFailures = append(g.suite.currentSpecReport.AdditionalFailures, af) } } } @@ -350,6 +371,7 @@ func (g *group) run(specs Specs) { g.suite.processCurrentSpecReport() if g.suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { g.succeeded = false + g.failedInARunOnceBefore = g.failedInARunOnceBefore || failedInARunOnceBefore } g.suite.selectiveLock.Lock() g.suite.currentSpecReport = types.SpecReport{} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go index ac6f51040..8ed86111f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/interrupt_handler/interrupt_handler.go @@ -10,7 +10,7 @@ import ( "github.com/onsi/ginkgo/v2/internal/parallel_support" ) -const ABORT_POLLING_INTERVAL = 500 * time.Millisecond +var ABORT_POLLING_INTERVAL = 500 * time.Millisecond type InterruptCause uint @@ -62,13 +62,14 @@ type InterruptHandlerInterface interface { } type InterruptHandler struct { - c chan interface{} - lock *sync.Mutex - level InterruptLevel - cause InterruptCause - client parallel_support.Client - 
stop chan interface{} - signals []os.Signal + c chan interface{} + lock *sync.Mutex + level InterruptLevel + cause InterruptCause + client parallel_support.Client + stop chan interface{} + signals []os.Signal + requestAbortCheck chan interface{} } func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) *InterruptHandler { @@ -76,11 +77,12 @@ func NewInterruptHandler(client parallel_support.Client, signals ...os.Signal) * signals = []os.Signal{os.Interrupt, syscall.SIGTERM} } handler := &InterruptHandler{ - c: make(chan interface{}), - lock: &sync.Mutex{}, - stop: make(chan interface{}), - client: client, - signals: signals, + c: make(chan interface{}), + lock: &sync.Mutex{}, + stop: make(chan interface{}), + requestAbortCheck: make(chan interface{}), + client: client, + signals: signals, } handler.registerForInterrupts() return handler @@ -109,6 +111,12 @@ func (handler *InterruptHandler) registerForInterrupts() { pollTicker.Stop() return } + case <-handler.requestAbortCheck: + if handler.client.ShouldAbort() { + close(abortChannel) + pollTicker.Stop() + return + } case <-handler.stop: pollTicker.Stop() return @@ -152,11 +160,18 @@ func (handler *InterruptHandler) registerForInterrupts() { func (handler *InterruptHandler) Status() InterruptStatus { handler.lock.Lock() - defer handler.lock.Unlock() - - return InterruptStatus{ + status := InterruptStatus{ Level: handler.level, Channel: handler.c, Cause: handler.cause, } + handler.lock.Unlock() + + if handler.client != nil && handler.client.ShouldAbort() && !status.Interrupted() { + close(handler.requestAbortCheck) + <-status.Channel + return handler.Status() + } + + return status } diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/node.go b/vendor/github.com/onsi/ginkgo/v2/internal/node.go index 9eb835e9d..14c7cf54e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/node.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/node.go @@ -44,23 +44,23 @@ type Node struct { SynchronizedAfterSuiteProc1Body func(SpecContext) SynchronizedAfterSuiteProc1BodyHasContext bool - ReportEachBody func(types.SpecReport) - ReportAfterSuiteBody func(types.Report) - - MarkedFocus bool - MarkedPending bool - MarkedSerial bool - MarkedOrdered bool - MarkedOncePerOrdered bool - MarkedSuppressProgressReporting bool - FlakeAttempts int - MustPassRepeatedly int - Labels Labels - PollProgressAfter time.Duration - PollProgressInterval time.Duration - NodeTimeout time.Duration - SpecTimeout time.Duration - GracePeriod time.Duration + ReportEachBody func(types.SpecReport) + ReportSuiteBody func(types.Report) + + MarkedFocus bool + MarkedPending bool + MarkedSerial bool + MarkedOrdered bool + MarkedContinueOnFailure bool + MarkedOncePerOrdered bool + FlakeAttempts int + MustPassRepeatedly int + Labels Labels + PollProgressAfter time.Duration + PollProgressInterval time.Duration + NodeTimeout time.Duration + SpecTimeout time.Duration + GracePeriod time.Duration NodeIDWhereCleanupWasGenerated uint } @@ -70,6 +70,7 @@ type focusType bool type pendingType bool type serialType bool type orderedType bool +type continueOnFailureType bool type honorsOrderedType bool type suppressProgressReporting bool @@ -77,6 +78,7 @@ const Focus = focusType(true) const Pending = pendingType(true) const Serial = serialType(true) const Ordered = orderedType(true) +const ContinueOnFailure = continueOnFailureType(true) const OncePerOrdered = honorsOrderedType(true) const SuppressProgressReporting = suppressProgressReporting(true) @@ -91,6 +93,10 @@ type NodeTimeout 
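Status above nudges the polling goroutine by closing requestAbortCheck, so an abort is noticed immediately rather than on the next ABORT_POLLING_INTERVAL tick. The wake-a-poller pattern in isolation (a sketch, not the Ginkgo code):

package main

import (
	"fmt"
	"time"
)

func main() {
	wake := make(chan struct{})
	done := make(chan struct{})

	go func() {
		ticker := time.NewTicker(500 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				// the periodic check would go here
			case <-wake: // a closed channel receives immediately, forever
				fmt.Println("checked early on demand")
				close(done)
				return
			}
		}
	}()

	close(wake) // any number of waiters observe this at once
	<-done
}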
time.Duration type SpecTimeout time.Duration type GracePeriod time.Duration +func (l Labels) MatchesLabelFilter(query string) bool { + return types.MustParseLabelFilter(query)(l) +} + func UnionOfLabels(labels ...Labels) Labels { out := Labels{} seen := map[string]bool{} @@ -134,6 +140,8 @@ func isDecoration(arg interface{}) bool { return true case t == reflect.TypeOf(Ordered): return true + case t == reflect.TypeOf(ContinueOnFailure): + return true case t == reflect.TypeOf(OncePerOrdered): return true case t == reflect.TypeOf(SuppressProgressReporting): @@ -242,16 +250,18 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy if !nodeType.Is(types.NodeTypeContainer) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "Ordered")) } + case t == reflect.TypeOf(ContinueOnFailure): + node.MarkedContinueOnFailure = bool(arg.(continueOnFailureType)) + if !nodeType.Is(types.NodeTypeContainer) { + appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "ContinueOnFailure")) + } case t == reflect.TypeOf(OncePerOrdered): node.MarkedOncePerOrdered = bool(arg.(honorsOrderedType)) if !nodeType.Is(types.NodeTypeBeforeEach | types.NodeTypeJustBeforeEach | types.NodeTypeAfterEach | types.NodeTypeJustAfterEach) { appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "OncePerOrdered")) } case t == reflect.TypeOf(SuppressProgressReporting): - node.MarkedSuppressProgressReporting = bool(arg.(suppressProgressReporting)) - if nodeType.Is(types.NodeTypeContainer) { - appendError(types.GinkgoErrors.InvalidDecoratorForNodeType(node.CodeLocation, nodeType, "SuppressProgressReporting")) - } + deprecationTracker.TrackDeprecation(types.Deprecations.SuppressProgressReporting()) case t == reflect.TypeOf(FlakeAttempts(0)): node.FlakeAttempts = int(arg.(FlakeAttempts)) if !nodeType.Is(types.NodeTypesForContainerAndIt) { @@ -321,9 +331,9 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy trackedFunctionError = true break } - } else if nodeType.Is(types.NodeTypeReportAfterSuite) { - if node.ReportAfterSuiteBody == nil { - node.ReportAfterSuiteBody = arg.(func(types.Report)) + } else if nodeType.Is(types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) { + if node.ReportSuiteBody == nil { + node.ReportSuiteBody = arg.(func(types.Report)) } else { appendError(types.GinkgoErrors.MultipleBodyFunctions(node.CodeLocation, nodeType)) trackedFunctionError = true @@ -390,13 +400,17 @@ func NewNode(deprecationTracker *types.DeprecationTracker, nodeType types.NodeTy appendError(types.GinkgoErrors.InvalidDeclarationOfFocusedAndPending(node.CodeLocation, nodeType)) } + if node.MarkedContinueOnFailure && !node.MarkedOrdered { + appendError(types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation)) + } + hasContext := node.HasContext || node.SynchronizedAfterSuiteProc1BodyHasContext || node.SynchronizedAfterSuiteAllProcsBodyHasContext || node.SynchronizedBeforeSuiteProc1BodyHasContext || node.SynchronizedBeforeSuiteAllProcsBodyHasContext if !hasContext && (node.NodeTimeout > 0 || node.SpecTimeout > 0 || node.GracePeriod > 0) && len(errors) == 0 { appendError(types.GinkgoErrors.InvalidTimeoutOrGracePeriodForNonContextNode(node.CodeLocation, nodeType)) } - if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportAfterSuite) && 
node.Body == nil && !node.MarkedPending && !trackedFunctionError { + if !node.NodeType.Is(types.NodeTypeReportBeforeEach|types.NodeTypeReportAfterEach|types.NodeTypeSynchronizedBeforeSuite|types.NodeTypeSynchronizedAfterSuite|types.NodeTypeReportBeforeSuite|types.NodeTypeReportAfterSuite) && node.Body == nil && !node.MarkedPending && !trackedFunctionError { appendError(types.GinkgoErrors.MissingBodyFunction(node.CodeLocation, nodeType)) } @@ -861,6 +875,15 @@ func (n Nodes) FirstNodeMarkedOrdered() Node { return Node{} } +func (n Nodes) IndexOfFirstNodeMarkedOrdered() int { + for i := range n { + if n[i].MarkedOrdered { + return i + } + } + return -1 +} + func (n Nodes) GetMaxFlakeAttempts() int { maxFlakeAttempts := 0 for i := range n { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go index 161be820c..84eea0a59 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/ordering.go @@ -7,6 +7,65 @@ import ( "github.com/onsi/ginkgo/v2/types" ) +type SortableSpecs struct { + Specs Specs + Indexes []int +} + +func NewSortableSpecs(specs Specs) *SortableSpecs { + indexes := make([]int, len(specs)) + for i := range specs { + indexes[i] = i + } + return &SortableSpecs{ + Specs: specs, + Indexes: indexes, + } +} +func (s *SortableSpecs) Len() int { return len(s.Indexes) } +func (s *SortableSpecs) Swap(i, j int) { s.Indexes[i], s.Indexes[j] = s.Indexes[j], s.Indexes[i] } +func (s *SortableSpecs) Less(i, j int) bool { + a, b := s.Specs[s.Indexes[i]], s.Specs[s.Indexes[j]] + + aNodes, bNodes := a.Nodes.WithType(types.NodeTypesForContainerAndIt), b.Nodes.WithType(types.NodeTypesForContainerAndIt) + + firstOrderedAIdx, firstOrderedBIdx := aNodes.IndexOfFirstNodeMarkedOrdered(), bNodes.IndexOfFirstNodeMarkedOrdered() + if firstOrderedAIdx > -1 && firstOrderedBIdx > -1 && aNodes[firstOrderedAIdx].ID == bNodes[firstOrderedBIdx].ID { + // strictly preserve order within an ordered containers. ID will track this as IDs are generated monotonically + return aNodes.FirstNodeWithType(types.NodeTypeIt).ID < bNodes.FirstNodeWithType(types.NodeTypeIt).ID + } + + // if either spec is in an ordered container - only use the nodes up to the outermost ordered container + if firstOrderedAIdx > -1 { + aNodes = aNodes[:firstOrderedAIdx+1] + } + if firstOrderedBIdx > -1 { + bNodes = bNodes[:firstOrderedBIdx+1] + } + + for i := 0; i < len(aNodes) && i < len(bNodes); i++ { + aCL, bCL := aNodes[i].CodeLocation, bNodes[i].CodeLocation + if aCL.FileName != bCL.FileName { + return aCL.FileName < bCL.FileName + } + if aCL.LineNumber != bCL.LineNumber { + return aCL.LineNumber < bCL.LineNumber + } + } + // either everything is equal or we have different lengths of CLs + if len(aNodes) != len(bNodes) { + return len(aNodes) < len(bNodes) + } + // ok, now we are sure everything was equal. so we use the spec text to break ties + for i := 0; i < len(aNodes); i++ { + if aNodes[i].Text != bNodes[i].Text { + return aNodes[i].Text < bNodes[i].Text + } + } + // ok, all those texts were equal. we'll use the ID of the most deeply nested node as a last resort + return aNodes[len(aNodes)-1].ID < bNodes[len(bNodes)-1].ID +} + type GroupedSpecIndices []SpecIndices type SpecIndices []int @@ -28,12 +87,17 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, // Seed a new random source based on thee configured random seed. 
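SortableSpecs' Less above makes spec order deterministic across parallel processes: compare code locations (file name, then line number) and fall back to spec text to break ties. The same tie-breaking chain over simplified records:

package main

import (
	"fmt"
	"sort"
)

type loc struct {
	file string
	line int
	text string
}

func main() {
	specs := []loc{
		{"b_test.go", 10, "z"},
		{"a_test.go", 20, "y"},
		{"a_test.go", 20, "x"}, // same location: text breaks the tie
	}
	sort.Slice(specs, func(i, j int) bool {
		a, b := specs[i], specs[j]
		if a.file != b.file {
			return a.file < b.file
		}
		if a.line != b.line {
			return a.line < b.line
		}
		return a.text < b.text
	})
	fmt.Println(specs) // [{a_test.go 20 x} {a_test.go 20 y} {b_test.go 10 z}]
}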
r := rand.New(rand.NewSource(suiteConfig.RandomSeed)) - // first break things into execution groups + // first, we sort the entire suite to ensure a deterministic order. the sort is performed by filename, then line number, and then spec text. this ensures every parallel process has the exact same spec order and is only necessary to cover the edge case where the user iterates over a map to generate specs. + sortableSpecs := NewSortableSpecs(specs) + sort.Sort(sortableSpecs) + + // then we break things into execution groups // a group represents a single unit of execution and is a collection of SpecIndices // usually a group is just a single spec, however ordered containers must be preserved as a single group executionGroupIDs := []uint{} executionGroups := map[uint]SpecIndices{} - for idx, spec := range specs { + for _, idx := range sortableSpecs.Indexes { + spec := specs[idx] groupNode := spec.Nodes.FirstNodeMarkedOrdered() if groupNode.IsZero() { groupNode = spec.Nodes.FirstNodeWithType(types.NodeTypeIt) @@ -48,7 +112,6 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, // we shuffle outermost containers. so we need to form shufflable groupings of GroupIDs shufflableGroupingIDs := []uint{} shufflableGroupingIDToGroupIDs := map[uint][]uint{} - shufflableGroupingsIDToSortKeys := map[uint]string{} // for each execution group we're going to have to pick a node to represent how the // execution group is grouped for shuffling: @@ -57,7 +120,7 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, nodeTypesToShuffle = types.NodeTypeIt } - //so, fo reach execution group: + //so, for each execution group: for _, groupID := range executionGroupIDs { // pick out a representative spec representativeSpec := specs[executionGroups[groupID][0]] @@ -72,22 +135,9 @@ func OrderSpecs(specs Specs, suiteConfig types.SuiteConfig) (GroupedSpecIndices, if len(shufflableGroupingIDToGroupIDs[shufflableGroupingNode.ID]) == 1 { // record the shuffleable group ID shufflableGroupingIDs = append(shufflableGroupingIDs, shufflableGroupingNode.ID) - // and record the sort key to use - shufflableGroupingsIDToSortKeys[shufflableGroupingNode.ID] = shufflableGroupingNode.CodeLocation.String() } } - // now we sort the shufflable groups by the sort key. 
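Because every process now sorts specs identically first, a rand.Source seeded with the shared --seed yields the same permutation everywhere. The determinism in miniature:

package main

import (
	"fmt"
	"math/rand"
)

func shuffled(seed int64, items []string) []string {
	r := rand.New(rand.NewSource(seed))
	out := make([]string, len(items))
	for i, j := range r.Perm(len(items)) {
		out[i] = items[j]
	}
	return out
}

func main() {
	groups := []string{"g1", "g2", "g3", "g4"}
	fmt.Println(shuffled(17, groups))
	fmt.Println(shuffled(17, groups)) // identical: same seed, same sorted input
}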
We use the shufflable group nodes code location and break ties using its node id - sort.SliceStable(shufflableGroupingIDs, func(i, j int) bool { - keyA := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[i]] - keyB := shufflableGroupingsIDToSortKeys[shufflableGroupingIDs[j]] - if keyA == keyB { - return shufflableGroupingIDs[i] < shufflableGroupingIDs[j] - } else { - return keyA < keyB - } - }) - // now we permute the sorted shufflable grouping IDs and build the ordered Groups orderedGroups := GroupedSpecIndices{} permutation := r.Perm(len(shufflableGroupingIDs)) diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go index f5ae15b8b..8a237f446 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/output_interceptor_unix.go @@ -26,6 +26,17 @@ func (impl *dupSyscallOutputInterceptorImpl) CreateStdoutStderrClones() (*os.Fil stdoutCloneFD, _ := unix.Dup(1) stderrCloneFD, _ := unix.Dup(2) + // Important, set the fds to FD_CLOEXEC to prevent them leaking into childs + // https://github.com/onsi/ginkgo/issues/1191 + flags, err := unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_GETFD, 0) + if err == nil { + unix.FcntlInt(uintptr(stdoutCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC) + } + flags, err = unix.FcntlInt(uintptr(stderrCloneFD), unix.F_GETFD, 0) + if err == nil { + unix.FcntlInt(uintptr(stderrCloneFD), unix.F_SETFD, flags|unix.FD_CLOEXEC) + } + // And then wrap the clone file descriptors in files. // One benefit of this (that we don't use yet) is that we can actually write // to these files to emit output to the console even though we're intercepting output diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go index b417bf5b3..b3cd64292 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/client_server.go @@ -42,6 +42,8 @@ type Client interface { PostSuiteWillBegin(report types.Report) error PostDidRun(report types.SpecReport) error PostSuiteDidEnd(report types.Report) error + PostReportBeforeSuiteCompleted(state types.SpecState) error + BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error BlockUntilSynchronizedBeforeSuiteData() (types.SpecState, []byte, error) BlockUntilNonprimaryProcsHaveFinished() error diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go index ad9932f2a..6547c7a66 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_client.go @@ -98,6 +98,19 @@ func (client *httpClient) PostEmitProgressReport(report types.ProgressReport) er return client.post("/progress-report", report) } +func (client *httpClient) PostReportBeforeSuiteCompleted(state types.SpecState) error { + return client.post("/report-before-suite-completed", state) +} + +func (client *httpClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) { + var state types.SpecState + err := client.poll("/report-before-suite-state", &state) + if err == ErrorGone { + return types.SpecStateFailed, nil + } + return state, err +} + 
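The output-interceptor change above marks the dup'ed stdout/stderr clones close-on-exec so spawned test binaries don't inherit them (onsi/ginkgo#1191). The fcntl dance in isolation (unix-only, via golang.org/x/sys/unix):

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

// setCloexec ORs FD_CLOEXEC into an fd's flags: read-modify-write via F_GETFD/F_SETFD.
func setCloexec(fd int) error {
	flags, err := unix.FcntlInt(uintptr(fd), unix.F_GETFD, 0)
	if err != nil {
		return err
	}
	_, err = unix.FcntlInt(uintptr(fd), unix.F_SETFD, flags|unix.FD_CLOEXEC)
	return err
}

func main() {
	fd, err := unix.Dup(1) // clone stdout, as the interceptor does
	if err != nil {
		panic(err)
	}
	if err := setCloexec(fd); err != nil {
		panic(err)
	}
	fmt.Println("clone fd", fd, "will not leak across exec")
}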
func (client *httpClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { beforeSuiteState := BeforeSuiteState{ State: state, diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go index fa3ac682a..d2c71ab1b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/http_server.go @@ -26,7 +26,7 @@ type httpServer struct { handler *ServerHandler } -//Create a new server, automatically selecting a port +// Create a new server, automatically selecting a port func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, error) { listener, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { @@ -38,7 +38,7 @@ func newHttpServer(parallelTotal int, reporter reporters.Reporter) (*httpServer, }, nil } -//Start the server. You don't need to `go s.Start()`, just `s.Start()` +// Start the server. You don't need to `go s.Start()`, just `s.Start()` func (server *httpServer) Start() { httpServer := &http.Server{} mux := http.NewServeMux() @@ -52,6 +52,8 @@ func (server *httpServer) Start() { mux.HandleFunc("/progress-report", server.emitProgressReport) //synchronization endpoints + mux.HandleFunc("/report-before-suite-completed", server.handleReportBeforeSuiteCompleted) + mux.HandleFunc("/report-before-suite-state", server.handleReportBeforeSuiteState) mux.HandleFunc("/before-suite-completed", server.handleBeforeSuiteCompleted) mux.HandleFunc("/before-suite-state", server.handleBeforeSuiteState) mux.HandleFunc("/have-nonprimary-procs-finished", server.handleHaveNonprimaryProcsFinished) @@ -63,12 +65,12 @@ func (server *httpServer) Start() { go httpServer.Serve(server.listener) } -//Stop the server +// Stop the server func (server *httpServer) Close() { server.listener.Close() } -//The address the server can be reached it. Pass this into the `ForwardingReporter`. +// The address the server can be reached it. Pass this into the `ForwardingReporter`. 
func (server *httpServer) Address() string { return "http://" + server.listener.Addr().String() } @@ -93,7 +95,7 @@ func (server *httpServer) RegisterAlive(node int, alive func() bool) { // Streaming Endpoints // -//The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` +// The server will forward all received messages to Ginkgo reporters registered with `RegisterReporters` func (server *httpServer) decode(writer http.ResponseWriter, request *http.Request, object interface{}) bool { defer request.Body.Close() if json.NewDecoder(request.Body).Decode(object) != nil { @@ -164,6 +166,23 @@ func (server *httpServer) emitProgressReport(writer http.ResponseWriter, request server.handleError(server.handler.EmitProgressReport(report, voidReceiver), writer) } +func (server *httpServer) handleReportBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { + var state types.SpecState + if !server.decode(writer, request, &state) { + return + } + + server.handleError(server.handler.ReportBeforeSuiteCompleted(state, voidReceiver), writer) +} + +func (server *httpServer) handleReportBeforeSuiteState(writer http.ResponseWriter, request *http.Request) { + var state types.SpecState + if server.handleError(server.handler.ReportBeforeSuiteState(voidSender, &state), writer) { + return + } + json.NewEncoder(writer).Encode(state) +} + func (server *httpServer) handleBeforeSuiteCompleted(writer http.ResponseWriter, request *http.Request) { var beforeSuiteState BeforeSuiteState if !server.decode(writer, request, &beforeSuiteState) { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go index fe93cc2b9..59e8e6fd0 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/rpc_client.go @@ -76,6 +76,19 @@ func (client *rpcClient) PostEmitProgressReport(report types.ProgressReport) err return client.client.Call("Server.EmitProgressReport", report, voidReceiver) } +func (client *rpcClient) PostReportBeforeSuiteCompleted(state types.SpecState) error { + return client.client.Call("Server.ReportBeforeSuiteCompleted", state, voidReceiver) +} + +func (client *rpcClient) BlockUntilReportBeforeSuiteCompleted() (types.SpecState, error) { + var state types.SpecState + err := client.poll("Server.ReportBeforeSuiteState", &state) + if err == ErrorGone { + return types.SpecStateFailed, nil + } + return state, err +} + func (client *rpcClient) PostSynchronizedBeforeSuiteCompleted(state types.SpecState, data []byte) error { beforeSuiteState := BeforeSuiteState{ State: state, diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go index 7c6e67b96..a6d98793e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/parallel_support/server_handler.go @@ -18,16 +18,17 @@ var voidSender Void // It handles all the business logic to avoid duplication between the two servers type ServerHandler struct { - done chan interface{} - outputDestination io.Writer - reporter reporters.Reporter - alives []func() bool - lock *sync.Mutex - beforeSuiteState BeforeSuiteState - parallelTotal int - counter int - counterLock *sync.Mutex - shouldAbort bool + done chan interface{} + outputDestination io.Writer + 
reporter reporters.Reporter + alives []func() bool + lock *sync.Mutex + beforeSuiteState BeforeSuiteState + reportBeforeSuiteState types.SpecState + parallelTotal int + counter int + counterLock *sync.Mutex + shouldAbort bool numSuiteDidBegins int numSuiteDidEnds int @@ -37,11 +38,12 @@ type ServerHandler struct { func newServerHandler(parallelTotal int, reporter reporters.Reporter) *ServerHandler { return &ServerHandler{ - reporter: reporter, - lock: &sync.Mutex{}, - counterLock: &sync.Mutex{}, - alives: make([]func() bool, parallelTotal), - beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid}, + reporter: reporter, + lock: &sync.Mutex{}, + counterLock: &sync.Mutex{}, + alives: make([]func() bool, parallelTotal), + beforeSuiteState: BeforeSuiteState{Data: nil, State: types.SpecStateInvalid}, + parallelTotal: parallelTotal, outputDestination: os.Stdout, done: make(chan interface{}), @@ -140,6 +142,29 @@ func (handler *ServerHandler) haveNonprimaryProcsFinished() bool { return true } +func (handler *ServerHandler) ReportBeforeSuiteCompleted(reportBeforeSuiteState types.SpecState, _ *Void) error { + handler.lock.Lock() + defer handler.lock.Unlock() + handler.reportBeforeSuiteState = reportBeforeSuiteState + + return nil +} + +func (handler *ServerHandler) ReportBeforeSuiteState(_ Void, reportBeforeSuiteState *types.SpecState) error { + proc1IsAlive := handler.procIsAlive(1) + handler.lock.Lock() + defer handler.lock.Unlock() + if handler.reportBeforeSuiteState == types.SpecStateInvalid { + if proc1IsAlive { + return ErrorEarly + } else { + return ErrorGone + } + } + *reportBeforeSuiteState = handler.reportBeforeSuiteState + return nil +} + func (handler *ServerHandler) BeforeSuiteCompleted(beforeSuiteState BeforeSuiteState, _ *Void) error { handler.lock.Lock() defer handler.lock.Unlock() diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go index 345db544b..11269cf1f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_report.go @@ -48,13 +48,10 @@ type ProgressStepCursor struct { StartTime time.Time } -func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep ProgressStepCursor, gwOutput string, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) { +func NewProgressReport(isRunningInParallel bool, report types.SpecReport, currentNode Node, currentNodeStartTime time.Time, currentStep types.SpecEvent, gwOutput string, timelineLocation types.TimelineLocation, additionalReports []string, sourceRoots []string, includeAll bool) (types.ProgressReport, error) { pr := types.ProgressReport{ - ParallelProcess: report.ParallelProcess, - RunningInParallel: isRunningInParallel, - - Time: time.Now(), - + ParallelProcess: report.ParallelProcess, + RunningInParallel: isRunningInParallel, ContainerHierarchyTexts: report.ContainerHierarchyTexts, LeafNodeText: report.LeafNodeText, LeafNodeLocation: report.LeafNodeLocation, @@ -65,14 +62,14 @@ func NewProgressReport(isRunningInParallel bool, report types.SpecReport, curren CurrentNodeLocation: currentNode.CodeLocation, CurrentNodeStartTime: currentNodeStartTime, - CurrentStepText: currentStep.Text, + CurrentStepText: currentStep.Message, CurrentStepLocation: currentStep.CodeLocation, - CurrentStepStartTime: currentStep.StartTime, + CurrentStepStartTime: 
currentStep.TimelineLocation.Time, AdditionalReports: additionalReports, CapturedGinkgoWriterOutput: gwOutput, - GinkgoWriterOffset: len(gwOutput), + TimelineLocation: timelineLocation, } goroutines, err := extractRunningGoroutines() @@ -186,7 +183,6 @@ func extractRunningGoroutines() ([]types.Goroutine, error) { break } } - r := bufio.NewReader(bytes.NewReader(stack)) out := []types.Goroutine{} idx := -1 @@ -234,12 +230,12 @@ func extractRunningGoroutines() ([]types.Goroutine, error) { return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid function call: %s -- missing file name and line number", functionCall.Function)) } line = strings.TrimLeft(line, " \t") - fields := strings.SplitN(line, ":", 2) - if len(fields) != 2 { - return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename nad line number: %s", line)) + delimiterIdx := strings.LastIndex(line, ":") + if delimiterIdx == -1 { + return nil, types.GinkgoErrors.FailedToParseStackTrace(fmt.Sprintf("Invalid filename and line number: %s", line)) } - functionCall.Filename = fields[0] - line = strings.Split(fields[1], " ")[0] + functionCall.Filename = line[:delimiterIdx] + line = strings.Split(line[delimiterIdx+1:], " ")[0] lineNumber, err := strconv.ParseInt(line, 10, 64) functionCall.Line = int(lineNumber) if err != nil { diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go b/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go new file mode 100644 index 000000000..2c6e260f7 --- /dev/null +++ b/vendor/github.com/onsi/ginkgo/v2/internal/progress_reporter_manager.go @@ -0,0 +1,79 @@ +package internal + +import ( + "context" + "sort" + "strings" + "sync" + + "github.com/onsi/ginkgo/v2/types" +) + +type ProgressReporterManager struct { + lock *sync.Mutex + progressReporters map[int]func() string + prCounter int +} + +func NewProgressReporterManager() *ProgressReporterManager { + return &ProgressReporterManager{ + progressReporters: map[int]func() string{}, + lock: &sync.Mutex{}, + } +} + +func (prm *ProgressReporterManager) AttachProgressReporter(reporter func() string) func() { + prm.lock.Lock() + defer prm.lock.Unlock() + prm.prCounter += 1 + prCounter := prm.prCounter + prm.progressReporters[prCounter] = reporter + + return func() { + prm.lock.Lock() + defer prm.lock.Unlock() + delete(prm.progressReporters, prCounter) + } +} + +func (prm *ProgressReporterManager) QueryProgressReporters(ctx context.Context, failer *Failer) []string { + prm.lock.Lock() + keys := []int{} + for key := range prm.progressReporters { + keys = append(keys, key) + } + sort.Ints(keys) + reporters := []func() string{} + for _, key := range keys { + reporters = append(reporters, prm.progressReporters[key]) + } + prm.lock.Unlock() + + if len(reporters) == 0 { + return nil + } + out := []string{} + for _, reporter := range reporters { + reportC := make(chan string, 1) + go func() { + defer func() { + e := recover() + if e != nil { + failer.Panic(types.NewCodeLocationWithStackTrace(1), e) + reportC <- "failed to query attached progress reporter" + } + }() + reportC <- reporter() + }() + var report string + select { + case report = <-reportC: + case <-ctx.Done(): + return out + } + if strings.TrimSpace(report) != "" { + out = append(out, report) + } + } + return out +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go index 74199f395..cc351a39b 100644 --- 
a/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/report_entry.go @@ -1,7 +1,6 @@ package internal import ( - "reflect" "time" "github.com/onsi/ginkgo/v2/types" @@ -13,20 +12,20 @@ func NewReportEntry(name string, cl types.CodeLocation, args ...interface{}) (Re out := ReportEntry{ Visibility: types.ReportEntryVisibilityAlways, Name: name, - Time: time.Now(), Location: cl, + Time: time.Now(), } var didSetValue = false for _, arg := range args { - switch reflect.TypeOf(arg) { - case reflect.TypeOf(types.ReportEntryVisibilityAlways): - out.Visibility = arg.(types.ReportEntryVisibility) - case reflect.TypeOf(types.CodeLocation{}): - out.Location = arg.(types.CodeLocation) - case reflect.TypeOf(Offset(0)): - out.Location = types.NewCodeLocation(2 + int(arg.(Offset))) - case reflect.TypeOf(out.Time): - out.Time = arg.(time.Time) + switch x := arg.(type) { + case types.ReportEntryVisibility: + out.Visibility = x + case types.CodeLocation: + out.Location = x + case Offset: + out.Location = types.NewCodeLocation(2 + int(x)) + case time.Time: + out.Time = x default: if didSetValue { return ReportEntry{}, types.GinkgoErrors.TooManyReportEntryValues(out.Location, arg) diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go index 8f569dd35..2515b84a1 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/spec_context.go @@ -2,8 +2,6 @@ package internal import ( "context" - "sort" - "sync" "github.com/onsi/ginkgo/v2/types" ) @@ -17,11 +15,9 @@ type SpecContext interface { type specContext struct { context.Context + *ProgressReporterManager - cancel context.CancelFunc - lock *sync.Mutex - progressReporters map[int]func() string - prCounter int + cancel context.CancelFunc suite *Suite } @@ -36,11 +32,9 @@ This is because Ginkgo needs finer control over when the context is canceled. S func NewSpecContext(suite *Suite) *specContext { ctx, cancel := context.WithCancel(context.Background()) sc := &specContext{ - cancel: cancel, - suite: suite, - lock: &sync.Mutex{}, - prCounter: 0, - progressReporters: map[int]func() string{}, + cancel: cancel, + suite: suite, + ProgressReporterManager: NewProgressReporterManager(), } ctx = context.WithValue(ctx, "GINKGO_SPEC_CONTEXT", sc) //yes, yes, the go docs say don't use a string for a key... 
but we'd rather avoid a circular dependency between Gomega and Ginkgo sc.Context = ctx //thank goodness for garbage collectors that can handle circular dependencies @@ -51,40 +45,3 @@ func NewSpecContext(suite *Suite) *specContext { func (sc *specContext) SpecReport() types.SpecReport { return sc.suite.CurrentSpecReport() } - -func (sc *specContext) AttachProgressReporter(reporter func() string) func() { - sc.lock.Lock() - defer sc.lock.Unlock() - sc.prCounter += 1 - prCounter := sc.prCounter - sc.progressReporters[prCounter] = reporter - - return func() { - sc.lock.Lock() - defer sc.lock.Unlock() - delete(sc.progressReporters, prCounter) - } -} - -func (sc *specContext) QueryProgressReporters() []string { - sc.lock.Lock() - keys := []int{} - for key := range sc.progressReporters { - keys = append(keys, key) - } - sort.Ints(keys) - reporters := []func() string{} - for _, key := range keys { - reporters = append(reporters, sc.progressReporters[key]) - } - sc.lock.Unlock() - - if len(reporters) == 0 { - return nil - } - out := []string{} - for _, reporter := range reporters { - out = append(out, reporter()) - } - return out -} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go index 432bd217b..60c913d89 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/suite.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/suite.go @@ -9,6 +9,7 @@ import ( "github.com/onsi/ginkgo/v2/internal/parallel_support" "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" + "golang.org/x/net/context" ) type Phase uint @@ -19,10 +20,14 @@ const ( PhaseRun ) +var PROGRESS_REPORTER_DEADLINE = 5 * time.Second + type Suite struct { tree *TreeNode topLevelContainers Nodes + *ProgressReporterManager + phase Phase suiteNodes Nodes @@ -44,7 +49,8 @@ type Suite struct { currentSpecContext *specContext - progressStepCursor ProgressStepCursor + currentByStep types.SpecEvent + timelineOrder int /* We don't need to lock around all operations. Just those that *could* happen concurrently.
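The Suite changes above wire in the new timeline machinery: the suite now embeds a ProgressReporterManager and tracks the current By step plus a monotonically increasing timeline order. A minimal sketch of what a timeline anchor looks like, with field names taken from the generateTimelineLocation hunk below and illustrative values:

	// A TimelineLocation pins an event to a byte offset in the captured
	// GinkgoWriter stream, a global ordering counter, and a wall-clock time.
	tl := types.TimelineLocation{
		Offset: 1024,       // illustrative: len(captured output) + writer.Len()
		Order:  7,          // illustrative: the incremented timelineOrder
		Time:   time.Now(),
	}
	_ = tl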
@@ -63,8 +69,9 @@ type Suite struct { func NewSuite() *Suite { return &Suite{ - tree: &TreeNode{}, - phase: PhaseBuildTopLevel, + tree: &TreeNode{}, + phase: PhaseBuildTopLevel, + ProgressReporterManager: NewProgressReporterManager(), selectiveLock: &sync.Mutex{}, } @@ -128,7 +135,7 @@ func (suite *Suite) PushNode(node Node) error { return suite.pushCleanupNode(node) } - if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeReportAfterSuite) { + if node.NodeType.Is(types.NodeTypeBeforeSuite | types.NodeTypeAfterSuite | types.NodeTypeSynchronizedBeforeSuite | types.NodeTypeSynchronizedAfterSuite | types.NodeTypeBeforeSuite | types.NodeTypeReportBeforeSuite | types.NodeTypeReportAfterSuite) { return suite.pushSuiteNode(node) } @@ -150,6 +157,13 @@ func (suite *Suite) PushNode(node Node) error { } } + if node.MarkedContinueOnFailure { + firstOrderedNode := suite.tree.AncestorNodeChain().FirstNodeMarkedOrdered() + if !firstOrderedNode.IsZero() { + return types.GinkgoErrors.InvalidContinueOnFailureDecoration(node.CodeLocation) + } + } + if node.NodeType == types.NodeTypeContainer { // During PhaseBuildTopLevel we only track the top level containers without entering them // We only enter the top level container nodes during PhaseBuildTree @@ -221,7 +235,7 @@ func (suite *Suite) pushCleanupNode(node Node) error { node.NodeType = types.NodeTypeCleanupAfterSuite case types.NodeTypeBeforeAll, types.NodeTypeAfterAll: node.NodeType = types.NodeTypeCleanupAfterAll - case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportAfterSuite: + case types.NodeTypeReportBeforeEach, types.NodeTypeReportAfterEach, types.NodeTypeReportBeforeSuite, types.NodeTypeReportAfterSuite: return types.GinkgoErrors.PushingCleanupInReportingNode(node.CodeLocation, suite.currentNode.NodeType) case types.NodeTypeCleanupInvalid, types.NodeTypeCleanupAfterEach, types.NodeTypeCleanupAfterAll, types.NodeTypeCleanupAfterSuite: return types.GinkgoErrors.PushingCleanupInCleanupNode(node.CodeLocation) @@ -236,19 +250,69 @@ func (suite *Suite) pushCleanupNode(node Node) error { return nil } -/* - Pushing and popping the Step Cursor stack -*/ - -func (suite *Suite) SetProgressStepCursor(cursor ProgressStepCursor) { +func (suite *Suite) generateTimelineLocation() types.TimelineLocation { suite.selectiveLock.Lock() defer suite.selectiveLock.Unlock() - suite.progressStepCursor = cursor + suite.timelineOrder += 1 + return types.TimelineLocation{ + Offset: len(suite.currentSpecReport.CapturedGinkgoWriterOutput) + suite.writer.Len(), + Order: suite.timelineOrder, + Time: time.Now(), + } +} + +func (suite *Suite) handleSpecEvent(event types.SpecEvent) types.SpecEvent { + event.TimelineLocation = suite.generateTimelineLocation() + suite.selectiveLock.Lock() + suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event) + suite.selectiveLock.Unlock() + suite.reporter.EmitSpecEvent(event) + return event +} + +func (suite *Suite) handleSpecEventEnd(eventType types.SpecEventType, startEvent types.SpecEvent) { + event := startEvent + event.SpecEventType = eventType + event.TimelineLocation = suite.generateTimelineLocation() + event.Duration = event.TimelineLocation.Time.Sub(startEvent.TimelineLocation.Time) + suite.selectiveLock.Lock() + suite.currentSpecReport.SpecEvents = append(suite.currentSpecReport.SpecEvents, event) + suite.selectiveLock.Unlock() + 
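// Note on the pairing above (as I read this hunk): handleSpecEvent stamps the
// start event with a fresh TimelineLocation and appends it to the spec's
// SpecEvents; handleSpecEventEnd then copies that start event, swaps in the
// end SpecEventType, stamps a new TimelineLocation, and derives Duration as
// end.Time.Sub(start.Time) before appending and emitting it below.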
suite.reporter.EmitSpecEvent(event) +} + +func (suite *Suite) By(text string, callback ...func()) error { + cl := types.NewCodeLocation(2) + if suite.phase != PhaseRun { + return types.GinkgoErrors.ByNotDuringRunPhase(cl) + } + + event := suite.handleSpecEvent(types.SpecEvent{ + SpecEventType: types.SpecEventByStart, + CodeLocation: cl, + Message: text, + }) + suite.selectiveLock.Lock() + suite.currentByStep = event + suite.selectiveLock.Unlock() + + if len(callback) == 1 { + defer func() { + suite.selectiveLock.Lock() + suite.currentByStep = types.SpecEvent{} + suite.selectiveLock.Unlock() + suite.handleSpecEventEnd(types.SpecEventByEnd, event) + }() + callback[0]() + } else if len(callback) > 1 { + panic("just one callback per By, please") + } + return nil } /* - Spec Running methods - used during PhaseRun +Spec Running methods - used during PhaseRun */ func (suite *Suite) CurrentSpecReport() types.SpecReport { suite.selectiveLock.Lock() @@ -263,27 +327,32 @@ func (suite *Suite) CurrentSpecReport() types.SpecReport { } func (suite *Suite) AddReportEntry(entry ReportEntry) error { - suite.selectiveLock.Lock() - defer suite.selectiveLock.Unlock() if suite.phase != PhaseRun { return types.GinkgoErrors.AddReportEntryNotDuringRunPhase(entry.Location) } + entry.TimelineLocation = suite.generateTimelineLocation() + entry.Time = entry.TimelineLocation.Time + suite.selectiveLock.Lock() suite.currentSpecReport.ReportEntries = append(suite.currentSpecReport.ReportEntries, entry) + suite.selectiveLock.Unlock() + suite.reporter.EmitReportEntry(entry) return nil } func (suite *Suite) generateProgressReport(fullReport bool) types.ProgressReport { + timelineLocation := suite.generateTimelineLocation() suite.selectiveLock.Lock() defer suite.selectiveLock.Unlock() + deadline, cancel := context.WithTimeout(context.Background(), PROGRESS_REPORTER_DEADLINE) + defer cancel() var additionalReports []string if suite.currentSpecContext != nil { - additionalReports = suite.currentSpecContext.QueryProgressReporters() + additionalReports = append(additionalReports, suite.currentSpecContext.QueryProgressReporters(deadline, suite.failer)...) } - stepCursor := suite.progressStepCursor - + additionalReports = append(additionalReports, suite.QueryProgressReporters(deadline, suite.failer)...)
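// Progress reporters are now queried under a shared deadline
// (PROGRESS_REPORTER_DEADLINE above) at both the spec-context and the suite
// level. A hedged usage sketch based on the AttachProgressReporter signature
// in the progress_reporter_manager.go hunk (the reporter body and message are
// illustrative):
//
//	detach := ctx.AttachProgressReporter(func() string {
//		return "still waiting on the fake API server"
//	})
//	defer detach() // the returned func unregisters the reporter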
gwOutput := suite.currentSpecReport.CapturedGinkgoWriterOutput + string(suite.writer.Bytes()) - pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, stepCursor, gwOutput, additionalReports, suite.config.SourceRoots, fullReport) + pr, err := NewProgressReport(suite.isRunningInParallel(), suite.currentSpecReport, suite.currentNode, suite.currentNodeStartTime, suite.currentByStep, gwOutput, timelineLocation, additionalReports, suite.config.SourceRoots, fullReport) if err != nil { fmt.Printf("{{red}}Failed to generate progress report:{{/}}\n%s\n", err.Error()) @@ -355,7 +424,13 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s } suite.report.SuiteSucceeded = true - suite.runBeforeSuite(numSpecsThatWillBeRun) + + suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportBeforeSuite) + + ranBeforeSuite := suite.report.SuiteSucceeded + if suite.report.SuiteSucceeded { + suite.runBeforeSuite(numSpecsThatWillBeRun) + } if suite.report.SuiteSucceeded { groupedSpecIndices, serialGroupedSpecIndices := OrderSpecs(specs, suite.config) @@ -394,7 +469,9 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s } } - suite.runAfterSuiteCleanup(numSpecsThatWillBeRun) + if ranBeforeSuite { + suite.runAfterSuiteCleanup(numSpecsThatWillBeRun) + } interruptStatus := suite.interruptHandler.Status() if interruptStatus.Interrupted() { @@ -408,9 +485,7 @@ func (suite *Suite) runSpecs(description string, suiteLabels Labels, suitePath s suite.report.SuiteSucceeded = false } - if suite.config.ParallelProcess == 1 { - suite.runReportAfterSuite() - } + suite.runReportSuiteNodesIfNeedBe(types.NodeTypeReportAfterSuite) suite.reporter.SuiteDidEnd(suite.report) if suite.isRunningInParallel() { suite.client.PostSuiteDidEnd(suite.report) @@ -424,9 +499,10 @@ func (suite *Suite) runBeforeSuite(numSpecsThatWillBeRun int) { if !beforeSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 { suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ - LeafNodeType: beforeSuiteNode.NodeType, - LeafNodeLocation: beforeSuiteNode.CodeLocation, - ParallelProcess: suite.config.ParallelProcess, + LeafNodeType: beforeSuiteNode.NodeType, + LeafNodeLocation: beforeSuiteNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + RunningInParallel: suite.isRunningInParallel(), } suite.selectiveLock.Unlock() @@ -445,9 +521,10 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) { if !afterSuiteNode.IsZero() && numSpecsThatWillBeRun > 0 { suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ - LeafNodeType: afterSuiteNode.NodeType, - LeafNodeLocation: afterSuiteNode.CodeLocation, - ParallelProcess: suite.config.ParallelProcess, + LeafNodeType: afterSuiteNode.NodeType, + LeafNodeLocation: afterSuiteNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + RunningInParallel: suite.isRunningInParallel(), } suite.selectiveLock.Unlock() @@ -461,9 +538,10 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) { for _, cleanupNode := range afterSuiteCleanup { suite.selectiveLock.Lock() suite.currentSpecReport = types.SpecReport{ - LeafNodeType: cleanupNode.NodeType, - LeafNodeLocation: cleanupNode.CodeLocation, - ParallelProcess: suite.config.ParallelProcess, + LeafNodeType: cleanupNode.NodeType, + LeafNodeLocation: cleanupNode.CodeLocation, + ParallelProcess: suite.config.ParallelProcess, + RunningInParallel: suite.isRunningInParallel(), } 
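// As with the before/after-suite reports above, cleanup-node reports now
// record RunningInParallel so downstream reporters can tell streamed parallel
// output apart from serial output.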
suite.selectiveLock.Unlock() @@ -474,23 +552,6 @@ func (suite *Suite) runAfterSuiteCleanup(numSpecsThatWillBeRun int) { } } -func (suite *Suite) runReportAfterSuite() { - for _, node := range suite.suiteNodes.WithType(types.NodeTypeReportAfterSuite) { - suite.selectiveLock.Lock() - suite.currentSpecReport = types.SpecReport{ - LeafNodeType: node.NodeType, - LeafNodeLocation: node.CodeLocation, - LeafNodeText: node.Text, - ParallelProcess: suite.config.ParallelProcess, - } - suite.selectiveLock.Unlock() - - suite.reporter.WillRun(suite.currentSpecReport) - suite.runReportAfterSuiteNode(node, suite.report) - suite.processCurrentSpecReport() - } -} - func (suite *Suite) reportEach(spec Spec, nodeType types.NodeType) { nodes := spec.Nodes.WithType(nodeType) if nodeType == types.NodeTypeReportAfterEach { @@ -608,39 +669,80 @@ func (suite *Suite) runSuiteNode(node Node) { if err != nil && !suite.currentSpecReport.State.Is(types.SpecStateFailureStates) { suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error()) + suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure) } suite.currentSpecReport.EndTime = time.Now() suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) suite.currentSpecReport.CapturedStdOutErr += suite.outputInterceptor.StopInterceptingAndReturnOutput() +} + +func (suite *Suite) runReportSuiteNodesIfNeedBe(nodeType types.NodeType) { + nodes := suite.suiteNodes.WithType(nodeType) + // only run ReportAfterSuite on proc 1 + if nodeType.Is(types.NodeTypeReportAfterSuite) && suite.config.ParallelProcess != 1 { + return + } + // if we're running ReportBeforeSuite on proc > 1 - we should wait until proc 1 has completed + if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.config.ParallelProcess != 1 && len(nodes) > 0 { + state, err := suite.client.BlockUntilReportBeforeSuiteCompleted() + if err != nil || state.Is(types.SpecStateFailed) { + suite.report.SuiteSucceeded = false + } + return + } + + for _, node := range nodes { + suite.selectiveLock.Lock() + suite.currentSpecReport = types.SpecReport{ + LeafNodeType: node.NodeType, + LeafNodeLocation: node.CodeLocation, + LeafNodeText: node.Text, + ParallelProcess: suite.config.ParallelProcess, + RunningInParallel: suite.isRunningInParallel(), + } + suite.selectiveLock.Unlock() - return + suite.reporter.WillRun(suite.currentSpecReport) + suite.runReportSuiteNode(node, suite.report) + suite.processCurrentSpecReport() + } + + // if we're running ReportBeforeSuite and we're running in parallel - we should tell the other procs that we're done + if nodeType.Is(types.NodeTypeReportBeforeSuite) && suite.isRunningInParallel() && len(nodes) > 0 { + if suite.report.SuiteSucceeded { + suite.client.PostReportBeforeSuiteCompleted(types.SpecStatePassed) + } else { + suite.client.PostReportBeforeSuiteCompleted(types.SpecStateFailed) + } + } } -func (suite *Suite) runReportAfterSuiteNode(node Node, report types.Report) { +func (suite *Suite) runReportSuiteNode(node Node, report types.Report) { suite.writer.Truncate() suite.outputInterceptor.StartInterceptingOutput() suite.currentSpecReport.StartTime = time.Now() - if suite.config.ParallelTotal > 1 { + // if we're running a ReportAfterSuite in parallel (on proc 1) we (a) wait until other procs have exited and + // (b) always fetch the latest report as prior
ReportAfterSuites will contribute to it + if node.NodeType.Is(types.NodeTypeReportAfterSuite) && suite.isRunningInParallel() { aggregatedReport, err := suite.client.BlockUntilAggregatedNonprimaryProcsReport() if err != nil { suite.currentSpecReport.State, suite.currentSpecReport.Failure = types.SpecStateFailed, suite.failureForLeafNodeWithMessage(node, err.Error()) + suite.reporter.EmitFailure(suite.currentSpecReport.State, suite.currentSpecReport.Failure) return } report = report.Add(aggregatedReport) } - node.Body = func(SpecContext) { node.ReportAfterSuiteBody(report) } + node.Body = func(SpecContext) { node.ReportSuiteBody(report) } suite.currentSpecReport.State, suite.currentSpecReport.Failure = suite.runNode(node, time.Time{}, "") suite.currentSpecReport.EndTime = time.Now() suite.currentSpecReport.RunTime = suite.currentSpecReport.EndTime.Sub(suite.currentSpecReport.StartTime) suite.currentSpecReport.CapturedGinkgoWriterOutput = string(suite.writer.Bytes()) suite.currentSpecReport.CapturedStdOutErr = suite.outputInterceptor.StopInterceptingAndReturnOutput() - - return } func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (types.SpecState, types.Failure) { @@ -662,7 +764,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ suite.selectiveLock.Lock() suite.currentNode = node suite.currentNodeStartTime = time.Now() - suite.progressStepCursor = ProgressStepCursor{} + suite.currentByStep = types.SpecEvent{} suite.selectiveLock.Unlock() defer func() { suite.selectiveLock.Lock() @@ -671,13 +773,18 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ suite.selectiveLock.Unlock() }() - if suite.config.EmitSpecProgress && !node.MarkedSuppressProgressReporting { - if text == "" { - text = "TOP-LEVEL" - } - s := fmt.Sprintf("[%s] %s\n %s\n", node.NodeType.String(), text, node.CodeLocation.String()) - suite.writer.Write([]byte(s)) + if text == "" { + text = "TOP-LEVEL" } + event := suite.handleSpecEvent(types.SpecEvent{ + SpecEventType: types.SpecEventNodeStart, + NodeType: node.NodeType, + Message: text, + CodeLocation: node.CodeLocation, + }) + defer func() { + suite.handleSpecEventEnd(types.SpecEventNodeEnd, event) + }() var failure types.Failure failure.FailureNodeType, failure.FailureNodeLocation = node.NodeType, node.CodeLocation @@ -697,18 +804,23 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ now := time.Now() deadline := suite.deadline + timeoutInPlay := "suite" if deadline.IsZero() || (!specDeadline.IsZero() && specDeadline.Before(deadline)) { deadline = specDeadline + timeoutInPlay = "spec" } if node.NodeTimeout > 0 && (deadline.IsZero() || deadline.Sub(now) > node.NodeTimeout) { deadline = now.Add(node.NodeTimeout) + timeoutInPlay = "node" } if (!deadline.IsZero() && deadline.Before(now)) || interruptStatus.Interrupted() { //we're out of time already. 
let's wait for a NodeTimeout if we have it, or GracePeriod if we don't if node.NodeTimeout > 0 { deadline = now.Add(node.NodeTimeout) + timeoutInPlay = "node" } else { deadline = now.Add(gracePeriod) + timeoutInPlay = "grace period" } } @@ -743,6 +855,7 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ } outcomeFromRun, failureFromRun := suite.failer.Drain() + failureFromRun.TimelineLocation = suite.generateTimelineLocation() outcomeC <- outcomeFromRun failureC <- failureFromRun }() @@ -772,23 +885,33 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ select { case outcomeFromRun := <-outcomeC: failureFromRun := <-failureC - if outcome == types.SpecStateInterrupted { - // we've already been interrupted. we just managed to actually exit + if outcome.Is(types.SpecStateInterrupted | types.SpecStateTimedout) { + // we've already been interrupted/timed out. we just managed to actually exit // before the grace period elapsed - return outcome, failure - } else if outcome == types.SpecStateTimedout { - // we've already timed out. we just managed to actually exit - // before the grace period elapsed. if we have a failure message we should include it + // if we have a failure message we attach it as an additional failure if outcomeFromRun != types.SpecStatePassed { - failure.Location, failure.ForwardedPanic = failureFromRun.Location, failureFromRun.ForwardedPanic - failure.Message = "This spec timed out and reported the following failure after the timeout:\n\n" + failureFromRun.Message + additionalFailure := types.AdditionalFailure{ + State: outcomeFromRun, + Failure: failure, //we make a copy - this will include all the configuration set up above... + } + //...and then we update the failure with the details from failureFromRun + additionalFailure.Failure.Location, additionalFailure.Failure.ForwardedPanic, additionalFailure.Failure.TimelineLocation = failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation + additionalFailure.Failure.ProgressReport = types.ProgressReport{} + if outcome == types.SpecStateTimedout { + additionalFailure.Failure.Message = fmt.Sprintf("A %s timeout occurred and then the following failure was recorded in the timedout node before it exited:\n%s", timeoutInPlay, failureFromRun.Message) + } else { + additionalFailure.Failure.Message = fmt.Sprintf("An interrupt occurred and then the following failure was recorded in the interrupted node before it exited:\n%s", failureFromRun.Message) + } + suite.reporter.EmitFailure(additionalFailure.State, additionalFailure.Failure) + failure.AdditionalFailure = &additionalFailure } return outcome, failure } if outcomeFromRun.Is(types.SpecStatePassed) { return outcomeFromRun, types.Failure{} } else { - failure.Message, failure.Location, failure.ForwardedPanic = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic + failure.Message, failure.Location, failure.ForwardedPanic, failure.TimelineLocation = failureFromRun.Message, failureFromRun.Location, failureFromRun.ForwardedPanic, failureFromRun.TimelineLocation + suite.reporter.EmitFailure(outcomeFromRun, failure) return outcomeFromRun, failure } case <-gracePeriodChannel: @@ -801,10 +924,12 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ case <-deadlineChannel: // we're out of time - the outcome is a timeout and we capture the failure and progress report outcome = types.SpecStateTimedout - failure.Message, failure.Location = 
"Timedout", node.CodeLocation + failure.Message, failure.Location, failure.TimelineLocation = fmt.Sprintf("A %s timeout occurred", timeoutInPlay), node.CodeLocation, suite.generateTimelineLocation() failure.ProgressReport = suite.generateProgressReport(false).WithoutCapturedGinkgoWriterOutput() - failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the timeout occurred:{{/}}" + failure.ProgressReport.Message = fmt.Sprintf("{{bold}}This is the Progress Report generated when the %s timeout occurred:{{/}}", timeoutInPlay) deadlineChannel = nil + suite.reporter.EmitFailure(outcome, failure) + // tell the spec to stop. it's important we generate the progress report first to make sure we capture where // the spec is actually stuck sc.cancel() @@ -812,38 +937,44 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ gracePeriodChannel = time.After(gracePeriod) case <-interruptStatus.Channel: interruptStatus = suite.interruptHandler.Status() + // ignore interruption from other process if we are cleaning up or reporting + if interruptStatus.Cause == interrupt_handler.InterruptCauseAbortByOtherProcess && + node.NodeType.Is(types.NodeTypesAllowedDuringReportInterrupt|types.NodeTypesAllowedDuringCleanupInterrupt) { + continue + } + deadlineChannel = nil // don't worry about deadlines, time's up now + failureTimelineLocation := suite.generateTimelineLocation() + progressReport := suite.generateProgressReport(true) + if outcome == types.SpecStateInvalid { outcome = types.SpecStateInterrupted - failure.Message, failure.Location = interruptStatus.Message(), node.CodeLocation + failure.Message, failure.Location, failure.TimelineLocation = interruptStatus.Message(), node.CodeLocation, failureTimelineLocation if interruptStatus.ShouldIncludeProgressReport() { - failure.ProgressReport = suite.generateProgressReport(true).WithoutCapturedGinkgoWriterOutput() + failure.ProgressReport = progressReport.WithoutCapturedGinkgoWriterOutput() failure.ProgressReport.Message = "{{bold}}This is the Progress Report generated when the interrupt was received:{{/}}" } + suite.reporter.EmitFailure(outcome, failure) } - var report types.ProgressReport - if interruptStatus.ShouldIncludeProgressReport() { - report = suite.generateProgressReport(false) - } - + progressReport = progressReport.WithoutOtherGoroutines() sc.cancel() if interruptStatus.Level == interrupt_handler.InterruptLevelBailOut { if interruptStatus.ShouldIncludeProgressReport() { - report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message()) - suite.emitProgressReport(report) + progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\n{{bold}}{{red}}Final interrupt received{{/}}; Ginkgo will not run any cleanup or reporting nodes and will terminate as soon as possible.\nHere's a current progress report:", interruptStatus.Message()) + suite.emitProgressReport(progressReport) } return outcome, failure } if interruptStatus.ShouldIncludeProgressReport() { if interruptStatus.Level == interrupt_handler.InterruptLevelCleanupAndReport { - report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. 
{{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message()) + progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nFirst interrupt received; Ginkgo will run any cleanup and reporting nodes but will skip all remaining specs. {{bold}}Interrupt again to skip cleanup{{/}}.\nHere's a current progress report:", interruptStatus.Message()) } else if interruptStatus.Level == interrupt_handler.InterruptLevelReportOnly { - report.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message()) + progressReport.Message = fmt.Sprintf("{{bold}}{{orange}}%s{{/}}\nSecond interrupt received; Ginkgo will run any reporting nodes but will skip all remaining specs and cleanup nodes. {{bold}}Interrupt again to bail immediately{{/}}.\nHere's a current progress report:", interruptStatus.Message()) } - suite.emitProgressReport(report) + suite.emitProgressReport(progressReport) } if gracePeriodChannel == nil { @@ -864,10 +995,12 @@ func (suite *Suite) runNode(node Node, specDeadline time.Time, text string) (typ } } +// TODO: search for usages and consider if reporter.EmitFailure() is necessary func (suite *Suite) failureForLeafNodeWithMessage(node Node, message string) types.Failure { return types.Failure{ Message: message, Location: node.CodeLocation, + TimelineLocation: suite.generateTimelineLocation(), FailureNodeContext: types.FailureNodeIsLeafNode, FailureNodeType: node.NodeType, FailureNodeLocation: node.CodeLocation, diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go index 2f42b2642..73e265565 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/testingtproxy/testing_t_proxy.go @@ -5,34 +5,62 @@ import ( "io" "os" + "github.com/onsi/ginkgo/v2/formatter" "github.com/onsi/ginkgo/v2/internal" + "github.com/onsi/ginkgo/v2/reporters" "github.com/onsi/ginkgo/v2/types" ) type failFunc func(message string, callerSkip ...int) type skipFunc func(message string, callerSkip ...int) -type cleanupFunc func(args ...interface{}) +type cleanupFunc func(args ...any) type reportFunc func() types.SpecReport +type addReportEntryFunc func(names string, args ...any) +type ginkgoWriterInterface interface { + io.Writer -func New(writer io.Writer, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, offset int) *ginkgoTestingTProxy { + Print(a ...interface{}) + Printf(format string, a ...interface{}) + Println(a ...interface{}) +} +type ginkgoRecoverFunc func() +type attachProgressReporterFunc func(func() string) func() + +func New(writer ginkgoWriterInterface, fail failFunc, skip skipFunc, cleanup cleanupFunc, report reportFunc, addReportEntry addReportEntryFunc, ginkgoRecover ginkgoRecoverFunc, attachProgressReporter attachProgressReporterFunc, randomSeed int64, parallelProcess int, parallelTotal int, noColor bool, offset int) *ginkgoTestingTProxy { return &ginkgoTestingTProxy{ - fail: fail, - offset: offset, - writer: writer, - skip: skip, - cleanup: cleanup, - report: report, + fail: fail, + offset: offset, + writer: writer, + skip: skip, + cleanup: cleanup, + report: report, + addReportEntry: addReportEntry, + ginkgoRecover: ginkgoRecover, + 
attachProgressReporter: attachProgressReporter, + randomSeed: randomSeed, + parallelProcess: parallelProcess, + parallelTotal: parallelTotal, + f: formatter.NewWithNoColorBool(noColor), } } type ginkgoTestingTProxy struct { - fail failFunc - skip skipFunc - cleanup cleanupFunc - report reportFunc - offset int - writer io.Writer -} + fail failFunc + skip skipFunc + cleanup cleanupFunc + report reportFunc + offset int + writer ginkgoWriterInterface + addReportEntry addReportEntryFunc + ginkgoRecover ginkgoRecoverFunc + attachProgressReporter attachProgressReporterFunc + randomSeed int64 + parallelProcess int + parallelTotal int + f formatter.Formatter +} + +// basic testing.T support func (t *ginkgoTestingTProxy) Cleanup(f func()) { t.cleanup(f, internal.Offset(1)) @@ -81,7 +109,7 @@ func (t *ginkgoTestingTProxy) Fatalf(format string, args ...interface{}) { } func (t *ginkgoTestingTProxy) Helper() { - // No-op + types.MarkAsHelper(1) } func (t *ginkgoTestingTProxy) Log(args ...interface{}) { @@ -126,3 +154,57 @@ func (t *ginkgoTestingTProxy) TempDir() string { return tmpDir } + +// FullGinkgoTInterface +func (t *ginkgoTestingTProxy) AddReportEntryVisibilityAlways(name string, args ...any) { + finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityAlways} + t.addReportEntry(name, append(finalArgs, args...)...) +} +func (t *ginkgoTestingTProxy) AddReportEntryVisibilityFailureOrVerbose(name string, args ...any) { + finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityFailureOrVerbose} + t.addReportEntry(name, append(finalArgs, args...)...) +} +func (t *ginkgoTestingTProxy) AddReportEntryVisibilityNever(name string, args ...any) { + finalArgs := []any{internal.Offset(1), types.ReportEntryVisibilityNever} + t.addReportEntry(name, append(finalArgs, args...)...) +} +func (t *ginkgoTestingTProxy) Print(a ...any) { + t.writer.Print(a...) +} +func (t *ginkgoTestingTProxy) Printf(format string, a ...any) { + t.writer.Printf(format, a...) +} +func (t *ginkgoTestingTProxy) Println(a ...any) { + t.writer.Println(a...) +} +func (t *ginkgoTestingTProxy) F(format string, args ...any) string { + return t.f.F(format, args...) +} +func (t *ginkgoTestingTProxy) Fi(indentation uint, format string, args ...any) string { + return t.f.Fi(indentation, format, args...) +} +func (t *ginkgoTestingTProxy) Fiw(indentation uint, maxWidth uint, format string, args ...any) string { + return t.f.Fiw(indentation, maxWidth, format, args...) +} +func (t *ginkgoTestingTProxy) RenderTimeline() string { + return reporters.RenderTimeline(t.report(), false) +} +func (t *ginkgoTestingTProxy) GinkgoRecover() { + t.ginkgoRecover() +} +func (t *ginkgoTestingTProxy) DeferCleanup(args ...any) { + finalArgs := []any{internal.Offset(1)} + t.cleanup(append(finalArgs, args...)...) 
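// The internal.Offset(1) prepended above makes the registered cleanup (and,
// earlier in this file, the report entries) point at the proxy's caller
// rather than at this method when Ginkgo resolves code locations.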
+} +func (t *ginkgoTestingTProxy) RandomSeed() int64 { + return t.randomSeed +} +func (t *ginkgoTestingTProxy) ParallelProcess() int { + return t.parallelProcess +} +func (t *ginkgoTestingTProxy) ParallelTotal() int { + return t.parallelTotal +} +func (t *ginkgoTestingTProxy) AttachProgressReporter(f func() string) func() { + return t.attachProgressReporter(f) +} diff --git a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go index da21d3b06..574f172df 100644 --- a/vendor/github.com/onsi/ginkgo/v2/internal/writer.go +++ b/vendor/github.com/onsi/ginkgo/v2/internal/writer.go @@ -22,24 +22,30 @@ type WriterInterface interface { Truncate() Bytes() []byte + Len() int } -//Writer implements WriterInterface and GinkgoWriterInterface +// Writer implements WriterInterface and GinkgoWriterInterface type Writer struct { buffer *bytes.Buffer outWriter io.Writer lock *sync.Mutex mode WriterMode + streamIndent []byte + indentNext bool + teeWriters []io.Writer } func NewWriter(outWriter io.Writer) *Writer { return &Writer{ - buffer: &bytes.Buffer{}, - lock: &sync.Mutex{}, - outWriter: outWriter, - mode: WriterModeStreamAndBuffer, + buffer: &bytes.Buffer{}, + lock: &sync.Mutex{}, + outWriter: outWriter, + mode: WriterModeStreamAndBuffer, + streamIndent: []byte(" "), + indentNext: true, } } @@ -49,6 +55,14 @@ func (w *Writer) SetMode(mode WriterMode) { w.mode = mode } +func (w *Writer) Len() int { + w.lock.Lock() + defer w.lock.Unlock() + return w.buffer.Len() +} + +var newline = []byte("\n") + func (w *Writer) Write(b []byte) (n int, err error) { w.lock.Lock() defer w.lock.Unlock() @@ -58,7 +72,21 @@ func (w *Writer) Write(b []byte) (n int, err error) { } if w.mode == WriterModeStreamAndBuffer { - w.outWriter.Write(b) + line, remaining, found := []byte{}, b, false + for len(remaining) > 0 { + line, remaining, found = bytes.Cut(remaining, newline) + if len(line) > 0 { + if w.indentNext { + w.outWriter.Write(w.streamIndent) + w.indentNext = false + } + w.outWriter.Write(line) + } + if found { + w.outWriter.Write(newline) + w.indentNext = true + } + } } return w.buffer.Write(b) } @@ -78,7 +106,7 @@ func (w *Writer) Bytes() []byte { return copied } -//GinkgoWriterInterface +// GinkgoWriterInterface func (w *Writer) TeeTo(writer io.Writer) { w.lock.Lock() defer w.lock.Unlock() @@ -107,6 +135,6 @@ func (w *Writer) Println(a ...interface{}) { func GinkgoLogrFunc(writer *Writer) logr.Logger { return funcr.New(func(prefix, args string) { - writer.Printf("%s", args) + writer.Printf("%s\n", args) }, funcr.Options{}) } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go index d09488c28..56b7be758 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/default_reporter.go @@ -12,6 +12,7 @@ import ( "io" "runtime" "strings" + "sync" "time" "github.com/onsi/ginkgo/v2/formatter" @@ -23,13 +24,16 @@ type DefaultReporter struct { writer io.Writer // managing the emission stream - lastChar string + lastCharWasNewline bool lastEmissionWasDelimiter bool // rendering specDenoter string retryDenoter string formatter formatter.Formatter + + runningInParallel bool + lock *sync.Mutex } func NewDefaultReporterUnderTest(conf types.ReporterConfig, writer io.Writer) *DefaultReporter { @@ -44,12 +48,13 @@ func NewDefaultReporter(conf types.ReporterConfig, writer io.Writer) *DefaultRep conf: conf, writer: writer, - lastChar: 
"\n", + lastCharWasNewline: true, lastEmissionWasDelimiter: false, specDenoter: "•", retryDenoter: "↺", formatter: formatter.NewWithNoColorBool(conf.NoColor), + lock: &sync.Mutex{}, } if runtime.GOOS == "windows" { reporter.specDenoter = "+" @@ -97,173 +102,219 @@ func (r *DefaultReporter) SuiteWillBegin(report types.Report) { } } -func (r *DefaultReporter) WillRun(report types.SpecReport) { - if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) { +func (r *DefaultReporter) SuiteDidEnd(report types.Report) { + failures := report.SpecReports.WithState(types.SpecStateFailureStates) + if len(failures) > 0 { + r.emitBlock("\n") + if len(failures) > 1 { + r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures))) + } else { + r.emitBlock(r.f("{{red}}{{bold}}Summarizing 1 Failure:{{/}}")) + } + for _, specReport := range failures { + highlightColor, heading := "{{red}}", "[FAIL]" + switch specReport.State { + case types.SpecStatePanicked: + highlightColor, heading = "{{magenta}}", "[PANICKED!]" + case types.SpecStateAborted: + highlightColor, heading = "{{coral}}", "[ABORTED]" + case types.SpecStateTimedout: + highlightColor, heading = "{{orange}}", "[TIMEDOUT]" + case types.SpecStateInterrupted: + highlightColor, heading = "{{orange}}", "[INTERRUPTED]" + } + locationBlock := r.codeLocationBlock(specReport, highlightColor, false, true) + r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock)) + } + } + + //summarize the suite + if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded { + r.emit(r.f(" {{green}}SUCCESS!{{/}} %s ", report.RunTime)) return } - r.emitDelimiter() - indentation := uint(0) - if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { - r.emitBlock(r.f("{{bold}}[%s] %s{{/}}", report.LeafNodeType.String(), report.LeafNodeText)) + r.emitBlock("\n") + color, status := "{{green}}{{bold}}", "SUCCESS!" + if !report.SuiteSucceeded { + color, status = "{{red}}{{bold}}", "FAIL!" 
+ } + + specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes + r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}", + specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates), + report.PreRunStats.TotalSpecs, + report.RunTime.Seconds()), + ) + + switch len(report.SpecialSuiteFailureReasons) { + case 0: + r.emit(r.f(color+"%s{{/}} -- ", status)) + case 1: + r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0])) + default: + r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", "))) + } + + if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 { + r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n")) } else { - if len(report.ContainerHierarchyTexts) > 0 { - r.emitBlock(r.cycleJoin(report.ContainerHierarchyTexts, " ")) - indentation = 1 + r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed))) + r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates))) + if specs.CountOfFlakedSpecs() > 0 { + r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs())) } - line := r.fi(indentation, "{{bold}}%s{{/}}", report.LeafNodeText) - labels := report.Labels() - if len(labels) > 0 { - line += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels, ", ")) + if specs.CountOfRepeatedSpecs() > 0 { + r.emit(r.f("{{light-yellow}}{{bold}}%d Repeated{{/}} | ", specs.CountOfRepeatedSpecs())) } - r.emitBlock(line) + r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending))) + r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped))) } - r.emitBlock(r.fi(indentation, "{{gray}}%s{{/}}", report.LeafNodeLocation)) } -func (r *DefaultReporter) DidRun(report types.SpecReport) { +func (r *DefaultReporter) WillRun(report types.SpecReport) { v := r.conf.Verbosity() - var header, highlightColor string - includeRuntime, emitGinkgoWriterOutput, stream, denoter := true, true, false, r.specDenoter - succinctLocationBlock := v.Is(types.VerbosityLevelSuccinct) + if v.LT(types.VerbosityLevelVerbose) || report.State.Is(types.SpecStatePending|types.SpecStateSkipped) || report.RunningInParallel { + return + } + + r.emitDelimiter(0) + r.emitBlock(r.f(r.codeLocationBlock(report, "{{/}}", v.Is(types.VerbosityLevelVeryVerbose), false))) +} - hasGW := report.CapturedGinkgoWriterOutput != "" - hasStd := report.CapturedStdOutErr != "" - hasEmittableReports := report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways) || (report.ReportEntries.HasVisibility(types.ReportEntryVisibilityFailureOrVerbose) && (!report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose))) +func (r *DefaultReporter) DidRun(report types.SpecReport) { + v := r.conf.Verbosity() + inParallel := report.RunningInParallel + header := r.specDenoter if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { - denoter = fmt.Sprintf("[%s]", report.LeafNodeType) + header = fmt.Sprintf("[%s]", report.LeafNodeType) + } + highlightColor := r.highlightColorForState(report.State) + + // have we already been streaming the timeline? + timelineHasBeenStreaming := v.GTE(types.VerbosityLevelVerbose) && !inParallel + + // should we show the timeline? 
+ var timeline types.Timeline + showTimeline := !timelineHasBeenStreaming && (v.GTE(types.VerbosityLevelVerbose) || report.Failed()) + if showTimeline { + timeline = report.Timeline().WithoutHiddenReportEntries() + keepVeryVerboseSpecEvents := v.Is(types.VerbosityLevelVeryVerbose) || + (v.Is(types.VerbosityLevelVerbose) && r.conf.ShowNodeEvents) || + (report.Failed() && r.conf.ShowNodeEvents) + if !keepVeryVerboseSpecEvents { + timeline = timeline.WithoutVeryVerboseSpecEvents() + } + if len(timeline) == 0 && report.CapturedGinkgoWriterOutput == "" { + // the timeline is completely empty - don't show it + showTimeline = false + } + if v.LT(types.VerbosityLevelVeryVerbose) && report.CapturedGinkgoWriterOutput == "" && len(timeline) > 0 { + //if we aren't -vv and the timeline only has a single failure, don't show it as it will appear at the end of the report + failure, isFailure := timeline[0].(types.Failure) + if isFailure && (len(timeline) == 1 || (len(timeline) == 2 && failure.AdditionalFailure != nil)) { + showTimeline = false + } + } } - highlightColor = r.highlightColorForState(report.State) + // should we have a separate section for always-visible reports? + showSeparateVisibilityAlwaysReportsSection := !timelineHasBeenStreaming && !showTimeline && report.ReportEntries.HasVisibility(types.ReportEntryVisibilityAlways) + + // should we have a separate section for captured stdout/stderr + showSeparateStdSection := inParallel && (report.CapturedStdOutErr != "") + + // given all that - do we have any actual content to show? or are we a single denoter in a stream? + reportHasContent := v.Is(types.VerbosityLevelVeryVerbose) || showTimeline || showSeparateVisibilityAlwaysReportsSection || showSeparateStdSection || report.Failed() || (v.Is(types.VerbosityLevelVerbose) && !report.State.Is(types.SpecStateSkipped)) + + // should we show a runtime? + includeRuntime := !report.State.Is(types.SpecStateSkipped|types.SpecStatePending) || (report.State.Is(types.SpecStateSkipped) && report.Failure.Message != "") + + // should we show the codelocation block? 
+ showCodeLocation := !timelineHasBeenStreaming || !report.State.Is(types.SpecStatePassed) switch report.State { case types.SpecStatePassed: - succinctLocationBlock = v.LT(types.VerbosityLevelVerbose) - emitGinkgoWriterOutput = (r.conf.AlwaysEmitGinkgoWriter || v.GTE(types.VerbosityLevelVerbose)) && hasGW + if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) && !reportHasContent { + return + } if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { - if v.GTE(types.VerbosityLevelVerbose) || hasStd || hasEmittableReports { - header = fmt.Sprintf("%s PASSED", denoter) - } else { - return - } - } else { - header, stream = denoter, true - if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 { - header, stream = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), false - } - if report.RunTime > r.conf.SlowSpecThreshold { - header, stream = fmt.Sprintf("%s [SLOW TEST]", header), false - } + header = fmt.Sprintf("%s PASSED", header) } - if hasStd || emitGinkgoWriterOutput || hasEmittableReports { - stream = false + if report.NumAttempts > 1 && report.MaxFlakeAttempts > 1 { + header, reportHasContent = fmt.Sprintf("%s [FLAKEY TEST - TOOK %d ATTEMPTS TO PASS]", r.retryDenoter, report.NumAttempts), true } case types.SpecStatePending: - includeRuntime, emitGinkgoWriterOutput = false, false - if v.Is(types.VerbosityLevelSuccinct) { - header, stream = "P", true - } else { - header, succinctLocationBlock = "P [PENDING]", v.LT(types.VerbosityLevelVeryVerbose) + header = "P" + if v.GT(types.VerbosityLevelSuccinct) { + header, reportHasContent = "P [PENDING]", true } case types.SpecStateSkipped: - if report.Failure.Message != "" || v.Is(types.VerbosityLevelVeryVerbose) { - header = "S [SKIPPED]" - } else { - header, stream = "S", true + header = "S" + if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && report.Failure.Message != "") { + header, reportHasContent = "S [SKIPPED]", true + } + default: + header = fmt.Sprintf("%s [%s]", header, r.humanReadableState(report.State)) + if report.MaxMustPassRepeatedly > 1 { + header = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts) } - case types.SpecStateFailed: - header = fmt.Sprintf("%s [FAILED]", denoter) - case types.SpecStateTimedout: - header = fmt.Sprintf("%s [TIMEDOUT]", denoter) - case types.SpecStatePanicked: - header = fmt.Sprintf("%s! [PANICKED]", denoter) - case types.SpecStateInterrupted: - header = fmt.Sprintf("%s! [INTERRUPTED]", denoter) - case types.SpecStateAborted: - header = fmt.Sprintf("%s! 
[ABORTED]", denoter) } - if report.State.Is(types.SpecStateFailureStates) && report.MaxMustPassRepeatedly > 1 { - header, stream = fmt.Sprintf("%s DURING REPETITION #%d", header, report.NumAttempts), false - } - // Emit stream and return - if stream { + // If we have no content to show, jsut emit the header and return + if !reportHasContent { r.emit(r.f(highlightColor + header + "{{/}}")) return } - // Emit header - r.emitDelimiter() if includeRuntime { header = r.f("%s [%.3f seconds]", header, report.RunTime.Seconds()) } - r.emitBlock(r.f(highlightColor + header + "{{/}}")) - // Emit Code Location Block - r.emitBlock(r.codeLocationBlock(report, highlightColor, succinctLocationBlock, false)) + // Emit header + if !timelineHasBeenStreaming { + r.emitDelimiter(0) + } + r.emitBlock(r.f(highlightColor + header + "{{/}}")) + if showCodeLocation { + r.emitBlock(r.codeLocationBlock(report, highlightColor, v.Is(types.VerbosityLevelVeryVerbose), false)) + } //Emit Stdout/Stderr Output - if hasStd { + if showSeparateStdSection { r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Begin Captured StdOut/StdErr Output >>{{/}}")) - r.emitBlock(r.fi(2, "%s", report.CapturedStdOutErr)) - r.emitBlock(r.fi(1, "{{gray}}<< End Captured StdOut/StdErr Output{{/}}")) + r.emitBlock(r.fi(1, "{{gray}}Captured StdOut/StdErr Output >>{{/}}")) + r.emitBlock(r.fi(1, "%s", report.CapturedStdOutErr)) + r.emitBlock(r.fi(1, "{{gray}}<< Captured StdOut/StdErr Output{{/}}")) } - //Emit Captured GinkgoWriter Output - if emitGinkgoWriterOutput && hasGW { + if showSeparateVisibilityAlwaysReportsSection { r.emitBlock("\n") - r.emitGinkgoWriterOutput(1, report.CapturedGinkgoWriterOutput, 0) + r.emitBlock(r.fi(1, "{{gray}}Report Entries >>{{/}}")) + for _, entry := range report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) { + r.emitReportEntry(1, entry) + } + r.emitBlock(r.fi(1, "{{gray}}<< Report Entries{{/}}")) } - if hasEmittableReports { + if showTimeline { r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{gray}}Begin Report Entries >>{{/}}")) - reportEntries := report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways) - if !report.Failure.IsZero() || v.GTE(types.VerbosityLevelVerbose) { - reportEntries = report.ReportEntries.WithVisibility(types.ReportEntryVisibilityAlways, types.ReportEntryVisibilityFailureOrVerbose) - } - for _, entry := range reportEntries { - r.emitBlock(r.fi(2, "{{bold}}"+entry.Name+"{{gray}} - %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT))) - if representation := entry.StringRepresentation(); representation != "" { - r.emitBlock(r.fi(3, representation)) - } - } - r.emitBlock(r.fi(1, "{{gray}}<< End Report Entries{{/}}")) + r.emitBlock(r.fi(1, "{{gray}}Timeline >>{{/}}")) + r.emitTimeline(1, report, timeline) + r.emitBlock(r.fi(1, "{{gray}}<< Timeline{{/}}")) } // Emit Failure Message - if !report.Failure.IsZero() { + if !report.Failure.IsZero() && !v.Is(types.VerbosityLevelVeryVerbose) { r.emitBlock("\n") - r.EmitFailure(1, report.State, report.Failure, false) - } - - if len(report.AdditionalFailures) > 0 { - if v.GTE(types.VerbosityLevelVerbose) { - r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{bold}}There were additional failures detected after the initial failure:{{/}}")) - for i, additionalFailure := range report.AdditionalFailures { - r.EmitFailure(2, additionalFailure.State, additionalFailure.Failure, true) - if i < len(report.AdditionalFailures)-1 { - r.emitBlock(r.fi(2, "{{gray}}%s{{/}}", strings.Repeat("-", 10))) - } - } - } else { - 
r.emitBlock("\n") - r.emitBlock(r.fi(1, "{{bold}}There were additional failures detected after the initial failure. Here's a summary - for full details run Ginkgo in verbose mode:{{/}}")) - for _, additionalFailure := range report.AdditionalFailures { - r.emitBlock(r.fi(2, r.highlightColorForState(additionalFailure.State)+"[%s]{{/}} in [%s] at %s", - r.humanReadableState(additionalFailure.State), - additionalFailure.Failure.FailureNodeType, - additionalFailure.Failure.Location, - )) - } - + r.emitFailure(1, report.State, report.Failure, true) + if len(report.AdditionalFailures) > 0 { + r.emitBlock(r.fi(1, "\nThere were {{bold}}{{red}}additional failures{{/}} detected. To view them in detail run {{bold}}ginkgo -vv{{/}}")) } } - r.emitDelimiter() + r.emitDelimiter(0) } func (r *DefaultReporter) highlightColorForState(state types.SpecState) string { @@ -293,13 +344,68 @@ func (r *DefaultReporter) humanReadableState(state types.SpecState) string { return strings.ToUpper(state.String()) } -func (r *DefaultReporter) EmitFailure(indent uint, state types.SpecState, failure types.Failure, includeState bool) { - highlightColor := r.highlightColorForState(state) - if includeState { - r.emitBlock(r.fi(indent, highlightColor+"[%s]{{/}}", r.humanReadableState(state))) +func (r *DefaultReporter) emitTimeline(indent uint, report types.SpecReport, timeline types.Timeline) { + isVeryVerbose := r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) + gw := report.CapturedGinkgoWriterOutput + cursor := 0 + for _, entry := range timeline { + tl := entry.GetTimelineLocation() + if tl.Offset < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:tl.Offset])) + cursor = tl.Offset + } else if cursor < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:])) + cursor = len(gw) + } + switch x := entry.(type) { + case types.Failure: + if isVeryVerbose { + r.emitFailure(indent, report.State, x, false) + } else { + r.emitShortFailure(indent, report.State, x) + } + case types.AdditionalFailure: + if isVeryVerbose { + r.emitFailure(indent, x.State, x.Failure, true) + } else { + r.emitShortFailure(indent, x.State, x.Failure) + } + case types.ReportEntry: + r.emitReportEntry(indent, x) + case types.ProgressReport: + r.emitProgressReport(indent, false, x) + case types.SpecEvent: + if isVeryVerbose || !x.IsOnlyVisibleAtVeryVerbose() || r.conf.ShowNodeEvents { + r.emitSpecEvent(indent, x, isVeryVerbose) + } + } + } + if cursor < len(gw) { + r.emit(r.fi(indent, "%s", gw[cursor:])) + } +} + +func (r *DefaultReporter) EmitFailure(state types.SpecState, failure types.Failure) { + if r.conf.Verbosity().Is(types.VerbosityLevelVerbose) { + r.emitShortFailure(1, state, failure) + } else if r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose) { + r.emitFailure(1, state, failure, true) } - r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.Message)) - r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}}\n", failure.FailureNodeType, failure.Location)) +} + +func (r *DefaultReporter) emitShortFailure(indent uint, state types.SpecState, failure types.Failure) { + r.emitBlock(r.fi(indent, r.highlightColorForState(state)+"[%s]{{/}} in [%s] - %s {{gray}}@ %s{{/}}", + r.humanReadableState(state), + failure.FailureNodeType, + failure.Location, + failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), + )) +} + +func (r *DefaultReporter) emitFailure(indent uint, state types.SpecState, failure types.Failure, includeAdditionalFailure bool) { + highlightColor := r.highlightColorForState(state) 
+ r.emitBlock(r.fi(indent, highlightColor+"[%s] %s{{/}}", r.humanReadableState(state), failure.Message)) + r.emitBlock(r.fi(indent, highlightColor+"In {{bold}}[%s]{{/}}"+highlightColor+" at: {{bold}}%s{{/}} {{gray}}@ %s{{/}}\n", failure.FailureNodeType, failure.Location, failure.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) if failure.ForwardedPanic != "" { r.emitBlock("\n") r.emitBlock(r.fi(indent, highlightColor+"%s{{/}}", failure.ForwardedPanic)) @@ -315,86 +421,22 @@ func (r *DefaultReporter) EmitFailure(indent uint, state types.SpecState, failur r.emitBlock("\n") r.emitProgressReport(indent, false, failure.ProgressReport) } -} -func (r *DefaultReporter) SuiteDidEnd(report types.Report) { - failures := report.SpecReports.WithState(types.SpecStateFailureStates) - if len(failures) > 0 { - r.emitBlock("\n\n") - if len(failures) > 1 { - r.emitBlock(r.f("{{red}}{{bold}}Summarizing %d Failures:{{/}}", len(failures))) - } else { - r.emitBlock(r.f("{{red}}{{bold}}Summarizing 1 Failure:{{/}}")) - } - for _, specReport := range failures { - highlightColor, heading := "{{red}}", "[FAIL]" - switch specReport.State { - case types.SpecStatePanicked: - highlightColor, heading = "{{magenta}}", "[PANICKED!]" - case types.SpecStateAborted: - highlightColor, heading = "{{coral}}", "[ABORTED]" - case types.SpecStateTimedout: - highlightColor, heading = "{{orange}}", "[TIMEDOUT]" - case types.SpecStateInterrupted: - highlightColor, heading = "{{orange}}", "[INTERRUPTED]" - } - locationBlock := r.codeLocationBlock(specReport, highlightColor, true, true) - r.emitBlock(r.fi(1, highlightColor+"%s{{/}} %s", heading, locationBlock)) - } - } - - //summarize the suite - if r.conf.Verbosity().Is(types.VerbosityLevelSuccinct) && report.SuiteSucceeded { - r.emit(r.f(" {{green}}SUCCESS!{{/}} %s ", report.RunTime)) - return - } - - r.emitBlock("\n") - color, status := "{{green}}{{bold}}", "SUCCESS!" - if !report.SuiteSucceeded { - color, status = "{{red}}{{bold}}", "FAIL!" 
- } - - specs := report.SpecReports.WithLeafNodeType(types.NodeTypeIt) //exclude any suite setup nodes - r.emitBlock(r.f(color+"Ran %d of %d Specs in %.3f seconds{{/}}", - specs.CountWithState(types.SpecStatePassed)+specs.CountWithState(types.SpecStateFailureStates), - report.PreRunStats.TotalSpecs, - report.RunTime.Seconds()), - ) - - switch len(report.SpecialSuiteFailureReasons) { - case 0: - r.emit(r.f(color+"%s{{/}} -- ", status)) - case 1: - r.emit(r.f(color+"%s - %s{{/}} -- ", status, report.SpecialSuiteFailureReasons[0])) - default: - r.emitBlock(r.f(color+"%s - %s{{/}}\n", status, strings.Join(report.SpecialSuiteFailureReasons, ", "))) - } - - if len(specs) == 0 && report.SpecReports.WithLeafNodeType(types.NodeTypeBeforeSuite|types.NodeTypeSynchronizedBeforeSuite).CountWithState(types.SpecStateFailureStates) > 0 { - r.emit(r.f("{{cyan}}{{bold}}A BeforeSuite node failed so all tests were skipped.{{/}}\n")) - } else { - r.emit(r.f("{{green}}{{bold}}%d Passed{{/}} | ", specs.CountWithState(types.SpecStatePassed))) - r.emit(r.f("{{red}}{{bold}}%d Failed{{/}} | ", specs.CountWithState(types.SpecStateFailureStates))) - if specs.CountOfFlakedSpecs() > 0 { - r.emit(r.f("{{light-yellow}}{{bold}}%d Flaked{{/}} | ", specs.CountOfFlakedSpecs())) - } - if specs.CountOfRepeatedSpecs() > 0 { - r.emit(r.f("{{light-yellow}}{{bold}}%d Repeated{{/}} | ", specs.CountOfRepeatedSpecs())) - } - r.emit(r.f("{{yellow}}{{bold}}%d Pending{{/}} | ", specs.CountWithState(types.SpecStatePending))) - r.emit(r.f("{{cyan}}{{bold}}%d Skipped{{/}}\n", specs.CountWithState(types.SpecStateSkipped))) + if failure.AdditionalFailure != nil && includeAdditionalFailure { + r.emitBlock("\n") + r.emitFailure(indent, failure.AdditionalFailure.State, failure.AdditionalFailure.Failure, true) } } func (r *DefaultReporter) EmitProgressReport(report types.ProgressReport) { - r.emitDelimiter() + r.emitDelimiter(1) if report.RunningInParallel { - r.emit(r.f("{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess)) + r.emit(r.fi(1, "{{coral}}Progress Report for Ginkgo Process #{{bold}}%d{{/}}\n", report.ParallelProcess)) } - r.emitProgressReport(0, true, report) - r.emitDelimiter() + shouldEmitGW := report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose) + r.emitProgressReport(1, shouldEmitGW, report) + r.emitDelimiter(1) } func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput bool, report types.ProgressReport) { @@ -409,7 +451,7 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput r.emit(" ") subjectIndent = 0 } - r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time.Sub(report.SpecStartTime).Round(time.Millisecond))) + r.emit(r.fi(subjectIndent, "{{bold}}{{orange}}%s{{/}} (Spec Runtime: %s)\n", report.LeafNodeText, report.Time().Sub(report.SpecStartTime).Round(time.Millisecond))) r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.LeafNodeLocation)) indent += 1 } @@ -419,12 +461,12 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput r.emit(r.f(" {{bold}}{{orange}}%s{{/}}", report.CurrentNodeText)) } - r.emit(r.f(" (Node Runtime: %s)\n", report.Time.Sub(report.CurrentNodeStartTime).Round(time.Millisecond))) + r.emit(r.f(" (Node Runtime: %s)\n", report.Time().Sub(report.CurrentNodeStartTime).Round(time.Millisecond))) r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentNodeLocation)) indent += 1 } if report.CurrentStepText != "" { - 
r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time.Sub(report.CurrentStepStartTime).Round(time.Millisecond))) + r.emit(r.fi(indent, "At {{bold}}{{orange}}[By Step] %s{{/}} (Step Runtime: %s)\n", report.CurrentStepText, report.Time().Sub(report.CurrentStepStartTime).Round(time.Millisecond))) r.emit(r.fi(indent+1, "{{gray}}%s{{/}}\n", report.CurrentStepLocation)) indent += 1 } @@ -433,9 +475,19 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput indent -= 1 } - if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" && (report.RunningInParallel || r.conf.Verbosity().LT(types.VerbosityLevelVerbose)) { + if emitGinkgoWriterOutput && report.CapturedGinkgoWriterOutput != "" { r.emit("\n") - r.emitGinkgoWriterOutput(indent, report.CapturedGinkgoWriterOutput, 10) + r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) + limit, lines := 10, strings.Split(report.CapturedGinkgoWriterOutput, "\n") + if len(lines) <= limit { + r.emitBlock(r.fi(indent+1, "%s", report.CapturedGinkgoWriterOutput)) + } else { + r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}")) + for _, line := range lines[len(lines)-limit-1:] { + r.emitBlock(r.fi(indent+1, "%s", line)) + } + } + r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}")) } if !report.SpecGoroutine().IsZero() { @@ -471,22 +523,48 @@ func (r *DefaultReporter) emitProgressReport(indent uint, emitGinkgoWriterOutput } } -func (r *DefaultReporter) emitGinkgoWriterOutput(indent uint, output string, limit int) { - r.emitBlock(r.fi(indent, "{{gray}}Begin Captured GinkgoWriter Output >>{{/}}")) - if limit == 0 { - r.emitBlock(r.fi(indent+1, "%s", output)) - } else { - lines := strings.Split(output, "\n") - if len(lines) <= limit { - r.emitBlock(r.fi(indent+1, "%s", output)) - } else { - r.emitBlock(r.fi(indent+1, "{{gray}}...{{/}}")) - for _, line := range lines[len(lines)-limit-1:] { - r.emitBlock(r.fi(indent+1, "%s", line)) - } - } +func (r *DefaultReporter) EmitReportEntry(entry types.ReportEntry) { + if r.conf.Verbosity().LT(types.VerbosityLevelVerbose) || entry.Visibility == types.ReportEntryVisibilityNever { + return + } + r.emitReportEntry(1, entry) +} + +func (r *DefaultReporter) emitReportEntry(indent uint, entry types.ReportEntry) { + r.emitBlock(r.fi(indent, "{{bold}}"+entry.Name+"{{gray}} "+fmt.Sprintf("- %s @ %s{{/}}", entry.Location, entry.Time.Format(types.GINKGO_TIME_FORMAT)))) + if representation := entry.StringRepresentation(); representation != "" { + r.emitBlock(r.fi(indent+1, representation)) + } +} + +func (r *DefaultReporter) EmitSpecEvent(event types.SpecEvent) { + v := r.conf.Verbosity() + if v.Is(types.VerbosityLevelVeryVerbose) || (v.Is(types.VerbosityLevelVerbose) && (r.conf.ShowNodeEvents || !event.IsOnlyVisibleAtVeryVerbose())) { + r.emitSpecEvent(1, event, r.conf.Verbosity().Is(types.VerbosityLevelVeryVerbose)) + } +} + +func (r *DefaultReporter) emitSpecEvent(indent uint, event types.SpecEvent, includeLocation bool) { + location := "" + if includeLocation { + location = fmt.Sprintf("- %s ", event.CodeLocation.String()) + } + switch event.SpecEventType { + case types.SpecEventInvalid: + return + case types.SpecEventByStart: + r.emitBlock(r.fi(indent, "{{bold}}STEP:{{/}} %s {{gray}}%s@ %s{{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventByEnd: + r.emitBlock(r.fi(indent, "{{bold}}END STEP:{{/}} %s {{gray}}%s@ %s 
(%s){{/}}", event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond))) + case types.SpecEventNodeStart: + r.emitBlock(r.fi(indent, "> Enter {{bold}}[%s]{{/}} %s {{gray}}%s@ %s{{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventNodeEnd: + r.emitBlock(r.fi(indent, "< Exit {{bold}}[%s]{{/}} %s {{gray}}%s@ %s (%s){{/}}", event.NodeType.String(), event.Message, location, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT), event.Duration.Round(time.Millisecond))) + case types.SpecEventSpecRepeat: + r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{green}}Passed{{/}}{{bold}}. Repeating %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) + case types.SpecEventSpecRetry: + r.emitBlock(r.fi(indent, "\n{{bold}}Attempt #%d {{red}}Failed{{/}}{{bold}}. Retrying %s{{/}} {{gray}}@ %s{{/}}\n\n", event.Attempt, r.retryDenoter, event.TimelineLocation.Time.Format(types.GINKGO_TIME_FORMAT))) } - r.emitBlock(r.fi(indent, "{{gray}}<< End Captured GinkgoWriter Output{{/}}")) } func (r *DefaultReporter) emitGoroutines(indent uint, goroutines ...types.Goroutine) { @@ -544,31 +622,37 @@ func (r *DefaultReporter) emitSource(indent uint, fc types.FunctionCall) { /* Emitting to the writer */ func (r *DefaultReporter) emit(s string) { - if len(s) > 0 { - r.lastChar = s[len(s)-1:] - r.lastEmissionWasDelimiter = false - r.writer.Write([]byte(s)) - } + r._emit(s, false, false) } func (r *DefaultReporter) emitBlock(s string) { - if len(s) > 0 { - if r.lastChar != "\n" { - r.emit("\n") - } - r.emit(s) - if r.lastChar != "\n" { - r.emit("\n") - } - } + r._emit(s, true, false) +} + +func (r *DefaultReporter) emitDelimiter(indent uint) { + r._emit(r.fi(indent, "{{gray}}%s{{/}}", strings.Repeat("-", 30)), true, true) } -func (r *DefaultReporter) emitDelimiter() { - if r.lastEmissionWasDelimiter { +// a bit ugly - but we're trying to minimize locking on this hot codepath +func (r *DefaultReporter) _emit(s string, block bool, isDelimiter bool) { + if len(s) == 0 { + return + } + r.lock.Lock() + defer r.lock.Unlock() + if isDelimiter && r.lastEmissionWasDelimiter { return } - r.emitBlock(r.f("{{gray}}%s{{/}}", strings.Repeat("-", 30))) - r.lastEmissionWasDelimiter = true + if block && !r.lastCharWasNewline { + r.writer.Write([]byte("\n")) + } + r.lastCharWasNewline = (s[len(s)-1:] == "\n") + r.writer.Write([]byte(s)) + if block && !r.lastCharWasNewline { + r.writer.Write([]byte("\n")) + r.lastCharWasNewline = true + } + r.lastEmissionWasDelimiter = isDelimiter } /* Rendering text */ @@ -584,13 +668,14 @@ func (r *DefaultReporter) cycleJoin(elements []string, joiner string) string { return r.formatter.CycleJoin(elements, joiner, []string{"{{/}}", "{{gray}}"}) } -func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, succinct bool, usePreciseFailureLocation bool) string { +func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightColor string, veryVerbose bool, usePreciseFailureLocation bool) string { texts, locations, labels := []string{}, []types.CodeLocation{}, [][]string{} texts, locations, labels = append(texts, report.ContainerHierarchyTexts...), append(locations, report.ContainerHierarchyLocations...), append(labels, report.ContainerHierarchyLabels...) 
+ if report.LeafNodeType.Is(types.NodeTypesForSuiteLevelNodes) { texts = append(texts, r.f("[%s] %s", report.LeafNodeType, report.LeafNodeText)) } else { - texts = append(texts, report.LeafNodeText) + texts = append(texts, r.f(report.LeafNodeText)) } labels = append(labels, report.LeafNodeLabels) locations = append(locations, report.LeafNodeLocation) @@ -600,24 +685,58 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo failureLocation = report.Failure.Location } + highlightIndex := -1 switch report.Failure.FailureNodeContext { case types.FailureNodeAtTopLevel: - texts = append([]string{r.f(highlightColor+"{{bold}}TOP-LEVEL [%s]{{/}}", report.Failure.FailureNodeType)}, texts...) + texts = append([]string{fmt.Sprintf("TOP-LEVEL [%s]", report.Failure.FailureNodeType)}, texts...) locations = append([]types.CodeLocation{failureLocation}, locations...) labels = append([][]string{{}}, labels...) + highlightIndex = 0 case types.FailureNodeInContainer: i := report.Failure.FailureNodeContainerIndex - texts[i] = r.f(highlightColor+"{{bold}}%s [%s]{{/}}", texts[i], report.Failure.FailureNodeType) + texts[i] = fmt.Sprintf("%s [%s]", texts[i], report.Failure.FailureNodeType) locations[i] = failureLocation + highlightIndex = i case types.FailureNodeIsLeafNode: i := len(texts) - 1 - texts[i] = r.f(highlightColor+"{{bold}}[%s] %s{{/}}", report.LeafNodeType, report.LeafNodeText) + texts[i] = fmt.Sprintf("[%s] %s", report.LeafNodeType, report.LeafNodeText) locations[i] = failureLocation + highlightIndex = i + default: + //there is no failure, so we highlight the leaf node + highlightIndex = len(texts) - 1 } out := "" - if succinct { - out += r.f("%s", r.cycleJoin(texts, " ")) + if veryVerbose { + for i := range texts { + if i == highlightIndex { + out += r.fi(uint(i), highlightColor+"{{bold}}%s{{/}}", texts[i]) + } else { + out += r.fi(uint(i), "%s", texts[i]) + } + if len(labels[i]) > 0 { + out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", ")) + } + out += "\n" + out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i]) + } + } else { + for i := range texts { + style := "{{/}}" + if i%2 == 1 { + style = "{{gray}}" + } + if i == highlightIndex { + style = highlightColor + "{{bold}}" + } + out += r.f(style+"%s", texts[i]) + if i < len(texts)-1 { + out += " " + } else { + out += r.f("{{/}}") + } + } flattenedLabels := report.Labels() if len(flattenedLabels) > 0 { out += r.f(" {{coral}}[%s]{{/}}", strings.Join(flattenedLabels, ", ")) @@ -626,17 +745,15 @@ func (r *DefaultReporter) codeLocationBlock(report types.SpecReport, highlightCo if usePreciseFailureLocation { out += r.f("{{gray}}%s{{/}}", failureLocation) } else { - out += r.f("{{gray}}%s{{/}}", locations[len(locations)-1]) - } - } else { - for i := range texts { - out += r.fi(uint(i), "%s", texts[i]) - if len(labels[i]) > 0 { - out += r.f(" {{coral}}[%s]{{/}}", strings.Join(labels[i], ", ")) + leafLocation := locations[len(locations)-1] + if (report.Failure.FailureNodeLocation != types.CodeLocation{}) && (report.Failure.FailureNodeLocation != leafLocation) { + out += r.fi(1, highlightColor+"[%s]{{/}} {{gray}}%s{{/}}\n", report.Failure.FailureNodeType, report.Failure.FailureNodeLocation) + out += r.fi(1, "{{gray}}[%s] %s{{/}}", report.LeafNodeType, leafLocation) + } else { + out += r.f("{{gray}}%s{{/}}", leafLocation) } - out += "\n" - out += r.fi(uint(i), "{{gray}}%s{{/}}\n", locations[i]) } + } return out } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go
b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go index 89d30076b..613072ebf 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/deprecated_reporter.go @@ -35,7 +35,7 @@ func ReportViaDeprecatedReporter(reporter DeprecatedReporter, report types.Repor FailOnPending: report.SuiteConfig.FailOnPending, FailFast: report.SuiteConfig.FailFast, FlakeAttempts: report.SuiteConfig.FlakeAttempts, - EmitSpecProgress: report.SuiteConfig.EmitSpecProgress, + EmitSpecProgress: false, DryRun: report.SuiteConfig.DryRun, ParallelNode: report.SuiteConfig.ParallelProcess, ParallelTotal: report.SuiteConfig.ParallelTotal, diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index fcea6ab17..592d7f614 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -15,12 +15,32 @@ import ( "fmt" "os" "strings" - "time" "github.com/onsi/ginkgo/v2/config" "github.com/onsi/ginkgo/v2/types" ) +type JunitReportConfig struct { + // Spec States for which no timeline should be emitted for system-err + // set this to types.SpecStatePassed|types.SpecStateSkipped|types.SpecStatePending to only match failing specs + OmitTimelinesForSpecState types.SpecState + + // Enable OmitFailureMessageAttr to prevent failure messages appearing in the "message" attribute of the Failure and Error tags + OmitFailureMessageAttr bool + + //Enable OmitCapturedStdOutErr to prevent captured stdout/stderr appearing in system-out + OmitCapturedStdOutErr bool + + // Enable OmitSpecLabels to prevent labels from appearing in the spec name + OmitSpecLabels bool + + // Enable OmitLeafNodeType to prevent the spec leaf node type from appearing in the spec name + OmitLeafNodeType bool + + // Enable OmitSuiteSetupNodes to prevent the creation of testcase entries for setup nodes + OmitSuiteSetupNodes bool +} + type JUnitTestSuites struct { XMLName xml.Name `xml:"testsuites"` // Tests maps onto the total number of specs in all test suites (this includes any suite nodes such as BeforeSuite) @@ -128,6 +148,10 @@ type JUnitFailure struct { } func GenerateJUnitReport(report types.Report, dst string) error { + return GenerateJUnitReportWithConfig(report, dst, JunitReportConfig{}) +} + +func GenerateJUnitReportWithConfig(report types.Report, dst string, config JunitReportConfig) error { suite := JUnitTestSuite{ Name: report.SuiteDescription, Package: report.SuitePath, @@ -149,7 +173,6 @@ func GenerateJUnitReport(report types.Report, dst string) error { {"FailOnPending", fmt.Sprintf("%t", report.SuiteConfig.FailOnPending)}, {"FailFast", fmt.Sprintf("%t", report.SuiteConfig.FailFast)}, {"FlakeAttempts", fmt.Sprintf("%d", report.SuiteConfig.FlakeAttempts)}, - {"EmitSpecProgress", fmt.Sprintf("%t", report.SuiteConfig.EmitSpecProgress)}, {"DryRun", fmt.Sprintf("%t", report.SuiteConfig.DryRun)}, {"ParallelTotal", fmt.Sprintf("%d", report.SuiteConfig.ParallelTotal)}, {"OutputInterceptorMode", report.SuiteConfig.OutputInterceptorMode}, @@ -157,22 +180,33 @@ func GenerateJUnitReport(report types.Report, dst string) error { }, } for _, spec := range report.SpecReports { + if config.OmitSuiteSetupNodes && spec.LeafNodeType != types.NodeTypeIt { + continue + } name := fmt.Sprintf("[%s]", spec.LeafNodeType) + if config.OmitLeafNodeType { + name = "" + } if spec.FullText() != "" { name = name + " " + spec.FullText() } labels := 
spec.Labels() - if len(labels) > 0 { + if len(labels) > 0 && !config.OmitSpecLabels { name = name + " [" + strings.Join(labels, ", ") + "]" } + name = strings.TrimSpace(name) test := JUnitTestCase{ Name: name, Classname: report.SuiteDescription, Status: spec.State.String(), Time: spec.RunTime.Seconds(), - SystemOut: systemOutForUnstructuredReporters(spec), - SystemErr: systemErrForUnstructuredReporters(spec), + } + if !spec.State.Is(config.OmitTimelinesForSpecState) { + test.SystemErr = systemErrForUnstructuredReporters(spec) + } + if !config.OmitCapturedStdOutErr { + test.SystemOut = systemOutForUnstructuredReporters(spec) } suite.Tests += 1 @@ -193,6 +227,9 @@ func GenerateJUnitReport(report types.Report, dst string) error { Type: "failed", Description: failureDescriptionForUnstructuredReporters(spec), } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" + } suite.Failures += 1 case types.SpecStateTimedout: test.Failure = &JUnitFailure{ @@ -200,6 +237,9 @@ func GenerateJUnitReport(report types.Report, dst string) error { Type: "timedout", Description: failureDescriptionForUnstructuredReporters(spec), } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" + } suite.Failures += 1 case types.SpecStateInterrupted: test.Error = &JUnitError{ @@ -207,6 +247,9 @@ func GenerateJUnitReport(report types.Report, dst string) error { Type: "interrupted", Description: failureDescriptionForUnstructuredReporters(spec), } + if config.OmitFailureMessageAttr { + test.Error.Message = "" + } suite.Errors += 1 case types.SpecStateAborted: test.Failure = &JUnitFailure{ @@ -214,6 +257,9 @@ func GenerateJUnitReport(report types.Report, dst string) error { Type: "aborted", Description: failureDescriptionForUnstructuredReporters(spec), } + if config.OmitFailureMessageAttr { + test.Failure.Message = "" + } suite.Errors += 1 case types.SpecStatePanicked: test.Error = &JUnitError{ @@ -221,6 +267,9 @@ func GenerateJUnitReport(report types.Report, dst string) error { Type: "panicked", Description: failureDescriptionForUnstructuredReporters(spec), } + if config.OmitFailureMessageAttr { + test.Error.Message = "" + } suite.Errors += 1 } @@ -287,63 +336,25 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) func failureDescriptionForUnstructuredReporters(spec types.SpecReport) string { out := &strings.Builder{} - out.WriteString(spec.Failure.Location.String() + "\n") - out.WriteString(spec.Failure.Location.FullStackTrace) - if !spec.Failure.ProgressReport.IsZero() { - out.WriteString("\n") - NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitProgressReport(spec.Failure.ProgressReport) - } + NewDefaultReporter(types.ReporterConfig{NoColor: true, VeryVerbose: true}, out).emitFailure(0, spec.State, spec.Failure, true) if len(spec.AdditionalFailures) > 0 { - out.WriteString("\nThere were additional failures detected after the initial failure:\n") - for i, additionalFailure := range spec.AdditionalFailures { - NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitFailure(0, additionalFailure.State, additionalFailure.Failure, true) - if i < len(spec.AdditionalFailures)-1 { - out.WriteString("----------\n") - } - } + out.WriteString("\nThere were additional failures detected after the initial failure. 
These are visible in the timeline\n") } return out.String() } func systemErrForUnstructuredReporters(spec types.SpecReport) string { - out := &strings.Builder{} - gw := spec.CapturedGinkgoWriterOutput - cursor := 0 - for _, pr := range spec.ProgressReports { - if cursor < pr.GinkgoWriterOffset { - if pr.GinkgoWriterOffset < len(gw) { - out.WriteString(gw[cursor:pr.GinkgoWriterOffset]) - cursor = pr.GinkgoWriterOffset - } else if cursor < len(gw) { - out.WriteString(gw[cursor:]) - cursor = len(gw) - } - } - NewDefaultReporter(types.ReporterConfig{NoColor: true}, out).EmitProgressReport(pr) - } - - if cursor < len(gw) { - out.WriteString(gw[cursor:]) - } + return RenderTimeline(spec, true) +} +func RenderTimeline(spec types.SpecReport, noColor bool) string { + out := &strings.Builder{} + NewDefaultReporter(types.ReporterConfig{NoColor: noColor, VeryVerbose: true}, out).emitTimeline(0, spec, spec.Timeline()) return out.String() } func systemOutForUnstructuredReporters(spec types.SpecReport) string { - systemOut := spec.CapturedStdOutErr - if len(spec.ReportEntries) > 0 { - systemOut += "\nReport Entries:\n" - for i, entry := range spec.ReportEntries { - systemOut += fmt.Sprintf("%s\n%s\n%s\n", entry.Name, entry.Location, entry.Time.Format(time.RFC3339Nano)) - if representation := entry.StringRepresentation(); representation != "" { - systemOut += representation + "\n" - } - if i+1 < len(spec.ReportEntries) { - systemOut += "--\n" - } - } - } - return systemOut + return spec.CapturedStdOutErr } // Deprecated JUnitReporter (so folks can still compile their suites) diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go index f79f005db..5e726c464 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/reporter.go @@ -9,13 +9,21 @@ type Reporter interface { WillRun(report types.SpecReport) DidRun(report types.SpecReport) SuiteDidEnd(report types.Report) + + //Timeline emission + EmitFailure(state types.SpecState, failure types.Failure) EmitProgressReport(progressReport types.ProgressReport) + EmitReportEntry(entry types.ReportEntry) + EmitSpecEvent(event types.SpecEvent) } type NoopReporter struct{} -func (n NoopReporter) SuiteWillBegin(report types.Report) {} -func (n NoopReporter) WillRun(report types.SpecReport) {} -func (n NoopReporter) DidRun(report types.SpecReport) {} -func (n NoopReporter) SuiteDidEnd(report types.Report) {} -func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {} +func (n NoopReporter) SuiteWillBegin(report types.Report) {} +func (n NoopReporter) WillRun(report types.SpecReport) {} +func (n NoopReporter) DidRun(report types.SpecReport) {} +func (n NoopReporter) SuiteDidEnd(report types.Report) {} +func (n NoopReporter) EmitFailure(state types.SpecState, failure types.Failure) {} +func (n NoopReporter) EmitProgressReport(progressReport types.ProgressReport) {} +func (n NoopReporter) EmitReportEntry(entry types.ReportEntry) {} +func (n NoopReporter) EmitSpecEvent(event types.SpecEvent) {} diff --git a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go index afc151b13..f33786a2d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporting_dsl.go @@ -35,7 +35,7 @@ func CurrentSpecReport() SpecReport { } /* - ReportEntryVisibility governs the visibility of ReportEntries in Ginkgo's console reporter + ReportEntryVisibility 
governs the visibility of ReportEntries in Ginkgo's console reporter - ReportEntryVisibilityAlways: the default behavior - the ReportEntry is always emitted. - ReportEntryVisibilityFailureOrVerbose: the ReportEntry is only emitted if the spec fails or if the tests are run with -v (similar to GinkgoWriter's behavior). @@ -50,9 +50,9 @@ const ReportEntryVisibilityAlways, ReportEntryVisibilityFailureOrVerbose, Report /* AddReportEntry generates and adds a new ReportEntry to the current spec's SpecReport. It can take any of the following arguments: - - A single arbitrary object to attach as the Value of the ReportEntry. This object will be included in any generated reports and will be emitted to the console when the report is emitted. - - A ReportEntryVisibility enum to control the visibility of the ReportEntry - - An Offset or CodeLocation decoration to control the reported location of the ReportEntry + - A single arbitrary object to attach as the Value of the ReportEntry. This object will be included in any generated reports and will be emitted to the console when the report is emitted. + - A ReportEntryVisibility enum to control the visibility of the ReportEntry + - An Offset or CodeLocation decoration to control the reported location of the ReportEntry If the Value object implements `fmt.Stringer`, its `String()` representation is used when emitting to the console. @@ -100,6 +100,25 @@ func ReportAfterEach(body func(SpecReport), args ...interface{}) bool { return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportAfterEach, "", combinedArgs...)) } +/* +ReportBeforeSuite nodes are run at the beginning of the suite. ReportBeforeSuite nodes take a function that receives a suite Report. + +They are called at the beginning of the suite, before any specs have run and any BeforeSuite or SynchronizedBeforeSuite nodes, and are passed in the initial report for the suite. +ReportBeforeSuite nodes must be created at the top-level (i.e. not nested in a Context/Describe/When node) + +# When running in parallel, Ginkgo ensures that only one of the parallel nodes runs the ReportBeforeSuite + +You cannot nest any other Ginkgo nodes within a ReportBeforeSuite node's closure. +You can learn more about ReportBeforeSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically + +You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports +*/ +func ReportBeforeSuite(body func(Report), args ...interface{}) bool { + combinedArgs := []interface{}{body} + combinedArgs = append(combinedArgs, args...) + return pushNode(internal.NewNode(deprecationTracker, types.NodeTypeReportBeforeSuite, "", combinedArgs...)) +} + /* ReportAfterSuite nodes are run at the end of the suite. ReportAfterSuite nodes take a function that receives a suite Report. @@ -113,6 +132,7 @@ In addition to using ReportAfterSuite to programmatically generate suite reports You cannot nest any other Ginkgo nodes within a ReportAfterSuite node's closure.
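For instance (an illustrative sketch, not part of this patch: the destination path, the chosen options, and the usual dot-imports of ginkgo and gomega plus the reporters and types packages are assumptions), a ReportAfterSuite node can feed the suite report into the JunitReportConfig machinery added earlier in this diff:

	var _ = ReportAfterSuite("custom junit report", func(report Report) {
		// Omit system-err timelines for passing, skipped, and pending specs
		// (keeping them only for failures), and drop the leaf node type prefix.
		config := reporters.JunitReportConfig{
			OmitTimelinesForSpecState: types.SpecStatePassed | types.SpecStateSkipped | types.SpecStatePending,
			OmitLeafNodeType:          true,
		}
		Expect(reporters.GenerateJUnitReportWithConfig(report, "junit.custom.xml", config)).To(Succeed())
	})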
You can learn more about ReportAfterSuite here: https://onsi.github.io/ginkgo/#generating-reports-programmatically + You can learn more about Ginkgo's reporting infrastructure, including generating reports with the CLI here: https://onsi.github.io/ginkgo/#generating-machine-readable-reports */ func ReportAfterSuite(text string, body func(Report), args ...interface{}) bool { diff --git a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go index 683674462..ac9b7abb5 100644 --- a/vendor/github.com/onsi/ginkgo/v2/table_dsl.go +++ b/vendor/github.com/onsi/ginkgo/v2/table_dsl.go @@ -13,7 +13,7 @@ import ( /* The EntryDescription decorator allows you to pass a format string to DescribeTable() and Entry(). This format string is used to generate entry names via: - fmt.Sprintf(formatString, parameters...) + fmt.Sprintf(formatString, parameters...) where parameters are the parameters passed into the entry. @@ -32,19 +32,20 @@ DescribeTable describes a table-driven spec. For example: - DescribeTable("a simple table", - func(x int, y int, expected bool) { - Ω(x > y).Should(Equal(expected)) - }, - Entry("x > y", 1, 0, true), - Entry("x == y", 0, 0, false), - Entry("x < y", 0, 1, false), - ) + DescribeTable("a simple table", + func(x int, y int, expected bool) { + Ω(x > y).Should(Equal(expected)) + }, + Entry("x > y", 1, 0, true), + Entry("x == y", 0, 0, false), + Entry("x < y", 0, 1, false), + ) You can learn more about DescribeTable here: https://onsi.github.io/ginkgo/#table-specs And can explore some Table patterns here: https://onsi.github.io/ginkgo/#table-specs-patterns */ func DescribeTable(description string, args ...interface{}) bool { + GinkgoHelper() generateTable(description, args...) return true } @@ -53,6 +54,7 @@ func DescribeTable(description string, args ...interface{}) bool { You can focus a table with `FDescribeTable`. This is equivalent to `FDescribe`. */ func FDescribeTable(description string, args ...interface{}) bool { + GinkgoHelper() args = append(args, internal.Focus) generateTable(description, args...) return true @@ -62,6 +64,7 @@ func FDescribeTable(description string, args ...interface{}) bool { You can mark a table as pending with `PDescribeTable`. This is equivalent to `PDescribe`. */ func PDescribeTable(description string, args ...interface{}) bool { + GinkgoHelper() args = append(args, internal.Pending) generateTable(description, args...) return true @@ -95,26 +98,29 @@ If you want to generate interruptible specs simply write a Table function that a You can learn more about Entry here: https://onsi.github.io/ginkgo/#table-specs */ func Entry(description interface{}, args ...interface{}) TableEntry { + GinkgoHelper() decorations, parameters := internal.PartitionDecorations(args...) - return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)} + return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)} } /* You can focus a particular entry with FEntry. This is equivalent to FIt. */ func FEntry(description interface{}, args ...interface{}) TableEntry { + GinkgoHelper() decorations, parameters := internal.PartitionDecorations(args...) 
decorations = append(decorations, internal.Focus) - return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)} + return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)} } /* You can mark a particular entry as pending with PEntry. This is equivalent to PIt. */ func PEntry(description interface{}, args ...interface{}) TableEntry { + GinkgoHelper() decorations, parameters := internal.PartitionDecorations(args...) decorations = append(decorations, internal.Pending) - return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(1)} + return TableEntry{description: description, decorations: decorations, parameters: parameters, codeLocation: types.NewCodeLocation(0)} } /* @@ -126,7 +132,8 @@ var contextType = reflect.TypeOf(new(context.Context)).Elem() var specContextType = reflect.TypeOf(new(SpecContext)).Elem() func generateTable(description string, args ...interface{}) { - cl := types.NewCodeLocation(2) + GinkgoHelper() + cl := types.NewCodeLocation(0) containerNodeArgs := []interface{}{cl} entries := []TableEntry{} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go index e4e9e38c6..9cd576817 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/code_location.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/code_location.go @@ -1,4 +1,5 @@ package types + import ( "fmt" "os" @@ -6,6 +7,7 @@ import ( "runtime" "runtime/debug" "strings" + "sync" ) type CodeLocation struct { @@ -37,6 +39,73 @@ func (codeLocation CodeLocation) ContentsOfLine() string { return lines[codeLocation.LineNumber-1] } +type codeLocationLocator struct { + pcs map[uintptr]bool + helpers map[string]bool + lock *sync.Mutex +} + +func (c *codeLocationLocator) addHelper(pc uintptr) { + c.lock.Lock() + defer c.lock.Unlock() + + if c.pcs[pc] { + return + } + c.lock.Unlock() + f := runtime.FuncForPC(pc) + c.lock.Lock() + if f == nil { + return + } + c.helpers[f.Name()] = true + c.pcs[pc] = true +} + +func (c *codeLocationLocator) hasHelper(name string) bool { + c.lock.Lock() + defer c.lock.Unlock() + return c.helpers[name] +} + +func (c *codeLocationLocator) getCodeLocation(skip int) CodeLocation { + pc := make([]uintptr, 40) + n := runtime.Callers(skip+2, pc) + if n == 0 { + return CodeLocation{} + } + pc = pc[:n] + frames := runtime.CallersFrames(pc) + for { + frame, more := frames.Next() + if !c.hasHelper(frame.Function) { + return CodeLocation{FileName: frame.File, LineNumber: frame.Line} + } + if !more { + break + } + } + return CodeLocation{} +} + +var clLocator = &codeLocationLocator{ + pcs: map[uintptr]bool{}, + helpers: map[string]bool{}, + lock: &sync.Mutex{}, +} + +// MarkAsHelper is used by GinkgoHelper to mark the caller (appropriately offset by skip)as a helper. You can use this directly if you need to provide an optional `skip` to mark functions further up the call stack as helpers. 
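As an illustration of the helper machinery above (the ExpectPositive helper and the spec below are hypothetical, not part of this patch; dot-imports of ginkgo and gomega are assumed), marking a shared assertion helper makes failures point at its call site:

	func ExpectPositive(value int) {
		GinkgoHelper() // calls types.MarkAsHelper() under the hood, so reported code locations skip this frame
		Expect(value).To(BeNumerically(">", 0))
	}

	var _ = Describe("helpers", func() {
		It("attributes failures to the caller", func() {
			ExpectPositive(-1) // the failure is reported at this line, not inside ExpectPositive
		})
	})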
+func MarkAsHelper(optionalSkip ...int) { + skip := 1 + if len(optionalSkip) > 0 { + skip += optionalSkip[0] + } + pc, _, _, ok := runtime.Caller(skip) + if ok { + clLocator.addHelper(pc) + } +} + func NewCustomCodeLocation(message string) CodeLocation { return CodeLocation{ CustomMessage: message, @@ -44,14 +113,13 @@ func NewCustomCodeLocation(message string) CodeLocation { } func NewCodeLocation(skip int) CodeLocation { - _, file, line, _ := runtime.Caller(skip + 1) - return CodeLocation{FileName: file, LineNumber: line} + return clLocator.getCodeLocation(skip + 1) } func NewCodeLocationWithStackTrace(skip int) CodeLocation { - _, file, line, _ := runtime.Caller(skip + 1) - stackTrace := PruneStack(string(debug.Stack()), skip+1) - return CodeLocation{FileName: file, LineNumber: line, FullStackTrace: stackTrace} + cl := clLocator.getCodeLocation(skip + 1) + cl.FullStackTrace = PruneStack(string(debug.Stack()), skip+1) + return cl } // PruneStack removes references to functions that are internal to Ginkgo diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index f016c5c1f..1014c7b49 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -8,6 +8,7 @@ package types import ( "flag" "os" + "path/filepath" "runtime" "strconv" "strings" @@ -26,11 +27,11 @@ type SuiteConfig struct { FailOnPending bool FailFast bool FlakeAttempts int - EmitSpecProgress bool DryRun bool PollProgressAfter time.Duration PollProgressInterval time.Duration Timeout time.Duration + EmitSpecProgress bool // this is deprecated but its removal is causing compile issue for some users that were setting it manually OutputInterceptorMode string SourceRoots []string GracePeriod time.Duration @@ -81,13 +82,12 @@ func (vl VerbosityLevel) LT(comp VerbosityLevel) bool { // Configuration for Ginkgo's reporter type ReporterConfig struct { - NoColor bool - SlowSpecThreshold time.Duration - Succinct bool - Verbose bool - VeryVerbose bool - FullTrace bool - AlwaysEmitGinkgoWriter bool + NoColor bool + Succinct bool + Verbose bool + VeryVerbose bool + FullTrace bool + ShowNodeEvents bool JSONReport string JUnitReport string @@ -110,9 +110,7 @@ func (rc ReporterConfig) WillGenerateReport() bool { } func NewDefaultReporterConfig() ReporterConfig { - return ReporterConfig{ - SlowSpecThreshold: 5 * time.Second, - } + return ReporterConfig{} } // Configuration for the Ginkgo CLI @@ -235,6 +233,9 @@ type deprecatedConfig struct { SlowSpecThresholdWithFLoatUnits float64 Stream bool Notify bool + EmitSpecProgress bool + SlowSpecThreshold time.Duration + AlwaysEmitGinkgoWriter bool } // Flags @@ -275,8 +276,6 @@ var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "S.DryRun", Name: "dry-run", SectionKey: "debug", DeprecatedName: "dryRun", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, ginkgo will walk the test hierarchy without actually running anything. 
Best paired with -v."}, - {KeyPath: "S.EmitSpecProgress", Name: "progress", SectionKey: "debug", - Usage: "If set, ginkgo will emit progress information as each spec runs to the GinkgoWriter."}, {KeyPath: "S.PollProgressAfter", Name: "poll-progress-after", SectionKey: "debug", UsageDefaultValue: "0", Usage: "Emit node progress reports periodically if node hasn't completed after this duration."}, {KeyPath: "S.PollProgressInterval", Name: "poll-progress-interval", SectionKey: "debug", UsageDefaultValue: "10s", @@ -303,6 +302,8 @@ var SuiteConfigFlags = GinkgoFlags{ {KeyPath: "D.RegexScansFilePath", DeprecatedName: "regexScansFilePath", DeprecatedDocLink: "removed--regexscansfilepath", DeprecatedVersion: "2.0.0"}, {KeyPath: "D.DebugParallel", DeprecatedName: "debug", DeprecatedDocLink: "removed--debug", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.EmitSpecProgress", DeprecatedName: "progress", SectionKey: "debug", + DeprecatedVersion: "2.5.0", Usage: ". The functionality provided by --progress was confusing and is no longer needed. Use --show-node-events instead to see node entry and exit events included in the timeline of failed and verbose specs. Or you can run with -vv to always see all node events. Lastly, --poll-progress-after and the PollProgressAfter decorator now provide a better mechanism for debugging specs that tend to get stuck."}, } // ParallelConfigFlags provides flags for the Ginkgo test process (not the CLI) @@ -319,8 +320,6 @@ var ParallelConfigFlags = GinkgoFlags{ var ReporterConfigFlags = GinkgoFlags{ {KeyPath: "R.NoColor", Name: "no-color", SectionKey: "output", DeprecatedName: "noColor", DeprecatedDocLink: "changed-command-line-flags", Usage: "If set, suppress color output in default reporter."}, - {KeyPath: "R.SlowSpecThreshold", Name: "slow-spec-threshold", SectionKey: "output", UsageArgument: "duration", UsageDefaultValue: "5s", - Usage: "Specs that take longer to run than this threshold are flagged as slow by the default reporter."}, {KeyPath: "R.Verbose", Name: "v", SectionKey: "output", Usage: "If set, emits more output including GinkgoWriter contents."}, {KeyPath: "R.VeryVerbose", Name: "vv", SectionKey: "output", @@ -329,8 +328,8 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "If set, default reporter prints out a very succinct report"}, {KeyPath: "R.FullTrace", Name: "trace", SectionKey: "output", Usage: "If set, default reporter prints out the full stack trace when a failure occurs"}, - {KeyPath: "R.AlwaysEmitGinkgoWriter", Name: "always-emit-ginkgo-writer", SectionKey: "output", DeprecatedName: "reportPassed", DeprecatedDocLink: "renamed--reportpassed", - Usage: "If set, default reporter prints out captured output of passed tests."}, + {KeyPath: "R.ShowNodeEvents", Name: "show-node-events", SectionKey: "output", + Usage: "If set, default reporter prints node > Enter and < Exit events when specs fail"}, {KeyPath: "R.JSONReport", Name: "json-report", UsageArgument: "filename.json", SectionKey: "output", Usage: "If set, Ginkgo will generate a JSON-formatted test report at the specified location."}, @@ -343,6 +342,8 @@ var ReporterConfigFlags = GinkgoFlags{ Usage: "use --slow-spec-threshold instead and pass in a duration string (e.g. 
'5s', not '5.0')"}, {KeyPath: "D.NoisyPendings", DeprecatedName: "noisyPendings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, {KeyPath: "D.NoisySkippings", DeprecatedName: "noisySkippings", DeprecatedDocLink: "removed--noisypendings-and--noisyskippings", DeprecatedVersion: "2.0.0"}, + {KeyPath: "D.SlowSpecThreshold", DeprecatedName: "slow-spec-threshold", SectionKey: "output", Usage: "--slow-spec-threshold has been deprecated and will be removed in a future version of Ginkgo. This feature has proved to be more noisy than useful. You can use --poll-progress-after, instead, to get more actionable feedback about potentially slow specs and understand where they might be getting stuck.", DeprecatedVersion: "2.5.0"}, + {KeyPath: "D.AlwaysEmitGinkgoWriter", DeprecatedName: "always-emit-ginkgo-writer", SectionKey: "output", Usage: " - use -v instead, or one of Ginkgo's machine-readable report formats to get GinkgoWriter output for passing specs."}, } // BuildTestSuiteFlagSet attaches to the CommandLine flagset and provides flags for the Ginkgo test process @@ -600,13 +601,29 @@ func VetAndInitializeCLIAndGoConfig(cliConfig CLIConfig, goFlagsConfig GoFlagsCo } // GenerateGoTestCompileArgs is used by the Ginkgo CLI to generate command line arguments to pass to the go test -c command when compiling the test -func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string) ([]string, error) { +func GenerateGoTestCompileArgs(goFlagsConfig GoFlagsConfig, destination string, packageToBuild string, pathToInvocationPath string) ([]string, error) { // if the user has set the CoverProfile run-time flag make sure to set the build-time cover flag to make sure // the built test binary can generate a coverprofile if goFlagsConfig.CoverProfile != "" { goFlagsConfig.Cover = true } + if goFlagsConfig.CoverPkg != "" { + coverPkgs := strings.Split(goFlagsConfig.CoverPkg, ",") + adjustedCoverPkgs := make([]string, len(coverPkgs)) + for i, coverPkg := range coverPkgs { + coverPkg = strings.Trim(coverPkg, " ") + if strings.HasPrefix(coverPkg, "./") { + // this is a relative coverPkg - we need to reroot it + adjustedCoverPkgs[i] = "./" + filepath.Join(pathToInvocationPath, strings.TrimPrefix(coverPkg, "./")) + } else { + // this is a package name - don't touch it + adjustedCoverPkgs[i] = coverPkg + } + } + goFlagsConfig.CoverPkg = strings.Join(adjustedCoverPkgs, ",") + } + args := []string{"test", "-c", "-o", destination, packageToBuild} goArgs, err := GenerateFlagArgs( GoBuildFlags, diff --git a/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go index 2948dfa0c..e2519f673 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/deprecation_support.go @@ -38,7 +38,7 @@ func (d deprecations) Async() Deprecation { func (d deprecations) Measure() Deprecation { return Deprecation{ - Message: "Measure is deprecated and will be removed in Ginkgo V2. Please migrate to gomega/gmeasure.", + Message: "Measure is deprecated and has been removed from Ginkgo V2. Any Measure tests in your spec will not run. 
Please migrate to gomega/gmeasure.", DocLink: "removed-measure", Version: "1.16.3", } @@ -83,6 +83,13 @@ func (d deprecations) Nodot() Deprecation { } } +func (d deprecations) SuppressProgressReporting() Deprecation { + return Deprecation{ + Message: "Improvements to how reporters emit timeline information means that SuppressProgressReporting is no longer necessary and has been deprecated.", + Version: "2.5.0", + } +} + type DeprecationTracker struct { deprecations map[Deprecation][]CodeLocation lock *sync.Mutex diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go index b7ed5a21e..1e0dbfd9d 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -108,8 +108,8 @@ Please ensure all assertions are inside leaf nodes such as {{bold}}BeforeEach{{/ func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocation) error { docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" - if nodeType.Is(NodeTypeReportAfterSuite) { - docLink = "reporting-nodes---reportaftersuite" + if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite" } return GinkgoError{ @@ -125,8 +125,8 @@ func (g ginkgoErrors) SuiteNodeInNestedContext(nodeType NodeType, cl CodeLocatio func (g ginkgoErrors) SuiteNodeDuringRunPhase(nodeType NodeType, cl CodeLocation) error { docLink := "suite-setup-and-cleanup-beforesuite-and-aftersuite" - if nodeType.Is(NodeTypeReportAfterSuite) { - docLink = "reporting-nodes---reportaftersuite" + if nodeType.Is(NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite) { + docLink = "reporting-nodes---reportbeforesuite-and-reportaftersuite" } return GinkgoError{ @@ -298,6 +298,15 @@ func (g ginkgoErrors) SetupNodeNotInOrderedContainer(cl CodeLocation, nodeType N } } +func (g ginkgoErrors) InvalidContinueOnFailureDecoration(cl CodeLocation) error { + return GinkgoError{ + Heading: "ContinueOnFailure not decorating an outermost Ordered Container", + Message: "ContinueOnFailure can only decorate an Ordered container, and this Ordered container must be the outermost Ordered container.", + CodeLocation: cl, + DocLink: "ordered-containers", + } +} + /* DeferCleanup errors */ func (g ginkgoErrors) DeferCleanupInvalidFunction(cl CodeLocation) error { return GinkgoError{ @@ -320,7 +329,7 @@ func (g ginkgoErrors) PushingCleanupNodeDuringTreeConstruction(cl CodeLocation) func (g ginkgoErrors) PushingCleanupInReportingNode(cl CodeLocation, nodeType NodeType) error { return GinkgoError{ Heading: fmt.Sprintf("DeferCleanup cannot be called in %s", nodeType), - Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a ReportAfterEach or ReportAfterSuite.", + Message: "Please inline your cleanup code - Ginkgo won't run cleanup code after a Reporting node.", CodeLocation: cl, DocLink: "cleaning-up-our-cleanup-code-defercleanup", } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go index 0403f9e63..b0d3b651e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/label_filter.go @@ -272,12 +272,23 @@ func tokenize(input string) func() (*treeNode, error) { } } +func MustParseLabelFilter(input string) LabelFilter { + filter, err := ParseLabelFilter(input) + if err != nil { + panic(err) + } + return filter +} + func ParseLabelFilter(input string) 
(LabelFilter, error) { if DEBUG_LABEL_FILTER_PARSING { fmt.Println("\n==============") fmt.Println("Input: ", input) fmt.Print("Tokens: ") } + if input == "" { + return func(_ []string) bool { return true }, nil + } nextToken := tokenize(input) root := &treeNode{token: lfTokenRoot} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go index 798bedc03..7b1524b52 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/report_entry.go @@ -6,8 +6,8 @@ import ( "time" ) -//ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports -//and across the network connection when running in parallel +// ReportEntryValue wraps a report entry's value ensuring it can be encoded and decoded safely into reports +// and across the network connection when running in parallel type ReportEntryValue struct { raw interface{} //unexported to prevent gob from freaking out about unregistered structs AsJSON string @@ -85,10 +85,12 @@ func (rev *ReportEntryValue) GobDecode(data []byte) error { type ReportEntry struct { // Visibility captures the visibility policy for this ReportEntry Visibility ReportEntryVisibility - // Time captures the time the AddReportEntry was called - Time time.Time // Location captures the location of the AddReportEntry call Location CodeLocation + + Time time.Time //need this for backwards compatibility + TimelineLocation TimelineLocation + // Name captures the name of this report Name string // Value captures the (optional) object passed into AddReportEntry - this can be @@ -120,7 +122,9 @@ func (entry ReportEntry) GetRawValue() interface{} { return entry.Value.GetRawValue() } - +func (entry ReportEntry) GetTimelineLocation() TimelineLocation { + return entry.TimelineLocation +} type ReportEntries []ReportEntry diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go index 9fc4425fe..d048a8ada 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/types.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -2,6 +2,8 @@ package types import ( "encoding/json" + "fmt" + "sort" "strings" "time" ) @@ -56,19 +58,20 @@ type Report struct { SuiteConfig SuiteConfig //SpecReports is a list of all SpecReports generated by this test run + //It is empty when the SuiteReport is provided to ReportBeforeSuite SpecReports SpecReports } -//PreRunStats contains a set of stats captured before the test run begins. This is primarily used -//by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) -//and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters. +// PreRunStats contains a set of stats captured before the test run begins. This is primarily used +// by Ginkgo's reporter to tell the user how many specs are in the current suite (PreRunStats.TotalSpecs) +// and how many it intends to run (PreRunStats.SpecsThatWillRun) after applying any relevant focus or skip filters. type PreRunStats struct { TotalSpecs int SpecsThatWillRun int } -//Add is used by Ginkgo's parallel aggregation mechanisms to combine test run reports form individual parallel processes -//to form a complete final report. +// Add is used by Ginkgo's parallel aggregation mechanisms to combine test run reports from individual parallel processes +// to form a complete final report.
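Returning to the MustParseLabelFilter helper and the empty-input handling added above, a minimal usage sketch (the wrapper function name is hypothetical; expected outputs are shown as comments):

	func ExampleLabelFilter() {
		filter := types.MustParseLabelFilter("integration && !slow")
		fmt.Println(filter([]string{"integration"}))         // true
		fmt.Println(filter([]string{"integration", "slow"})) // false

		// An empty filter string now matches every spec:
		matchAll := types.MustParseLabelFilter("")
		fmt.Println(matchAll([]string{"anything"})) // true
	}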
func (report Report) Add(other Report) Report { report.SuiteSucceeded = report.SuiteSucceeded && other.SuiteSucceeded @@ -147,6 +150,9 @@ type SpecReport struct { // ParallelProcess captures the parallel process that this spec ran on ParallelProcess int + // RunningInParallel captures whether this spec is part of a suite that ran in parallel + RunningInParallel bool + //Failure is populated if a spec has failed, panicked, been interrupted, or skipped by the user (e.g. calling Skip()) //It includes detailed information about the Failure Failure Failure @@ -178,6 +184,9 @@ type SpecReport struct { // AdditionalFailures contains any failures that occurred after the initial spec failure. These typically occur in cleanup nodes after the initial failure and are only emitted when running in verbose mode. AdditionalFailures []AdditionalFailure + + // SpecEvents capture additional events that occur during the spec run + SpecEvents SpecEvents } func (report SpecReport) MarshalJSON() ([]byte, error) { @@ -204,6 +213,7 @@ func (report SpecReport) MarshalJSON() ([]byte, error) { ReportEntries ReportEntries `json:",omitempty"` ProgressReports []ProgressReport `json:",omitempty"` AdditionalFailures []AdditionalFailure `json:",omitempty"` + SpecEvents SpecEvents `json:",omitempty"` }{ ContainerHierarchyTexts: report.ContainerHierarchyTexts, ContainerHierarchyLocations: report.ContainerHierarchyLocations, @@ -238,6 +248,9 @@ func (report SpecReport) MarshalJSON() ([]byte, error) { if len(report.AdditionalFailures) > 0 { out.AdditionalFailures = report.AdditionalFailures } + if len(report.SpecEvents) > 0 { + out.SpecEvents = report.SpecEvents + } return json.Marshal(out) } @@ -255,13 +268,13 @@ func (report SpecReport) CombinedOutput() string { return report.CapturedStdOutErr + "\n" + report.CapturedGinkgoWriterOutput } -//Failed returns true if report.State is one of the SpecStateFailureStates +// Failed returns true if report.State is one of the SpecStateFailureStates // (SpecStateFailed, SpecStatePanicked, SpecStateinterrupted, SpecStateAborted) func (report SpecReport) Failed() bool { return report.State.Is(SpecStateFailureStates) } -//FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText +// FullText returns a concatenation of all the report.ContainerHierarchyTexts and report.LeafNodeText func (report SpecReport) FullText() string { texts := []string{} texts = append(texts, report.ContainerHierarchyTexts...) @@ -271,7 +284,7 @@ func (report SpecReport) FullText() string { return strings.Join(texts, " ") } -//Labels returns a deduped set of all the spec's Labels. +// Labels returns a deduped set of all the spec's Labels. 
func (report SpecReport) Labels() []string { out := []string{} seen := map[string]bool{} @@ -293,7 +306,7 @@ func (report SpecReport) Labels() []string { return out } -//MatchesLabelFilter returns true if the spec satisfies the passed in label filter query +// MatchesLabelFilter returns true if the spec satisfies the passed in label filter query func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { filter, err := ParseLabelFilter(query) if err != nil { @@ -302,29 +315,54 @@ func (report SpecReport) MatchesLabelFilter(query string) (bool, error) { return filter(report.Labels()), nil } -//FileName() returns the name of the file containing the spec +// FileName() returns the name of the file containing the spec func (report SpecReport) FileName() string { return report.LeafNodeLocation.FileName } -//LineNumber() returns the line number of the leaf node +// LineNumber() returns the line number of the leaf node func (report SpecReport) LineNumber() int { return report.LeafNodeLocation.LineNumber } -//FailureMessage() returns the failure message (or empty string if the test hasn't failed) +// FailureMessage() returns the failure message (or empty string if the test hasn't failed) func (report SpecReport) FailureMessage() string { return report.Failure.Message } -//FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed) +// FailureLocation() returns the location of the failure (or an empty CodeLocation if the test hasn't failed) func (report SpecReport) FailureLocation() CodeLocation { return report.Failure.Location } +// Timeline() returns a timeline view of the report +func (report SpecReport) Timeline() Timeline { + timeline := Timeline{} + if !report.Failure.IsZero() { + timeline = append(timeline, report.Failure) + if report.Failure.AdditionalFailure != nil { + timeline = append(timeline, *(report.Failure.AdditionalFailure)) + } + } + for _, additionalFailure := range report.AdditionalFailures { + timeline = append(timeline, additionalFailure) + } + for _, reportEntry := range report.ReportEntries { + timeline = append(timeline, reportEntry) + } + for _, progressReport := range report.ProgressReports { + timeline = append(timeline, progressReport) + } + for _, specEvent := range report.SpecEvents { + timeline = append(timeline, specEvent) + } + sort.Sort(timeline) + return timeline +} + type SpecReports []SpecReport -//WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes +// WithLeafNodeType returns the subset of SpecReports with LeafNodeType matching one of the requested NodeTypes func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports { count := 0 for i := range reports { @@ -344,7 +382,7 @@ func (reports SpecReports) WithLeafNodeType(nodeTypes NodeType) SpecReports { return out } -//WithState returns the subset of SpecReports with State matching one of the requested SpecStates +// WithState returns the subset of SpecReports with State matching one of the requested SpecStates func (reports SpecReports) WithState(states SpecState) SpecReports { count := 0 for i := range reports { @@ -363,7 +401,7 @@ func (reports SpecReports) WithState(states SpecState) SpecReports { return out } -//CountWithState returns the number of SpecReports with State matching one of the requested SpecStates +// CountWithState returns the number of SpecReports with State matching one of the requested SpecStates func (reports SpecReports) CountWithState(states SpecState) 
int { n := 0 for i := range reports { @@ -374,7 +412,7 @@ func (reports SpecReports) CountWithState(states SpecState) int { return n } -//If the Spec passes, CountOfFlakedSpecs returns the number of SpecReports that failed after multiple attempts. +// If the Spec passes, CountOfFlakedSpecs returns the number of SpecReports that failed after multiple attempts. func (reports SpecReports) CountOfFlakedSpecs() int { n := 0 for i := range reports { @@ -385,7 +423,7 @@ func (reports SpecReports) CountOfFlakedSpecs() int { return n } -//If the Spec fails, CountOfRepeatedSpecs returns the number of SpecReports that passed after multiple attempts +// If the Spec fails, CountOfRepeatedSpecs returns the number of SpecReports that passed after multiple attempts func (reports SpecReports) CountOfRepeatedSpecs() int { n := 0 for i := range reports { @@ -396,6 +434,53 @@ func (reports SpecReports) CountOfRepeatedSpecs() int { return n } +// TimelineLocation captures the location of an event in the spec's timeline +type TimelineLocation struct { + //Offset is the offset (in bytes) of the event relative to the GinkgoWriter stream + Offset int `json:",omitempty"` + + //Order is the order of the event with respect to other events. The absolute value of Order + //is irrelevant. All that matters is that an event with a lower Order occurs before an event with a higher Order + Order int `json:",omitempty"` + + Time time.Time +} + +// TimelineEvent represents an event on the timeline +// consumers of Timeline will need to check the concrete type of each entry to determine how to handle it +type TimelineEvent interface { + GetTimelineLocation() TimelineLocation +} + +type Timeline []TimelineEvent + +func (t Timeline) Len() int { return len(t) } +func (t Timeline) Less(i, j int) bool { + return t[i].GetTimelineLocation().Order < t[j].GetTimelineLocation().Order +} +func (t Timeline) Swap(i, j int) { t[i], t[j] = t[j], t[i] } +func (t Timeline) WithoutHiddenReportEntries() Timeline { + out := Timeline{} + for _, event := range t { + if reportEntry, isReportEntry := event.(ReportEntry); isReportEntry && reportEntry.Visibility == ReportEntryVisibilityNever { + continue + } + out = append(out, event) + } + return out +} + +func (t Timeline) WithoutVeryVerboseSpecEvents() Timeline { + out := Timeline{} + for _, event := range t { + if specEvent, isSpecEvent := event.(SpecEvent); isSpecEvent && specEvent.IsOnlyVisibleAtVeryVerbose() { + continue + } + out = append(out, event) + } + return out +} + // Failure captures failure information for an individual test type Failure struct { // Message - the failure message passed into Fail(...). When using a matcher library @@ -408,6 +493,8 @@ type Failure struct { // This CodeLocation will include a fully-populated StackTrace Location CodeLocation + TimelineLocation TimelineLocation + // ForwardedPanic - if the failure represents a captured panic (i.e. Summary.State == SpecStatePanicked) // then ForwardedPanic will be populated with a string representation of the captured panic. ForwardedPanic string `json:",omitempty"` @@ -420,19 +507,29 @@ type Failure struct { // FailureNodeType will contain the NodeType of the node in which the failure occurred. // FailureNodeLocation will contain the CodeLocation of the node in which the failure occurred. // If populated, FailureNodeContainerIndex will be the index into SpecReport.ContainerHierarchyTexts and SpecReport.ContainerHierarchyLocations that represents the parent container of the node in which the failure occurred.
- FailureNodeContext FailureNodeContext - FailureNodeType NodeType - FailureNodeLocation CodeLocation - FailureNodeContainerIndex int + FailureNodeContext FailureNodeContext `json:",omitempty"` + + FailureNodeType NodeType `json:",omitempty"` + + FailureNodeLocation CodeLocation `json:",omitempty"` + + FailureNodeContainerIndex int `json:",omitempty"` //ProgressReport is populated if the spec was interrupted or timed out - ProgressReport ProgressReport + ProgressReport ProgressReport `json:",omitempty"` + + //AdditionalFailure is non-nil if a follow-on failure occurred within the same node after the primary failure. This only happens when a node has timed out or been interrupted. In such cases the AdditionalFailure can include information about where/why the spec was stuck. + AdditionalFailure *AdditionalFailure `json:",omitempty"` } func (f Failure) IsZero() bool { return f.Message == "" && (f.Location == CodeLocation{}) } +func (f Failure) GetTimelineLocation() TimelineLocation { + return f.TimelineLocation +} + // FailureNodeContext captures the location context for the node containing the failing line of code type FailureNodeContext uint @@ -471,6 +568,10 @@ type AdditionalFailure struct { Failure Failure } +func (f AdditionalFailure) GetTimelineLocation() TimelineLocation { + return f.Failure.TimelineLocation +} + // SpecState captures the state of a spec // To determine if a given `state` represents a failure state, use `state.Is(SpecStateFailureStates)` type SpecState uint @@ -503,6 +604,9 @@ var ssEnumSupport = NewEnumSupport(map[uint]string{ func (ss SpecState) String() string { return ssEnumSupport.String(uint(ss)) } +func (ss SpecState) GomegaString() string { + return ssEnumSupport.String(uint(ss)) +} func (ss *SpecState) UnmarshalJSON(b []byte) error { out, err := ssEnumSupport.UnmarshJSON(b) *ss = SpecState(out) @@ -520,38 +624,40 @@ func (ss SpecState) Is(states SpecState) bool { // ProgressReport captures the progress of the current spec. 
It is, effectively, a structured Ginkgo-aware stack trace type ProgressReport struct { - Message string - ParallelProcess int - RunningInParallel bool + Message string `json:",omitempty"` + ParallelProcess int `json:",omitempty"` + RunningInParallel bool `json:",omitempty"` - Time time.Time + ContainerHierarchyTexts []string `json:",omitempty"` + LeafNodeText string `json:",omitempty"` + LeafNodeLocation CodeLocation `json:",omitempty"` + SpecStartTime time.Time `json:",omitempty"` - ContainerHierarchyTexts []string - LeafNodeText string - LeafNodeLocation CodeLocation - SpecStartTime time.Time + CurrentNodeType NodeType `json:",omitempty"` + CurrentNodeText string `json:",omitempty"` + CurrentNodeLocation CodeLocation `json:",omitempty"` + CurrentNodeStartTime time.Time `json:",omitempty"` - CurrentNodeType NodeType - CurrentNodeText string - CurrentNodeLocation CodeLocation - CurrentNodeStartTime time.Time + CurrentStepText string `json:",omitempty"` + CurrentStepLocation CodeLocation `json:",omitempty"` + CurrentStepStartTime time.Time `json:",omitempty"` - CurrentStepText string - CurrentStepLocation CodeLocation - CurrentStepStartTime time.Time + AdditionalReports []string `json:",omitempty"` - AdditionalReports []string + CapturedGinkgoWriterOutput string `json:",omitempty"` + TimelineLocation TimelineLocation `json:",omitempty"` - CapturedGinkgoWriterOutput string `json:",omitempty"` - GinkgoWriterOffset int - - Goroutines []Goroutine + Goroutines []Goroutine `json:",omitempty"` } func (pr ProgressReport) IsZero() bool { return pr.CurrentNodeType == NodeTypeInvalid } +func (pr ProgressReport) Time() time.Time { + return pr.TimelineLocation.Time +} + func (pr ProgressReport) SpecGoroutine() Goroutine { for _, goroutine := range pr.Goroutines { if goroutine.IsSpecGoroutine { @@ -589,6 +695,22 @@ func (pr ProgressReport) WithoutCapturedGinkgoWriterOutput() ProgressReport { return out } +func (pr ProgressReport) WithoutOtherGoroutines() ProgressReport { + out := pr + filteredGoroutines := []Goroutine{} + for _, goroutine := range pr.Goroutines { + if goroutine.IsSpecGoroutine || goroutine.HasHighlights() { + filteredGoroutines = append(filteredGoroutines, goroutine) + } + } + out.Goroutines = filteredGoroutines + return out +} + +func (pr ProgressReport) GetTimelineLocation() TimelineLocation { + return pr.TimelineLocation +} + type Goroutine struct { ID uint64 State string @@ -643,6 +765,7 @@ const ( NodeTypeReportBeforeEach NodeTypeReportAfterEach + NodeTypeReportBeforeSuite NodeTypeReportAfterSuite NodeTypeCleanupInvalid @@ -652,9 +775,9 @@ const ( ) var NodeTypesForContainerAndIt = NodeTypeContainer | NodeTypeIt -var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite +var NodeTypesForSuiteLevelNodes = NodeTypeBeforeSuite | NodeTypeSynchronizedBeforeSuite | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite | NodeTypeCleanupAfterSuite var NodeTypesAllowedDuringCleanupInterrupt = NodeTypeAfterEach | NodeTypeJustAfterEach | NodeTypeAfterAll | NodeTypeAfterSuite | NodeTypeSynchronizedAfterSuite | NodeTypeCleanupAfterEach | NodeTypeCleanupAfterAll | NodeTypeCleanupAfterSuite -var NodeTypesAllowedDuringReportInterrupt = NodeTypeReportBeforeEach | NodeTypeReportAfterEach | NodeTypeReportAfterSuite +var NodeTypesAllowedDuringReportInterrupt = NodeTypeReportBeforeEach | NodeTypeReportAfterEach | 
NodeTypeReportBeforeSuite | NodeTypeReportAfterSuite var ntEnumSupport = NewEnumSupport(map[uint]string{ uint(NodeTypeInvalid): "INVALID NODE TYPE", @@ -672,6 +795,7 @@ var ntEnumSupport = NewEnumSupport(map[uint]string{ uint(NodeTypeSynchronizedAfterSuite): "SynchronizedAfterSuite", uint(NodeTypeReportBeforeEach): "ReportBeforeEach", uint(NodeTypeReportAfterEach): "ReportAfterEach", + uint(NodeTypeReportBeforeSuite): "ReportBeforeSuite", uint(NodeTypeReportAfterSuite): "ReportAfterSuite", uint(NodeTypeCleanupInvalid): "DeferCleanup", uint(NodeTypeCleanupAfterEach): "DeferCleanup (Each)", @@ -694,3 +818,99 @@ func (nt NodeType) MarshalJSON() ([]byte, error) { func (nt NodeType) Is(nodeTypes NodeType) bool { return nt&nodeTypes != 0 } + +/* +SpecEvent captures a variety of events that can occur when specs run. See SpecEventType for the list of available events. +*/ +type SpecEvent struct { + SpecEventType SpecEventType + + CodeLocation CodeLocation + TimelineLocation TimelineLocation + + Message string `json:",omitempty"` + Duration time.Duration `json:",omitempty"` + NodeType NodeType `json:",omitempty"` + Attempt int `json:",omitempty"` +} + +func (se SpecEvent) GetTimelineLocation() TimelineLocation { + return se.TimelineLocation +} + +func (se SpecEvent) IsOnlyVisibleAtVeryVerbose() bool { + return se.SpecEventType.Is(SpecEventByEnd | SpecEventNodeStart | SpecEventNodeEnd) +} + +func (se SpecEvent) GomegaString() string { + out := &strings.Builder{} + out.WriteString("[" + se.SpecEventType.String() + " SpecEvent] ") + if se.Message != "" { + out.WriteString("Message=") + out.WriteString(`"` + se.Message + `",`) + } + if se.Duration != 0 { + out.WriteString("Duration=" + se.Duration.String() + ",") + } + if se.NodeType != NodeTypeInvalid { + out.WriteString("NodeType=" + se.NodeType.String() + ",") + } + if se.Attempt != 0 { + out.WriteString(fmt.Sprintf("Attempt=%d", se.Attempt) + ",") + } + out.WriteString("CL=" + se.CodeLocation.String() + ",") + out.WriteString(fmt.Sprintf("TL.Offset=%d", se.TimelineLocation.Offset)) + + return out.String() +} + +type SpecEvents []SpecEvent + +func (se SpecEvents) WithType(seType SpecEventType) SpecEvents { + out := SpecEvents{} + for _, event := range se { + if event.SpecEventType.Is(seType) { + out = append(out, event) + } + } + return out +} + +type SpecEventType uint + +const ( + SpecEventInvalid SpecEventType = 0 + + SpecEventByStart SpecEventType = 1 << iota + SpecEventByEnd + SpecEventNodeStart + SpecEventNodeEnd + SpecEventSpecRepeat + SpecEventSpecRetry +) + +var seEnumSupport = NewEnumSupport(map[uint]string{ + uint(SpecEventInvalid): "INVALID SPEC EVENT", + uint(SpecEventByStart): "By", + uint(SpecEventByEnd): "By (End)", + uint(SpecEventNodeStart): "Node", + uint(SpecEventNodeEnd): "Node (End)", + uint(SpecEventSpecRepeat): "Repeat", + uint(SpecEventSpecRetry): "Retry", +}) + +func (se SpecEventType) String() string { + return seEnumSupport.String(uint(se)) +} +func (se *SpecEventType) UnmarshalJSON(b []byte) error { + out, err := seEnumSupport.UnmarshJSON(b) + *se = SpecEventType(out) + return err +} +func (se SpecEventType) MarshalJSON() ([]byte, error) { + return seEnumSupport.MarshJSON(uint(se)) +} + +func (se SpecEventType) Is(specEventTypes SpecEventType) bool { + return se&specEventTypes != 0 +} diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 7ba384a09..43066341e 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++
b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.4.0" +const VERSION = "2.9.5" diff --git a/vendor/github.com/onsi/gomega/.gitignore b/vendor/github.com/onsi/gomega/.gitignore index 5f12ff053..425d0a509 100644 --- a/vendor/github.com/onsi/gomega/.gitignore +++ b/vendor/github.com/onsi/gomega/.gitignore @@ -3,4 +3,5 @@ . .idea gomega.iml -TODO.md \ No newline at end of file +TODO +.vscode \ No newline at end of file diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index e088dc078..07a3a5a82 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,140 @@ +## 1.27.7 + +### Fixes +- fix: gcustom.MakeMatcher accepts nil as actual value (#666) [57054d5] + +### Maintenance +- update gitignore [05c1bc6] +- Bump github.com/onsi/ginkgo/v2 from 2.9.4 to 2.9.5 (#663) [7cadcf6] +- Bump golang.org/x/net from 0.9.0 to 0.10.0 (#662) [b524839] +- Bump github.com/onsi/ginkgo/v2 from 2.9.2 to 2.9.4 (#661) [5f44694] +- Bump commonmarker from 0.23.8 to 0.23.9 in /docs (#657) [05dc99a] +- Bump nokogiri from 1.14.1 to 1.14.3 in /docs (#658) [3a033d1] +- Replace deprecated NewGomegaWithT with NewWithT (#659) [a19238f] +- Bump golang.org/x/net from 0.8.0 to 0.9.0 (#656) [29ed041] +- Bump actions/setup-go from 3 to 4 (#651) [11b2080] + +## 1.27.6 + +### Fixes +- Allow collections matchers to work correctly when expected has nil elements [60e7cf3] + +### Maintenance +- updates MatchError godoc comment to also accept a Gomega matcher (#654) [67b869d] + +## 1.27.5 + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.9.1 to 2.9.2 (#653) [a215021] +- Bump github.com/go-task/slim-sprig (#652) [a26fed8] + +## 1.27.4 + +### Fixes +- improve error formatting and remove duplication of error message in Eventually/Consistently [854f075] + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.9.0 to 2.9.1 (#650) [ccebd9b] + +## 1.27.3 + +### Fixes +- format.Object now always includes err.Error() when passed an error [86d97ef] +- Fix HaveExactElements to work inside ContainElement or other collection matchers (#648) [636757e] + +### Maintenance +- Bump github.com/golang/protobuf from 1.5.2 to 1.5.3 (#649) [cc16689] +- Bump github.com/onsi/ginkgo/v2 from 2.8.4 to 2.9.0 (#646) [e783366] + +## 1.27.2 + +### Fixes +- improve poll progress message when polling a consistently that has been passing [28a319b] + +### Maintenance +- bump ginkgo +- remove tools.go hack as Ginkgo 2.8.2 automatically pulls in the cli dependencies [81443b3] + +## 1.27.1 + +### Maintenance + +- Bump golang.org/x/net from 0.6.0 to 0.7.0 (#640) [bc686cd] + +## 1.27.0 + +### Features +- Add HaveExactElements matcher (#634) [9d50783] +- update Gomega docs to discuss GinkgoHelper() [be32774] + +### Maintenance +- Bump github.com/onsi/ginkgo/v2 from 2.8.0 to 2.8.1 (#639) [296a68b] +- Bump golang.org/x/net from 0.5.0 to 0.6.0 (#638) [c2b098b] +- Bump github-pages from 227 to 228 in /docs (#636) [a9069ab] +- test: update matrix for Go 1.20 (#635) [6bd25c8] +- Bump github.com/onsi/ginkgo/v2 from 2.7.0 to 2.8.0 (#631) [5445f8b] +- Bump webrick from 1.7.0 to 1.8.1 in /docs (#630) [03e93bb] +- codeql: add ruby language (#626) [63c7d21] +- dependabot: add bundler package-ecosystem for docs (#625) [d92f963] + +## 1.26.0 + +### Features +- When a polled function returns an error, keep track of the actual and report on the matcher state of the last non-errored actual [21f3090] +- improve eventually 
failure message output [c530fb3] + +### Fixes +- fix several documentation spelling issues [e2eff1f] + + +## 1.25.0 + +### Features +- add `MustPassRepeatedly(int)` to asyncAssertion (#619) [4509f72] +- compare unwrapped errors using DeepEqual (#617) [aaeaa5d] + +### Maintenance +- Bump golang.org/x/net from 0.4.0 to 0.5.0 (#614) [c7cfea4] +- Bump github.com/onsi/ginkgo/v2 from 2.6.1 to 2.7.0 (#615) [71b8adb] +- Docs: Fix typo "MUltiple" -> "Multiple" (#616) [9351dda] +- clean up go.sum [cd1dc1d] + +## 1.24.2 + +### Fixes +- Correctly handle assertion failure panics for eventually/consistnetly "g Gomega"s in a goroutine [78f1660] +- docs:Fix typo "you an" -> "you can" (#607) [3187c1f] +- fixes issue #600 (#606) [808d192] + +### Maintenance +- Bump golang.org/x/net from 0.2.0 to 0.4.0 (#611) [6ebc0bf] +- Bump nokogiri from 1.13.9 to 1.13.10 in /docs (#612) [258cfc8] +- Bump github.com/onsi/ginkgo/v2 from 2.5.0 to 2.5.1 (#609) [e6c3eb9] + +## 1.24.1 + +### Fixes +- maintain backward compatibility for Eventually and Consisntetly's signatures [4c7df5e] +- fix small typo (#601) [ea0ebe6] + +### Maintenance +- Bump golang.org/x/net from 0.1.0 to 0.2.0 (#603) [1ba8372] +- Bump github.com/onsi/ginkgo/v2 from 2.4.0 to 2.5.0 (#602) [f9426cb] +- fix label-filter in test.yml [d795db6] +- stop running flakey tests and rely on external network dependencies in CI [7133290] + +## 1.24.0 + +### Features + +Introducting [gcustom](https://onsi.github.io/gomega/#gcustom-a-convenient-mechanism-for-buildling-custom-matchers) - a convenient mechanism for building custom matchers. + +This is an RC release for `gcustom`. The external API may be tweaked in response to feedback however it is expected to remain mostly stable. + +### Maintenance + +- Update BeComparableTo documentation [756eaa0] + ## 1.23.0 ### Features diff --git a/vendor/github.com/onsi/gomega/RELEASING.md b/vendor/github.com/onsi/gomega/RELEASING.md index 7153b9b94..9973fff49 100644 --- a/vendor/github.com/onsi/gomega/RELEASING.md +++ b/vendor/github.com/onsi/gomega/RELEASING.md @@ -5,7 +5,7 @@ A Gomega release is a tagged sha and a GitHub release. 
To cut a release: ```bash LAST_VERSION=$(git tag --sort=version:refname | tail -n1) CHANGES=$(git log --pretty=format:'- %s [%h]' HEAD...$LAST_VERSION) - echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n## Fixes\n\n## Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md + echo -e "## NEXT\n\n$CHANGES\n\n### Features\n\n### Fixes\n\n### Maintenance\n\n$(cat CHANGELOG.md)" > CHANGELOG.md ``` to update the changelog - Categorize the changes into diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go index 1a2ed877a..56bdd053b 100644 --- a/vendor/github.com/onsi/gomega/format/format.go +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -52,7 +52,7 @@ var CharactersAroundMismatchToInclude uint = 5 var contextType = reflect.TypeOf((*context.Context)(nil)).Elem() var timeType = reflect.TypeOf(time.Time{}) -//The default indentation string emitted by the format package +// The default indentation string emitted by the format package var Indent = "    " var longFormThreshold = 20 @@ -258,7 +258,11 @@ Set PrintContextObjects to true to print the content of objects implementing con func Object(object interface{}, indentation uint) string { indent := strings.Repeat(Indent, int(indentation)) value := reflect.ValueOf(object) - return fmt.Sprintf("%s<%s>: %s", indent, formatType(value), formatValue(value, indentation)) + commonRepresentation := "" + if err, ok := object.(error); ok { + commonRepresentation += "\n" + IndentString(err.Error(), indentation) + "\n" + indent + } + return fmt.Sprintf("%s<%s>: %s%s", indent, formatType(value), commonRepresentation, formatValue(value, indentation)) } /* diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index e236a40f4..82ef52445 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.23.0" +const GOMEGA_VERSION = "1.27.7" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -204,7 +204,7 @@ func Ω(actual interface{}, extra ...interface{}) Assertion { // All subsequent arguments will be required to be nil/zero. // // This is convenient if you want to make an assertion on a method/function that returns -// a value and an error - a common patter in Go. +// a value and an error - a common pattern in Go. // // For example, given a function with signature: // @@ -360,6 +360,16 @@ You can also pass additional arugments to functions that take a Gomega. The onl g.Expect(elements).To(ConsistOf(expected)) }).WithContext(ctx).WithArguments("/names", "Joe", "Jane", "Sam").Should(Succeed()) +You can ensure that you get a number of consecutive successful tries before succeeding using `MustPassRepeatedly(int)`. For example: + + count := 0 + Eventually(func() bool { + count++ + return count > 2 + }).MustPassRepeatedly(2).Should(BeTrue()) + // Because we had to wait for 2 calls that returned true + Expect(count).To(Equal(4)) + Finally, in addition to passing timeouts and a context to Eventually you can be more explicit with Eventually's chaining configuration methods: Eventually(..., "1s", "2s", ctx).Should(...) @@ -368,9 +378,9 @@ is equivalent to Eventually(...).WithTimeout(time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...)
*/ -func Eventually(args ...interface{}) AsyncAssertion { +func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() - return Default.Eventually(args...) + return Default.Eventually(actualOrCtx, args...) } // EventuallyWithOffset operates like Eventually but takes an additional @@ -382,9 +392,9 @@ func Eventually(args ...interface{}) AsyncAssertion { // `EventuallyWithOffset` specifying a timeout interval (and an optional polling interval) are // the same as `Eventually(...).WithOffset(...).WithTimeout` or // `Eventually(...).WithOffset(...).WithTimeout(...).WithPolling`. -func EventuallyWithOffset(offset int, args ...interface{}) AsyncAssertion { +func EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() - return Default.EventuallyWithOffset(offset, args...) + return Default.EventuallyWithOffset(offset, actualOrCtx, args...) } /* @@ -402,9 +412,9 @@ Consistently is useful in cases where you want to assert that something *does no This will block for 200 milliseconds and repeatedly check the channel and ensure nothing has been received. */ -func Consistently(args ...interface{}) AsyncAssertion { +func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() - return Default.Consistently(args...) + return Default.Consistently(actualOrCtx, args...) } // ConsistentlyWithOffset operates like Consistently but takes an additional @@ -413,9 +423,9 @@ func Consistently(args ...interface{}) AsyncAssertion { // // `ConsistentlyWithOffset` is the same as `Consistently(...).WithOffset` and // optional `WithTimeout` and `WithPolling`. -func ConsistentlyWithOffset(offset int, args ...interface{}) AsyncAssertion { +func ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion { ensureDefaultGomegaIsConfigured() - return Default.ConsistentlyWithOffset(offset, args...) + return Default.ConsistentlyWithOffset(offset, actualOrCtx, args...) } /* diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go index c1e4a9995..1188b0bce 100644 --- a/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -2,6 +2,7 @@ package internal import ( "context" + "errors" "fmt" "reflect" "runtime" @@ -16,10 +17,37 @@ var errInterface = reflect.TypeOf((*error)(nil)).Elem() var gomegaType = reflect.TypeOf((*types.Gomega)(nil)).Elem() var contextType = reflect.TypeOf(new(context.Context)).Elem() +type formattedGomegaError interface { + FormattedGomegaError() string +} + +type asyncPolledActualError struct { + message string +} + +func (err *asyncPolledActualError) Error() string { + return err.message +} + +func (err *asyncPolledActualError) FormattedGomegaError() string { + return err.message +} + type contextWithAttachProgressReporter interface { AttachProgressReporter(func() string) func() } +type asyncGomegaHaltExecutionError struct{} + +func (a asyncGomegaHaltExecutionError) GinkgoRecoverShouldIgnoreThisPanic() {} +func (a asyncGomegaHaltExecutionError) Error() string { + return `An assertion has failed in a goroutine. You should call + + defer GinkgoRecover() + +at the top of the goroutine that caused this panic. 
This will allow Ginkgo and Gomega to correctly capture and manage this panic.` +} + type AsyncAssertionType uint const ( @@ -44,21 +72,23 @@ type AsyncAssertion struct { actual interface{} argsToForward []interface{} - timeoutInterval time.Duration - pollingInterval time.Duration - ctx context.Context - offset int - g *Gomega + timeoutInterval time.Duration + pollingInterval time.Duration + mustPassRepeatedly int + ctx context.Context + offset int + g *Gomega } -func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, ctx context.Context, offset int) *AsyncAssertion { +func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, mustPassRepeatedly int, ctx context.Context, offset int) *AsyncAssertion { out := &AsyncAssertion{ - asyncType: asyncType, - timeoutInterval: timeoutInterval, - pollingInterval: pollingInterval, - offset: offset, - ctx: ctx, - g: g, + asyncType: asyncType, + timeoutInterval: timeoutInterval, + pollingInterval: pollingInterval, + mustPassRepeatedly: mustPassRepeatedly, + offset: offset, + ctx: ctx, + g: g, } out.actual = actualInput @@ -104,6 +134,11 @@ func (assertion *AsyncAssertion) WithArguments(argsToForward ...interface{}) typ return assertion } +func (assertion *AsyncAssertion) MustPassRepeatedly(count int) types.AsyncAssertion { + assertion.mustPassRepeatedly = count + return assertion +} + func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { assertion.g.THelper() vetOptionalDescription("Asynchronous assertion", optionalDescription...) @@ -130,7 +165,9 @@ func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interfa func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (interface{}, error) { if len(values) == 0 { - return nil, fmt.Errorf("No values were returned by the function passed to Gomega") + return nil, &asyncPolledActualError{ + message: fmt.Sprintf("The function passed to %s did not return any values", assertion.asyncType), + } } actual := values[0].Interface() @@ -153,10 +190,12 @@ func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (in continue } if i == len(values)-2 && extraType.Implements(errInterface) { - err = fmt.Errorf("function returned error: %w", extra.(error)) + err = extra.(error) } if err == nil { - err = fmt.Errorf("Unexpected non-nil/non-zero return value at index %d:\n\t<%T>: %#v", i+1, extra, extra) + err = &asyncPolledActualError{ + message: fmt.Sprintf("The function passed to %s had an unexpected non-nil/non-zero return value at index %d:\n%s", assertion.asyncType, i+1, format.Object(extra, 1)), + } } } @@ -191,6 +230,13 @@ You can learn more at https://onsi.github.io/gomega/#eventually `, assertion.asyncType, t, t.NumIn(), numProvided, have, assertion.asyncType) } +func (assertion *AsyncAssertion) invalidMustPassRepeatedlyError(reason string) error { + return fmt.Errorf(`Invalid use of MustPassRepeatedly with %s %s + +You can learn more at https://onsi.github.io/gomega/#eventually +`, assertion.asyncType, reason) +} + func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error), error) { if !assertion.actualIsFunc { return func() (interface{}, error) { return assertion.actual, nil }, nil @@ -228,8 +274,11 @@ func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error skip = 
callerSkip[0] } _, file, line, _ := runtime.Caller(skip + 1) - assertionFailure = fmt.Errorf("Assertion in callback at %s:%d failed:\n%s", file, line, message) - panic("stop execution") + assertionFailure = &asyncPolledActualError{ + message: fmt.Sprintf("The function passed to %s failed at %s:%d with:\n%s", assertion.asyncType, file, line, message), + } + // we throw an asyncGomegaHaltExecutionError so that defer GinkgoRecover() can catch this error if the user makes an assertion in a goroutine + panic(asyncGomegaHaltExecutionError{}) }))) } if takesContext { @@ -245,6 +294,13 @@ func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error return nil, assertion.argumentMismatchError(actualType, len(inValues)) } + if assertion.mustPassRepeatedly != 1 && assertion.asyncType != AsyncAssertionTypeEventually { + return nil, assertion.invalidMustPassRepeatedlyError("it can only be used with Eventually") + } + if assertion.mustPassRepeatedly < 1 { + return nil, assertion.invalidMustPassRepeatedlyError("parameter can't be < 1") + } + return func() (actual interface{}, err error) { var values []reflect.Value assertionFailure = nil @@ -326,22 +382,39 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch timeout := assertion.afterTimeout() lock := sync.Mutex{} - var matches bool - var err error + var matches, hasLastValidActual bool + var actual, lastValidActual interface{} + var actualErr, matcherErr error var oracleMatcherSaysStop bool assertion.g.THelper() - pollActual, err := assertion.buildActualPoller() - if err != nil { - assertion.g.Fail(err.Error(), 2+assertion.offset) + pollActual, buildActualPollerErr := assertion.buildActualPoller() + if buildActualPollerErr != nil { + assertion.g.Fail(buildActualPollerErr.Error(), 2+assertion.offset) return false } - value, err := pollActual() - if err == nil { - oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, value) - matches, err = assertion.pollMatcher(matcher, value) + actual, actualErr = pollActual() + if actualErr == nil { + lastValidActual = actual + hasLastValidActual = true + oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, actual) + matches, matcherErr = assertion.pollMatcher(matcher, actual) + } + + renderError := func(preamble string, err error) string { + message := "" + if pollingSignalErr, ok := AsPollingSignalError(err); ok { + message = err.Error() + for _, attachment := range pollingSignalErr.Attachments { + message += fmt.Sprintf("\n%s:\n", attachment.Description) + message += format.Object(attachment.Object, 1) + } + } else { + message = preamble + "\n" + format.Object(err, 1) + } + return message } messageGenerator := func() string { @@ -349,23 +422,53 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch lock.Lock() defer lock.Unlock() message := "" - if err != nil { - if pollingSignalErr, ok := AsPollingSignalError(err); ok && pollingSignalErr.IsStopTrying() { - message = err.Error() - for _, attachment := range pollingSignalErr.Attachments { - message += fmt.Sprintf("\n%s:\n", attachment.Description) - message += format.Object(attachment.Object, 1) + + if actualErr == nil { + if matcherErr == nil { + if desiredMatch != matches { + if desiredMatch { + message += matcher.FailureMessage(actual) + } else { + message += matcher.NegatedFailureMessage(actual) + } + } else { + if assertion.asyncType == AsyncAssertionTypeConsistently { + message += "There is no failure as the matcher passed to Consistently has not yet failed" + 
} else { + message += "There is no failure as the matcher passed to Eventually succeeded on its most recent iteration" + } } } else { - message = "Error: " + err.Error() + "\n" + format.Object(err, 1) + var fgErr formattedGomegaError + if errors.As(actualErr, &fgErr) { + message += fgErr.FormattedGomegaError() + "\n" + } else { + message += renderError(fmt.Sprintf("The matcher passed to %s returned the following error:", assertion.asyncType), matcherErr) + } } } else { - if desiredMatch { - message = matcher.FailureMessage(value) + var fgErr formattedGomegaError + if errors.As(actualErr, &fgErr) { + message += fgErr.FormattedGomegaError() + "\n" } else { - message = matcher.NegatedFailureMessage(value) + message += renderError(fmt.Sprintf("The function passed to %s returned the following error:", assertion.asyncType), actualErr) + } + if hasLastValidActual { + message += fmt.Sprintf("\nAt one point, however, the function did return successfully.\nYet, %s failed because", assertion.asyncType) + _, e := matcher.Match(lastValidActual) + if e != nil { + message += renderError(" the matcher returned the following error:", e) + } else { + message += " the matcher was not satisfied:\n" + if desiredMatch { + message += matcher.FailureMessage(lastValidActual) + } else { + message += matcher.NegatedFailureMessage(lastValidActual) + } + } } } + description := assertion.buildDescription(optionalDescription...) return fmt.Sprintf("%s%s", description, message) } @@ -384,30 +487,39 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch } } + // Used to count the number of times in a row a step passed + passedRepeatedlyCount := 0 for { var nextPoll <-chan time.Time = nil var isTryAgainAfterError = false - if pollingSignalErr, ok := AsPollingSignalError(err); ok { - if pollingSignalErr.IsStopTrying() { - fail("Told to stop trying") - return false - } - if pollingSignalErr.IsTryAgainAfter() { - nextPoll = time.After(pollingSignalErr.TryAgainDuration()) - isTryAgainAfterError = true + for _, err := range []error{actualErr, matcherErr} { + if pollingSignalErr, ok := AsPollingSignalError(err); ok { + if pollingSignalErr.IsStopTrying() { + fail("Told to stop trying") + return false + } + if pollingSignalErr.IsTryAgainAfter() { + nextPoll = time.After(pollingSignalErr.TryAgainDuration()) + isTryAgainAfterError = true + } } } - if err == nil && matches == desiredMatch { + if actualErr == nil && matcherErr == nil && matches == desiredMatch { if assertion.asyncType == AsyncAssertionTypeEventually { - return true + passedRepeatedlyCount += 1 + if passedRepeatedlyCount == assertion.mustPassRepeatedly { + return true + } } } else if !isTryAgainAfterError { if assertion.asyncType == AsyncAssertionTypeConsistently { fail("Failed") return false } + // Reset the consecutive pass count + passedRepeatedlyCount = 0 } if oracleMatcherSaysStop { @@ -425,15 +537,19 @@ func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch select { case <-nextPoll: - v, e := pollActual() + a, e := pollActual() lock.Lock() - value, err = v, e + actual, actualErr = a, e lock.Unlock() - if err == nil { - oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, value) - m, e := assertion.pollMatcher(matcher, value) + if actualErr == nil { + lock.Lock() + lastValidActual = actual + hasLastValidActual = true + lock.Unlock() + oracleMatcherSaysStop = assertion.matcherSaysStopTrying(matcher, actual) + m, e := assertion.pollMatcher(matcher, actual) lock.Lock() - matches, err = m, e + matches, 
matcherErr = m, e lock.Unlock() } case <-contextDone: diff --git a/vendor/github.com/onsi/gomega/internal/gomega.go b/vendor/github.com/onsi/gomega/internal/gomega.go index e75d2626a..de1f4f336 100644 --- a/vendor/github.com/onsi/gomega/internal/gomega.go +++ b/vendor/github.com/onsi/gomega/internal/gomega.go @@ -2,7 +2,6 @@ package internal import ( "context" - "fmt" "time" "github.com/onsi/gomega/types" @@ -53,42 +52,38 @@ func (g *Gomega) ExpectWithOffset(offset int, actual interface{}, extra ...inter return NewAssertion(actual, g, offset, extra...) } -func (g *Gomega) Eventually(args ...interface{}) types.AsyncAssertion { - return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, args...) +func (g *Gomega) Eventually(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, actualOrCtx, args...) } -func (g *Gomega) EventuallyWithOffset(offset int, args ...interface{}) types.AsyncAssertion { - return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, args...) +func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, actualOrCtx, args...) } -func (g *Gomega) Consistently(args ...interface{}) types.AsyncAssertion { - return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, args...) +func (g *Gomega) Consistently(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, actualOrCtx, args...) } -func (g *Gomega) ConsistentlyWithOffset(offset int, args ...interface{}) types.AsyncAssertion { - return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, args...) +func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { + return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, actualOrCtx, args...) 
} -func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, args ...interface{}) types.AsyncAssertion { +func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { baseOffset := 3 timeoutInterval := -time.Duration(1) pollingInterval := -time.Duration(1) intervals := []interface{}{} var ctx context.Context - if len(args) == 0 { - g.Fail(fmt.Sprintf("Call to %s is missing a value or function to poll", asyncAssertionType), offset+baseOffset) - return nil - } - actual := args[0] - startingIndex := 1 - if _, isCtx := args[0].(context.Context); isCtx && len(args) > 1 { + actual := actualOrCtx + startingIndex := 0 + if _, isCtx := actualOrCtx.(context.Context); isCtx && len(args) > 0 { // the first argument is a context, we should accept it as the context _only if_ it is **not** the only argumnent **and** the second argument is not a parseable duration // this is due to an unfortunate ambiguity in early version of Gomega in which multi-type durations are allowed after the actual - if _, err := toDuration(args[1]); err != nil { - ctx = args[0].(context.Context) - actual = args[1] - startingIndex = 2 + if _, err := toDuration(args[0]); err != nil { + ctx = actualOrCtx.(context.Context) + actual = args[0] + startingIndex = 1 } } @@ -114,7 +109,7 @@ func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offse } } - return NewAsyncAssertion(asyncAssertionType, actual, g, timeoutInterval, pollingInterval, ctx, offset) + return NewAsyncAssertion(asyncAssertionType, actual, g, timeoutInterval, pollingInterval, 1, ctx, offset) } func (g *Gomega) SetDefaultEventuallyTimeout(t time.Duration) { diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index f9d9f2aad..b832f3dba 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -27,7 +27,8 @@ func BeEquivalentTo(expected interface{}) types.GomegaMatcher { } } -// BeComparableTo uses gocmp.Equal to compare. You can pass cmp.Option as options. +// BeComparableTo uses gocmp.Equal from github.com/google/go-cmp (instead of reflect.DeepEqual) to perform a deep comparison. +// You can pass cmp.Option as options. // It is an error for actual and expected to be nil. Use BeNil() instead. func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatcher { return &matchers.BeComparableToMatcher{ @@ -86,14 +87,17 @@ func Succeed() types.GomegaMatcher { return &matchers.SucceedMatcher{} } -// MatchError succeeds if actual is a non-nil error that matches the passed in string/error. +// MatchError succeeds if actual is a non-nil error that matches the passed in +// string, error, or matcher. 
// // These are valid use-cases: // -// Expect(err).Should(MatchError("an error")) //asserts that err.Error() == "an error" -// Expect(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual) +// Expect(err).Should(MatchError("an error")) //asserts that err.Error() == "an error" +// Expect(err).Should(MatchError(SomeError)) //asserts that err == SomeError (via reflect.DeepEqual) +// Expect(err).Should(MatchError(ContainSubstring("sprocket not found"))) // asserts that err.Error() contains substring "sprocket not found" // -// It is an error for err to be nil or an object that does not implement the Error interface +// It is an error for err to be nil or an object that does not implement the +// Error interface func MatchError(expected interface{}) types.GomegaMatcher { return &matchers.MatchErrorMatcher{ Expected: expected, } } @@ -348,6 +352,20 @@ func ConsistOf(elements ...interface{}) types.GomegaMatcher { } } +// HaveExactElements succeeds if actual contains elements that precisely match the elements passed into the matcher. The ordering of the elements does matter. +// By default HaveExactElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples: +// +// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", "FooBar")) +// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", ContainSubstring("Bar"))) +// Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements(ContainSubstring("Foo"), ContainSubstring("Foo"))) +// +// Actual must be an array or slice. +func HaveExactElements(elements ...interface{}) types.GomegaMatcher { + return &matchers.HaveExactElementsMatcher{ + Elements: elements, + } +} + // ContainElements succeeds if actual contains the passed in elements. The ordering of the elements does not matter. // By default ContainElements() uses Equal() to match the elements, however custom matchers can be passed in instead.
Here are some examples: // diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go index e8ef0dee1..f69037a4f 100644 --- a/vendor/github.com/onsi/gomega/matchers/consist_of.go +++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go @@ -48,11 +48,13 @@ func neighbours(value, matcher interface{}) (bool, error) { func equalMatchersToElements(matchers []interface{}) (elements []interface{}) { for _, matcher := range matchers { - equalMatcher, ok := matcher.(*EqualMatcher) - if ok { - matcher = equalMatcher.Expected + if equalMatcher, ok := matcher.(*EqualMatcher); ok { + elements = append(elements, equalMatcher.Expected) + } else if _, ok := matcher.(*BeNilMatcher); ok { + elements = append(elements, nil) + } else { + elements = append(elements, matcher) } - elements = append(elements, matcher) } return } @@ -72,11 +74,13 @@ func flatten(elems []interface{}) []interface{} { func matchers(expectedElems []interface{}) (matchers []interface{}) { for _, e := range flatten(expectedElems) { - matcher, isMatcher := e.(omegaMatcher) - if !isMatcher { - matcher = &EqualMatcher{Expected: e} + if e == nil { + matchers = append(matchers, &BeNilMatcher{}) + } else if matcher, isMatcher := e.(omegaMatcher); isMatcher { + matchers = append(matchers, matcher) + } else { + matchers = append(matchers, &EqualMatcher{Expected: e}) } - matchers = append(matchers, matcher) } return } @@ -89,9 +93,14 @@ func presentable(elems []interface{}) interface{} { } sv := reflect.ValueOf(elems) - tt := sv.Index(0).Elem().Type() + firstEl := sv.Index(0) + if firstEl.IsNil() { + return elems + } + tt := firstEl.Elem().Type() for i := 1; i < sv.Len(); i++ { - if sv.Index(i).Elem().Type() != tt { + el := sv.Index(i) + if el.IsNil() || (sv.Index(i).Elem().Type() != tt) { return elems } } diff --git a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go new file mode 100644 index 000000000..7cce776c1 --- /dev/null +++ b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go @@ -0,0 +1,83 @@ +package matchers + +import ( + "fmt" + + "github.com/onsi/gomega/format" +) + +type mismatchFailure struct { + failure string + index int +} + +type HaveExactElementsMatcher struct { + Elements []interface{} + mismatchFailures []mismatchFailure + missingIndex int + extraIndex int +} + +func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool, err error) { + matcher.resetState() + + if isMap(actual) { + return false, fmt.Errorf("error") + } + + matchers := matchers(matcher.Elements) + values := valuesOf(actual) + + lenMatchers := len(matchers) + lenValues := len(values) + + for i := 0; i < lenMatchers || i < lenValues; i++ { + if i >= lenMatchers { + matcher.extraIndex = i + continue + } + + if i >= lenValues { + matcher.missingIndex = i + return + } + + elemMatcher := matchers[i].(omegaMatcher) + match, err := elemMatcher.Match(values[i]) + if err != nil || !match { + matcher.mismatchFailures = append(matcher.mismatchFailures, mismatchFailure{ + index: i, + failure: elemMatcher.FailureMessage(values[i]), + }) + } + } + + return matcher.missingIndex+matcher.extraIndex+len(matcher.mismatchFailures) == 0, nil +} + +func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (message string) { + message = format.Message(actual, "to have exact elements with", presentable(matcher.Elements)) + if matcher.missingIndex > 0 { + message = fmt.Sprintf("%s\nthe missing 
elements start from index %d", message, matcher.missingIndex) + } + if matcher.extraIndex > 0 { + message = fmt.Sprintf("%s\nthe extra elements start from index %d", message, matcher.extraIndex) + } + if len(matcher.mismatchFailures) != 0 { + message = fmt.Sprintf("%s\nthe mismatch indexes were:", message) + } + for _, mismatch := range matcher.mismatchFailures { + message = fmt.Sprintf("%s\n%d: %s", message, mismatch.index, mismatch.failure) + } + return +} + +func (matcher *HaveExactElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) { + return format.Message(actual, "not to contain elements", presentable(matcher.Elements)) +} + +func (matcher *HaveExactElementsMatcher) resetState() { + matcher.mismatchFailures = nil + matcher.missingIndex = 0 + matcher.extraIndex = 0 +} diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go index 5bcfdd2ad..22a1b6730 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go @@ -31,5 +31,5 @@ func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message } func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return fmt.Sprintf("Unexpected error:\n%s\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1), "occurred") + return fmt.Sprintf("Unexpected error:\n%s\n%s", format.Object(actual, 1), "occurred") } diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go index c8993a86d..827475ea5 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go @@ -25,7 +25,17 @@ func (matcher *MatchErrorMatcher) Match(actual interface{}) (success bool, err e expected := matcher.Expected if isError(expected) { - return reflect.DeepEqual(actualErr, expected) || errors.Is(actualErr, expected.(error)), nil + // first try the built-in errors.Is + if errors.Is(actualErr, expected.(error)) { + return true, nil + } + // if not, try DeepEqual along the error chain + for unwrapped := actualErr; unwrapped != nil; unwrapped = errors.Unwrap(unwrapped) { + if reflect.DeepEqual(unwrapped, expected) { + return true, nil + } + } + return false, nil } if isString(expected) { diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go index 721ed5529..327350f7b 100644 --- a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go @@ -1,11 +1,16 @@ package matchers import ( + "errors" "fmt" "github.com/onsi/gomega/format" ) +type formattedGomegaError interface { + FormattedGomegaError() string +} + type SucceedMatcher struct { } @@ -25,7 +30,11 @@ func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err erro } func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) { - return fmt.Sprintf("Expected success, but got an error:\n%s\n%s", format.Object(actual, 1), format.IndentString(actual.(error).Error(), 1)) + var fgErr formattedGomegaError + if errors.As(actual.(error), &fgErr) { + return fgErr.FormattedGomegaError() + } + return fmt.Sprintf("Expected success, but got an error:\n%s", format.Object(actual, 1)) } func (matcher *SucceedMatcher) 
NegatedFailureMessage(actual interface{}) (message string) { diff --git a/vendor/github.com/onsi/gomega/tools b/vendor/github.com/onsi/gomega/tools deleted file mode 100644 index e4195cf36..000000000 --- a/vendor/github.com/onsi/gomega/tools +++ /dev/null @@ -1,8 +0,0 @@ -//go:build tools -// +build tools - -package main - -import ( - _ "github.com/onsi/ginkgo/v2/ginkgo" -) diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go index 089505a4b..7c7adb941 100644 --- a/vendor/github.com/onsi/gomega/types/types.go +++ b/vendor/github.com/onsi/gomega/types/types.go @@ -19,11 +19,11 @@ type Gomega interface { Expect(actual interface{}, extra ...interface{}) Assertion ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion - Eventually(args ...interface{}) AsyncAssertion - EventuallyWithOffset(offset int, args ...interface{}) AsyncAssertion + Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion + EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion - Consistently(args ...interface{}) AsyncAssertion - ConsistentlyWithOffset(offset int, args ...interface{}) AsyncAssertion + Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion + ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion SetDefaultEventuallyTimeout(time.Duration) SetDefaultEventuallyPollingInterval(time.Duration) @@ -75,6 +75,7 @@ type AsyncAssertion interface { ProbeEvery(interval time.Duration) AsyncAssertion WithContext(ctx context.Context) AsyncAssertion WithArguments(argsToForward ...interface{}) AsyncAssertion + MustPassRepeatedly(count int) AsyncAssertion } // Assertions are returned by Ω and Expect and enable assertions against Gomega matchers diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go new file mode 100644 index 000000000..3fbfebf36 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -0,0 +1,218 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package inspector provides helper functions for traversal over the +// syntax trees of a package, including node filtering by type, and +// materialization of the traversal stack. +// +// During construction, the inspector does a complete traversal and +// builds a list of push/pop events and their node type. Subsequent +// method calls that request a traversal scan this list, rather than walk +// the AST, and perform type filtering using efficient bit sets. +// +// Experiments suggest the inspector's traversals are about 2.5x faster +// than ast.Inspect, but it may take around 5 traversals for this +// benefit to amortize the inspector's construction cost. +// If efficiency is the primary concern, do not use Inspector for +// one-off traversals. +package inspector + +// There are four orthogonal features in a traversal: +// 1 type filtering +// 2 pruning +// 3 postorder calls to f +// 4 stack +// Rather than offer all of them in the API, +// only a few combinations are exposed: +// - Preorder is the fastest and has fewest features, +// but is the most commonly needed traversal. +// - Nodes and WithStack both provide pruning and postorder calls, +// even though few clients need it, because supporting two versions +// is not justified. 
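Since the package comment above is fairly abstract, a compact end-to-end sketch may help. It uses only the exported API added in this file plus the standard library parser; the source snippet and file name are arbitrary:

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

const src = `package demo

func add(a, b int) int { return a + b }
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}

	// Build the push/pop event table once, then reuse it for cheap,
	// type-filtered traversals.
	in := inspector.New([]*ast.File{file})

	// Preorder with a type filter: the callback fires only for *ast.Ident nodes.
	in.Preorder([]ast.Node{(*ast.Ident)(nil)}, func(n ast.Node) {
		fmt.Println("ident:", n.(*ast.Ident).Name)
	})
}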
+// More combinations could be supported by expressing them as +// wrappers around a more generic traversal, but this was measured +// and found to degrade performance significantly (30%). + +import ( + "go/ast" +) + +// An Inspector provides methods for inspecting +// (traversing) the syntax trees of a package. +type Inspector struct { + events []event +} + +// New returns an Inspector for the specified syntax trees. +func New(files []*ast.File) *Inspector { + return &Inspector{traverse(files)} +} + +// An event represents a push or a pop +// of an ast.Node during a traversal. +type event struct { + node ast.Node + typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events + index int // index of corresponding push or pop event +} + +// TODO: Experiment with storing only the second word of event.node (unsafe.Pointer). +// Type can be recovered from the sole bit in typ. + +// Preorder visits all the nodes of the files supplied to New in +// depth-first order. It calls f(n) for each node n before it visits +// n's children. +// +// The types argument, if non-empty, enables type-based filtering of +// events. The function f is called only for nodes whose type +// matches an element of the types slice. +func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { + // Because it avoids postorder calls to f, and the pruning + // check, Preorder is almost twice as fast as Nodes. The two + // features seem to contribute similar slowdowns (~1.4x each). + + mask := maskOf(types) + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + f(ev.node) + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } +} + +// Nodes visits the nodes of the files supplied to New in depth-first +// order. It calls f(n, true) for each node n before it visits n's +// children. If f returns true, Nodes invokes f recursively for each +// of the non-nil children of the node, followed by a call of +// f(n, false). +// +// The types argument, if non-empty, enables type-based filtering of +// events. The function f is called only for nodes whose type +// matches an element of the types slice. +func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) { + mask := maskOf(types) + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.index > i { + // push + pop := ev.index + if ev.typ&mask != 0 { + if !f(ev.node, true) { + i = pop + 1 // jump to corresponding pop + 1 + continue + } + } + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them. + i = pop + continue + } + } else { + // pop + push := ev.index + if in.events[push].typ&mask != 0 { + f(ev.node, false) + } + } + i++ + } +} + +// WithStack visits nodes in a similar manner to Nodes, but it +// supplies each call to f an additional argument, the current +// traversal stack. The stack's first element is the outermost node, +// an *ast.File; its last is the innermost, n.
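The stack argument is easiest to see with a small hypothetical helper: for every call expression it scans the materialized stack outward for the nearest enclosing function declaration. It relies only on the WithStack API defined next and can be driven by the same inspector.New setup as the previous sketch:

package demo

import (
	"fmt"
	"go/ast"

	"golang.org/x/tools/go/ast/inspector"
)

// enclosingFuncOfCalls is a hypothetical helper: for each *ast.CallExpr it
// walks the stack from innermost to outermost to find the enclosing FuncDecl.
func enclosingFuncOfCalls(in *inspector.Inspector) {
	in.WithStack([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node, push bool, stack []ast.Node) bool {
		if !push {
			return true // postorder visit: nothing to do here
		}
		for i := len(stack) - 1; i >= 0; i-- {
			if fd, ok := stack[i].(*ast.FuncDecl); ok {
				fmt.Printf("call inside func %s\n", fd.Name.Name)
				break
			}
		}
		return true // proceed into the call's children
	})
}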
+func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) { + mask := maskOf(types) + var stack []ast.Node + for i := 0; i < len(in.events); { + ev := in.events[i] + if ev.index > i { + // push + pop := ev.index + stack = append(stack, ev.node) + if ev.typ&mask != 0 { + if !f(ev.node, true, stack) { + i = pop + 1 + stack = stack[:len(stack)-1] + continue + } + } + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them. + i = pop + continue + } + } else { + // pop + push := ev.index + if in.events[push].typ&mask != 0 { + f(ev.node, false, stack) + } + stack = stack[:len(stack)-1] + } + i++ + } +} + +// traverse builds the table of events representing a traversal. +func traverse(files []*ast.File) []event { + // Preallocate approximate number of events + // based on source file extent. + // This makes traverse faster by 4x (!). + var extent int + for _, f := range files { + extent += int(f.End() - f.Pos()) + } + // This estimate is based on the net/http package. + capacity := extent * 33 / 100 + if capacity > 1e6 { + capacity = 1e6 // impose some reasonable maximum + } + events := make([]event, 0, capacity) + + var stack []event + stack = append(stack, event{}) // include an extra event so file nodes have a parent + for _, f := range files { + ast.Inspect(f, func(n ast.Node) bool { + if n != nil { + // push + ev := event{ + node: n, + typ: 0, // temporarily used to accumulate type bits of subtree + index: len(events), // push event temporarily holds own index + } + stack = append(stack, ev) + events = append(events, ev) + } else { + // pop + top := len(stack) - 1 + ev := stack[top] + typ := typeOf(ev.node) + push := ev.index + parent := top - 1 + + events[push].typ = typ // set type of push + stack[parent].typ |= typ | ev.typ // parent's typ contains push and pop's typs. + events[push].index = len(events) // make push refer to pop + + stack = stack[:top] + events = append(events, ev) + } + return true + }) + } + + return events +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go new file mode 100644 index 000000000..703c81395 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -0,0 +1,229 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inspector + +// This file defines func typeOf(ast.Node) uint64.
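For orientation, a minimal sketch (not part of this patch; file contents and names are illustrative) of driving the Inspector added above: once constructed, its event table amortizes across many filtered traversals.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	fset := token.NewFileSet()
	// A tiny illustrative source file to traverse.
	f, err := parser.ParseFile(fset, "example.go", "package p\nfunc F() { G(1) }", 0)
	if err != nil {
		panic(err)
	}
	in := inspector.New([]*ast.File{f})
	// Type filtering: only *ast.CallExpr nodes reach the callback, and
	// subtrees containing no calls are skipped via the pop-event mask.
	in.Preorder([]ast.Node{(*ast.CallExpr)(nil)}, func(n ast.Node) {
		fmt.Println("call at", fset.Position(n.Pos()))
	})
}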
+// +// The initial map-based implementation was too slow; +// see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196 + +import ( + "go/ast" + "math" + + "golang.org/x/tools/internal/typeparams" +) + +const ( + nArrayType = iota + nAssignStmt + nBadDecl + nBadExpr + nBadStmt + nBasicLit + nBinaryExpr + nBlockStmt + nBranchStmt + nCallExpr + nCaseClause + nChanType + nCommClause + nComment + nCommentGroup + nCompositeLit + nDeclStmt + nDeferStmt + nEllipsis + nEmptyStmt + nExprStmt + nField + nFieldList + nFile + nForStmt + nFuncDecl + nFuncLit + nFuncType + nGenDecl + nGoStmt + nIdent + nIfStmt + nImportSpec + nIncDecStmt + nIndexExpr + nIndexListExpr + nInterfaceType + nKeyValueExpr + nLabeledStmt + nMapType + nPackage + nParenExpr + nRangeStmt + nReturnStmt + nSelectStmt + nSelectorExpr + nSendStmt + nSliceExpr + nStarExpr + nStructType + nSwitchStmt + nTypeAssertExpr + nTypeSpec + nTypeSwitchStmt + nUnaryExpr + nValueSpec +) + +// typeOf returns a distinct single-bit value that represents the type of n. +// +// Various implementations were benchmarked with BenchmarkNewInspector: +// +// GOGC=off +// - type switch 4.9-5.5ms 2.1ms +// - binary search over a sorted list of types 5.5-5.9ms 2.5ms +// - linear scan, frequency-ordered list 5.9-6.1ms 2.7ms +// - linear scan, unordered list 6.4ms 2.7ms +// - hash table 6.5ms 3.1ms +// +// A perfect hash seemed like overkill. +// +// The compiler's switch statement is the clear winner +// as it produces a binary tree in code, +// with constant conditions and good branch prediction. +// (Sadly it is the most verbose in source code.) +// Binary search suffered from poor branch prediction. +func typeOf(n ast.Node) uint64 { + // Fast path: nearly half of all nodes are identifiers. + if _, ok := n.(*ast.Ident); ok { + return 1 << nIdent + } + + // These cases include all nodes encountered by ast.Inspect. 
+ switch n.(type) { + case *ast.ArrayType: + return 1 << nArrayType + case *ast.AssignStmt: + return 1 << nAssignStmt + case *ast.BadDecl: + return 1 << nBadDecl + case *ast.BadExpr: + return 1 << nBadExpr + case *ast.BadStmt: + return 1 << nBadStmt + case *ast.BasicLit: + return 1 << nBasicLit + case *ast.BinaryExpr: + return 1 << nBinaryExpr + case *ast.BlockStmt: + return 1 << nBlockStmt + case *ast.BranchStmt: + return 1 << nBranchStmt + case *ast.CallExpr: + return 1 << nCallExpr + case *ast.CaseClause: + return 1 << nCaseClause + case *ast.ChanType: + return 1 << nChanType + case *ast.CommClause: + return 1 << nCommClause + case *ast.Comment: + return 1 << nComment + case *ast.CommentGroup: + return 1 << nCommentGroup + case *ast.CompositeLit: + return 1 << nCompositeLit + case *ast.DeclStmt: + return 1 << nDeclStmt + case *ast.DeferStmt: + return 1 << nDeferStmt + case *ast.Ellipsis: + return 1 << nEllipsis + case *ast.EmptyStmt: + return 1 << nEmptyStmt + case *ast.ExprStmt: + return 1 << nExprStmt + case *ast.Field: + return 1 << nField + case *ast.FieldList: + return 1 << nFieldList + case *ast.File: + return 1 << nFile + case *ast.ForStmt: + return 1 << nForStmt + case *ast.FuncDecl: + return 1 << nFuncDecl + case *ast.FuncLit: + return 1 << nFuncLit + case *ast.FuncType: + return 1 << nFuncType + case *ast.GenDecl: + return 1 << nGenDecl + case *ast.GoStmt: + return 1 << nGoStmt + case *ast.Ident: + return 1 << nIdent + case *ast.IfStmt: + return 1 << nIfStmt + case *ast.ImportSpec: + return 1 << nImportSpec + case *ast.IncDecStmt: + return 1 << nIncDecStmt + case *ast.IndexExpr: + return 1 << nIndexExpr + case *typeparams.IndexListExpr: + return 1 << nIndexListExpr + case *ast.InterfaceType: + return 1 << nInterfaceType + case *ast.KeyValueExpr: + return 1 << nKeyValueExpr + case *ast.LabeledStmt: + return 1 << nLabeledStmt + case *ast.MapType: + return 1 << nMapType + case *ast.Package: + return 1 << nPackage + case *ast.ParenExpr: + return 1 << nParenExpr + case *ast.RangeStmt: + return 1 << nRangeStmt + case *ast.ReturnStmt: + return 1 << nReturnStmt + case *ast.SelectStmt: + return 1 << nSelectStmt + case *ast.SelectorExpr: + return 1 << nSelectorExpr + case *ast.SendStmt: + return 1 << nSendStmt + case *ast.SliceExpr: + return 1 << nSliceExpr + case *ast.StarExpr: + return 1 << nStarExpr + case *ast.StructType: + return 1 << nStructType + case *ast.SwitchStmt: + return 1 << nSwitchStmt + case *ast.TypeAssertExpr: + return 1 << nTypeAssertExpr + case *ast.TypeSpec: + return 1 << nTypeSpec + case *ast.TypeSwitchStmt: + return 1 << nTypeSwitchStmt + case *ast.UnaryExpr: + return 1 << nUnaryExpr + case *ast.ValueSpec: + return 1 << nValueSpec + } + return 0 +} + +func maskOf(nodes []ast.Node) uint64 { + if nodes == nil { + return math.MaxUint64 // match all node types + } + var mask uint64 + for _, n := range nodes { + mask |= typeOf(n) + } + return mask +} diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto b/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto index aa08b4978..a09af750b 100644 --- a/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/generated.proto @@ -71,7 +71,7 @@ message APIResourceDiscovery { // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. // APIs may return other objects types at their discretion, such as error conditions, requests for alternate representations, or other operation specific behavior. 
- // This value will be null if an APIService reports subresources but supports no operations on the parent resource + // This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; // scope indicates the scope of a resource, either Cluster or Namespaced @@ -111,7 +111,7 @@ message APISubresourceDiscovery { optional string subresource = 1; // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. - // Some subresources do not return normal resources, these will have null return types. + // Some subresources do not return normal resources, these will have null or empty return types. optional k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind responseKind = 2; // acceptedTypes describes the kinds that this endpoint accepts. diff --git a/vendor/k8s.io/api/apidiscovery/v2beta1/types.go b/vendor/k8s.io/api/apidiscovery/v2beta1/types.go index 1aff3e370..834293773 100644 --- a/vendor/k8s.io/api/apidiscovery/v2beta1/types.go +++ b/vendor/k8s.io/api/apidiscovery/v2beta1/types.go @@ -92,7 +92,7 @@ type APIResourceDiscovery struct { Resource string `json:"resource" protobuf:"bytes,1,opt,name=resource"` // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. // APIs may return other objects types at their discretion, such as error conditions, requests for alternate representations, or other operation specific behavior. - // This value will be null if an APIService reports subresources but supports no operations on the parent resource + // This value will be null or empty if an APIService reports subresources but supports no operations on the parent resource ResponseKind *v1.GroupVersionKind `json:"responseKind,omitempty" protobuf:"bytes,2,opt,name=responseKind"` // scope indicates the scope of a resource, either Cluster or Namespaced Scope ResourceScope `json:"scope" protobuf:"bytes,3,opt,name=scope"` @@ -141,7 +141,7 @@ type APISubresourceDiscovery struct { // for this resource across all versions. Subresource string `json:"subresource" protobuf:"bytes,1,opt,name=subresource"` // responseKind describes the group, version, and kind of the serialization schema for the object type this endpoint typically returns. - // Some subresources do not return normal resources, these will have null return types. + // Some subresources do not return normal resources, these will have null or empty return types. ResponseKind *v1.GroupVersionKind `json:"responseKind,omitempty" protobuf:"bytes,2,opt,name=responseKind"` // acceptedTypes describes the kinds that this endpoint accepts. // Subresources may accept the standard content types or define diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 09144d586..5e8159731 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -464,6 +464,7 @@ message PodFailurePolicyRule { // as a list of pod condition patterns. The requirement is satisfied if at // least one pattern matches an actual pod condition. At most 20 elements are allowed. 
// +listType=atomic + // +optional repeated PodFailurePolicyOnPodConditionsPattern onPodConditions = 3; } diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index f6361391b..d298a02f2 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -190,6 +190,7 @@ type PodFailurePolicyRule struct { // as a list of pod condition patterns. The requirement is satisfied if at // least one pattern matches an actual pod condition. At most 20 elements are allowed. // +listType=atomic + // +optional OnPodConditions []PodFailurePolicyOnPodConditionsPattern `json:"onPodConditions" protobuf:"bytes,3,opt,name=onPodConditions"` } diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index d738725ca..3674914f7 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -126,14 +126,17 @@ type rudimentaryErrorBackoff struct { // OnError will block if it is called more often than the embedded period time. // This will prevent overly tight hot error loops. func (r *rudimentaryErrorBackoff) OnError(error) { + now := time.Now() // start the timer before acquiring the lock r.lastErrorTimeLock.Lock() - defer r.lastErrorTimeLock.Unlock() - d := time.Since(r.lastErrorTime) - if d < r.minPeriod { - // If the time moves backwards for any reason, do nothing - time.Sleep(r.minPeriod - d) - } + d := now.Sub(r.lastErrorTime) r.lastErrorTime = time.Now() + r.lastErrorTimeLock.Unlock() + + // Do not sleep with the lock held because that causes all callers of HandleError to block. + // We only want the current goroutine to block. + // A negative or zero duration causes time.Sleep to return immediately. + // If the time moves backwards for any reason, do nothing. + time.Sleep(r.minPeriod - d) } // GetCaller returns the caller of the function that calls it. diff --git a/vendor/k8s.io/apiserver/pkg/features/OWNERS b/vendor/k8s.io/apiserver/pkg/features/OWNERS deleted file mode 100644 index 3e1dd9f08..000000000 --- a/vendor/k8s.io/apiserver/pkg/features/OWNERS +++ /dev/null @@ -1,4 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: - - feature-approvers diff --git a/vendor/k8s.io/apiserver/pkg/features/kube_features.go b/vendor/k8s.io/apiserver/pkg/features/kube_features.go deleted file mode 100644 index aa903587d..000000000 --- a/vendor/k8s.io/apiserver/pkg/features/kube_features.go +++ /dev/null @@ -1,252 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
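The rudimentaryErrorBackoff change above moves the sleep out of the critical section. A standalone sketch of the same pattern (the limiter type is invented for illustration, not client-go API): compute the wait while holding the lock, then sleep after releasing it, so only the offending caller blocks.

package main

import (
	"fmt"
	"sync"
	"time"
)

// limiter mirrors the fixed OnError: the mutex guards only the
// timestamp bookkeeping, never the sleep itself.
type limiter struct {
	mu        sync.Mutex
	last      time.Time
	minPeriod time.Duration
}

func (l *limiter) wait() {
	now := time.Now() // read the clock before taking the lock
	l.mu.Lock()
	d := now.Sub(l.last)
	l.last = time.Now()
	l.mu.Unlock()

	// Sleeping after Unlock means concurrent callers are not serialized
	// behind one goroutine's backoff; a non-positive duration returns
	// immediately, which also covers clocks that move backwards.
	time.Sleep(l.minPeriod - d)
}

func main() {
	l := &limiter{minPeriod: 100 * time.Millisecond}
	for i := 0; i < 3; i++ {
		l.wait()
		fmt.Println("call", i, "at", time.Now().Format(time.StampMilli))
	}
}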
-*/ - -package features - -import ( - "k8s.io/apimachinery/pkg/util/runtime" - - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/component-base/featuregate" -) - -const ( - // Every feature gate should add method here following this template: - // - // // owner: @username - // // alpha: v1.4 - // MyFeature featuregate.Feature = "MyFeature" - // - // Feature gates should be listed in alphabetical, case-sensitive - // (upper before any lower case character) order. This reduces the risk - // of code conflicts because changes are more likely to be scattered - // across the file. - - // owner: @jefftree @alexzielenski - // alpha: v1.26 - // - // Enables an single HTTP endpoint /discovery/ which supports native HTTP - // caching with ETags containing all APIResources known to the apiserver. - AggregatedDiscoveryEndpoint featuregate.Feature = "AggregatedDiscoveryEndpoint" - - // owner: @smarterclayton - // alpha: v1.8 - // beta: v1.9 - // - // Allow API clients to retrieve resource lists in chunks rather than - // all at once. - APIListChunking featuregate.Feature = "APIListChunking" - - // owner: @MikeSpreitzer @yue9944882 - // alpha: v1.18 - // beta: v1.20 - // - // Enables managing request concurrency with prioritization and fairness at each server. - // The FeatureGate was introduced in release 1.15 but the feature - // was not really implemented before 1.18. - APIPriorityAndFairness featuregate.Feature = "APIPriorityAndFairness" - - // owner: @ilackams - // alpha: v1.7 - // beta: v1.16 - // - // Enables compression of REST responses (GET and LIST only) - APIResponseCompression featuregate.Feature = "APIResponseCompression" - - // owner: @roycaihw - // alpha: v1.20 - // - // Assigns each kube-apiserver an ID in a cluster. - APIServerIdentity featuregate.Feature = "APIServerIdentity" - - // owner: @dashpole - // alpha: v1.22 - // - // Add support for distributed tracing in the API Server - APIServerTracing featuregate.Feature = "APIServerTracing" - - // owner: @tallclair - // alpha: v1.7 - // beta: v1.8 - // GA: v1.12 - // - // AdvancedAuditing enables a much more general API auditing pipeline, which includes support for - // pluggable output backends and an audit policy specifying how different requests should be - // audited. - AdvancedAuditing featuregate.Feature = "AdvancedAuditing" - - // owner: @cici37 @jpbetz - // kep: http://kep.k8s.io/3488 - // alpha: v1.26 - // - // Enables expression validation in Admission Control - ValidatingAdmissionPolicy featuregate.Feature = "ValidatingAdmissionPolicy" - - // owner: @cici37 - // kep: https://kep.k8s.io/2876 - // alpha: v1.23 - // beta: v1.25 - // - // Enables expression validation for Custom Resource - CustomResourceValidationExpressions featuregate.Feature = "CustomResourceValidationExpressions" - - // owner: @apelisse - // alpha: v1.12 - // beta: v1.13 - // stable: v1.18 - // - // Allow requests to be processed but not stored, so that - // validation, merging, mutation can be tested without - // committing. - DryRun featuregate.Feature = "DryRun" - - // owner: @wojtek-t - // alpha: v1.20 - // beta: v1.21 - // GA: v1.24 - // - // Allows for updating watchcache resource version with progress notify events. - EfficientWatchResumption featuregate.Feature = "EfficientWatchResumption" - - // owner: @aramase - // kep: https://kep.k8s.io/3299 - // alpha: v1.25 - // - // Enables KMS v2 API for encryption at rest. 
- KMSv2 featuregate.Feature = "KMSv2" - - // owner: @jiahuif - // kep: https://kep.k8s.io/2887 - // alpha: v1.23 - // beta: v1.24 - // - // Enables populating "enum" field of OpenAPI schemas - // in the spec returned from kube-apiserver. - OpenAPIEnums featuregate.Feature = "OpenAPIEnums" - - // owner: @jefftree - // kep: https://kep.k8s.io/2896 - // alpha: v1.23 - // beta: v1.24 - // - // Enables kubernetes to publish OpenAPI v3 - OpenAPIV3 featuregate.Feature = "OpenAPIV3" - - // owner: @caesarxuchao - // alpha: v1.15 - // beta: v1.16 - // - // Allow apiservers to show a count of remaining items in the response - // to a chunking list request. - RemainingItemCount featuregate.Feature = "RemainingItemCount" - - // owner: @wojtek-t - // alpha: v1.16 - // beta: v1.20 - // GA: v1.24 - // - // Deprecates and removes SelfLink from ObjectMeta and ListMeta. - RemoveSelfLink featuregate.Feature = "RemoveSelfLink" - - // owner: @apelisse, @lavalamp - // alpha: v1.14 - // beta: v1.16 - // stable: v1.22 - // - // Server-side apply. Merging happens on the server. - ServerSideApply featuregate.Feature = "ServerSideApply" - - // owner: @kevindelgado - // kep: https://kep.k8s.io/2885 - // alpha: v1.23 - // beta: v1.24 - // - // Enables server-side field validation. - ServerSideFieldValidation featuregate.Feature = "ServerSideFieldValidation" - - // owner: @caesarxuchao @roycaihw - // alpha: v1.20 - // - // Enable the storage version API. - StorageVersionAPI featuregate.Feature = "StorageVersionAPI" - - // owner: @caesarxuchao - // alpha: v1.14 - // beta: v1.15 - // - // Allow apiservers to expose the storage version hash in the discovery - // document. - StorageVersionHash featuregate.Feature = "StorageVersionHash" - - // owner: @wojtek-t - // alpha: v1.15 - // beta: v1.16 - // GA: v1.17 - // - // Enables support for watch bookmark events. - WatchBookmark featuregate.Feature = "WatchBookmark" -) - -func init() { - runtime.Must(utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates)) -} - -// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. -// To add a new feature, define a key for it above and add it here. The features will be -// available throughout Kubernetes binaries. 
-var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{ - AggregatedDiscoveryEndpoint: {Default: false, PreRelease: featuregate.Alpha}, - - APIListChunking: {Default: true, PreRelease: featuregate.Beta}, - - APIPriorityAndFairness: {Default: true, PreRelease: featuregate.Beta}, - - APIResponseCompression: {Default: true, PreRelease: featuregate.Beta}, - - APIServerIdentity: {Default: true, PreRelease: featuregate.Beta}, - - APIServerTracing: {Default: false, PreRelease: featuregate.Alpha}, - - AdvancedAuditing: {Default: true, PreRelease: featuregate.GA}, - - ValidatingAdmissionPolicy: {Default: false, PreRelease: featuregate.Alpha}, - - CustomResourceValidationExpressions: {Default: true, PreRelease: featuregate.Beta}, - - DryRun: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28 - - EfficientWatchResumption: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - - KMSv2: {Default: false, PreRelease: featuregate.Alpha}, - - OpenAPIEnums: {Default: true, PreRelease: featuregate.Beta}, - - OpenAPIV3: {Default: true, PreRelease: featuregate.Beta}, - - RemainingItemCount: {Default: true, PreRelease: featuregate.Beta}, - - RemoveSelfLink: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, - - ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29 - - ServerSideFieldValidation: {Default: true, PreRelease: featuregate.Beta}, - - StorageVersionAPI: {Default: false, PreRelease: featuregate.Alpha}, - - StorageVersionHash: {Default: true, PreRelease: featuregate.Beta}, - - WatchBookmark: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, -} diff --git a/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go b/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go deleted file mode 100644 index 5911b7568..000000000 --- a/vendor/k8s.io/apiserver/pkg/util/feature/feature_gate.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package feature - -import ( - "k8s.io/component-base/featuregate" -) - -var ( - // DefaultMutableFeatureGate is a mutable version of DefaultFeatureGate. - // Only top-level commands/options setup and the k8s.io/component-base/featuregate/testing package should make use of this. - // Tests that need to modify feature gates for the duration of their test should use: - // defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features., )() - DefaultMutableFeatureGate featuregate.MutableFeatureGate = featuregate.NewFeatureGate() - - // DefaultFeatureGate is a shared global FeatureGate. - // Top-level commands/options setup that needs to modify this feature gate should use DefaultMutableFeatureGate. 
- DefaultFeatureGate featuregate.FeatureGate = DefaultMutableFeatureGate -) diff --git a/vendor/k8s.io/client-go/discovery/aggregated_discovery.go b/vendor/k8s.io/client-go/discovery/aggregated_discovery.go index 7470259dc..f72c42051 100644 --- a/vendor/k8s.io/client-go/discovery/aggregated_discovery.go +++ b/vendor/k8s.io/client-go/discovery/aggregated_discovery.go @@ -111,6 +111,8 @@ func convertAPIGroup(g apidiscovery.APIGroupDiscovery) ( return group, gvResources, failedGVs } +var emptyKind = metav1.GroupVersionKind{} + // convertAPIResource transforms an APIResourceDiscovery to an APIResource. We are // resilient to missing GVK, since this resource might be the parent resource // for a subresource. If the parent is missing a GVK, it is not returned in @@ -125,7 +127,7 @@ func convertAPIResource(in apidiscovery.APIResourceDiscovery) (metav1.APIResourc Categories: in.Categories, } var err error - if in.ResponseKind != nil { + if in.ResponseKind != nil && (*in.ResponseKind) != emptyKind { result.Group = in.ResponseKind.Group result.Version = in.ResponseKind.Version result.Kind = in.ResponseKind.Kind @@ -140,7 +142,7 @@ func convertAPIResource(in apidiscovery.APIResourceDiscovery) (metav1.APIResourc // convertAPISubresource transforms an APISubresourceDiscovery to an APIResource. func convertAPISubresource(parent metav1.APIResource, in apidiscovery.APISubresourceDiscovery) (metav1.APIResource, error) { result := metav1.APIResource{} - if in.ResponseKind == nil { + if in.ResponseKind == nil || (*in.ResponseKind) == emptyKind { return result, fmt.Errorf("subresource %s/%s missing GVK", parent.Name, in.Subresource) } result.Name = fmt.Sprintf("%s/%s", parent.Name, in.Subresource) diff --git a/vendor/k8s.io/client-go/tools/record/event.go b/vendor/k8s.io/client-go/tools/record/event.go index 998bf8dfb..926605975 100644 --- a/vendor/k8s.io/client-go/tools/record/event.go +++ b/vendor/k8s.io/client-go/tools/record/event.go @@ -344,6 +344,9 @@ func (recorder *recorderImpl) generateEvent(object runtime.Object, annotations m event := recorder.makeEvent(ref, annotations, eventtype, reason, message) event.Source = recorder.source + event.ReportingInstance = recorder.source.Host + event.ReportingController = recorder.source.Component + // NOTE: events should be a non-blocking operation, but we also need to not // put this in a goroutine, otherwise we'll race to write to a closed channel // when we go to shut down this broadcaster.
Just drop events if we get overloaded, diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go index 43374a2f7..4ef02f09f 100644 --- a/vendor/k8s.io/client-go/util/cert/cert.go +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -45,6 +45,7 @@ type Config struct { Organization []string AltNames AltNames Usages []x509.ExtKeyUsage + NotBefore time.Time } // AltNames contains the domain names and IP addresses that will be added @@ -64,6 +65,10 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro return nil, err } serial = new(big.Int).Add(serial, big.NewInt(1)) + notBefore := now.UTC() + if !cfg.NotBefore.IsZero() { + notBefore = cfg.NotBefore.UTC() + } tmpl := x509.Certificate{ SerialNumber: serial, Subject: pkix.Name{ @@ -71,7 +76,7 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro Organization: cfg.Organization, }, DNSNames: []string{cfg.CommonName}, - NotBefore: now.UTC(), + NotBefore: notBefore, NotAfter: now.Add(duration365d * 10).UTC(), KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, diff --git a/vendor/k8s.io/kube-scheduler/LICENSE b/vendor/k8s.io/kube-scheduler/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/k8s.io/kube-scheduler/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
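The NotBefore field added to cert.Config above allows backdating self-signed CAs, for example to tolerate clock skew between nodes. A hedged sketch (key size and names are illustrative, not from this patch):

package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"time"

	"k8s.io/client-go/util/cert"
)

func main() {
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	// Backdate validity by one hour; when NotBefore is left zero,
	// the current time is used, as before.
	caCert, err := cert.NewSelfSignedCACert(cert.Config{
		CommonName: "example-ca",
		NotBefore:  time.Now().Add(-time.Hour),
	}, key)
	if err != nil {
		panic(err)
	}
	fmt.Println("CA valid from", caCert.NotBefore)
}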
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/k8s.io/kube-scheduler/config/v1/doc.go b/vendor/k8s.io/kube-scheduler/config/v1/doc.go deleted file mode 100644 index bbc4641e7..000000000 --- a/vendor/k8s.io/kube-scheduler/config/v1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true -// +groupName=kubescheduler.config.k8s.io - -package v1 // import "k8s.io/kube-scheduler/config/v1" diff --git a/vendor/k8s.io/kube-scheduler/config/v1/register.go b/vendor/k8s.io/kube-scheduler/config/v1/register.go deleted file mode 100644 index 58adac5dc..000000000 --- a/vendor/k8s.io/kube-scheduler/config/v1/register.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name used in this package -const GroupName = "kubescheduler.config.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - -var ( - // SchemeBuilder is the scheme builder with scheme init functions to run for this API package - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - // AddToScheme is a global function that registers this API group & version to a scheme - AddToScheme = SchemeBuilder.AddToScheme -) - -// addKnownTypes registers known types to the given scheme -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &KubeSchedulerConfiguration{}, - &DefaultPreemptionArgs{}, - &InterPodAffinityArgs{}, - &NodeResourcesBalancedAllocationArgs{}, - &NodeResourcesFitArgs{}, - &PodTopologySpreadArgs{}, - &VolumeBindingArgs{}, - &NodeAffinityArgs{}, - ) - return nil -} diff --git a/vendor/k8s.io/kube-scheduler/config/v1/types.go b/vendor/k8s.io/kube-scheduler/config/v1/types.go deleted file mode 100644 index 703516fb7..000000000 --- a/vendor/k8s.io/kube-scheduler/config/v1/types.go +++ /dev/null @@ -1,391 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - "bytes" - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" - "sigs.k8s.io/yaml" -) - -const ( - // SchedulerDefaultLockObjectNamespace defines default scheduler lock object namespace ("kube-system") - SchedulerDefaultLockObjectNamespace string = metav1.NamespaceSystem - - // SchedulerDefaultLockObjectName defines default scheduler lock object name ("kube-scheduler") - SchedulerDefaultLockObjectName = "kube-scheduler" - - // SchedulerDefaultProviderName defines the default provider names - SchedulerDefaultProviderName = "DefaultProvider" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// KubeSchedulerConfiguration configures a scheduler -type KubeSchedulerConfiguration struct { - metav1.TypeMeta `json:",inline"` - - // Parallelism defines the amount of parallelism in algorithms for scheduling a Pods. Must be greater than 0. Defaults to 16 - Parallelism *int32 `json:"parallelism,omitempty"` - - // LeaderElection defines the configuration of leader election client. - LeaderElection componentbaseconfigv1alpha1.LeaderElectionConfiguration `json:"leaderElection"` - - // ClientConnection specifies the kubeconfig file and client connection - // settings for the proxy server to use when communicating with the apiserver. - ClientConnection componentbaseconfigv1alpha1.ClientConnectionConfiguration `json:"clientConnection"` - - // DebuggingConfiguration holds configuration for Debugging related features - // TODO: We might wanna make this a substruct like Debugging componentbaseconfigv1alpha1.DebuggingConfiguration - componentbaseconfigv1alpha1.DebuggingConfiguration `json:",inline"` - - // PercentageOfNodesToScore is the percentage of all nodes that once found feasible - // for running a pod, the scheduler stops its search for more feasible nodes in - // the cluster. This helps improve scheduler's performance. Scheduler always tries to find - // at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is. - // Example: if the cluster size is 500 nodes and the value of this flag is 30, - // then scheduler stops finding further feasible nodes once it finds 150 feasible ones. - // When the value is 0, default percentage (5%--50% based on the size of the cluster) of the - // nodes will be scored. It is overridden by profile level PercentageofNodesToScore. - PercentageOfNodesToScore *int32 `json:"percentageOfNodesToScore,omitempty"` - - // PodInitialBackoffSeconds is the initial backoff for unschedulable pods. - // If specified, it must be greater than 0. If this value is null, the default value (1s) - // will be used. - PodInitialBackoffSeconds *int64 `json:"podInitialBackoffSeconds,omitempty"` - - // PodMaxBackoffSeconds is the max backoff for unschedulable pods. - // If specified, it must be greater than podInitialBackoffSeconds. If this value is null, - // the default value (10s) will be used. - PodMaxBackoffSeconds *int64 `json:"podMaxBackoffSeconds,omitempty"` - - // Profiles are scheduling profiles that kube-scheduler supports. Pods can - // choose to be scheduled under a particular profile by setting its associated - // scheduler name. Pods that don't specify any scheduler name are scheduled - // with the "default-scheduler" profile, if present here. 
- // +listType=map - // +listMapKey=schedulerName - Profiles []KubeSchedulerProfile `json:"profiles,omitempty"` - - // Extenders are the list of scheduler extenders, each holding the values of how to communicate - // with the extender. These extenders are shared by all scheduler profiles. - // +listType=set - Extenders []Extender `json:"extenders,omitempty"` -} - -// DecodeNestedObjects decodes plugin args for known types. -func (c *KubeSchedulerConfiguration) DecodeNestedObjects(d runtime.Decoder) error { - var strictDecodingErrs []error - for i := range c.Profiles { - prof := &c.Profiles[i] - for j := range prof.PluginConfig { - err := prof.PluginConfig[j].decodeNestedObjects(d) - if err != nil { - decodingErr := fmt.Errorf("decoding .profiles[%d].pluginConfig[%d]: %w", i, j, err) - if runtime.IsStrictDecodingError(err) { - strictDecodingErrs = append(strictDecodingErrs, decodingErr) - } else { - return decodingErr - } - } - } - } - if len(strictDecodingErrs) > 0 { - return runtime.NewStrictDecodingError(strictDecodingErrs) - } - return nil -} - -// EncodeNestedObjects encodes plugin args. -func (c *KubeSchedulerConfiguration) EncodeNestedObjects(e runtime.Encoder) error { - for i := range c.Profiles { - prof := &c.Profiles[i] - for j := range prof.PluginConfig { - err := prof.PluginConfig[j].encodeNestedObjects(e) - if err != nil { - return fmt.Errorf("encoding .profiles[%d].pluginConfig[%d]: %w", i, j, err) - } - } - } - return nil -} - -// KubeSchedulerProfile is a scheduling profile. -type KubeSchedulerProfile struct { - // SchedulerName is the name of the scheduler associated to this profile. - // If SchedulerName matches with the pod's "spec.schedulerName", then the pod - // is scheduled with this profile. - SchedulerName *string `json:"schedulerName,omitempty"` - - // PercentageOfNodesToScore is the percentage of all nodes that once found feasible - // for running a pod, the scheduler stops its search for more feasible nodes in - // the cluster. This helps improve scheduler's performance. Scheduler always tries to find - // at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is. - // Example: if the cluster size is 500 nodes and the value of this flag is 30, - // then scheduler stops finding further feasible nodes once it finds 150 feasible ones. - // When the value is 0, default percentage (5%--50% based on the size of the cluster) of the - // nodes will be scored. It will override global PercentageOfNodesToScore. If it is empty, - // global PercentageOfNodesToScore will be used. - PercentageOfNodesToScore *int32 `json:"percentageOfNodesToScore,omitempty"` - - // Plugins specify the set of plugins that should be enabled or disabled. - // Enabled plugins are the ones that should be enabled in addition to the - // default plugins. Disabled plugins are any of the default plugins that - // should be disabled. - // When no enabled or disabled plugin is specified for an extension point, - // default plugins for that extension point will be used if there is any. - // If a QueueSort plugin is specified, the same QueueSort Plugin and - // PluginConfig must be specified for all profiles. - Plugins *Plugins `json:"plugins,omitempty"` - - // PluginConfig is an optional set of custom plugin arguments for each plugin. - // Omitting config args for a plugin is equivalent to using the default config - // for that plugin. 
- // +listType=map - // +listMapKey=name - PluginConfig []PluginConfig `json:"pluginConfig,omitempty"` -} - -// Plugins include multiple extension points. When specified, the list of plugins for -// a particular extension point are the only ones enabled. If an extension point is -// omitted from the config, then the default set of plugins is used for that extension point. -// Enabled plugins are called in the order specified here, after default plugins. If they need to -// be invoked before default plugins, default plugins must be disabled and re-enabled here in desired order. -type Plugins struct { - // PreEnqueue is a list of plugins that should be invoked before adding pods to the scheduling queue. - PreEnqueue PluginSet `json:"preEnqueue,omitempty"` - - // QueueSort is a list of plugins that should be invoked when sorting pods in the scheduling queue. - QueueSort PluginSet `json:"queueSort,omitempty"` - - // PreFilter is a list of plugins that should be invoked at "PreFilter" extension point of the scheduling framework. - PreFilter PluginSet `json:"preFilter,omitempty"` - - // Filter is a list of plugins that should be invoked when filtering out nodes that cannot run the Pod. - Filter PluginSet `json:"filter,omitempty"` - - // PostFilter is a list of plugins that are invoked after filtering phase, but only when no feasible nodes were found for the pod. - PostFilter PluginSet `json:"postFilter,omitempty"` - - // PreScore is a list of plugins that are invoked before scoring. - PreScore PluginSet `json:"preScore,omitempty"` - - // Score is a list of plugins that should be invoked when ranking nodes that have passed the filtering phase. - Score PluginSet `json:"score,omitempty"` - - // Reserve is a list of plugins invoked when reserving/unreserving resources - // after a node is assigned to run the pod. - Reserve PluginSet `json:"reserve,omitempty"` - - // Permit is a list of plugins that control binding of a Pod. These plugins can prevent or delay binding of a Pod. - Permit PluginSet `json:"permit,omitempty"` - - // PreBind is a list of plugins that should be invoked before a pod is bound. - PreBind PluginSet `json:"preBind,omitempty"` - - // Bind is a list of plugins that should be invoked at "Bind" extension point of the scheduling framework. - // The scheduler call these plugins in order. Scheduler skips the rest of these plugins as soon as one returns success. - Bind PluginSet `json:"bind,omitempty"` - - // PostBind is a list of plugins that should be invoked after a pod is successfully bound. - PostBind PluginSet `json:"postBind,omitempty"` - - // MultiPoint is a simplified config section to enable plugins for all valid extension points. - // Plugins enabled through MultiPoint will automatically register for every individual extension - // point the plugin has implemented. Disabling a plugin through MultiPoint disables that behavior. - // The same is true for disabling "*" through MultiPoint (no default plugins will be automatically registered). - // Plugins can still be disabled through their individual extension points. - // - // In terms of precedence, plugin config follows this basic hierarchy - // 1. Specific extension points - // 2. Explicitly configured MultiPoint plugins - // 3. The set of default plugins, as MultiPoint plugins - // This implies that a higher precedence plugin will run first and overwrite any settings within MultiPoint. - // Explicitly user-configured plugins also take a higher precedence over default plugins. 
- // Within this hierarchy, an Enabled setting takes precedence over Disabled. For example, if a plugin is - // set in both `multiPoint.Enabled` and `multiPoint.Disabled`, the plugin will be enabled. Similarly, - // including `multiPoint.Disabled = '*'` and `multiPoint.Enabled = pluginA` will still register that specific - // plugin through MultiPoint. This follows the same behavior as all other extension point configurations. - MultiPoint PluginSet `json:"multiPoint,omitempty"` -} - -// PluginSet specifies enabled and disabled plugins for an extension point. -// If an array is empty, missing, or nil, default plugins at that extension point will be used. -type PluginSet struct { - // Enabled specifies plugins that should be enabled in addition to default plugins. - // If the default plugin is also configured in the scheduler config file, the weight of plugin will - // be overridden accordingly. - // These are called after default plugins and in the same order specified here. - // +listType=atomic - Enabled []Plugin `json:"enabled,omitempty"` - // Disabled specifies default plugins that should be disabled. - // When all default plugins need to be disabled, an array containing only one "*" should be provided. - // +listType=map - // +listMapKey=name - Disabled []Plugin `json:"disabled,omitempty"` -} - -// Plugin specifies a plugin name and its weight when applicable. Weight is used only for Score plugins. -type Plugin struct { - // Name defines the name of plugin - Name string `json:"name"` - // Weight defines the weight of plugin, only used for Score plugins. - Weight *int32 `json:"weight,omitempty"` -} - -// PluginConfig specifies arguments that should be passed to a plugin at the time of initialization. -// A plugin that is invoked at multiple extension points is initialized once. Args can have arbitrary structure. -// It is up to the plugin to process these Args. -type PluginConfig struct { - // Name defines the name of plugin being configured - Name string `json:"name"` - // Args defines the arguments passed to the plugins at the time of initialization. Args can have arbitrary structure. - Args runtime.RawExtension `json:"args,omitempty"` -} - -func (c *PluginConfig) decodeNestedObjects(d runtime.Decoder) error { - gvk := SchemeGroupVersion.WithKind(c.Name + "Args") - // dry-run to detect and skip out-of-tree plugin args. - if _, _, err := d.Decode(nil, &gvk, nil); runtime.IsNotRegisteredError(err) { - return nil - } - - var strictDecodingErr error - obj, parsedGvk, err := d.Decode(c.Args.Raw, &gvk, nil) - if err != nil { - decodingArgsErr := fmt.Errorf("decoding args for plugin %s: %w", c.Name, err) - if obj != nil && runtime.IsStrictDecodingError(err) { - strictDecodingErr = runtime.NewStrictDecodingError([]error{decodingArgsErr}) - } else { - return decodingArgsErr - } - } - if parsedGvk.GroupKind() != gvk.GroupKind() { - return fmt.Errorf("args for plugin %s were not of type %s, got %s", c.Name, gvk.GroupKind(), parsedGvk.GroupKind()) - } - c.Args.Object = obj - return strictDecodingErr -} - -func (c *PluginConfig) encodeNestedObjects(e runtime.Encoder) error { - if c.Args.Object == nil { - return nil - } - var buf bytes.Buffer - err := e.Encode(c.Args.Object, &buf) - if err != nil { - return err - } - // The encoder might be a YAML encoder, but the parent encoder expects - // JSON output, so we convert YAML back to JSON. - // This is a no-op if produces JSON. 
- json, err := yaml.YAMLToJSON(buf.Bytes()) - if err != nil { - return err - } - c.Args.Raw = json - return nil -} - -// Extender holds the parameters used to communicate with the extender. If a verb is unspecified/empty, -// it is assumed that the extender chose not to provide that extension. -type Extender struct { - // URLPrefix at which the extender is available - URLPrefix string `json:"urlPrefix"` - // Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender. - FilterVerb string `json:"filterVerb,omitempty"` - // Verb for the preempt call, empty if not supported. This verb is appended to the URLPrefix when issuing the preempt call to extender. - PreemptVerb string `json:"preemptVerb,omitempty"` - // Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender. - PrioritizeVerb string `json:"prioritizeVerb,omitempty"` - // The numeric multiplier for the node scores that the prioritize call generates. - // The weight should be a positive integer - Weight int64 `json:"weight,omitempty"` - // Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender. - // If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender - // can implement this function. - BindVerb string `json:"bindVerb,omitempty"` - // EnableHTTPS specifies whether https should be used to communicate with the extender - EnableHTTPS bool `json:"enableHTTPS,omitempty"` - // TLSConfig specifies the transport layer security config - TLSConfig *ExtenderTLSConfig `json:"tlsConfig,omitempty"` - // HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize - // timeout is ignored, k8s/other extenders priorities are used to select the node. - HTTPTimeout metav1.Duration `json:"httpTimeout,omitempty"` - // NodeCacheCapable specifies that the extender is capable of caching node information, - // so the scheduler should only send minimal information about the eligible nodes - // assuming that the extender already cached full details of all nodes in the cluster - NodeCacheCapable bool `json:"nodeCacheCapable,omitempty"` - // ManagedResources is a list of extended resources that are managed by - // this extender. - // - A pod will be sent to the extender on the Filter, Prioritize and Bind - // (if the extender is the binder) phases iff the pod requests at least - // one of the extended resources in this list. If empty or unspecified, - // all pods will be sent to this extender. - // - If IgnoredByScheduler is set to true for a resource, kube-scheduler - // will skip checking the resource in predicates. - // +optional - // +listType=atomic - ManagedResources []ExtenderManagedResource `json:"managedResources,omitempty"` - // Ignorable specifies if the extender is ignorable, i.e. scheduling should not - // fail when the extender returns an error or is not reachable. - Ignorable bool `json:"ignorable,omitempty"` -} - -// ExtenderManagedResource describes the arguments of extended resources -// managed by an extender. -type ExtenderManagedResource struct { - // Name is the extended resource name. - Name string `json:"name"` - // IgnoredByScheduler indicates whether kube-scheduler should ignore this - // resource when applying predicates. 
- IgnoredByScheduler bool `json:"ignoredByScheduler,omitempty"` -} - -// ExtenderTLSConfig contains settings to enable TLS with extender -type ExtenderTLSConfig struct { - // Server should be accessed without verifying the TLS certificate. For testing only. - Insecure bool `json:"insecure,omitempty"` - // ServerName is passed to the server for SNI and is used in the client to check server - // certificates against. If ServerName is empty, the hostname used to contact the - // server is used. - ServerName string `json:"serverName,omitempty"` - - // Server requires TLS client certificate authentication - CertFile string `json:"certFile,omitempty"` - // Server requires TLS client certificate authentication - KeyFile string `json:"keyFile,omitempty"` - // Trusted root certificates for server - CAFile string `json:"caFile,omitempty"` - - // CertData holds PEM-encoded bytes (typically read from a client certificate file). - // CertData takes precedence over CertFile - // +listType=atomic - CertData []byte `json:"certData,omitempty"` - // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). - // KeyData takes precedence over KeyFile - // +listType=atomic - KeyData []byte `json:"keyData,omitempty"` - // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). - // CAData takes precedence over CAFile - // +listType=atomic - CAData []byte `json:"caData,omitempty"` -} diff --git a/vendor/k8s.io/kube-scheduler/config/v1/types_pluginargs.go b/vendor/k8s.io/kube-scheduler/config/v1/types_pluginargs.go deleted file mode 100644 index 2698a4769..000000000 --- a/vendor/k8s.io/kube-scheduler/config/v1/types_pluginargs.go +++ /dev/null @@ -1,225 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DefaultPreemptionArgs holds arguments used to configure the -// DefaultPreemption plugin. -type DefaultPreemptionArgs struct { - metav1.TypeMeta `json:",inline"` - - // MinCandidateNodesPercentage is the minimum number of candidates to - // shortlist when dry running preemption as a percentage of number of nodes. - // Must be in the range [0, 100]. Defaults to 10% of the cluster size if - // unspecified. - MinCandidateNodesPercentage *int32 `json:"minCandidateNodesPercentage,omitempty"` - // MinCandidateNodesAbsolute is the absolute minimum number of candidates to - // shortlist. The likely number of candidates enumerated for dry running - // preemption is given by the formula: - // numCandidates = max(numNodes * minCandidateNodesPercentage, minCandidateNodesAbsolute) - // We say "likely" because there are other factors such as PDB violations - // that play a role in the number of candidates shortlisted. Must be at least - // 0 nodes. Defaults to 100 nodes if unspecified. 
- MinCandidateNodesAbsolute *int32 `json:"minCandidateNodesAbsolute,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// InterPodAffinityArgs holds arguments used to configure the InterPodAffinity plugin. -type InterPodAffinityArgs struct { - metav1.TypeMeta `json:",inline"` - - // HardPodAffinityWeight is the scoring weight for existing pods with a - // matching hard affinity to the incoming pod. - HardPodAffinityWeight *int32 `json:"hardPodAffinityWeight,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeResourcesFitArgs holds arguments used to configure the NodeResourcesFit plugin. -type NodeResourcesFitArgs struct { - metav1.TypeMeta `json:",inline"` - - // IgnoredResources is the list of resources that NodeResources fit filter - // should ignore. This doesn't apply to scoring. - // +listType=atomic - IgnoredResources []string `json:"ignoredResources,omitempty"` - // IgnoredResourceGroups defines the list of resource groups that NodeResources fit filter should ignore. - // e.g. if group is ["example.com"], it will ignore all resource names that begin - // with "example.com", such as "example.com/aaa" and "example.com/bbb". - // A resource group name can't contain '/'. This doesn't apply to scoring. - // +listType=atomic - IgnoredResourceGroups []string `json:"ignoredResourceGroups,omitempty"` - - // ScoringStrategy selects the node resource scoring strategy. - // The default strategy is LeastAllocated with an equal "cpu" and "memory" weight. - ScoringStrategy *ScoringStrategy `json:"scoringStrategy,omitempty"` -} - -// PodTopologySpreadConstraintsDefaulting defines how to set default constraints -// for the PodTopologySpread plugin. -type PodTopologySpreadConstraintsDefaulting string - -const ( - // SystemDefaulting instructs to use the kubernetes defined default. - SystemDefaulting PodTopologySpreadConstraintsDefaulting = "System" - // ListDefaulting instructs to use the config provided default. - ListDefaulting PodTopologySpreadConstraintsDefaulting = "List" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodTopologySpreadArgs holds arguments used to configure the PodTopologySpread plugin. -type PodTopologySpreadArgs struct { - metav1.TypeMeta `json:",inline"` - - // DefaultConstraints defines topology spread constraints to be applied to - // Pods that don't define any in `pod.spec.topologySpreadConstraints`. - // `.defaultConstraints[*].labelSelectors` must be empty, as they are - // deduced from the Pod's membership to Services, ReplicationControllers, - // ReplicaSets or StatefulSets. - // When not empty, .defaultingType must be "List". - // +optional - // +listType=atomic - DefaultConstraints []corev1.TopologySpreadConstraint `json:"defaultConstraints,omitempty"` - - // DefaultingType determines how .defaultConstraints are deduced. Can be one - // of "System" or "List". - // - // - "System": Use kubernetes defined constraints that spread Pods among - // Nodes and Zones. - // - "List": Use constraints defined in .defaultConstraints. - // - // Defaults to "System". - // +optional - DefaultingType PodTopologySpreadConstraintsDefaulting `json:"defaultingType,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeResourcesBalancedAllocationArgs holds arguments used to configure NodeResourcesBalancedAllocation plugin. 
-type NodeResourcesBalancedAllocationArgs struct { - metav1.TypeMeta `json:",inline"` - - // Resources to be managed, the default is "cpu" and "memory" if not specified. - // +listType=map - // +listMapKey=name - Resources []ResourceSpec `json:"resources,omitempty"` -} - -// UtilizationShapePoint represents single point of priority function shape. -type UtilizationShapePoint struct { - // Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100. - Utilization int32 `json:"utilization"` - // Score assigned to given utilization (y axis). Valid values are 0 to 10. - Score int32 `json:"score"` -} - -// ResourceSpec represents a single resource. -type ResourceSpec struct { - // Name of the resource. - Name string `json:"name"` - // Weight of the resource. - Weight int64 `json:"weight,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeBindingArgs holds arguments used to configure the VolumeBinding plugin. -type VolumeBindingArgs struct { - metav1.TypeMeta `json:",inline"` - - // BindTimeoutSeconds is the timeout in seconds in volume binding operation. - // Value must be non-negative integer. The value zero indicates no waiting. - // If this value is nil, the default value (600) will be used. - BindTimeoutSeconds *int64 `json:"bindTimeoutSeconds,omitempty"` - - // Shape specifies the points defining the score function shape, which is - // used to score nodes based on the utilization of statically provisioned - // PVs. The utilization is calculated by dividing the total requested - // storage of the pod by the total capacity of feasible PVs on each node. - // Each point contains utilization (ranges from 0 to 100) and its - // associated score (ranges from 0 to 10). You can turn the priority by - // specifying different scores for different utilization numbers. - // The default shape points are: - // 1) 0 for 0 utilization - // 2) 10 for 100 utilization - // All points must be sorted in increasing order by utilization. - // +featureGate=VolumeCapacityPriority - // +optional - // +listType=atomic - Shape []UtilizationShapePoint `json:"shape,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeAffinityArgs holds arguments to configure the NodeAffinity plugin. -type NodeAffinityArgs struct { - metav1.TypeMeta `json:",inline"` - - // AddedAffinity is applied to all Pods additionally to the NodeAffinity - // specified in the PodSpec. That is, Nodes need to satisfy AddedAffinity - // AND .spec.NodeAffinity. AddedAffinity is empty by default (all Nodes - // match). - // When AddedAffinity is used, some Pods with affinity requirements that match - // a specific Node (such as Daemonset Pods) might remain unschedulable. - // +optional - AddedAffinity *corev1.NodeAffinity `json:"addedAffinity,omitempty"` -} - -// ScoringStrategyType the type of scoring strategy used in NodeResourcesFit plugin. -type ScoringStrategyType string - -const ( - // LeastAllocated strategy prioritizes nodes with least allocated resources. - LeastAllocated ScoringStrategyType = "LeastAllocated" - // MostAllocated strategy prioritizes nodes with most allocated resources. - MostAllocated ScoringStrategyType = "MostAllocated" - // RequestedToCapacityRatio strategy allows specifying a custom shape function - // to score nodes based on the request to capacity ratio. 
- RequestedToCapacityRatio ScoringStrategyType = "RequestedToCapacityRatio" -) - -// ScoringStrategy define ScoringStrategyType for node resource plugin -type ScoringStrategy struct { - // Type selects which strategy to run. - Type ScoringStrategyType `json:"type,omitempty"` - - // Resources to consider when scoring. - // The default resource set includes "cpu" and "memory" with an equal weight. - // Allowed weights go from 1 to 100. - // Weight defaults to 1 if not specified or explicitly set to 0. - // +listType=map - // +listMapKey=topologyKey - Resources []ResourceSpec `json:"resources,omitempty"` - - // Arguments specific to RequestedToCapacityRatio strategy. - RequestedToCapacityRatio *RequestedToCapacityRatioParam `json:"requestedToCapacityRatio,omitempty"` -} - -// RequestedToCapacityRatioParam define RequestedToCapacityRatio parameters -type RequestedToCapacityRatioParam struct { - // Shape is a list of points defining the scoring function shape. - // +listType=atomic - Shape []UtilizationShapePoint `json:"shape,omitempty"` -} diff --git a/vendor/k8s.io/kube-scheduler/config/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kube-scheduler/config/v1/zz_generated.deepcopy.go deleted file mode 100644 index 1203cdd3b..000000000 --- a/vendor/k8s.io/kube-scheduler/config/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,609 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - corev1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DefaultPreemptionArgs) DeepCopyInto(out *DefaultPreemptionArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.MinCandidateNodesPercentage != nil { - in, out := &in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage - *out = new(int32) - **out = **in - } - if in.MinCandidateNodesAbsolute != nil { - in, out := &in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultPreemptionArgs. -func (in *DefaultPreemptionArgs) DeepCopy() *DefaultPreemptionArgs { - if in == nil { - return nil - } - out := new(DefaultPreemptionArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DefaultPreemptionArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Extender) DeepCopyInto(out *Extender) { - *out = *in - if in.TLSConfig != nil { - in, out := &in.TLSConfig, &out.TLSConfig - *out = new(ExtenderTLSConfig) - (*in).DeepCopyInto(*out) - } - out.HTTPTimeout = in.HTTPTimeout - if in.ManagedResources != nil { - in, out := &in.ManagedResources, &out.ManagedResources - *out = make([]ExtenderManagedResource, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extender. -func (in *Extender) DeepCopy() *Extender { - if in == nil { - return nil - } - out := new(Extender) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderManagedResource) DeepCopyInto(out *ExtenderManagedResource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderManagedResource. -func (in *ExtenderManagedResource) DeepCopy() *ExtenderManagedResource { - if in == nil { - return nil - } - out := new(ExtenderManagedResource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderTLSConfig) DeepCopyInto(out *ExtenderTLSConfig) { - *out = *in - if in.CertData != nil { - in, out := &in.CertData, &out.CertData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.KeyData != nil { - in, out := &in.KeyData, &out.KeyData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.CAData != nil { - in, out := &in.CAData, &out.CAData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderTLSConfig. -func (in *ExtenderTLSConfig) DeepCopy() *ExtenderTLSConfig { - if in == nil { - return nil - } - out := new(ExtenderTLSConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InterPodAffinityArgs) DeepCopyInto(out *InterPodAffinityArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.HardPodAffinityWeight != nil { - in, out := &in.HardPodAffinityWeight, &out.HardPodAffinityWeight - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterPodAffinityArgs. -func (in *InterPodAffinityArgs) DeepCopy() *InterPodAffinityArgs { - if in == nil { - return nil - } - out := new(InterPodAffinityArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *InterPodAffinityArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *KubeSchedulerConfiguration) DeepCopyInto(out *KubeSchedulerConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Parallelism != nil { - in, out := &in.Parallelism, &out.Parallelism - *out = new(int32) - **out = **in - } - in.LeaderElection.DeepCopyInto(&out.LeaderElection) - out.ClientConnection = in.ClientConnection - in.DebuggingConfiguration.DeepCopyInto(&out.DebuggingConfiguration) - if in.PercentageOfNodesToScore != nil { - in, out := &in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore - *out = new(int32) - **out = **in - } - if in.PodInitialBackoffSeconds != nil { - in, out := &in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds - *out = new(int64) - **out = **in - } - if in.PodMaxBackoffSeconds != nil { - in, out := &in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds - *out = new(int64) - **out = **in - } - if in.Profiles != nil { - in, out := &in.Profiles, &out.Profiles - *out = make([]KubeSchedulerProfile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Extenders != nil { - in, out := &in.Extenders, &out.Extenders - *out = make([]Extender, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfiguration. -func (in *KubeSchedulerConfiguration) DeepCopy() *KubeSchedulerConfiguration { - if in == nil { - return nil - } - out := new(KubeSchedulerConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubeSchedulerConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeSchedulerProfile) DeepCopyInto(out *KubeSchedulerProfile) { - *out = *in - if in.SchedulerName != nil { - in, out := &in.SchedulerName, &out.SchedulerName - *out = new(string) - **out = **in - } - if in.PercentageOfNodesToScore != nil { - in, out := &in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore - *out = new(int32) - **out = **in - } - if in.Plugins != nil { - in, out := &in.Plugins, &out.Plugins - *out = new(Plugins) - (*in).DeepCopyInto(*out) - } - if in.PluginConfig != nil { - in, out := &in.PluginConfig, &out.PluginConfig - *out = make([]PluginConfig, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerProfile. -func (in *KubeSchedulerProfile) DeepCopy() *KubeSchedulerProfile { - if in == nil { - return nil - } - out := new(KubeSchedulerProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeAffinityArgs) DeepCopyInto(out *NodeAffinityArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.AddedAffinity != nil { - in, out := &in.AddedAffinity, &out.AddedAffinity - *out = new(corev1.NodeAffinity) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinityArgs. 
-func (in *NodeAffinityArgs) DeepCopy() *NodeAffinityArgs { - if in == nil { - return nil - } - out := new(NodeAffinityArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeAffinityArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeResourcesBalancedAllocationArgs) DeepCopyInto(out *NodeResourcesBalancedAllocationArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]ResourceSpec, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesBalancedAllocationArgs. -func (in *NodeResourcesBalancedAllocationArgs) DeepCopy() *NodeResourcesBalancedAllocationArgs { - if in == nil { - return nil - } - out := new(NodeResourcesBalancedAllocationArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeResourcesBalancedAllocationArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeResourcesFitArgs) DeepCopyInto(out *NodeResourcesFitArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.IgnoredResources != nil { - in, out := &in.IgnoredResources, &out.IgnoredResources - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.IgnoredResourceGroups != nil { - in, out := &in.IgnoredResourceGroups, &out.IgnoredResourceGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ScoringStrategy != nil { - in, out := &in.ScoringStrategy, &out.ScoringStrategy - *out = new(ScoringStrategy) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesFitArgs. -func (in *NodeResourcesFitArgs) DeepCopy() *NodeResourcesFitArgs { - if in == nil { - return nil - } - out := new(NodeResourcesFitArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeResourcesFitArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Plugin) DeepCopyInto(out *Plugin) { - *out = *in - if in.Weight != nil { - in, out := &in.Weight, &out.Weight - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugin. -func (in *Plugin) DeepCopy() *Plugin { - if in == nil { - return nil - } - out := new(Plugin) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PluginConfig) DeepCopyInto(out *PluginConfig) { - *out = *in - in.Args.DeepCopyInto(&out.Args) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfig. 
-func (in *PluginConfig) DeepCopy() *PluginConfig { - if in == nil { - return nil - } - out := new(PluginConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PluginSet) DeepCopyInto(out *PluginSet) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = make([]Plugin, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Disabled != nil { - in, out := &in.Disabled, &out.Disabled - *out = make([]Plugin, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSet. -func (in *PluginSet) DeepCopy() *PluginSet { - if in == nil { - return nil - } - out := new(PluginSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Plugins) DeepCopyInto(out *Plugins) { - *out = *in - in.PreEnqueue.DeepCopyInto(&out.PreEnqueue) - in.QueueSort.DeepCopyInto(&out.QueueSort) - in.PreFilter.DeepCopyInto(&out.PreFilter) - in.Filter.DeepCopyInto(&out.Filter) - in.PostFilter.DeepCopyInto(&out.PostFilter) - in.PreScore.DeepCopyInto(&out.PreScore) - in.Score.DeepCopyInto(&out.Score) - in.Reserve.DeepCopyInto(&out.Reserve) - in.Permit.DeepCopyInto(&out.Permit) - in.PreBind.DeepCopyInto(&out.PreBind) - in.Bind.DeepCopyInto(&out.Bind) - in.PostBind.DeepCopyInto(&out.PostBind) - in.MultiPoint.DeepCopyInto(&out.MultiPoint) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugins. -func (in *Plugins) DeepCopy() *Plugins { - if in == nil { - return nil - } - out := new(Plugins) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodTopologySpreadArgs) DeepCopyInto(out *PodTopologySpreadArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.DefaultConstraints != nil { - in, out := &in.DefaultConstraints, &out.DefaultConstraints - *out = make([]corev1.TopologySpreadConstraint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTopologySpreadArgs. -func (in *PodTopologySpreadArgs) DeepCopy() *PodTopologySpreadArgs { - if in == nil { - return nil - } - out := new(PodTopologySpreadArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodTopologySpreadArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RequestedToCapacityRatioParam) DeepCopyInto(out *RequestedToCapacityRatioParam) { - *out = *in - if in.Shape != nil { - in, out := &in.Shape, &out.Shape - *out = make([]UtilizationShapePoint, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestedToCapacityRatioParam. 
-func (in *RequestedToCapacityRatioParam) DeepCopy() *RequestedToCapacityRatioParam { - if in == nil { - return nil - } - out := new(RequestedToCapacityRatioParam) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec. -func (in *ResourceSpec) DeepCopy() *ResourceSpec { - if in == nil { - return nil - } - out := new(ResourceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScoringStrategy) DeepCopyInto(out *ScoringStrategy) { - *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]ResourceSpec, len(*in)) - copy(*out, *in) - } - if in.RequestedToCapacityRatio != nil { - in, out := &in.RequestedToCapacityRatio, &out.RequestedToCapacityRatio - *out = new(RequestedToCapacityRatioParam) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScoringStrategy. -func (in *ScoringStrategy) DeepCopy() *ScoringStrategy { - if in == nil { - return nil - } - out := new(ScoringStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UtilizationShapePoint) DeepCopyInto(out *UtilizationShapePoint) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UtilizationShapePoint. -func (in *UtilizationShapePoint) DeepCopy() *UtilizationShapePoint { - if in == nil { - return nil - } - out := new(UtilizationShapePoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeBindingArgs) DeepCopyInto(out *VolumeBindingArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.BindTimeoutSeconds != nil { - in, out := &in.BindTimeoutSeconds, &out.BindTimeoutSeconds - *out = new(int64) - **out = **in - } - if in.Shape != nil { - in, out := &in.Shape, &out.Shape - *out = make([]UtilizationShapePoint, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeBindingArgs. -func (in *VolumeBindingArgs) DeepCopy() *VolumeBindingArgs { - if in == nil { - return nil - } - out := new(VolumeBindingArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeBindingArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/vendor/k8s.io/kube-scheduler/config/v1beta2/doc.go b/vendor/k8s.io/kube-scheduler/config/v1beta2/doc.go deleted file mode 100644 index c9f5f62ed..000000000 --- a/vendor/k8s.io/kube-scheduler/config/v1beta2/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true -// +groupName=kubescheduler.config.k8s.io - -package v1beta2 // import "k8s.io/kube-scheduler/config/v1beta2" diff --git a/vendor/k8s.io/kube-scheduler/config/v1beta2/register.go b/vendor/k8s.io/kube-scheduler/config/v1beta2/register.go deleted file mode 100644 index 59fc014a9..000000000 --- a/vendor/k8s.io/kube-scheduler/config/v1beta2/register.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta2 - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name used in this package -const GroupName = "kubescheduler.config.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"} - -var ( - // SchemeBuilder is the scheme builder with scheme init functions to run for this API package - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - // AddToScheme is a global function that registers this API group & version to a scheme - AddToScheme = SchemeBuilder.AddToScheme -) - -// addKnownTypes registers known types to the given scheme -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &KubeSchedulerConfiguration{}, - &DefaultPreemptionArgs{}, - &InterPodAffinityArgs{}, - &NodeResourcesBalancedAllocationArgs{}, - &NodeResourcesFitArgs{}, - &PodTopologySpreadArgs{}, - &VolumeBindingArgs{}, - &NodeAffinityArgs{}, - ) - return nil -} diff --git a/vendor/k8s.io/kube-scheduler/config/v1beta2/types.go b/vendor/k8s.io/kube-scheduler/config/v1beta2/types.go deleted file mode 100644 index 0e47967ad..000000000 --- a/vendor/k8s.io/kube-scheduler/config/v1beta2/types.go +++ /dev/null @@ -1,369 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta2 - -import ( - "bytes" - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" - "sigs.k8s.io/yaml" -) - -const ( - // SchedulerDefaultLockObjectNamespace defines default scheduler lock object namespace ("kube-system") - SchedulerDefaultLockObjectNamespace string = metav1.NamespaceSystem - - // SchedulerDefaultLockObjectName defines default scheduler lock object name ("kube-scheduler") - SchedulerDefaultLockObjectName = "kube-scheduler" - - // SchedulerDefaultProviderName defines the default provider names - SchedulerDefaultProviderName = "DefaultProvider" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// KubeSchedulerConfiguration configures a scheduler -type KubeSchedulerConfiguration struct { - metav1.TypeMeta `json:",inline"` - - // Parallelism defines the amount of parallelism in algorithms for scheduling a Pods. Must be greater than 0. Defaults to 16 - Parallelism *int32 `json:"parallelism,omitempty"` - - // LeaderElection defines the configuration of leader election client. - LeaderElection componentbaseconfigv1alpha1.LeaderElectionConfiguration `json:"leaderElection"` - - // ClientConnection specifies the kubeconfig file and client connection - // settings for the proxy server to use when communicating with the apiserver. - ClientConnection componentbaseconfigv1alpha1.ClientConnectionConfiguration `json:"clientConnection"` - - // Note: Both HealthzBindAddress and MetricsBindAddress fields are deprecated. - // Only empty address or port 0 is allowed. Anything else will fail validation. - // HealthzBindAddress is the IP address and port for the health check server to serve on. - HealthzBindAddress *string `json:"healthzBindAddress,omitempty"` - // MetricsBindAddress is the IP address and port for the metrics server to serve on. - MetricsBindAddress *string `json:"metricsBindAddress,omitempty"` - - // DebuggingConfiguration holds configuration for Debugging related features - // TODO: We might wanna make this a substruct like Debugging componentbaseconfigv1alpha1.DebuggingConfiguration - componentbaseconfigv1alpha1.DebuggingConfiguration `json:",inline"` - - // PercentageOfNodesToScore is the percentage of all nodes that once found feasible - // for running a pod, the scheduler stops its search for more feasible nodes in - // the cluster. This helps improve scheduler's performance. Scheduler always tries to find - // at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is. - // Example: if the cluster size is 500 nodes and the value of this flag is 30, - // then scheduler stops finding further feasible nodes once it finds 150 feasible ones. - // When the value is 0, default percentage (5%--50% based on the size of the cluster) of the - // nodes will be scored. - PercentageOfNodesToScore *int32 `json:"percentageOfNodesToScore,omitempty"` - - // PodInitialBackoffSeconds is the initial backoff for unschedulable pods. - // If specified, it must be greater than 0. If this value is null, the default value (1s) - // will be used. - PodInitialBackoffSeconds *int64 `json:"podInitialBackoffSeconds,omitempty"` - - // PodMaxBackoffSeconds is the max backoff for unschedulable pods. - // If specified, it must be greater than podInitialBackoffSeconds. If this value is null, - // the default value (10s) will be used. 
- PodMaxBackoffSeconds *int64 `json:"podMaxBackoffSeconds,omitempty"` - - // Profiles are scheduling profiles that kube-scheduler supports. Pods can - // choose to be scheduled under a particular profile by setting its associated - // scheduler name. Pods that don't specify any scheduler name are scheduled - // with the "default-scheduler" profile, if present here. - // +listType=map - // +listMapKey=schedulerName - Profiles []KubeSchedulerProfile `json:"profiles,omitempty"` - - // Extenders are the list of scheduler extenders, each holding the values of how to communicate - // with the extender. These extenders are shared by all scheduler profiles. - // +listType=set - Extenders []Extender `json:"extenders,omitempty"` -} - -// DecodeNestedObjects decodes plugin args for known types. -func (c *KubeSchedulerConfiguration) DecodeNestedObjects(d runtime.Decoder) error { - var strictDecodingErrs []error - for i := range c.Profiles { - prof := &c.Profiles[i] - for j := range prof.PluginConfig { - err := prof.PluginConfig[j].decodeNestedObjects(d) - if err != nil { - decodingErr := fmt.Errorf("decoding .profiles[%d].pluginConfig[%d]: %w", i, j, err) - if runtime.IsStrictDecodingError(err) { - strictDecodingErrs = append(strictDecodingErrs, decodingErr) - } else { - return decodingErr - } - } - } - } - if len(strictDecodingErrs) > 0 { - return runtime.NewStrictDecodingError(strictDecodingErrs) - } - return nil -} - -// EncodeNestedObjects encodes plugin args. -func (c *KubeSchedulerConfiguration) EncodeNestedObjects(e runtime.Encoder) error { - for i := range c.Profiles { - prof := &c.Profiles[i] - for j := range prof.PluginConfig { - err := prof.PluginConfig[j].encodeNestedObjects(e) - if err != nil { - return fmt.Errorf("encoding .profiles[%d].pluginConfig[%d]: %w", i, j, err) - } - } - } - return nil -} - -// KubeSchedulerProfile is a scheduling profile. -type KubeSchedulerProfile struct { - // SchedulerName is the name of the scheduler associated to this profile. - // If SchedulerName matches with the pod's "spec.schedulerName", then the pod - // is scheduled with this profile. - SchedulerName *string `json:"schedulerName,omitempty"` - - // Plugins specify the set of plugins that should be enabled or disabled. - // Enabled plugins are the ones that should be enabled in addition to the - // default plugins. Disabled plugins are any of the default plugins that - // should be disabled. - // When no enabled or disabled plugin is specified for an extension point, - // default plugins for that extension point will be used if there is any. - // If a QueueSort plugin is specified, the same QueueSort Plugin and - // PluginConfig must be specified for all profiles. - Plugins *Plugins `json:"plugins,omitempty"` - - // PluginConfig is an optional set of custom plugin arguments for each plugin. - // Omitting config args for a plugin is equivalent to using the default config - // for that plugin. - // +listType=map - // +listMapKey=name - PluginConfig []PluginConfig `json:"pluginConfig,omitempty"` -} - -// Plugins include multiple extension points. When specified, the list of plugins for -// a particular extension point are the only ones enabled. If an extension point is -// omitted from the config, then the default set of plugins is used for that extension point. -// Enabled plugins are called in the order specified here, after default plugins. If they need to -// be invoked before default plugins, default plugins must be disabled and re-enabled here in desired order. 
-type Plugins struct { - // PreEnqueue is a list of plugins that should be invoked before adding pods to the scheduling queue. - PreEnqueue PluginSet `json:"preEnqueue,omitempty"` - - // QueueSort is a list of plugins that should be invoked when sorting pods in the scheduling queue. - QueueSort PluginSet `json:"queueSort,omitempty"` - - // PreFilter is a list of plugins that should be invoked at "PreFilter" extension point of the scheduling framework. - PreFilter PluginSet `json:"preFilter,omitempty"` - - // Filter is a list of plugins that should be invoked when filtering out nodes that cannot run the Pod. - Filter PluginSet `json:"filter,omitempty"` - - // PostFilter is a list of plugins that are invoked after filtering phase, but only when no feasible nodes were found for the pod. - PostFilter PluginSet `json:"postFilter,omitempty"` - - // PreScore is a list of plugins that are invoked before scoring. - PreScore PluginSet `json:"preScore,omitempty"` - - // Score is a list of plugins that should be invoked when ranking nodes that have passed the filtering phase. - Score PluginSet `json:"score,omitempty"` - - // Reserve is a list of plugins invoked when reserving/unreserving resources - // after a node is assigned to run the pod. - Reserve PluginSet `json:"reserve,omitempty"` - - // Permit is a list of plugins that control binding of a Pod. These plugins can prevent or delay binding of a Pod. - Permit PluginSet `json:"permit,omitempty"` - - // PreBind is a list of plugins that should be invoked before a pod is bound. - PreBind PluginSet `json:"preBind,omitempty"` - - // Bind is a list of plugins that should be invoked at "Bind" extension point of the scheduling framework. - // The scheduler call these plugins in order. Scheduler skips the rest of these plugins as soon as one returns success. - Bind PluginSet `json:"bind,omitempty"` - - // PostBind is a list of plugins that should be invoked after a pod is successfully bound. - PostBind PluginSet `json:"postBind,omitempty"` - - // MultiPoint is a simplified config section to enable plugins for all valid extension points. - MultiPoint PluginSet `json:"multiPoint,omitempty"` -} - -// PluginSet specifies enabled and disabled plugins for an extension point. -// If an array is empty, missing, or nil, default plugins at that extension point will be used. -type PluginSet struct { - // Enabled specifies plugins that should be enabled in addition to default plugins. - // If the default plugin is also configured in the scheduler config file, the weight of plugin will - // be overridden accordingly. - // These are called after default plugins and in the same order specified here. - // +listType=atomic - Enabled []Plugin `json:"enabled,omitempty"` - // Disabled specifies default plugins that should be disabled. - // When all default plugins need to be disabled, an array containing only one "*" should be provided. - // +listType=map - // +listMapKey=name - Disabled []Plugin `json:"disabled,omitempty"` -} - -// Plugin specifies a plugin name and its weight when applicable. Weight is used only for Score plugins. -type Plugin struct { - // Name defines the name of plugin - Name string `json:"name"` - // Weight defines the weight of plugin, only used for Score plugins. - Weight *int32 `json:"weight,omitempty"` -} - -// PluginConfig specifies arguments that should be passed to a plugin at the time of initialization. -// A plugin that is invoked at multiple extension points is initialized once. Args can have arbitrary structure. 
-// It is up to the plugin to process these Args. -type PluginConfig struct { - // Name defines the name of plugin being configured - Name string `json:"name"` - // Args defines the arguments passed to the plugins at the time of initialization. Args can have arbitrary structure. - Args runtime.RawExtension `json:"args,omitempty"` -} - -func (c *PluginConfig) decodeNestedObjects(d runtime.Decoder) error { - gvk := SchemeGroupVersion.WithKind(c.Name + "Args") - // dry-run to detect and skip out-of-tree plugin args. - if _, _, err := d.Decode(nil, &gvk, nil); runtime.IsNotRegisteredError(err) { - return nil - } - - var strictDecodingErr error - obj, parsedGvk, err := d.Decode(c.Args.Raw, &gvk, nil) - if err != nil { - decodingArgsErr := fmt.Errorf("decoding args for plugin %s: %w", c.Name, err) - if obj != nil && runtime.IsStrictDecodingError(err) { - strictDecodingErr = runtime.NewStrictDecodingError([]error{decodingArgsErr}) - } else { - return decodingArgsErr - } - } - if parsedGvk.GroupKind() != gvk.GroupKind() { - return fmt.Errorf("args for plugin %s were not of type %s, got %s", c.Name, gvk.GroupKind(), parsedGvk.GroupKind()) - } - c.Args.Object = obj - return strictDecodingErr -} - -func (c *PluginConfig) encodeNestedObjects(e runtime.Encoder) error { - if c.Args.Object == nil { - return nil - } - var buf bytes.Buffer - err := e.Encode(c.Args.Object, &buf) - if err != nil { - return err - } - // The encoder might be a YAML encoder, but the parent encoder expects - // JSON output, so we convert YAML back to JSON. - // This is a no-op if produces JSON. - json, err := yaml.YAMLToJSON(buf.Bytes()) - if err != nil { - return err - } - c.Args.Raw = json - return nil -} - -// Extender holds the parameters used to communicate with the extender. If a verb is unspecified/empty, -// it is assumed that the extender chose not to provide that extension. -type Extender struct { - // URLPrefix at which the extender is available - URLPrefix string `json:"urlPrefix"` - // Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender. - FilterVerb string `json:"filterVerb,omitempty"` - // Verb for the preempt call, empty if not supported. This verb is appended to the URLPrefix when issuing the preempt call to extender. - PreemptVerb string `json:"preemptVerb,omitempty"` - // Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender. - PrioritizeVerb string `json:"prioritizeVerb,omitempty"` - // The numeric multiplier for the node scores that the prioritize call generates. - // The weight should be a positive integer - Weight int64 `json:"weight,omitempty"` - // Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender. - // If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender - // can implement this function. - BindVerb string `json:"bindVerb,omitempty"` - // EnableHTTPS specifies whether https should be used to communicate with the extender - EnableHTTPS bool `json:"enableHTTPS,omitempty"` - // TLSConfig specifies the transport layer security config - TLSConfig *ExtenderTLSConfig `json:"tlsConfig,omitempty"` - // HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. 
- // Prioritize timeout is ignored, k8s/other extenders priorities are used to select the node.
- HTTPTimeout metav1.Duration `json:"httpTimeout,omitempty"`
- // NodeCacheCapable specifies that the extender is capable of caching node information,
- // so the scheduler should only send minimal information about the eligible nodes
- // assuming that the extender already cached full details of all nodes in the cluster
- NodeCacheCapable bool `json:"nodeCacheCapable,omitempty"`
- // ManagedResources is a list of extended resources that are managed by
- // this extender.
- // - A pod will be sent to the extender on the Filter, Prioritize and Bind
- //   (if the extender is the binder) phases iff the pod requests at least
- //   one of the extended resources in this list. If empty or unspecified,
- //   all pods will be sent to this extender.
- // - If IgnoredByScheduler is set to true for a resource, kube-scheduler
- //   will skip checking the resource in predicates.
- // +optional
- // +listType=atomic
- ManagedResources []ExtenderManagedResource `json:"managedResources,omitempty"`
- // Ignorable specifies if the extender is ignorable, i.e. scheduling should not
- // fail when the extender returns an error or is not reachable.
- Ignorable bool `json:"ignorable,omitempty"`
-}
-
-// ExtenderManagedResource describes the arguments of extended resources
-// managed by an extender.
-type ExtenderManagedResource struct {
- // Name is the extended resource name.
- Name string `json:"name"`
- // IgnoredByScheduler indicates whether kube-scheduler should ignore this
- // resource when applying predicates.
- IgnoredByScheduler bool `json:"ignoredByScheduler,omitempty"`
-}
-
-// ExtenderTLSConfig contains settings to enable TLS with extender
-type ExtenderTLSConfig struct {
- // Server should be accessed without verifying the TLS certificate. For testing only.
- Insecure bool `json:"insecure,omitempty"`
- // ServerName is passed to the server for SNI and is used in the client to check server
- // certificates against. If ServerName is empty, the hostname used to contact the
- // server is used.
- ServerName string `json:"serverName,omitempty"`
-
- // Server requires TLS client certificate authentication
- CertFile string `json:"certFile,omitempty"`
- // Server requires TLS client certificate authentication
- KeyFile string `json:"keyFile,omitempty"`
- // Trusted root certificates for server
- CAFile string `json:"caFile,omitempty"`
-
- // CertData holds PEM-encoded bytes (typically read from a client certificate file).
- // CertData takes precedence over CertFile
- CertData []byte `json:"certData,omitempty"`
- // KeyData holds PEM-encoded bytes (typically read from a client certificate key file).
- // KeyData takes precedence over KeyFile
- KeyData []byte `json:"keyData,omitempty"`
- // CAData holds PEM-encoded bytes (typically read from a root certificates bundle).
- // CAData takes precedence over CAFile
- CAData []byte `json:"caData,omitempty"`
-}
diff --git a/vendor/k8s.io/kube-scheduler/config/v1beta2/types_pluginargs.go b/vendor/k8s.io/kube-scheduler/config/v1beta2/types_pluginargs.go
deleted file mode 100644
index 3ab964f42..000000000
--- a/vendor/k8s.io/kube-scheduler/config/v1beta2/types_pluginargs.go
+++ /dev/null
@@ -1,225 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta2 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DefaultPreemptionArgs holds arguments used to configure the -// DefaultPreemption plugin. -type DefaultPreemptionArgs struct { - metav1.TypeMeta `json:",inline"` - - // MinCandidateNodesPercentage is the minimum number of candidates to - // shortlist when dry running preemption as a percentage of number of nodes. - // Must be in the range [0, 100]. Defaults to 10% of the cluster size if - // unspecified. - MinCandidateNodesPercentage *int32 `json:"minCandidateNodesPercentage,omitempty"` - // MinCandidateNodesAbsolute is the absolute minimum number of candidates to - // shortlist. The likely number of candidates enumerated for dry running - // preemption is given by the formula: - // numCandidates = max(numNodes * minCandidateNodesPercentage, minCandidateNodesAbsolute) - // We say "likely" because there are other factors such as PDB violations - // that play a role in the number of candidates shortlisted. Must be at least - // 0 nodes. Defaults to 100 nodes if unspecified. - MinCandidateNodesAbsolute *int32 `json:"minCandidateNodesAbsolute,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// InterPodAffinityArgs holds arguments used to configure the InterPodAffinity plugin. -type InterPodAffinityArgs struct { - metav1.TypeMeta `json:",inline"` - - // HardPodAffinityWeight is the scoring weight for existing pods with a - // matching hard affinity to the incoming pod. - HardPodAffinityWeight *int32 `json:"hardPodAffinityWeight,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeResourcesFitArgs holds arguments used to configure the NodeResourcesFit plugin. -type NodeResourcesFitArgs struct { - metav1.TypeMeta `json:",inline"` - - // IgnoredResources is the list of resources that NodeResources fit filter - // should ignore. This doesn't apply to scoring. - // +listType=atomic - IgnoredResources []string `json:"ignoredResources,omitempty"` - // IgnoredResourceGroups defines the list of resource groups that NodeResources fit filter should ignore. - // e.g. if group is ["example.com"], it will ignore all resource names that begin - // with "example.com", such as "example.com/aaa" and "example.com/bbb". - // A resource group name can't contain '/'. This doesn't apply to scoring. - // +listType=atomic - IgnoredResourceGroups []string `json:"ignoredResourceGroups,omitempty"` - - // ScoringStrategy selects the node resource scoring strategy. - // The default strategy is LeastAllocated with an equal "cpu" and "memory" weight. - ScoringStrategy *ScoringStrategy `json:"scoringStrategy,omitempty"` -} - -// PodTopologySpreadConstraintsDefaulting defines how to set default constraints -// for the PodTopologySpread plugin. -type PodTopologySpreadConstraintsDefaulting string - -const ( - // SystemDefaulting instructs to use the kubernetes defined default. 
- SystemDefaulting PodTopologySpreadConstraintsDefaulting = "System" - // ListDefaulting instructs to use the config provided default. - ListDefaulting PodTopologySpreadConstraintsDefaulting = "List" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodTopologySpreadArgs holds arguments used to configure the PodTopologySpread plugin. -type PodTopologySpreadArgs struct { - metav1.TypeMeta `json:",inline"` - - // DefaultConstraints defines topology spread constraints to be applied to - // Pods that don't define any in `pod.spec.topologySpreadConstraints`. - // `.defaultConstraints[*].labelSelectors` must be empty, as they are - // deduced from the Pod's membership to Services, ReplicationControllers, - // ReplicaSets or StatefulSets. - // When not empty, .defaultingType must be "List". - // +optional - // +listType=atomic - DefaultConstraints []corev1.TopologySpreadConstraint `json:"defaultConstraints,omitempty"` - - // DefaultingType determines how .defaultConstraints are deduced. Can be one - // of "System" or "List". - // - // - "System": Use kubernetes defined constraints that spread Pods among - // Nodes and Zones. - // - "List": Use constraints defined in .defaultConstraints. - // - // Defaults to "System". - // +optional - DefaultingType PodTopologySpreadConstraintsDefaulting `json:"defaultingType,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeResourcesBalancedAllocationArgs holds arguments used to configure NodeResourcesBalancedAllocation plugin. -type NodeResourcesBalancedAllocationArgs struct { - metav1.TypeMeta `json:",inline"` - - // Resources to be managed, the default is "cpu" and "memory" if not specified. - // +listType=map - // +listMapKey=name - Resources []ResourceSpec `json:"resources,omitempty"` -} - -// UtilizationShapePoint represents single point of priority function shape. -type UtilizationShapePoint struct { - // Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100. - Utilization int32 `json:"utilization"` - // Score assigned to given utilization (y axis). Valid values are 0 to 10. - Score int32 `json:"score"` -} - -// ResourceSpec represents a single resource. -type ResourceSpec struct { - // Name of the resource. - Name string `json:"name"` - // Weight of the resource. - Weight int64 `json:"weight,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeBindingArgs holds arguments used to configure the VolumeBinding plugin. -type VolumeBindingArgs struct { - metav1.TypeMeta `json:",inline"` - - // BindTimeoutSeconds is the timeout in seconds in volume binding operation. - // Value must be non-negative integer. The value zero indicates no waiting. - // If this value is nil, the default value (600) will be used. - BindTimeoutSeconds *int64 `json:"bindTimeoutSeconds,omitempty"` - - // Shape specifies the points defining the score function shape, which is - // used to score nodes based on the utilization of statically provisioned - // PVs. The utilization is calculated by dividing the total requested - // storage of the pod by the total capacity of feasible PVs on each node. - // Each point contains utilization (ranges from 0 to 100) and its - // associated score (ranges from 0 to 10). You can turn the priority by - // specifying different scores for different utilization numbers. 
- // The default shape points are: - // 1) 0 for 0 utilization - // 2) 10 for 100 utilization - // All points must be sorted in increasing order by utilization. - // +featureGate=VolumeCapacityPriority - // +optional - // +listType=atomic - Shape []UtilizationShapePoint `json:"shape,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeAffinityArgs holds arguments to configure the NodeAffinity plugin. -type NodeAffinityArgs struct { - metav1.TypeMeta `json:",inline"` - - // AddedAffinity is applied to all Pods additionally to the NodeAffinity - // specified in the PodSpec. That is, Nodes need to satisfy AddedAffinity - // AND .spec.NodeAffinity. AddedAffinity is empty by default (all Nodes - // match). - // When AddedAffinity is used, some Pods with affinity requirements that match - // a specific Node (such as Daemonset Pods) might remain unschedulable. - // +optional - AddedAffinity *corev1.NodeAffinity `json:"addedAffinity,omitempty"` -} - -// ScoringStrategyType the type of scoring strategy used in NodeResourcesFit plugin. -type ScoringStrategyType string - -const ( - // LeastAllocated strategy prioritizes nodes with least allocated resources. - LeastAllocated ScoringStrategyType = "LeastAllocated" - // MostAllocated strategy prioritizes nodes with most allocated resources. - MostAllocated ScoringStrategyType = "MostAllocated" - // RequestedToCapacityRatio strategy allows specifying a custom shape function - // to score nodes based on the request to capacity ratio. - RequestedToCapacityRatio ScoringStrategyType = "RequestedToCapacityRatio" -) - -// ScoringStrategy define ScoringStrategyType for node resource plugin -type ScoringStrategy struct { - // Type selects which strategy to run. - Type ScoringStrategyType `json:"type,omitempty"` - - // Resources to consider when scoring. - // The default resource set includes "cpu" and "memory" with an equal weight. - // Allowed weights go from 1 to 100. - // Weight defaults to 1 if not specified or explicitly set to 0. - // +listType=map - // +listMapKey=topologyKey - Resources []ResourceSpec `json:"resources,omitempty"` - - // Arguments specific to RequestedToCapacityRatio strategy. - RequestedToCapacityRatio *RequestedToCapacityRatioParam `json:"requestedToCapacityRatio,omitempty"` -} - -// RequestedToCapacityRatioParam define RequestedToCapacityRatio parameters -type RequestedToCapacityRatioParam struct { - // Shape is a list of points defining the scoring function shape. - // +listType=atomic - Shape []UtilizationShapePoint `json:"shape,omitempty"` -} diff --git a/vendor/k8s.io/kube-scheduler/config/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/kube-scheduler/config/v1beta2/zz_generated.deepcopy.go deleted file mode 100644 index 7ffacf0f3..000000000 --- a/vendor/k8s.io/kube-scheduler/config/v1beta2/zz_generated.deepcopy.go +++ /dev/null @@ -1,614 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta2 - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DefaultPreemptionArgs) DeepCopyInto(out *DefaultPreemptionArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.MinCandidateNodesPercentage != nil { - in, out := &in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage - *out = new(int32) - **out = **in - } - if in.MinCandidateNodesAbsolute != nil { - in, out := &in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultPreemptionArgs. -func (in *DefaultPreemptionArgs) DeepCopy() *DefaultPreemptionArgs { - if in == nil { - return nil - } - out := new(DefaultPreemptionArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DefaultPreemptionArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Extender) DeepCopyInto(out *Extender) { - *out = *in - if in.TLSConfig != nil { - in, out := &in.TLSConfig, &out.TLSConfig - *out = new(ExtenderTLSConfig) - (*in).DeepCopyInto(*out) - } - out.HTTPTimeout = in.HTTPTimeout - if in.ManagedResources != nil { - in, out := &in.ManagedResources, &out.ManagedResources - *out = make([]ExtenderManagedResource, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extender. -func (in *Extender) DeepCopy() *Extender { - if in == nil { - return nil - } - out := new(Extender) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderManagedResource) DeepCopyInto(out *ExtenderManagedResource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderManagedResource. -func (in *ExtenderManagedResource) DeepCopy() *ExtenderManagedResource { - if in == nil { - return nil - } - out := new(ExtenderManagedResource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderTLSConfig) DeepCopyInto(out *ExtenderTLSConfig) { - *out = *in - if in.CertData != nil { - in, out := &in.CertData, &out.CertData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.KeyData != nil { - in, out := &in.KeyData, &out.KeyData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.CAData != nil { - in, out := &in.CAData, &out.CAData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderTLSConfig. -func (in *ExtenderTLSConfig) DeepCopy() *ExtenderTLSConfig { - if in == nil { - return nil - } - out := new(ExtenderTLSConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *InterPodAffinityArgs) DeepCopyInto(out *InterPodAffinityArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.HardPodAffinityWeight != nil { - in, out := &in.HardPodAffinityWeight, &out.HardPodAffinityWeight - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterPodAffinityArgs. -func (in *InterPodAffinityArgs) DeepCopy() *InterPodAffinityArgs { - if in == nil { - return nil - } - out := new(InterPodAffinityArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *InterPodAffinityArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeSchedulerConfiguration) DeepCopyInto(out *KubeSchedulerConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Parallelism != nil { - in, out := &in.Parallelism, &out.Parallelism - *out = new(int32) - **out = **in - } - in.LeaderElection.DeepCopyInto(&out.LeaderElection) - out.ClientConnection = in.ClientConnection - if in.HealthzBindAddress != nil { - in, out := &in.HealthzBindAddress, &out.HealthzBindAddress - *out = new(string) - **out = **in - } - if in.MetricsBindAddress != nil { - in, out := &in.MetricsBindAddress, &out.MetricsBindAddress - *out = new(string) - **out = **in - } - in.DebuggingConfiguration.DeepCopyInto(&out.DebuggingConfiguration) - if in.PercentageOfNodesToScore != nil { - in, out := &in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore - *out = new(int32) - **out = **in - } - if in.PodInitialBackoffSeconds != nil { - in, out := &in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds - *out = new(int64) - **out = **in - } - if in.PodMaxBackoffSeconds != nil { - in, out := &in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds - *out = new(int64) - **out = **in - } - if in.Profiles != nil { - in, out := &in.Profiles, &out.Profiles - *out = make([]KubeSchedulerProfile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Extenders != nil { - in, out := &in.Extenders, &out.Extenders - *out = make([]Extender, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfiguration. -func (in *KubeSchedulerConfiguration) DeepCopy() *KubeSchedulerConfiguration { - if in == nil { - return nil - } - out := new(KubeSchedulerConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubeSchedulerConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *KubeSchedulerProfile) DeepCopyInto(out *KubeSchedulerProfile) { - *out = *in - if in.SchedulerName != nil { - in, out := &in.SchedulerName, &out.SchedulerName - *out = new(string) - **out = **in - } - if in.Plugins != nil { - in, out := &in.Plugins, &out.Plugins - *out = new(Plugins) - (*in).DeepCopyInto(*out) - } - if in.PluginConfig != nil { - in, out := &in.PluginConfig, &out.PluginConfig - *out = make([]PluginConfig, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerProfile. -func (in *KubeSchedulerProfile) DeepCopy() *KubeSchedulerProfile { - if in == nil { - return nil - } - out := new(KubeSchedulerProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeAffinityArgs) DeepCopyInto(out *NodeAffinityArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.AddedAffinity != nil { - in, out := &in.AddedAffinity, &out.AddedAffinity - *out = new(v1.NodeAffinity) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinityArgs. -func (in *NodeAffinityArgs) DeepCopy() *NodeAffinityArgs { - if in == nil { - return nil - } - out := new(NodeAffinityArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeAffinityArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeResourcesBalancedAllocationArgs) DeepCopyInto(out *NodeResourcesBalancedAllocationArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]ResourceSpec, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesBalancedAllocationArgs. -func (in *NodeResourcesBalancedAllocationArgs) DeepCopy() *NodeResourcesBalancedAllocationArgs { - if in == nil { - return nil - } - out := new(NodeResourcesBalancedAllocationArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeResourcesBalancedAllocationArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeResourcesFitArgs) DeepCopyInto(out *NodeResourcesFitArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.IgnoredResources != nil { - in, out := &in.IgnoredResources, &out.IgnoredResources - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.IgnoredResourceGroups != nil { - in, out := &in.IgnoredResourceGroups, &out.IgnoredResourceGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ScoringStrategy != nil { - in, out := &in.ScoringStrategy, &out.ScoringStrategy - *out = new(ScoringStrategy) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesFitArgs. 
-func (in *NodeResourcesFitArgs) DeepCopy() *NodeResourcesFitArgs { - if in == nil { - return nil - } - out := new(NodeResourcesFitArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeResourcesFitArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Plugin) DeepCopyInto(out *Plugin) { - *out = *in - if in.Weight != nil { - in, out := &in.Weight, &out.Weight - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugin. -func (in *Plugin) DeepCopy() *Plugin { - if in == nil { - return nil - } - out := new(Plugin) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PluginConfig) DeepCopyInto(out *PluginConfig) { - *out = *in - in.Args.DeepCopyInto(&out.Args) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfig. -func (in *PluginConfig) DeepCopy() *PluginConfig { - if in == nil { - return nil - } - out := new(PluginConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PluginSet) DeepCopyInto(out *PluginSet) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = make([]Plugin, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Disabled != nil { - in, out := &in.Disabled, &out.Disabled - *out = make([]Plugin, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSet. -func (in *PluginSet) DeepCopy() *PluginSet { - if in == nil { - return nil - } - out := new(PluginSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Plugins) DeepCopyInto(out *Plugins) { - *out = *in - in.PreEnqueue.DeepCopyInto(&out.PreEnqueue) - in.QueueSort.DeepCopyInto(&out.QueueSort) - in.PreFilter.DeepCopyInto(&out.PreFilter) - in.Filter.DeepCopyInto(&out.Filter) - in.PostFilter.DeepCopyInto(&out.PostFilter) - in.PreScore.DeepCopyInto(&out.PreScore) - in.Score.DeepCopyInto(&out.Score) - in.Reserve.DeepCopyInto(&out.Reserve) - in.Permit.DeepCopyInto(&out.Permit) - in.PreBind.DeepCopyInto(&out.PreBind) - in.Bind.DeepCopyInto(&out.Bind) - in.PostBind.DeepCopyInto(&out.PostBind) - in.MultiPoint.DeepCopyInto(&out.MultiPoint) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugins. -func (in *Plugins) DeepCopy() *Plugins { - if in == nil { - return nil - } - out := new(Plugins) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodTopologySpreadArgs) DeepCopyInto(out *PodTopologySpreadArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.DefaultConstraints != nil { - in, out := &in.DefaultConstraints, &out.DefaultConstraints - *out = make([]v1.TopologySpreadConstraint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTopologySpreadArgs. -func (in *PodTopologySpreadArgs) DeepCopy() *PodTopologySpreadArgs { - if in == nil { - return nil - } - out := new(PodTopologySpreadArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodTopologySpreadArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RequestedToCapacityRatioParam) DeepCopyInto(out *RequestedToCapacityRatioParam) { - *out = *in - if in.Shape != nil { - in, out := &in.Shape, &out.Shape - *out = make([]UtilizationShapePoint, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestedToCapacityRatioParam. -func (in *RequestedToCapacityRatioParam) DeepCopy() *RequestedToCapacityRatioParam { - if in == nil { - return nil - } - out := new(RequestedToCapacityRatioParam) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec. -func (in *ResourceSpec) DeepCopy() *ResourceSpec { - if in == nil { - return nil - } - out := new(ResourceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScoringStrategy) DeepCopyInto(out *ScoringStrategy) { - *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]ResourceSpec, len(*in)) - copy(*out, *in) - } - if in.RequestedToCapacityRatio != nil { - in, out := &in.RequestedToCapacityRatio, &out.RequestedToCapacityRatio - *out = new(RequestedToCapacityRatioParam) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScoringStrategy. -func (in *ScoringStrategy) DeepCopy() *ScoringStrategy { - if in == nil { - return nil - } - out := new(ScoringStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UtilizationShapePoint) DeepCopyInto(out *UtilizationShapePoint) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UtilizationShapePoint. -func (in *UtilizationShapePoint) DeepCopy() *UtilizationShapePoint { - if in == nil { - return nil - } - out := new(UtilizationShapePoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VolumeBindingArgs) DeepCopyInto(out *VolumeBindingArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.BindTimeoutSeconds != nil { - in, out := &in.BindTimeoutSeconds, &out.BindTimeoutSeconds - *out = new(int64) - **out = **in - } - if in.Shape != nil { - in, out := &in.Shape, &out.Shape - *out = make([]UtilizationShapePoint, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeBindingArgs. -func (in *VolumeBindingArgs) DeepCopy() *VolumeBindingArgs { - if in == nil { - return nil - } - out := new(VolumeBindingArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeBindingArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/vendor/k8s.io/kube-scheduler/config/v1beta3/doc.go b/vendor/k8s.io/kube-scheduler/config/v1beta3/doc.go deleted file mode 100644 index c791874d6..000000000 --- a/vendor/k8s.io/kube-scheduler/config/v1beta3/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true -// +groupName=kubescheduler.config.k8s.io - -package v1beta3 // import "k8s.io/kube-scheduler/config/v1beta3" diff --git a/vendor/k8s.io/kube-scheduler/config/v1beta3/register.go b/vendor/k8s.io/kube-scheduler/config/v1beta3/register.go deleted file mode 100644 index 768f107b4..000000000 --- a/vendor/k8s.io/kube-scheduler/config/v1beta3/register.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta3 - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name used in this package -const GroupName = "kubescheduler.config.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta3"} - -var ( - // SchemeBuilder is the scheme builder with scheme init functions to run for this API package - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - // AddToScheme is a global function that registers this API group & version to a scheme - AddToScheme = SchemeBuilder.AddToScheme -) - -// addKnownTypes registers known types to the given scheme -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &KubeSchedulerConfiguration{}, - &DefaultPreemptionArgs{}, - &InterPodAffinityArgs{}, - &NodeResourcesBalancedAllocationArgs{}, - &NodeResourcesFitArgs{}, - &PodTopologySpreadArgs{}, - &VolumeBindingArgs{}, - &NodeAffinityArgs{}, - ) - return nil -} diff --git a/vendor/k8s.io/kube-scheduler/config/v1beta3/types.go b/vendor/k8s.io/kube-scheduler/config/v1beta3/types.go deleted file mode 100644 index 45371c9d9..000000000 --- a/vendor/k8s.io/kube-scheduler/config/v1beta3/types.go +++ /dev/null @@ -1,377 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta3 - -import ( - "bytes" - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" - "sigs.k8s.io/yaml" -) - -const ( - // SchedulerDefaultLockObjectNamespace defines default scheduler lock object namespace ("kube-system") - SchedulerDefaultLockObjectNamespace string = metav1.NamespaceSystem - - // SchedulerDefaultLockObjectName defines default scheduler lock object name ("kube-scheduler") - SchedulerDefaultLockObjectName = "kube-scheduler" - - // SchedulerDefaultProviderName defines the default provider names - SchedulerDefaultProviderName = "DefaultProvider" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// KubeSchedulerConfiguration configures a scheduler -type KubeSchedulerConfiguration struct { - metav1.TypeMeta `json:",inline"` - - // Parallelism defines the amount of parallelism in algorithms for scheduling a Pods. Must be greater than 0. Defaults to 16 - Parallelism *int32 `json:"parallelism,omitempty"` - - // LeaderElection defines the configuration of leader election client. - LeaderElection componentbaseconfigv1alpha1.LeaderElectionConfiguration `json:"leaderElection"` - - // ClientConnection specifies the kubeconfig file and client connection - // settings for the proxy server to use when communicating with the apiserver. 
- ClientConnection componentbaseconfigv1alpha1.ClientConnectionConfiguration `json:"clientConnection"` - - // DebuggingConfiguration holds configuration for Debugging related features - // TODO: We might wanna make this a substruct like Debugging componentbaseconfigv1alpha1.DebuggingConfiguration - componentbaseconfigv1alpha1.DebuggingConfiguration `json:",inline"` - - // PercentageOfNodesToScore is the percentage of all nodes that once found feasible - // for running a pod, the scheduler stops its search for more feasible nodes in - // the cluster. This helps improve scheduler's performance. Scheduler always tries to find - // at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is. - // Example: if the cluster size is 500 nodes and the value of this flag is 30, - // then scheduler stops finding further feasible nodes once it finds 150 feasible ones. - // When the value is 0, default percentage (5%--50% based on the size of the cluster) of the - // nodes will be scored. - PercentageOfNodesToScore *int32 `json:"percentageOfNodesToScore,omitempty"` - - // PodInitialBackoffSeconds is the initial backoff for unschedulable pods. - // If specified, it must be greater than 0. If this value is null, the default value (1s) - // will be used. - PodInitialBackoffSeconds *int64 `json:"podInitialBackoffSeconds,omitempty"` - - // PodMaxBackoffSeconds is the max backoff for unschedulable pods. - // If specified, it must be greater than podInitialBackoffSeconds. If this value is null, - // the default value (10s) will be used. - PodMaxBackoffSeconds *int64 `json:"podMaxBackoffSeconds,omitempty"` - - // Profiles are scheduling profiles that kube-scheduler supports. Pods can - // choose to be scheduled under a particular profile by setting its associated - // scheduler name. Pods that don't specify any scheduler name are scheduled - // with the "default-scheduler" profile, if present here. - // +listType=map - // +listMapKey=schedulerName - Profiles []KubeSchedulerProfile `json:"profiles,omitempty"` - - // Extenders are the list of scheduler extenders, each holding the values of how to communicate - // with the extender. These extenders are shared by all scheduler profiles. - // +listType=set - Extenders []Extender `json:"extenders,omitempty"` -} - -// DecodeNestedObjects decodes plugin args for known types. -func (c *KubeSchedulerConfiguration) DecodeNestedObjects(d runtime.Decoder) error { - var strictDecodingErrs []error - for i := range c.Profiles { - prof := &c.Profiles[i] - for j := range prof.PluginConfig { - err := prof.PluginConfig[j].decodeNestedObjects(d) - if err != nil { - decodingErr := fmt.Errorf("decoding .profiles[%d].pluginConfig[%d]: %w", i, j, err) - if runtime.IsStrictDecodingError(err) { - strictDecodingErrs = append(strictDecodingErrs, decodingErr) - } else { - return decodingErr - } - } - } - } - if len(strictDecodingErrs) > 0 { - return runtime.NewStrictDecodingError(strictDecodingErrs) - } - return nil -} - -// EncodeNestedObjects encodes plugin args. -func (c *KubeSchedulerConfiguration) EncodeNestedObjects(e runtime.Encoder) error { - for i := range c.Profiles { - prof := &c.Profiles[i] - for j := range prof.PluginConfig { - err := prof.PluginConfig[j].encodeNestedObjects(e) - if err != nil { - return fmt.Errorf("encoding .profiles[%d].pluginConfig[%d]: %w", i, j, err) - } - } - } - return nil -} - -// KubeSchedulerProfile is a scheduling profile. 
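// (Editor's aside, not part of the vendored file: a Pod opts into a profile
// through spec.schedulerName. For example, a Pod declaring
// spec.schedulerName: my-scheduler is handled by the profile whose
// SchedulerName below is "my-scheduler"; Pods naming no scheduler fall back
// to the "default-scheduler" profile when one is defined.)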
-type KubeSchedulerProfile struct { - // SchedulerName is the name of the scheduler associated to this profile. - // If SchedulerName matches with the pod's "spec.schedulerName", then the pod - // is scheduled with this profile. - SchedulerName *string `json:"schedulerName,omitempty"` - - // Plugins specify the set of plugins that should be enabled or disabled. - // Enabled plugins are the ones that should be enabled in addition to the - // default plugins. Disabled plugins are any of the default plugins that - // should be disabled. - // When no enabled or disabled plugin is specified for an extension point, - // default plugins for that extension point will be used if there is any. - // If a QueueSort plugin is specified, the same QueueSort Plugin and - // PluginConfig must be specified for all profiles. - Plugins *Plugins `json:"plugins,omitempty"` - - // PluginConfig is an optional set of custom plugin arguments for each plugin. - // Omitting config args for a plugin is equivalent to using the default config - // for that plugin. - // +listType=map - // +listMapKey=name - PluginConfig []PluginConfig `json:"pluginConfig,omitempty"` -} - -// Plugins include multiple extension points. When specified, the list of plugins for -// a particular extension point are the only ones enabled. If an extension point is -// omitted from the config, then the default set of plugins is used for that extension point. -// Enabled plugins are called in the order specified here, after default plugins. If they need to -// be invoked before default plugins, default plugins must be disabled and re-enabled here in desired order. -type Plugins struct { - // PreEnqueue is a list of plugins that should be invoked before adding pods to the scheduling queue. - PreEnqueue PluginSet `json:"preEnqueue,omitempty"` - - // QueueSort is a list of plugins that should be invoked when sorting pods in the scheduling queue. - QueueSort PluginSet `json:"queueSort,omitempty"` - - // PreFilter is a list of plugins that should be invoked at "PreFilter" extension point of the scheduling framework. - PreFilter PluginSet `json:"preFilter,omitempty"` - - // Filter is a list of plugins that should be invoked when filtering out nodes that cannot run the Pod. - Filter PluginSet `json:"filter,omitempty"` - - // PostFilter is a list of plugins that are invoked after filtering phase, but only when no feasible nodes were found for the pod. - PostFilter PluginSet `json:"postFilter,omitempty"` - - // PreScore is a list of plugins that are invoked before scoring. - PreScore PluginSet `json:"preScore,omitempty"` - - // Score is a list of plugins that should be invoked when ranking nodes that have passed the filtering phase. - Score PluginSet `json:"score,omitempty"` - - // Reserve is a list of plugins invoked when reserving/unreserving resources - // after a node is assigned to run the pod. - Reserve PluginSet `json:"reserve,omitempty"` - - // Permit is a list of plugins that control binding of a Pod. These plugins can prevent or delay binding of a Pod. - Permit PluginSet `json:"permit,omitempty"` - - // PreBind is a list of plugins that should be invoked before a pod is bound. - PreBind PluginSet `json:"preBind,omitempty"` - - // Bind is a list of plugins that should be invoked at "Bind" extension point of the scheduling framework. - // The scheduler call these plugins in order. Scheduler skips the rest of these plugins as soon as one returns success. 
- Bind PluginSet `json:"bind,omitempty"` - - // PostBind is a list of plugins that should be invoked after a pod is successfully bound. - PostBind PluginSet `json:"postBind,omitempty"` - - // MultiPoint is a simplified config section to enable plugins for all valid extension points. - // Plugins enabled through MultiPoint will automatically register for every individual extension - // point the plugin has implemented. Disabling a plugin through MultiPoint disables that behavior. - // The same is true for disabling "*" through MultiPoint (no default plugins will be automatically registered). - // Plugins can still be disabled through their individual extension points. - // - // In terms of precedence, plugin config follows this basic hierarchy - // 1. Specific extension points - // 2. Explicitly configured MultiPoint plugins - // 3. The set of default plugins, as MultiPoint plugins - // This implies that a higher precedence plugin will run first and overwrite any settings within MultiPoint. - // Explicitly user-configured plugins also take a higher precedence over default plugins. - // Within this hierarchy, an Enabled setting takes precedence over Disabled. For example, if a plugin is - // set in both `multiPoint.Enabled` and `multiPoint.Disabled`, the plugin will be enabled. Similarly, - // including `multiPoint.Disabled = '*'` and `multiPoint.Enabled = pluginA` will still register that specific - // plugin through MultiPoint. This follows the same behavior as all other extension point configurations. - MultiPoint PluginSet `json:"multiPoint,omitempty"` -} - -// PluginSet specifies enabled and disabled plugins for an extension point. -// If an array is empty, missing, or nil, default plugins at that extension point will be used. -type PluginSet struct { - // Enabled specifies plugins that should be enabled in addition to default plugins. - // If the default plugin is also configured in the scheduler config file, the weight of plugin will - // be overridden accordingly. - // These are called after default plugins and in the same order specified here. - // +listType=atomic - Enabled []Plugin `json:"enabled,omitempty"` - // Disabled specifies default plugins that should be disabled. - // When all default plugins need to be disabled, an array containing only one "*" should be provided. - // +listType=map - // +listMapKey=name - Disabled []Plugin `json:"disabled,omitempty"` -} - -// Plugin specifies a plugin name and its weight when applicable. Weight is used only for Score plugins. -type Plugin struct { - // Name defines the name of plugin - Name string `json:"name"` - // Weight defines the weight of plugin, only used for Score plugins. - Weight *int32 `json:"weight,omitempty"` -} - -// PluginConfig specifies arguments that should be passed to a plugin at the time of initialization. -// A plugin that is invoked at multiple extension points is initialized once. Args can have arbitrary structure. -// It is up to the plugin to process these Args. -type PluginConfig struct { - // Name defines the name of plugin being configured - Name string `json:"name"` - // Args defines the arguments passed to the plugins at the time of initialization. Args can have arbitrary structure. - Args runtime.RawExtension `json:"args,omitempty"` -} - -func (c *PluginConfig) decodeNestedObjects(d runtime.Decoder) error { - gvk := SchemeGroupVersion.WithKind(c.Name + "Args") - // dry-run to detect and skip out-of-tree plugin args. 
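	// (Editor's elaboration, inferred from the call below rather than stated
	// upstream: Decode is handed nil bytes purely as a probe, so the
	// scheme-backed decoder reports whether the synthesized <PluginName>Args
	// kind is registered at all; runtime.IsNotRegisteredError on that probe
	// identifies an out-of-tree plugin, whose args are then left as raw
	// bytes instead of failing the whole configuration decode.)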
- if _, _, err := d.Decode(nil, &gvk, nil); runtime.IsNotRegisteredError(err) { - return nil - } - - var strictDecodingErr error - obj, parsedGvk, err := d.Decode(c.Args.Raw, &gvk, nil) - if err != nil { - decodingArgsErr := fmt.Errorf("decoding args for plugin %s: %w", c.Name, err) - if obj != nil && runtime.IsStrictDecodingError(err) { - strictDecodingErr = runtime.NewStrictDecodingError([]error{decodingArgsErr}) - } else { - return decodingArgsErr - } - } - if parsedGvk.GroupKind() != gvk.GroupKind() { - return fmt.Errorf("args for plugin %s were not of type %s, got %s", c.Name, gvk.GroupKind(), parsedGvk.GroupKind()) - } - c.Args.Object = obj - return strictDecodingErr -} - -func (c *PluginConfig) encodeNestedObjects(e runtime.Encoder) error { - if c.Args.Object == nil { - return nil - } - var buf bytes.Buffer - err := e.Encode(c.Args.Object, &buf) - if err != nil { - return err - } - // The encoder might be a YAML encoder, but the parent encoder expects - // JSON output, so we convert YAML back to JSON. - // This is a no-op if produces JSON. - json, err := yaml.YAMLToJSON(buf.Bytes()) - if err != nil { - return err - } - c.Args.Raw = json - return nil -} - -// Extender holds the parameters used to communicate with the extender. If a verb is unspecified/empty, -// it is assumed that the extender chose not to provide that extension. -type Extender struct { - // URLPrefix at which the extender is available - URLPrefix string `json:"urlPrefix"` - // Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender. - FilterVerb string `json:"filterVerb,omitempty"` - // Verb for the preempt call, empty if not supported. This verb is appended to the URLPrefix when issuing the preempt call to extender. - PreemptVerb string `json:"preemptVerb,omitempty"` - // Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender. - PrioritizeVerb string `json:"prioritizeVerb,omitempty"` - // The numeric multiplier for the node scores that the prioritize call generates. - // The weight should be a positive integer - Weight int64 `json:"weight,omitempty"` - // Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender. - // If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender - // can implement this function. - BindVerb string `json:"bindVerb,omitempty"` - // EnableHTTPS specifies whether https should be used to communicate with the extender - EnableHTTPS bool `json:"enableHTTPS,omitempty"` - // TLSConfig specifies the transport layer security config - TLSConfig *ExtenderTLSConfig `json:"tlsConfig,omitempty"` - // HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize - // timeout is ignored, k8s/other extenders priorities are used to select the node. - HTTPTimeout metav1.Duration `json:"httpTimeout,omitempty"` - // NodeCacheCapable specifies that the extender is capable of caching node information, - // so the scheduler should only send minimal information about the eligible nodes - // assuming that the extender already cached full details of all nodes in the cluster - NodeCacheCapable bool `json:"nodeCacheCapable,omitempty"` - // ManagedResources is a list of extended resources that are managed by - // this extender. 
- // - A pod will be sent to the extender on the Filter, Prioritize and Bind - // (if the extender is the binder) phases iff the pod requests at least - // one of the extended resources in this list. If empty or unspecified, - // all pods will be sent to this extender. - // - If IgnoredByScheduler is set to true for a resource, kube-scheduler - // will skip checking the resource in predicates. - // +optional - // +listType=atomic - ManagedResources []ExtenderManagedResource `json:"managedResources,omitempty"` - // Ignorable specifies if the extender is ignorable, i.e. scheduling should not - // fail when the extender returns an error or is not reachable. - Ignorable bool `json:"ignorable,omitempty"` -} - -// ExtenderManagedResource describes the arguments of extended resources -// managed by an extender. -type ExtenderManagedResource struct { - // Name is the extended resource name. - Name string `json:"name"` - // IgnoredByScheduler indicates whether kube-scheduler should ignore this - // resource when applying predicates. - IgnoredByScheduler bool `json:"ignoredByScheduler,omitempty"` -} - -// ExtenderTLSConfig contains settings to enable TLS with extender -type ExtenderTLSConfig struct { - // Server should be accessed without verifying the TLS certificate. For testing only. - Insecure bool `json:"insecure,omitempty"` - // ServerName is passed to the server for SNI and is used in the client to check server - // certificates against. If ServerName is empty, the hostname used to contact the - // server is used. - ServerName string `json:"serverName,omitempty"` - - // Server requires TLS client certificate authentication - CertFile string `json:"certFile,omitempty"` - // Server requires TLS client certificate authentication - KeyFile string `json:"keyFile,omitempty"` - // Trusted root certificates for server - CAFile string `json:"caFile,omitempty"` - - // CertData holds PEM-encoded bytes (typically read from a client certificate file). - // CertData takes precedence over CertFile - CertData []byte `json:"certData,omitempty"` - // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). - // KeyData takes precedence over KeyFile - KeyData []byte `json:"keyData,omitempty"` - // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). - // CAData takes precedence over CAFile - CAData []byte `json:"caData,omitempty"` -} diff --git a/vendor/k8s.io/kube-scheduler/config/v1beta3/types_pluginargs.go b/vendor/k8s.io/kube-scheduler/config/v1beta3/types_pluginargs.go deleted file mode 100644 index 725cd1b92..000000000 --- a/vendor/k8s.io/kube-scheduler/config/v1beta3/types_pluginargs.go +++ /dev/null @@ -1,225 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta3 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DefaultPreemptionArgs holds arguments used to configure the -// DefaultPreemption plugin. -type DefaultPreemptionArgs struct { - metav1.TypeMeta `json:",inline"` - - // MinCandidateNodesPercentage is the minimum number of candidates to - // shortlist when dry running preemption as a percentage of number of nodes. - // Must be in the range [0, 100]. Defaults to 10% of the cluster size if - // unspecified. - MinCandidateNodesPercentage *int32 `json:"minCandidateNodesPercentage,omitempty"` - // MinCandidateNodesAbsolute is the absolute minimum number of candidates to - // shortlist. The likely number of candidates enumerated for dry running - // preemption is given by the formula: - // numCandidates = max(numNodes * minCandidateNodesPercentage, minCandidateNodesAbsolute) - // We say "likely" because there are other factors such as PDB violations - // that play a role in the number of candidates shortlisted. Must be at least - // 0 nodes. Defaults to 100 nodes if unspecified. - MinCandidateNodesAbsolute *int32 `json:"minCandidateNodesAbsolute,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// InterPodAffinityArgs holds arguments used to configure the InterPodAffinity plugin. -type InterPodAffinityArgs struct { - metav1.TypeMeta `json:",inline"` - - // HardPodAffinityWeight is the scoring weight for existing pods with a - // matching hard affinity to the incoming pod. - HardPodAffinityWeight *int32 `json:"hardPodAffinityWeight,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeResourcesFitArgs holds arguments used to configure the NodeResourcesFit plugin. -type NodeResourcesFitArgs struct { - metav1.TypeMeta `json:",inline"` - - // IgnoredResources is the list of resources that NodeResources fit filter - // should ignore. This doesn't apply to scoring. - // +listType=atomic - IgnoredResources []string `json:"ignoredResources,omitempty"` - // IgnoredResourceGroups defines the list of resource groups that NodeResources fit filter should ignore. - // e.g. if group is ["example.com"], it will ignore all resource names that begin - // with "example.com", such as "example.com/aaa" and "example.com/bbb". - // A resource group name can't contain '/'. This doesn't apply to scoring. - // +listType=atomic - IgnoredResourceGroups []string `json:"ignoredResourceGroups,omitempty"` - - // ScoringStrategy selects the node resource scoring strategy. - // The default strategy is LeastAllocated with an equal "cpu" and "memory" weight. - ScoringStrategy *ScoringStrategy `json:"scoringStrategy,omitempty"` -} - -// PodTopologySpreadConstraintsDefaulting defines how to set default constraints -// for the PodTopologySpread plugin. -type PodTopologySpreadConstraintsDefaulting string - -const ( - // SystemDefaulting instructs to use the kubernetes defined default. - SystemDefaulting PodTopologySpreadConstraintsDefaulting = "System" - // ListDefaulting instructs to use the config provided default. - ListDefaulting PodTopologySpreadConstraintsDefaulting = "List" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodTopologySpreadArgs holds arguments used to configure the PodTopologySpread plugin. 
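// (Editor's aside, a hypothetical configuration rather than upstream text:
// combining DefaultingType "List" with a single DefaultConstraints entry
// such as {maxSkew: 1, topologyKey: topology.kubernetes.io/zone,
// whenUnsatisfiable: ScheduleAnyway} spreads every Pod that declares no
// constraints of its own across zones, keeping the matching-Pod count
// difference between zones within 1 where possible.)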
-type PodTopologySpreadArgs struct { - metav1.TypeMeta `json:",inline"` - - // DefaultConstraints defines topology spread constraints to be applied to - // Pods that don't define any in `pod.spec.topologySpreadConstraints`. - // `.defaultConstraints[*].labelSelectors` must be empty, as they are - // deduced from the Pod's membership to Services, ReplicationControllers, - // ReplicaSets or StatefulSets. - // When not empty, .defaultingType must be "List". - // +optional - // +listType=atomic - DefaultConstraints []corev1.TopologySpreadConstraint `json:"defaultConstraints,omitempty"` - - // DefaultingType determines how .defaultConstraints are deduced. Can be one - // of "System" or "List". - // - // - "System": Use kubernetes defined constraints that spread Pods among - // Nodes and Zones. - // - "List": Use constraints defined in .defaultConstraints. - // - // Defaults to "System". - // +optional - DefaultingType PodTopologySpreadConstraintsDefaulting `json:"defaultingType,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeResourcesBalancedAllocationArgs holds arguments used to configure NodeResourcesBalancedAllocation plugin. -type NodeResourcesBalancedAllocationArgs struct { - metav1.TypeMeta `json:",inline"` - - // Resources to be managed, the default is "cpu" and "memory" if not specified. - // +listType=map - // +listMapKey=name - Resources []ResourceSpec `json:"resources,omitempty"` -} - -// UtilizationShapePoint represents single point of priority function shape. -type UtilizationShapePoint struct { - // Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100. - Utilization int32 `json:"utilization"` - // Score assigned to given utilization (y axis). Valid values are 0 to 10. - Score int32 `json:"score"` -} - -// ResourceSpec represents a single resource. -type ResourceSpec struct { - // Name of the resource. - Name string `json:"name"` - // Weight of the resource. - Weight int64 `json:"weight,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeBindingArgs holds arguments used to configure the VolumeBinding plugin. -type VolumeBindingArgs struct { - metav1.TypeMeta `json:",inline"` - - // BindTimeoutSeconds is the timeout in seconds in volume binding operation. - // Value must be non-negative integer. The value zero indicates no waiting. - // If this value is nil, the default value (600) will be used. - BindTimeoutSeconds *int64 `json:"bindTimeoutSeconds,omitempty"` - - // Shape specifies the points defining the score function shape, which is - // used to score nodes based on the utilization of statically provisioned - // PVs. The utilization is calculated by dividing the total requested - // storage of the pod by the total capacity of feasible PVs on each node. - // Each point contains utilization (ranges from 0 to 100) and its - // associated score (ranges from 0 to 10). You can turn the priority by - // specifying different scores for different utilization numbers. - // The default shape points are: - // 1) 0 for 0 utilization - // 2) 10 for 100 utilization - // All points must be sorted in increasing order by utilization. - // +featureGate=VolumeCapacityPriority - // +optional - // +listType=atomic - Shape []UtilizationShapePoint `json:"shape,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeAffinityArgs holds arguments to configure the NodeAffinity plugin. 
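// (Editor's aside, an illustrative scenario not present upstream: an
// operator can pin all Pods scheduled through this profile to nodes
// labeled, say, node-role.kubernetes.io/worker by stating that requirement
// once in AddedAffinity instead of patching every PodSpec; a Pod whose own
// affinity cannot be satisfied together with it remains Pending, as the
// Daemonset caveat below warns.)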
-type NodeAffinityArgs struct { - metav1.TypeMeta `json:",inline"` - - // AddedAffinity is applied to all Pods additionally to the NodeAffinity - // specified in the PodSpec. That is, Nodes need to satisfy AddedAffinity - // AND .spec.NodeAffinity. AddedAffinity is empty by default (all Nodes - // match). - // When AddedAffinity is used, some Pods with affinity requirements that match - // a specific Node (such as Daemonset Pods) might remain unschedulable. - // +optional - AddedAffinity *corev1.NodeAffinity `json:"addedAffinity,omitempty"` -} - -// ScoringStrategyType the type of scoring strategy used in NodeResourcesFit plugin. -type ScoringStrategyType string - -const ( - // LeastAllocated strategy prioritizes nodes with least allocated resources. - LeastAllocated ScoringStrategyType = "LeastAllocated" - // MostAllocated strategy prioritizes nodes with most allocated resources. - MostAllocated ScoringStrategyType = "MostAllocated" - // RequestedToCapacityRatio strategy allows specifying a custom shape function - // to score nodes based on the request to capacity ratio. - RequestedToCapacityRatio ScoringStrategyType = "RequestedToCapacityRatio" -) - -// ScoringStrategy define ScoringStrategyType for node resource plugin -type ScoringStrategy struct { - // Type selects which strategy to run. - Type ScoringStrategyType `json:"type,omitempty"` - - // Resources to consider when scoring. - // The default resource set includes "cpu" and "memory" with an equal weight. - // Allowed weights go from 1 to 100. - // Weight defaults to 1 if not specified or explicitly set to 0. - // +listType=map - // +listMapKey=topologyKey - Resources []ResourceSpec `json:"resources,omitempty"` - - // Arguments specific to RequestedToCapacityRatio strategy. - RequestedToCapacityRatio *RequestedToCapacityRatioParam `json:"requestedToCapacityRatio,omitempty"` -} - -// RequestedToCapacityRatioParam define RequestedToCapacityRatio parameters -type RequestedToCapacityRatioParam struct { - // Shape is a list of points defining the scoring function shape. - // +listType=atomic - Shape []UtilizationShapePoint `json:"shape,omitempty"` -} diff --git a/vendor/k8s.io/kube-scheduler/config/v1beta3/zz_generated.deepcopy.go b/vendor/k8s.io/kube-scheduler/config/v1beta3/zz_generated.deepcopy.go deleted file mode 100644 index 2b549d499..000000000 --- a/vendor/k8s.io/kube-scheduler/config/v1beta3/zz_generated.deepcopy.go +++ /dev/null @@ -1,604 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta3 - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DefaultPreemptionArgs) DeepCopyInto(out *DefaultPreemptionArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.MinCandidateNodesPercentage != nil { - in, out := &in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage - *out = new(int32) - **out = **in - } - if in.MinCandidateNodesAbsolute != nil { - in, out := &in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultPreemptionArgs. -func (in *DefaultPreemptionArgs) DeepCopy() *DefaultPreemptionArgs { - if in == nil { - return nil - } - out := new(DefaultPreemptionArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DefaultPreemptionArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Extender) DeepCopyInto(out *Extender) { - *out = *in - if in.TLSConfig != nil { - in, out := &in.TLSConfig, &out.TLSConfig - *out = new(ExtenderTLSConfig) - (*in).DeepCopyInto(*out) - } - out.HTTPTimeout = in.HTTPTimeout - if in.ManagedResources != nil { - in, out := &in.ManagedResources, &out.ManagedResources - *out = make([]ExtenderManagedResource, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extender. -func (in *Extender) DeepCopy() *Extender { - if in == nil { - return nil - } - out := new(Extender) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderManagedResource) DeepCopyInto(out *ExtenderManagedResource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderManagedResource. -func (in *ExtenderManagedResource) DeepCopy() *ExtenderManagedResource { - if in == nil { - return nil - } - out := new(ExtenderManagedResource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExtenderTLSConfig) DeepCopyInto(out *ExtenderTLSConfig) { - *out = *in - if in.CertData != nil { - in, out := &in.CertData, &out.CertData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.KeyData != nil { - in, out := &in.KeyData, &out.KeyData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.CAData != nil { - in, out := &in.CAData, &out.CAData - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderTLSConfig. -func (in *ExtenderTLSConfig) DeepCopy() *ExtenderTLSConfig { - if in == nil { - return nil - } - out := new(ExtenderTLSConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *InterPodAffinityArgs) DeepCopyInto(out *InterPodAffinityArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.HardPodAffinityWeight != nil { - in, out := &in.HardPodAffinityWeight, &out.HardPodAffinityWeight - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterPodAffinityArgs. -func (in *InterPodAffinityArgs) DeepCopy() *InterPodAffinityArgs { - if in == nil { - return nil - } - out := new(InterPodAffinityArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *InterPodAffinityArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeSchedulerConfiguration) DeepCopyInto(out *KubeSchedulerConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Parallelism != nil { - in, out := &in.Parallelism, &out.Parallelism - *out = new(int32) - **out = **in - } - in.LeaderElection.DeepCopyInto(&out.LeaderElection) - out.ClientConnection = in.ClientConnection - in.DebuggingConfiguration.DeepCopyInto(&out.DebuggingConfiguration) - if in.PercentageOfNodesToScore != nil { - in, out := &in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore - *out = new(int32) - **out = **in - } - if in.PodInitialBackoffSeconds != nil { - in, out := &in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds - *out = new(int64) - **out = **in - } - if in.PodMaxBackoffSeconds != nil { - in, out := &in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds - *out = new(int64) - **out = **in - } - if in.Profiles != nil { - in, out := &in.Profiles, &out.Profiles - *out = make([]KubeSchedulerProfile, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Extenders != nil { - in, out := &in.Extenders, &out.Extenders - *out = make([]Extender, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfiguration. -func (in *KubeSchedulerConfiguration) DeepCopy() *KubeSchedulerConfiguration { - if in == nil { - return nil - } - out := new(KubeSchedulerConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *KubeSchedulerConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *KubeSchedulerProfile) DeepCopyInto(out *KubeSchedulerProfile) { - *out = *in - if in.SchedulerName != nil { - in, out := &in.SchedulerName, &out.SchedulerName - *out = new(string) - **out = **in - } - if in.Plugins != nil { - in, out := &in.Plugins, &out.Plugins - *out = new(Plugins) - (*in).DeepCopyInto(*out) - } - if in.PluginConfig != nil { - in, out := &in.PluginConfig, &out.PluginConfig - *out = make([]PluginConfig, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerProfile. 
-func (in *KubeSchedulerProfile) DeepCopy() *KubeSchedulerProfile { - if in == nil { - return nil - } - out := new(KubeSchedulerProfile) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeAffinityArgs) DeepCopyInto(out *NodeAffinityArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.AddedAffinity != nil { - in, out := &in.AddedAffinity, &out.AddedAffinity - *out = new(v1.NodeAffinity) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinityArgs. -func (in *NodeAffinityArgs) DeepCopy() *NodeAffinityArgs { - if in == nil { - return nil - } - out := new(NodeAffinityArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeAffinityArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeResourcesBalancedAllocationArgs) DeepCopyInto(out *NodeResourcesBalancedAllocationArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]ResourceSpec, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesBalancedAllocationArgs. -func (in *NodeResourcesBalancedAllocationArgs) DeepCopy() *NodeResourcesBalancedAllocationArgs { - if in == nil { - return nil - } - out := new(NodeResourcesBalancedAllocationArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeResourcesBalancedAllocationArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeResourcesFitArgs) DeepCopyInto(out *NodeResourcesFitArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.IgnoredResources != nil { - in, out := &in.IgnoredResources, &out.IgnoredResources - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.IgnoredResourceGroups != nil { - in, out := &in.IgnoredResourceGroups, &out.IgnoredResourceGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ScoringStrategy != nil { - in, out := &in.ScoringStrategy, &out.ScoringStrategy - *out = new(ScoringStrategy) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesFitArgs. -func (in *NodeResourcesFitArgs) DeepCopy() *NodeResourcesFitArgs { - if in == nil { - return nil - } - out := new(NodeResourcesFitArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeResourcesFitArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Plugin) DeepCopyInto(out *Plugin) { - *out = *in - if in.Weight != nil { - in, out := &in.Weight, &out.Weight - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugin. -func (in *Plugin) DeepCopy() *Plugin { - if in == nil { - return nil - } - out := new(Plugin) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PluginConfig) DeepCopyInto(out *PluginConfig) { - *out = *in - in.Args.DeepCopyInto(&out.Args) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfig. -func (in *PluginConfig) DeepCopy() *PluginConfig { - if in == nil { - return nil - } - out := new(PluginConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PluginSet) DeepCopyInto(out *PluginSet) { - *out = *in - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = make([]Plugin, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Disabled != nil { - in, out := &in.Disabled, &out.Disabled - *out = make([]Plugin, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSet. -func (in *PluginSet) DeepCopy() *PluginSet { - if in == nil { - return nil - } - out := new(PluginSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Plugins) DeepCopyInto(out *Plugins) { - *out = *in - in.PreEnqueue.DeepCopyInto(&out.PreEnqueue) - in.QueueSort.DeepCopyInto(&out.QueueSort) - in.PreFilter.DeepCopyInto(&out.PreFilter) - in.Filter.DeepCopyInto(&out.Filter) - in.PostFilter.DeepCopyInto(&out.PostFilter) - in.PreScore.DeepCopyInto(&out.PreScore) - in.Score.DeepCopyInto(&out.Score) - in.Reserve.DeepCopyInto(&out.Reserve) - in.Permit.DeepCopyInto(&out.Permit) - in.PreBind.DeepCopyInto(&out.PreBind) - in.Bind.DeepCopyInto(&out.Bind) - in.PostBind.DeepCopyInto(&out.PostBind) - in.MultiPoint.DeepCopyInto(&out.MultiPoint) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugins. -func (in *Plugins) DeepCopy() *Plugins { - if in == nil { - return nil - } - out := new(Plugins) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodTopologySpreadArgs) DeepCopyInto(out *PodTopologySpreadArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.DefaultConstraints != nil { - in, out := &in.DefaultConstraints, &out.DefaultConstraints - *out = make([]v1.TopologySpreadConstraint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTopologySpreadArgs. -func (in *PodTopologySpreadArgs) DeepCopy() *PodTopologySpreadArgs { - if in == nil { - return nil - } - out := new(PodTopologySpreadArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *PodTopologySpreadArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RequestedToCapacityRatioParam) DeepCopyInto(out *RequestedToCapacityRatioParam) { - *out = *in - if in.Shape != nil { - in, out := &in.Shape, &out.Shape - *out = make([]UtilizationShapePoint, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestedToCapacityRatioParam. -func (in *RequestedToCapacityRatioParam) DeepCopy() *RequestedToCapacityRatioParam { - if in == nil { - return nil - } - out := new(RequestedToCapacityRatioParam) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec. -func (in *ResourceSpec) DeepCopy() *ResourceSpec { - if in == nil { - return nil - } - out := new(ResourceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScoringStrategy) DeepCopyInto(out *ScoringStrategy) { - *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]ResourceSpec, len(*in)) - copy(*out, *in) - } - if in.RequestedToCapacityRatio != nil { - in, out := &in.RequestedToCapacityRatio, &out.RequestedToCapacityRatio - *out = new(RequestedToCapacityRatioParam) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScoringStrategy. -func (in *ScoringStrategy) DeepCopy() *ScoringStrategy { - if in == nil { - return nil - } - out := new(ScoringStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UtilizationShapePoint) DeepCopyInto(out *UtilizationShapePoint) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UtilizationShapePoint. -func (in *UtilizationShapePoint) DeepCopy() *UtilizationShapePoint { - if in == nil { - return nil - } - out := new(UtilizationShapePoint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeBindingArgs) DeepCopyInto(out *VolumeBindingArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.BindTimeoutSeconds != nil { - in, out := &in.BindTimeoutSeconds, &out.BindTimeoutSeconds - *out = new(int64) - **out = **in - } - if in.Shape != nil { - in, out := &in.Shape, &out.Shape - *out = make([]UtilizationShapePoint, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeBindingArgs. -func (in *VolumeBindingArgs) DeepCopy() *VolumeBindingArgs { - if in == nil { - return nil - } - out := new(VolumeBindingArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *VolumeBindingArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/LICENSE b/vendor/k8s.io/kubernetes/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/k8s.io/kubernetes/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
- We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/vendor/k8s.io/kubernetes/pkg/features/OWNERS b/vendor/k8s.io/kubernetes/pkg/features/OWNERS
deleted file mode 100644
index 3e1dd9f08..000000000
--- a/vendor/k8s.io/kubernetes/pkg/features/OWNERS
+++ /dev/null
@@ -1,4 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
- - feature-approvers
diff --git a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go b/vendor/k8s.io/kubernetes/pkg/features/kube_features.go
deleted file mode 100644
index 2ad904d6f..000000000
--- a/vendor/k8s.io/kubernetes/pkg/features/kube_features.go
+++ /dev/null
@@ -1,1190 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package features
-
-import (
- "k8s.io/apimachinery/pkg/util/runtime"
- genericfeatures "k8s.io/apiserver/pkg/features"
- utilfeature "k8s.io/apiserver/pkg/util/feature"
- "k8s.io/component-base/featuregate"
-)
-
-const (
- // Every feature gate should add method here following this template:
- //
- // // owner: @username
- // // kep: https://kep.k8s.io/NNN
- // // alpha: v1.X
- // MyFeature featuregate.Feature = "MyFeature"
- //
- // Feature gates should be listed in alphabetical, case-sensitive
- // (upper before any lower case character) order. This reduces the risk
- // of code conflicts because changes are more likely to be scattered
- // across the file.
-
- // owner: @ttakahashi21 @mkimuram
- // kep: https://kep.k8s.io/3294
- // alpha: v1.26
- //
- // Enable usage of Provision of PVCs from snapshots in other namespaces
- CrossNamespaceVolumeDataSource featuregate.Feature = "CrossNamespaceVolumeDataSource"
-
- // owner: @bswartz
- // alpha: v1.18
- // beta: v1.24
- //
- // Enables usage of any object for volume data source in PVCs
- AnyVolumeDataSource featuregate.Feature = "AnyVolumeDataSource"
-
- // owner: @nabokihms
- // alpha: v1.26
- //
- // Enables API to get self subject attributes after authentication.
- APISelfSubjectReview featuregate.Feature = "APISelfSubjectReview" - - // owner: @tallclair - // beta: v1.4 - AppArmor featuregate.Feature = "AppArmor" - - // owner: @szuecs - // alpha: v1.12 - // - // Enable nodes to change CPUCFSQuotaPeriod - CPUCFSQuotaPeriod featuregate.Feature = "CustomCPUCFSQuotaPeriod" - - // owner: @ConnorDoyle, @fromanirh (only for GA graduation) - // alpha: v1.8 - // beta: v1.10 - // GA: v1.26 - // - // Alternative container-level CPU affinity policies. - CPUManager featuregate.Feature = "CPUManager" - - // owner: @fromanirh - // alpha: v1.23 - // beta: see below. - // - // Allow fine-tuning of cpumanager policies, experimental, alpha-quality options - // Per https://groups.google.com/g/kubernetes-sig-architecture/c/Nxsc7pfe5rw/m/vF2djJh0BAAJ - // We want to avoid a proliferation of feature gates. This feature gate: - // - will guard *a group* of cpumanager options whose quality level is alpha. - // - will never graduate to beta or stable. - // See https://groups.google.com/g/kubernetes-sig-architecture/c/Nxsc7pfe5rw/m/vF2djJh0BAAJ - // for details about the removal of this feature gate. - CPUManagerPolicyAlphaOptions featuregate.Feature = "CPUManagerPolicyAlphaOptions" - - // owner: @fromanirh - // beta: v1.23 - // beta: see below. - // - // Allow fine-tuning of cpumanager policies, experimental, beta-quality options - // Per https://groups.google.com/g/kubernetes-sig-architecture/c/Nxsc7pfe5rw/m/vF2djJh0BAAJ - // We want to avoid a proliferation of feature gates. This feature gate: - // - will guard *a group* of cpumanager options whose quality level is beta. - // - is thus *introduced* as beta - // - will never graduate to stable. - // See https://groups.google.com/g/kubernetes-sig-architecture/c/Nxsc7pfe5rw/m/vF2djJh0BAAJ - // for details about the removal of this feature gate. - CPUManagerPolicyBetaOptions featuregate.Feature = "CPUManagerPolicyBetaOptions" - - // owner: @fromanirh - // alpha: v1.22 - // beta: v1.23 - // - // Allow the usage of options to fine-tune the cpumanager policies. - CPUManagerPolicyOptions featuregate.Feature = "CPUManagerPolicyOptions" - - // owner: @pohly - // alpha: v1.14 - // beta: v1.16 - // GA: v1.25 - // - // Enables CSI Inline volumes support for pods - CSIInlineVolume featuregate.Feature = "CSIInlineVolume" - - // owner: @davidz627 - // alpha: v1.14 - // beta: v1.17 - // - // Enables the in-tree storage to CSI Plugin migration feature. - CSIMigration featuregate.Feature = "CSIMigration" - - // owner: @leakingtapan - // alpha: v1.14 - // beta: v1.17 - // GA: v1.25 - // - // Enables the AWS EBS in-tree driver to AWS EBS CSI Driver migration feature. - CSIMigrationAWS featuregate.Feature = "CSIMigrationAWS" - - // owner: @andyzhangx - // alpha: v1.15 - // beta: v1.19 - // GA: v1.24 - // - // Enables the Azure Disk in-tree driver to Azure Disk Driver migration feature. - CSIMigrationAzureDisk featuregate.Feature = "CSIMigrationAzureDisk" - - // owner: @andyzhangx - // alpha: v1.15 - // beta: v1.21 - // GA: v1.26 - // - // Enables the Azure File in-tree driver to Azure File Driver migration feature. - CSIMigrationAzureFile featuregate.Feature = "CSIMigrationAzureFile" - - // owner: @davidz627 - // alpha: v1.14 - // beta: v1.17 - // GA: 1.25 - // - // Enables the GCE PD in-tree driver to GCE CSI Driver migration feature. - CSIMigrationGCE featuregate.Feature = "CSIMigrationGCE" - - // owner: @trierra - // alpha: v1.23 - // - // Enables the Portworx in-tree driver to Portworx migration feature. 
- CSIMigrationPortworx featuregate.Feature = "CSIMigrationPortworx" - - // owner: @humblec - // alpha: v1.23 - // - // Enables the RBD in-tree driver to RBD CSI Driver migration feature. - CSIMigrationRBD featuregate.Feature = "CSIMigrationRBD" - - // owner: @divyenpatel - // beta: v1.19 (requires: vSphere vCenter/ESXi Version: 7.0u2, HW Version: VM version 15) - // GA: 1.26 - // Enables the vSphere in-tree driver to vSphere CSI Driver migration feature. - CSIMigrationvSphere featuregate.Feature = "CSIMigrationvSphere" - - // owner: @humblec, @zhucan - // kep: https://kep.k8s.io/3171 - // alpha: v1.25 - // - // Enables SecretRef field in CSI NodeExpandVolume request. - CSINodeExpandSecret featuregate.Feature = "CSINodeExpandSecret" - - // owner: @pohly - // alpha: v1.19 - // beta: v1.21 - // GA: v1.24 - // - // Enables tracking of available storage capacity that CSI drivers provide. - CSIStorageCapacity featuregate.Feature = "CSIStorageCapacity" - - // owner: @fengzixu - // alpha: v1.21 - // - // Enables kubelet to detect CSI volume condition and send the event of the abnormal volume to the corresponding pod that is using it. - CSIVolumeHealth featuregate.Feature = "CSIVolumeHealth" - - // owner: @adrianreber - // kep: https://kep.k8s.io/2008 - // alpha: v1.25 - // - // Enables container Checkpoint support in the kubelet - ContainerCheckpoint featuregate.Feature = "ContainerCheckpoint" - - // owner: @bhcleek @wzshiming - // GA: v1.25 - // - // Normalize HttpGet URL and Header passing for lifecycle handlers with probers. - ConsistentHTTPGetHandlers featuregate.Feature = "ConsistentHTTPGetHandlers" - - // owner: @jiahuif - // alpha: v1.21 - // beta: v1.22 - // GA: v1.24 - // - // Enables Leader Migration for kube-controller-manager and cloud-controller-manager - ControllerManagerLeaderMigration featuregate.Feature = "ControllerManagerLeaderMigration" - - // owner: @deejross, @soltysh - // kep: https://kep.k8s.io/3140 - // alpha: v1.24 - // beta: v1.25 - // - // Enables support for time zones in CronJobs. - CronJobTimeZone featuregate.Feature = "CronJobTimeZone" - - // owner: @smarterclayton - // alpha: v1.21 - // beta: v1.22 - // GA: v1.25 - // DaemonSets allow workloads to maintain availability during update per node - DaemonSetUpdateSurge featuregate.Feature = "DaemonSetUpdateSurge" - - // owner: @gnufied, @verult, @bertinatto - // alpha: v1.22 - // beta: v1.23 - // GA: v1.26 - // If supported by the CSI driver, delegates the role of applying FSGroup to - // the driver by passing FSGroup through the NodeStageVolume and - // NodePublishVolume calls. - DelegateFSGroupToCSIDriver featuregate.Feature = "DelegateFSGroupToCSIDriver" - - // owner: @jiayingz, @swatisehgal (for GA graduation) - // alpha: v1.8 - // beta: v1.10 - // GA: v1.26 - // - // Enables support for Device Plugins - DevicePlugins featuregate.Feature = "DevicePlugins" - - // owner: @RenaudWasTaken @dashpole - // alpha: v1.19 - // beta: v1.20 - // ga: v1.25 - // - // Disables Accelerator Metrics Collected by Kubelet - DisableAcceleratorUsageMetrics featuregate.Feature = "DisableAcceleratorUsageMetrics" - - // owner: @andrewsykim - // alpha: v1.22 - // - // Disable any functionality in kube-apiserver, kube-controller-manager and kubelet related to the `--cloud-provider` component flag. - DisableCloudProviders featuregate.Feature = "DisableCloudProviders" - - // owner: @andrewsykim - // alpha: v1.23 - // - // Disable in-tree functionality in kubelet to authenticate to cloud provider container registries for image pull credentials. 
- DisableKubeletCloudCredentialProviders featuregate.Feature = "DisableKubeletCloudCredentialProviders"
-
- // owner: @derekwaynecarr
- // alpha: v1.20
- // beta: v1.21 (off by default until 1.22)
- //
- // Enables usage of hugepages-<size> in downward API.
- DownwardAPIHugePages featuregate.Feature = "DownwardAPIHugePages"
-
- // owner: @pohly
- // kep: http://kep.k8s.io/3063
- // alpha: v1.26
- //
- // Enables support for resources with custom parameters and a lifecycle
- // that is independent of a Pod.
- DynamicResourceAllocation featuregate.Feature = "DynamicResourceAllocation"
-
- // owner: @andrewsykim
- // kep: https://kep.k8s.io/1672
- // alpha: v1.20
- // beta: v1.22
- // GA: v1.26
- //
- // Enable Terminating condition in Endpoint Slices.
- EndpointSliceTerminatingCondition featuregate.Feature = "EndpointSliceTerminatingCondition"
-
- // owner: @verb
- // alpha: v1.16
- // beta: v1.23
- // GA: v1.25
- //
- // Allows running an ephemeral container in pod namespaces to troubleshoot a running pod.
- EphemeralContainers featuregate.Feature = "EphemeralContainers"
-
- // owner: @harche
- // kep: http://kep.k8s.io/3386
- // alpha: v1.25
- //
- // Allows using event-driven PLEG (pod lifecycle event generator) through kubelet
- // which avoids frequent relisting of containers which helps optimize performance.
- EventedPLEG featuregate.Feature = "EventedPLEG"
-
- // owner: @andrewsykim @SergeyKanzhelev
- // GA: v1.20
- //
- // Ensure kubelet respects exec probe timeouts. Feature gate exists in-case existing workloads
- // may depend on old behavior where exec probe timeouts were ignored.
- // Lock to default and remove after v1.22 based on user feedback that should be reflected in KEP #1972 update
- ExecProbeTimeout featuregate.Feature = "ExecProbeTimeout"
-
- // owner: @gnufied
- // alpha: v1.14
- // beta: v1.16
- // GA: 1.24
- // Ability to expand CSI volumes
- ExpandCSIVolumes featuregate.Feature = "ExpandCSIVolumes"
-
- // owner: @mlmhl @gnufied
- // beta: v1.15
- // GA: 1.24
- // Ability to expand persistent volumes' file system without unmounting volumes.
- ExpandInUsePersistentVolumes featuregate.Feature = "ExpandInUsePersistentVolumes"
-
- // owner: @gnufied
- // beta: v1.11
- // GA: 1.24
- // Ability to Expand persistent volumes
- ExpandPersistentVolumes featuregate.Feature = "ExpandPersistentVolumes"
-
- // owner: @gjkim42
- // kep: https://kep.k8s.io/2595
- // alpha: v1.22
- // beta: v1.26
- //
- // Enables apiserver and kubelet to allow up to 32 DNSSearchPaths and up to 2048 DNSSearchListChars.
- ExpandedDNSConfig featuregate.Feature = "ExpandedDNSConfig"
-
- // owner: @pweil-
- // alpha: v1.5
- //
- // Default userns=host for containers that are using other host namespaces, host mounts, the pod
- // contains a privileged container, or specific non-namespaced capabilities (MKNOD, SYS_MODULE,
- // SYS_TIME). This should only be enabled if user namespace remapping is enabled in the docker daemon.
- ExperimentalHostUserNamespaceDefaultingGate featuregate.Feature = "ExperimentalHostUserNamespaceDefaulting"
-
- // owner: @yuzhiquan, @bowei, @PxyUp, @SergeyKanzhelev
- // kep: https://kep.k8s.io/2727
- // alpha: v1.23
- // beta: v1.24
- //
- // Enables GRPC probe method for {Liveness,Readiness,Startup}Probe.
- GRPCContainerProbe featuregate.Feature = "GRPCContainerProbe"
-
- // owner: @bobbypage
- // alpha: v1.20
- // beta: v1.21
- // Adds support for kubelet to detect node shutdown and gracefully terminate pods prior to the node being shutdown.
- GracefulNodeShutdown featuregate.Feature = "GracefulNodeShutdown" - - // owner: @wzshiming - // alpha: v1.23 - // beta: v1.24 - // Make the kubelet use shutdown configuration based on pod priority values for graceful shutdown. - GracefulNodeShutdownBasedOnPodPriority featuregate.Feature = "GracefulNodeShutdownBasedOnPodPriority" - - // owner: @arjunrn @mwielgus @josephburnett - // alpha: v1.20 - // - // Add support for the HPA to scale based on metrics from individual containers - // in target pods - HPAContainerMetrics featuregate.Feature = "HPAContainerMetrics" - - // owner: @dxist - // alpha: v1.16 - // - // Enables support of HPA scaling to zero pods when an object or custom metric is configured. - HPAScaleToZero featuregate.Feature = "HPAScaleToZero" - - // owner: @deepakkinni @xing-yang - // kep: https://kep.k8s.io/2680 - // alpha: v1.23 - // - // Honor Persistent Volume Reclaim Policy when it is "Delete" irrespective of PV-PVC - // deletion ordering. - HonorPVReclaimPolicy featuregate.Feature = "HonorPVReclaimPolicy" - - // owner: @ravig - // alpha: v1.23 - // beta: v1.24 - // GA: v1.25 - // IdentifyPodOS allows user to specify OS on which they'd like the Pod run. The user should still set the nodeSelector - // with appropriate `kubernetes.io/os` label for scheduler to identify appropriate node for the pod to run. - IdentifyPodOS featuregate.Feature = "IdentifyPodOS" - - // owner: @leakingtapan - // alpha: v1.21 - // - // Disables the AWS EBS in-tree driver. - InTreePluginAWSUnregister featuregate.Feature = "InTreePluginAWSUnregister" - - // owner: @andyzhangx - // alpha: v1.21 - // - // Disables the Azure Disk in-tree driver. - InTreePluginAzureDiskUnregister featuregate.Feature = "InTreePluginAzureDiskUnregister" - - // owner: @andyzhangx - // alpha: v1.21 - // - // Disables the Azure File in-tree driver. - InTreePluginAzureFileUnregister featuregate.Feature = "InTreePluginAzureFileUnregister" - - // owner: @Jiawei0227 - // alpha: v1.21 - // - // Disables the GCE PD in-tree driver. - InTreePluginGCEUnregister featuregate.Feature = "InTreePluginGCEUnregister" - - // owner: @adisky - // alpha: v1.21 - // - // Disables the OpenStack Cinder in-tree driver. - InTreePluginOpenStackUnregister featuregate.Feature = "InTreePluginOpenStackUnregister" - - // owner: @trierra - // alpha: v1.23 - // - // Disables the Portworx in-tree driver. - InTreePluginPortworxUnregister featuregate.Feature = "InTreePluginPortworxUnregister" - - // owner: @humblec - // alpha: v1.23 - // - // Disables the RBD in-tree driver. - InTreePluginRBDUnregister featuregate.Feature = "InTreePluginRBDUnregister" - - // owner: @divyenpatel - // alpha: v1.21 - // - // Disables the vSphere in-tree driver. - InTreePluginvSphereUnregister featuregate.Feature = "InTreePluginvSphereUnregister" - - // owner: @danwinship - // kep: https://kep.k8s.io/3178 - // alpha: v1.25 - // - // Causes kubelet to no longer create legacy IPTables rules - IPTablesOwnershipCleanup featuregate.Feature = "IPTablesOwnershipCleanup" - - // owner: @mimowo - // kep: https://kep.k8s.io/3329 - // alpha: v1.25 - // beta: v1.26 - // - // Allow users to specify handling of pod failures based on container exit codes - // and pod conditions. - JobPodFailurePolicy featuregate.Feature = "JobPodFailurePolicy" - - // owner: @ahg - // beta: v1.23 - // - // Allow updating node scheduling directives in the pod template of jobs. Specifically, - // node affinity, selector and tolerations. 
- // This is allowed only for suspended jobs
- // that have never been unsuspended before.
- JobMutableNodeSchedulingDirectives featuregate.Feature = "JobMutableNodeSchedulingDirectives"
-
- // owner: @alculquicondor
- // alpha: v1.23
- // beta: v1.24
- //
- // Track the number of pods with Ready condition in the Job status.
- JobReadyPods featuregate.Feature = "JobReadyPods"
-
- // owner: @alculquicondor
- // alpha: v1.22
- // beta: v1.23
- // stable: v1.26
- //
- // Track Job completion without relying on Pod remaining in the cluster
- // indefinitely. Pod finalizers, in addition to a field in the Job status
- // allow the Job controller to keep track of Pods that it didn't account for
- // yet.
- JobTrackingWithFinalizers featuregate.Feature = "JobTrackingWithFinalizers"
-
- // owner: @andrewsykim @adisky @ndixita
- // alpha: v1.20
- // beta: v1.24
- // GA: v1.26
- //
- // Enable kubelet exec plugins for image pull credentials.
- KubeletCredentialProviders featuregate.Feature = "KubeletCredentialProviders"
-
- // owner: @AkihiroSuda
- // alpha: v1.22
- //
- // Enables support for running kubelet in a user namespace.
- // The user namespace has to be created before running kubelet.
- // All the node components such as CRI need to be running in the same user namespace.
- KubeletInUserNamespace featuregate.Feature = "KubeletInUserNamespace"
-
- // owner: @dashpole
- // alpha: v1.13
- // beta: v1.15
- //
- // Enables the kubelet's pod resources grpc endpoint
- KubeletPodResources featuregate.Feature = "KubeletPodResources"
-
- // owner: @fromanirh
- // alpha: v1.21
- // beta: v1.23
- // Enable POD resources API to return allocatable resources
- KubeletPodResourcesGetAllocatable featuregate.Feature = "KubeletPodResourcesGetAllocatable"
-
- // owner: @sallyom
- // kep: https://kep.k8s.io/2832
- // alpha: v1.25
- //
- // Add support for distributed tracing in the kubelet
- KubeletTracing featuregate.Feature = "KubeletTracing"
-
- // owner: @zshihang
- // kep: https://kep.k8s.io/2800
- // beta: v1.24
- //
- // Stop auto-generation of secret-based service account tokens.
- LegacyServiceAccountTokenNoAutoGeneration featuregate.Feature = "LegacyServiceAccountTokenNoAutoGeneration"
-
- // owner: @zshihang
- // kep: http://kep.k8s.io/2800
- // alpha: v1.25
- //
- // Enables tracking of secret-based service account tokens usage.
- LegacyServiceAccountTokenTracking featuregate.Feature = "LegacyServiceAccountTokenTracking"
-
- // owner: @jinxu
- // beta: v1.10
- // stable: v1.25
- //
- // Support local ephemeral storage types for local storage capacity isolation feature.
- LocalStorageCapacityIsolation featuregate.Feature = "LocalStorageCapacityIsolation"
-
- // owner: @RobertKrawitz
- // alpha: v1.15
- //
- // Allow use of filesystems for ephemeral storage monitoring.
- // Only applies if LocalStorageCapacityIsolation is set.
- LocalStorageCapacityIsolationFSQuotaMonitoring featuregate.Feature = "LocalStorageCapacityIsolationFSQuotaMonitoring"
-
- // owner: @damemi
- // alpha: v1.21
- // beta: v1.22
- //
- // Enables scaling down replicas via logarithmic comparison of creation/ready timestamps
- LogarithmicScaleDown featuregate.Feature = "LogarithmicScaleDown"
-
- // owner: @denkensk
- // kep: https://kep.k8s.io/3243
- // alpha: v1.25
- //
- // Enable MatchLabelKeys in PodTopologySpread.
- MatchLabelKeysInPodTopologySpread featuregate.Feature = "MatchLabelKeysInPodTopologySpread" - - // owner: @krmayankk - // alpha: v1.24 - // - // Enables maxUnavailable for StatefulSet - MaxUnavailableStatefulSet featuregate.Feature = "MaxUnavailableStatefulSet" - - // owner: @cynepco3hahue(alukiano) @cezaryzukowski @k-wiatrzyk - // alpha: v1.21 - // beta: v1.22 - // Allows setting memory affinity for a container based on NUMA topology - MemoryManager featuregate.Feature = "MemoryManager" - - // owner: @xiaoxubeii - // kep: https://kep.k8s.io/2570 - // alpha: v1.22 - // - // Enables kubelet to support memory QoS with cgroups v2. - MemoryQoS featuregate.Feature = "MemoryQoS" - - // owner: @sanposhiho - // kep: https://kep.k8s.io/3022 - // alpha: v1.24 - // beta: v1.25 - // - // Enable MinDomains in Pod Topology Spread. - MinDomainsInPodTopologySpread featuregate.Feature = "MinDomainsInPodTopologySpread" - - // owner: @danwinship - // kep: http://kep.k8s.io/3453 - // alpha: v1.26 - // - // Enables new performance-improving code in kube-proxy iptables mode - MinimizeIPTablesRestore featuregate.Feature = "MinimizeIPTablesRestore" - - // owner: @janosi @bridgetkromhout - // kep: https://kep.k8s.io/1435 - // alpha: v1.20 - // beta: v1.24 - // ga: v1.26 - // - // Enables the usage of different protocols in the same Service with type=LoadBalancer - MixedProtocolLBService featuregate.Feature = "MixedProtocolLBService" - - // owner: @sarveshr7 - // kep: https://kep.k8s.io/2593 - // alpha: v1.25 - // - // Enables the MultiCIDR Range allocator. - MultiCIDRRangeAllocator featuregate.Feature = "MultiCIDRRangeAllocator" - - // owner: @rikatz - // kep: https://kep.k8s.io/2079 - // alpha: v1.21 - // beta: v1.22 - // ga: v1.25 - // - // Enables the endPort field in NetworkPolicy to enable a Port Range behavior in Network Policies. - NetworkPolicyEndPort featuregate.Feature = "NetworkPolicyEndPort" - - // owner: @rikatz - // kep: https://kep.k8s.io/2943 - // alpha: v1.24 - // - // Enables NetworkPolicy status subresource - NetworkPolicyStatus featuregate.Feature = "NetworkPolicyStatus" - - // owner: @xing-yang @sonasingh46 - // kep: https://kep.k8s.io/2268 - // alpha: v1.24 - // beta: v1.26 - // - // Allow pods to failover to a different node in case of non graceful node shutdown - NodeOutOfServiceVolumeDetach featuregate.Feature = "NodeOutOfServiceVolumeDetach" - - // owner: @ehashman - // alpha: v1.22 - // - // Permits kubelet to run with swap enabled - NodeSwap featuregate.Feature = "NodeSwap" - - // owner: @mortent, @atiratree, @ravig - // kep: http://kep.k8s.io/3018 - // alpha: v1.26 - // - // Enables PDBUnhealthyPodEvictionPolicy for PodDisruptionBudgets - PDBUnhealthyPodEvictionPolicy featuregate.Feature = "PDBUnhealthyPodEvictionPolicy" - - // owner: @haircommander - // kep: https://kep.k8s.io/2364 - // alpha: v1.23 - // - // Configures the Kubelet to use the CRI to populate pod and container stats, instead of supplimenting with stats from cAdvisor. - // Requires the CRI implementation supports supplying the required stats. - PodAndContainerStatsFromCRI featuregate.Feature = "PodAndContainerStatsFromCRI" - - // owner: @ahg-g - // alpha: v1.21 - // beta: v1.22 - // - // Enables controlling pod ranking on replicaset scale-down. 
- PodDeletionCost featuregate.Feature = "PodDeletionCost" - - // owner: @mimowo - // kep: https://kep.k8s.io/3329 - // alpha: v1.25 - // beta: v1.26 - // - // Enables support for appending a dedicated pod condition indicating that - // the pod is being deleted due to a disruption. - PodDisruptionConditions featuregate.Feature = "PodDisruptionConditions" - - // owner: @ddebroy - // alpha: v1.25 - // - // Enables reporting of PodHasNetwork condition in pod status after pod - // sandbox creation and network configuration completes successfully - PodHasNetworkCondition featuregate.Feature = "PodHasNetworkCondition" - - // owner: @Huang-Wei - // kep: https://kep.k8s.io/3521 - // alpha: v1.26 - // - // Enable users to specify when a Pod is ready for scheduling. - PodSchedulingReadiness featuregate.Feature = "PodSchedulingReadiness" - - // owner: @liggitt, @tallclair, sig-auth - // alpha: v1.22 - // beta: v1.23 - // ga: v1.25 - // - // Enables the PodSecurity admission plugin - PodSecurity featuregate.Feature = "PodSecurity" - - // owner: @ehashman - // alpha: v1.21 - // beta: v1.22 - // - // Allows user to override pod-level terminationGracePeriod for probes - ProbeTerminationGracePeriod featuregate.Feature = "ProbeTerminationGracePeriod" - - // owner: @jessfraz - // alpha: v1.12 - // - // Enables control over ProcMountType for containers. - ProcMountType featuregate.Feature = "ProcMountType" - - // owner: @andrewsykim - // kep: https://kep.k8s.io/1669 - // alpha: v1.22 - // beta: v1.26 - // - // Enable kube-proxy to handle terminating ednpoints when externalTrafficPolicy=Local - ProxyTerminatingEndpoints featuregate.Feature = "ProxyTerminatingEndpoints" - - // owner: @sjenning - // alpha: v1.11 - // - // Allows resource reservations at the QoS level preventing pods at lower QoS levels from - // bursting into resources requested at higher QoS levels (memory only for now) - QOSReserved featuregate.Feature = "QOSReserved" - - // owner: @chrishenzie - // alpha: v1.22 - // - // Enables usage of the ReadWriteOncePod PersistentVolume access mode. - ReadWriteOncePod featuregate.Feature = "ReadWriteOncePod" - - // owner: @gnufied - // kep: https://kep.k8s.io/1790 - // alpha: v1.23 - // - // Allow users to recover from volume expansion failure - RecoverVolumeExpansionFailure featuregate.Feature = "RecoverVolumeExpansionFailure" - - // owner: @RomanBednar - // kep: https://kep.k8s.io/3333 - // alpha: v1.25 - // - // Allow assigning StorageClass to unbound PVCs retroactively - RetroactiveDefaultStorageClass featuregate.Feature = "RetroactiveDefaultStorageClass" - - // owner: @mikedanese - // alpha: v1.7 - // beta: v1.12 - // - // Gets a server certificate for the kubelet from the Certificate Signing - // Request API instead of generating one self signed and auto rotates the - // certificate as expiration approaches. - RotateKubeletServerCertificate featuregate.Feature = "RotateKubeletServerCertificate" - - // owner: @saschagrunert - // kep: https://kep.k8s.io/2413 - // alpha: v1.22 - // beta: v1.25 - // - // Enables the use of `RuntimeDefault` as the default seccomp profile for all workloads. 
- SeccompDefault featuregate.Feature = "SeccompDefault" - - // owner: @maplain @andrewsykim - // kep: https://kep.k8s.io/2086 - // alpha: v1.21 - // beta: v1.22 - // GA: v1.26 - // - // Enables node-local routing for Service internal traffic - ServiceInternalTrafficPolicy featuregate.Feature = "ServiceInternalTrafficPolicy" - - // owner: @aojea - // kep: https://kep.k8s.io/3070 - // alpha: v1.24 - // beta: v1.25 - // ga: v1.26 - // - // Subdivide the ClusterIP range for dynamic and static IP allocation. - ServiceIPStaticSubrange featuregate.Feature = "ServiceIPStaticSubrange" - - // owner: @derekwaynecarr - // alpha: v1.20 - // beta: v1.22 - // - // Enables kubelet support to size memory backed volumes - SizeMemoryBackedVolumes featuregate.Feature = "SizeMemoryBackedVolumes" - - // owner: @mattcary - // alpha: v1.22 - // - // Enables policies controlling deletion of PVCs created by a StatefulSet. - StatefulSetAutoDeletePVC featuregate.Feature = "StatefulSetAutoDeletePVC" - - // owner: @ravig - // kep: https://kep.k8s.io/2607 - // alpha: v1.22 - // beta: v1.23 - // GA: v1.25 - // StatefulSetMinReadySeconds allows minReadySeconds to be respected by StatefulSet controller - StatefulSetMinReadySeconds featuregate.Feature = "StatefulSetMinReadySeconds" - - // owner: @psch - // alpha: v1.26 - // - // Enables a StatefulSet to start from an arbitrary non zero ordinal - StatefulSetStartOrdinal featuregate.Feature = "StatefulSetStartOrdinal" - - // owner: @robscott - // kep: https://kep.k8s.io/2433 - // alpha: v1.21 - // beta: v1.23 - // - // Enables topology aware hints for EndpointSlices - TopologyAwareHints featuregate.Feature = "TopologyAwareHints" - - // owner: @lmdaly - // alpha: v1.16 - // beta: v1.18 - // - // Enable resource managers to make NUMA aligned decisions - TopologyManager featuregate.Feature = "TopologyManager" - - // owner: @PiotrProkop - // kep: https://kep.k8s.io/3545 - // alpha: v1.26 - // - // Allow fine-tuning of topology manager policies with alpha options. - // This feature gate: - // - will guard *a group* of topology manager options whose quality level is alpha. - // - will never graduate to beta or stable. - TopologyManagerPolicyAlphaOptions featuregate.Feature = "TopologyManagerPolicyAlphaOptions" - - // owner: @PiotrProkop - // kep: https://kep.k8s.io/3545 - // alpha: v1.26 - // - // Allow fine-tuning of topology manager policies with beta options. - // This feature gate: - // - will guard *a group* of topology manager options whose quality level is beta. - // - is thus *introduced* as beta - // - will never graduate to stable. - TopologyManagerPolicyBetaOptions featuregate.Feature = "TopologyManagerPolicyBetaOptions" - - // owner: @PiotrProkop - // kep: https://kep.k8s.io/3545 - // alpha: v1.26 - // - // Allow the usage of options to fine-tune the topology manager policies. - TopologyManagerPolicyOptions featuregate.Feature = "TopologyManagerPolicyOptions" - - // owner: @rata, @giuseppe - // kep: https://kep.k8s.io/127 - // alpha: v1.25 - // - // Enables user namespace support for stateless pods. 
- UserNamespacesStatelessPodsSupport featuregate.Feature = "UserNamespacesStatelessPodsSupport" - - // owner: @cofyc - // alpha: v1.21 - VolumeCapacityPriority featuregate.Feature = "VolumeCapacityPriority" - - // owner: @ksubrmnn - // alpha: v1.14 - // - // Allows kube-proxy to create DSR loadbalancers for Windows - WinDSR featuregate.Feature = "WinDSR" - - // owner: @ksubrmnn - // alpha: v1.14 - // beta: v1.20 - // - // Allows kube-proxy to run in Overlay mode for Windows - WinOverlay featuregate.Feature = "WinOverlay" - - // owner: @marosset - // kep: https://kep.k8s.io/3503 - // alpha: v1.26 - // - // Enables support for joining Windows containers to a hosts' network namespace. - WindowsHostNetwork featuregate.Feature = "WindowsHostNetwork" - - // owner: @marosset - // alpha: v1.22 - // beta: v1.23 - // GA: v1.26 - // - // Enables support for 'HostProcess' containers on Windows nodes. - WindowsHostProcessContainers featuregate.Feature = "WindowsHostProcessContainers" - - // owner: @kerthcet - // kep: https://kep.k8s.io/3094 - // alpha: v1.25 - // beta: v1.26 - // - // Allow users to specify whether to take nodeAffinity/nodeTaint into consideration when - // calculating pod topology spread skew. - NodeInclusionPolicyInPodTopologySpread featuregate.Feature = "NodeInclusionPolicyInPodTopologySpread" - - // owner: @jsafrane - // kep: https://kep.k8s.io/1710 - // alpha: v1.25 - // Speed up container startup by mounting volumes with the correct SELinux label - // instead of changing each file on the volumes recursively. - // Initial implementation focused on ReadWriteOncePod volumes. - SELinuxMountReadWriteOncePod featuregate.Feature = "SELinuxMountReadWriteOncePod" -) - -func init() { - runtime.Must(utilfeature.DefaultMutableFeatureGate.Add(defaultKubernetesFeatureGates)) -} - -// defaultKubernetesFeatureGates consists of all known Kubernetes-specific feature keys. -// To add a new feature, define a key for it above and add it here. The features will be -// available throughout Kubernetes binaries. -// -// Entries are separated from each other with blank lines to avoid sweeping gofmt changes -// when adding or removing one entry. 
-var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureSpec{
-	CrossNamespaceVolumeDataSource: {Default: false, PreRelease: featuregate.Alpha},
-
-	AnyVolumeDataSource: {Default: true, PreRelease: featuregate.Beta}, // on by default in 1.24
-
-	APISelfSubjectReview: {Default: false, PreRelease: featuregate.Alpha},
-
-	AppArmor: {Default: true, PreRelease: featuregate.Beta},
-
-	CPUCFSQuotaPeriod: {Default: false, PreRelease: featuregate.Alpha},
-
-	CPUManager: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.26
-
-	CPUManagerPolicyAlphaOptions: {Default: false, PreRelease: featuregate.Alpha},
-
-	CPUManagerPolicyBetaOptions: {Default: true, PreRelease: featuregate.Beta},
-
-	CPUManagerPolicyOptions: {Default: true, PreRelease: featuregate.Beta},
-
-	CSIInlineVolume: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
-
-	CSIMigration: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
-
-	CSIMigrationAWS: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
-
-	CSIMigrationAzureDisk: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26
-
-	CSIMigrationAzureFile: {Default: true, PreRelease: featuregate.GA}, // remove in 1.28
-
-	CSIMigrationGCE: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
-
-	CSIMigrationPortworx: {Default: false, PreRelease: featuregate.Beta}, // Off by default (requires Portworx CSI driver)
-
-	CSIMigrationRBD: {Default: false, PreRelease: featuregate.Alpha}, // Off by default (requires RBD CSI driver)
-
-	CSIMigrationvSphere: {Default: true, PreRelease: featuregate.GA}, // LockToDefault when CSI driver with GA support for Windows, raw block and xfs features are available
-
-	CSINodeExpandSecret: {Default: false, PreRelease: featuregate.Alpha},
-
-	CSIStorageCapacity: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26
-
-	CSIVolumeHealth: {Default: false, PreRelease: featuregate.Alpha},
-
-	ContainerCheckpoint: {Default: false, PreRelease: featuregate.Alpha},
-
-	ConsistentHTTPGetHandlers: {Default: true, PreRelease: featuregate.GA},
-
-	ControllerManagerLeaderMigration: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.26
-
-	CronJobTimeZone: {Default: true, PreRelease: featuregate.Beta},
-
-	DaemonSetUpdateSurge: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
-
-	DelegateFSGroupToCSIDriver: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28
-
-	DevicePlugins: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // GA in 1.26
-
-	DisableAcceleratorUsageMetrics: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
-
-	DisableCloudProviders: {Default: false, PreRelease: featuregate.Alpha},
-
-	DisableKubeletCloudCredentialProviders: {Default: false, PreRelease: featuregate.Alpha},
-
-	DownwardAPIHugePages: {Default: true, PreRelease: featuregate.Beta}, // on by default in 1.22
-
-	EndpointSliceTerminatingCondition: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in v1.28
-
-	DynamicResourceAllocation: {Default: false, PreRelease: featuregate.Alpha},
-
-	EphemeralContainers: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
-
-	EventedPLEG: {Default: false, PreRelease: featuregate.Alpha},
-
-	ExecProbeTimeout: {Default: true, PreRelease: featuregate.GA}, // lock to default and remove after v1.22 based on KEP #1972 update
-
-	ExpandCSIVolumes: {Default: true, PreRelease: featuregate.GA}, // remove in 1.26
-
-	ExpandInUsePersistentVolumes: {Default: true, PreRelease: featuregate.GA}, // remove in 1.26
-
-	ExpandPersistentVolumes: {Default: true, PreRelease: featuregate.GA}, // remove in 1.26
-
-	ExpandedDNSConfig: {Default: true, PreRelease: featuregate.Beta},
-
-	ExperimentalHostUserNamespaceDefaultingGate: {Default: false, PreRelease: featuregate.Beta},
-
-	GRPCContainerProbe: {Default: true, PreRelease: featuregate.Beta},
-
-	GracefulNodeShutdown: {Default: true, PreRelease: featuregate.Beta},
-
-	GracefulNodeShutdownBasedOnPodPriority: {Default: true, PreRelease: featuregate.Beta},
-
-	HPAContainerMetrics: {Default: false, PreRelease: featuregate.Alpha},
-
-	HonorPVReclaimPolicy: {Default: false, PreRelease: featuregate.Alpha},
-
-	IdentifyPodOS: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
-
-	InTreePluginAWSUnregister: {Default: false, PreRelease: featuregate.Alpha},
-
-	InTreePluginAzureDiskUnregister: {Default: false, PreRelease: featuregate.Alpha},
-
-	InTreePluginAzureFileUnregister: {Default: false, PreRelease: featuregate.Alpha},
-
-	InTreePluginGCEUnregister: {Default: false, PreRelease: featuregate.Alpha},
-
-	InTreePluginOpenStackUnregister: {Default: false, PreRelease: featuregate.Alpha},
-
-	InTreePluginPortworxUnregister: {Default: false, PreRelease: featuregate.Alpha},
-
-	InTreePluginRBDUnregister: {Default: false, PreRelease: featuregate.Alpha},
-
-	InTreePluginvSphereUnregister: {Default: false, PreRelease: featuregate.Alpha},
-
-	IPTablesOwnershipCleanup: {Default: false, PreRelease: featuregate.Alpha},
-
-	JobPodFailurePolicy: {Default: true, PreRelease: featuregate.Beta},
-
-	JobMutableNodeSchedulingDirectives: {Default: true, PreRelease: featuregate.Beta},
-
-	JobReadyPods: {Default: true, PreRelease: featuregate.Beta},
-
-	JobTrackingWithFinalizers: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28
-
-	KubeletCredentialProviders: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28
-
-	KubeletInUserNamespace: {Default: false, PreRelease: featuregate.Alpha},
-
-	KubeletPodResources: {Default: true, PreRelease: featuregate.Beta},
-
-	KubeletPodResourcesGetAllocatable: {Default: true, PreRelease: featuregate.Beta},
-
-	KubeletTracing: {Default: false, PreRelease: featuregate.Alpha},
-
-	LegacyServiceAccountTokenNoAutoGeneration: {Default: true, PreRelease: featuregate.GA},
-
-	LegacyServiceAccountTokenTracking: {Default: false, PreRelease: featuregate.Alpha},
-
-	LocalStorageCapacityIsolation: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
-
-	LocalStorageCapacityIsolationFSQuotaMonitoring: {Default: false, PreRelease: featuregate.Alpha},
-
-	LogarithmicScaleDown: {Default: true, PreRelease: featuregate.Beta},
-
-	MatchLabelKeysInPodTopologySpread: {Default: false, PreRelease: featuregate.Alpha},
-
-	MaxUnavailableStatefulSet: {Default: false, PreRelease: featuregate.Alpha},
-
-	MemoryManager: {Default: true, PreRelease: featuregate.Beta},
-
-	MemoryQoS: {Default: false, PreRelease: featuregate.Alpha},
-
-	MinDomainsInPodTopologySpread: {Default: false, PreRelease: featuregate.Beta},
-
-	MinimizeIPTablesRestore: {Default: false, PreRelease: featuregate.Alpha},
-
-	MixedProtocolLBService: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28
-
-	MultiCIDRRangeAllocator: {Default: false, PreRelease: featuregate.Alpha},
-
-	NetworkPolicyEndPort: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
-
-	NetworkPolicyStatus: {Default: false, PreRelease: featuregate.Alpha},
-
-	NodeOutOfServiceVolumeDetach: {Default: true, PreRelease: featuregate.Beta},
-
-	NodeSwap: {Default: false, PreRelease: featuregate.Alpha},
-
-	PDBUnhealthyPodEvictionPolicy: {Default: false, PreRelease: featuregate.Alpha},
-
-	PodAndContainerStatsFromCRI: {Default: false, PreRelease: featuregate.Alpha},
-
-	PodDeletionCost: {Default: true, PreRelease: featuregate.Beta},
-
-	PodDisruptionConditions: {Default: true, PreRelease: featuregate.Beta},
-
-	PodHasNetworkCondition: {Default: false, PreRelease: featuregate.Alpha},
-
-	PodSchedulingReadiness: {Default: false, PreRelease: featuregate.Alpha},
-
-	PodSecurity: {Default: true, PreRelease: featuregate.GA, LockToDefault: true},
-
-	ProbeTerminationGracePeriod: {Default: true, PreRelease: featuregate.Beta}, // Default to true in beta 1.25
-
-	ProcMountType: {Default: false, PreRelease: featuregate.Alpha},
-
-	ProxyTerminatingEndpoints: {Default: true, PreRelease: featuregate.Beta},
-
-	QOSReserved: {Default: false, PreRelease: featuregate.Alpha},
-
-	ReadWriteOncePod: {Default: false, PreRelease: featuregate.Alpha},
-
-	RecoverVolumeExpansionFailure: {Default: false, PreRelease: featuregate.Alpha},
-
-	RetroactiveDefaultStorageClass: {Default: true, PreRelease: featuregate.Beta},
-
-	RotateKubeletServerCertificate: {Default: true, PreRelease: featuregate.Beta},
-
-	SeccompDefault: {Default: true, PreRelease: featuregate.Beta},
-
-	ServiceIPStaticSubrange: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28
-
-	ServiceInternalTrafficPolicy: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28
-
-	SizeMemoryBackedVolumes: {Default: true, PreRelease: featuregate.Beta},
-
-	StatefulSetAutoDeletePVC: {Default: false, PreRelease: featuregate.Alpha},
-
-	StatefulSetMinReadySeconds: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.27
-
-	StatefulSetStartOrdinal: {Default: false, PreRelease: featuregate.Alpha},
-
-	TopologyAwareHints: {Default: true, PreRelease: featuregate.Beta},
-
-	TopologyManager: {Default: true, PreRelease: featuregate.Beta},
-
-	TopologyManagerPolicyAlphaOptions: {Default: false, PreRelease: featuregate.Alpha},
-
-	TopologyManagerPolicyBetaOptions: {Default: false, PreRelease: featuregate.Beta},
-
-	TopologyManagerPolicyOptions: {Default: false, PreRelease: featuregate.Alpha},
-
-	VolumeCapacityPriority: {Default: false, PreRelease: featuregate.Alpha},
-
-	UserNamespacesStatelessPodsSupport: {Default: false, PreRelease: featuregate.Alpha},
-
-	WinDSR: {Default: false, PreRelease: featuregate.Alpha},
-
-	WinOverlay: {Default: true, PreRelease: featuregate.Beta},
-
-	WindowsHostNetwork: {Default: true, PreRelease: featuregate.Alpha},
-
-	WindowsHostProcessContainers: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28
-
-	NodeInclusionPolicyInPodTopologySpread: {Default: true, PreRelease: featuregate.Beta},
-
-	SELinuxMountReadWriteOncePod: {Default: false, PreRelease: featuregate.Alpha},
-
-	// inherited features from generic apiserver, relisted here to get a conflict if it is changed
-	// unintentionally on either side:
-
-	genericfeatures.AggregatedDiscoveryEndpoint: {Default: false, PreRelease: featuregate.Alpha},
-
-	genericfeatures.APIListChunking: {Default: true, PreRelease: featuregate.Beta},
-
-	genericfeatures.APIPriorityAndFairness: {Default: true, PreRelease: featuregate.Beta},
-
-	genericfeatures.APIResponseCompression: {Default: true, PreRelease: featuregate.Beta},
-
-	genericfeatures.AdvancedAuditing: {Default: true, PreRelease: featuregate.GA},
-
-	genericfeatures.ValidatingAdmissionPolicy: {Default: false, PreRelease: featuregate.Alpha},
-
-	genericfeatures.CustomResourceValidationExpressions: {Default: true, PreRelease: featuregate.Beta},
-
-	genericfeatures.DryRun: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28
-
-	genericfeatures.OpenAPIEnums: {Default: true, PreRelease: featuregate.Beta},
-
-	genericfeatures.OpenAPIV3: {Default: true, PreRelease: featuregate.Beta},
-
-	genericfeatures.ServerSideApply: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
-
-	genericfeatures.ServerSideFieldValidation: {Default: true, PreRelease: featuregate.Beta},
-
-	// features that enable backwards compatibility but are scheduled to be removed
-	// ...
-	HPAScaleToZero: {Default: false, PreRelease: featuregate.Alpha},
-}
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/OWNERS b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/OWNERS
deleted file mode 100644
index 3023c572e..000000000
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/OWNERS
+++ /dev/null
@@ -1,11 +0,0 @@
-# See the OWNERS docs at https://go.k8s.io/owners
-
-approvers:
-  - api-approvers
-reviewers:
-  - api-reviewers
-  - sig-scheduling-api-reviewers
-  - sig-scheduling-api-approvers
-labels:
-  - kind/api-change
-  - sig/scheduling
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/doc.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/doc.go
deleted file mode 100644
index 896eaa83b..000000000
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// +k8s:deepcopy-gen=package
-// +groupName=kubescheduler.config.k8s.io
-
-package config // import "k8s.io/kubernetes/pkg/scheduler/apis/config"
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/register.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/register.go
deleted file mode 100644
index 457556e10..000000000
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/register.go
+++ /dev/null
@@ -1,50 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package config
-
-import (
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// GroupName is the group name used in this package
-const GroupName = "kubescheduler.config.k8s.io"
-
-// SchemeGroupVersion is group version used to register these objects
-var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal}
-
-var (
-	// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
-	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
-	// AddToScheme is a global function that registers this API group & version to a scheme
-	AddToScheme = SchemeBuilder.AddToScheme
-)
-
-// addKnownTypes registers known types to the given scheme
-func addKnownTypes(scheme *runtime.Scheme) error {
-	scheme.AddKnownTypes(SchemeGroupVersion,
-		&KubeSchedulerConfiguration{},
-		&DefaultPreemptionArgs{},
-		&InterPodAffinityArgs{},
-		&NodeResourcesFitArgs{},
-		&PodTopologySpreadArgs{},
-		&VolumeBindingArgs{},
-		&NodeResourcesBalancedAllocationArgs{},
-		&NodeAffinityArgs{},
-	)
-	return nil
-}
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/scheme.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/scheme.go
deleted file mode 100644
index 375b49b56..000000000
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/scheme/scheme.go
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package scheme
-
-import (
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/serializer"
-	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	config "k8s.io/kubernetes/pkg/scheduler/apis/config"
-	configv1 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1"
-	configv1beta2 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2"
-	configv1beta3 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3"
-)
-
-var (
-	// Scheme is the runtime.Scheme to which all kubescheduler api types are registered.
-	Scheme = runtime.NewScheme()
-
-	// Codecs provides access to encoding and decoding for the scheme.
-	Codecs = serializer.NewCodecFactory(Scheme, serializer.EnableStrict)
-)
-
-func init() {
-	AddToScheme(Scheme)
-}
-
-// AddToScheme builds the kubescheduler scheme using all known versions of the kubescheduler api.
-func AddToScheme(scheme *runtime.Scheme) {
-	utilruntime.Must(config.AddToScheme(scheme))
-	utilruntime.Must(configv1beta2.AddToScheme(scheme))
-	utilruntime.Must(configv1beta3.AddToScheme(scheme))
-	utilruntime.Must(configv1.AddToScheme(scheme))
-	utilruntime.Must(scheme.SetVersionPriority(
-		configv1.SchemeGroupVersion,
-		configv1beta3.SchemeGroupVersion,
-		configv1beta2.SchemeGroupVersion,
-	))
-}
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types.go
deleted file mode 100644
index 8db4e35c9..000000000
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types.go
+++ /dev/null
@@ -1,338 +0,0 @@
-/*
-Copyright 2018 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package config
-
-import (
-	"math"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/util/sets"
-	componentbaseconfig "k8s.io/component-base/config"
-)
-
-const (
-	// SchedulerPolicyConfigMapKey defines the key of the element in the
-	// scheduler's policy ConfigMap that contains scheduler's policy config.
-	SchedulerPolicyConfigMapKey = "policy.cfg"
-
-	// DefaultKubeSchedulerPort is the default port for the scheduler status server.
-	// May be overridden by a flag at startup.
-	DefaultKubeSchedulerPort = 10259
-)
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// KubeSchedulerConfiguration configures a scheduler
-type KubeSchedulerConfiguration struct {
-	// TypeMeta contains the API version and kind. In kube-scheduler, after
-	// conversion from the versioned KubeSchedulerConfiguration type to this
-	// internal type, we set the APIVersion field to the scheme group/version of
-	// the type we converted from. This is done in cmd/kube-scheduler in two
-	// places: (1) when loading config from a file, (2) generating the default
-	// config. Based on the versioned type set in this field, we make decisions;
-	// for example (1) during validation to check for usage of removed plugins,
-	// (2) writing config to a file, (3) initialising the scheduler.
-	metav1.TypeMeta
-
-	// Parallelism defines the amount of parallelism in algorithms for scheduling Pods. Must be greater than 0. Defaults to 16.
-	Parallelism int32
-
-	// LeaderElection defines the configuration of leader election client.
-	LeaderElection componentbaseconfig.LeaderElectionConfiguration
-
-	// ClientConnection specifies the kubeconfig file and client connection
-	// settings for the proxy server to use when communicating with the apiserver.
-	ClientConnection componentbaseconfig.ClientConnectionConfiguration
-	// HealthzBindAddress is the IP address and port for the health check server to serve on.
-	HealthzBindAddress string
-	// MetricsBindAddress is the IP address and port for the metrics server to serve on.
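The internal KubeSchedulerConfiguration struct continues below. Since this vendored copy and its scheme are precisely what the diff removes, it is worth noting that the versioned config can still be decoded with no internal machinery at all. A hedged sketch, assuming the external types from k8s.io/kube-scheduler/config/v1 and sigs.k8s.io/yaml are available (the profile name is invented):

```go
package main

import (
	"fmt"

	schedv1 "k8s.io/kube-scheduler/config/v1"
	"sigs.k8s.io/yaml"
)

const raw = `
apiVersion: kubescheduler.config.k8s.io/v1
kind: KubeSchedulerConfiguration
profiles:
  - schedulerName: example-scheduler
`

func main() {
	var cfg schedv1.KubeSchedulerConfiguration
	// sigs.k8s.io/yaml converts YAML to JSON first, so the json tags on
	// the v1 types apply. A plain decode does NOT apply defaults; that
	// was the job of the defaulting code deleted later in this diff.
	if err := yaml.Unmarshal([]byte(raw), &cfg); err != nil {
		panic(err)
	}
	fmt.Println(*cfg.Profiles[0].SchedulerName) // example-scheduler
}
```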
- MetricsBindAddress string - - // DebuggingConfiguration holds configuration for Debugging related features - // TODO: We might wanna make this a substruct like Debugging componentbaseconfig.DebuggingConfiguration - componentbaseconfig.DebuggingConfiguration - - // PercentageOfNodesToScore is the percentage of all nodes that once found feasible - // for running a pod, the scheduler stops its search for more feasible nodes in - // the cluster. This helps improve scheduler's performance. Scheduler always tries to find - // at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is. - // Example: if the cluster size is 500 nodes and the value of this flag is 30, - // then scheduler stops finding further feasible nodes once it finds 150 feasible ones. - // When the value is 0, default percentage (5%--50% based on the size of the cluster) of the - // nodes will be scored. It is overridden by profile level PercentageOfNodesToScore. - PercentageOfNodesToScore *int32 - - // PodInitialBackoffSeconds is the initial backoff for unschedulable pods. - // If specified, it must be greater than 0. If this value is null, the default value (1s) - // will be used. - PodInitialBackoffSeconds int64 - - // PodMaxBackoffSeconds is the max backoff for unschedulable pods. - // If specified, it must be greater than or equal to podInitialBackoffSeconds. If this value is null, - // the default value (10s) will be used. - PodMaxBackoffSeconds int64 - - // Profiles are scheduling profiles that kube-scheduler supports. Pods can - // choose to be scheduled under a particular profile by setting its associated - // scheduler name. Pods that don't specify any scheduler name are scheduled - // with the "default-scheduler" profile, if present here. - Profiles []KubeSchedulerProfile - - // Extenders are the list of scheduler extenders, each holding the values of how to communicate - // with the extender. These extenders are shared by all scheduler profiles. - Extenders []Extender -} - -// KubeSchedulerProfile is a scheduling profile. -type KubeSchedulerProfile struct { - // SchedulerName is the name of the scheduler associated to this profile. - // If SchedulerName matches with the pod's "spec.schedulerName", then the pod - // is scheduled with this profile. - SchedulerName string - - // PercentageOfNodesToScore is the percentage of all nodes that once found feasible - // for running a pod, the scheduler stops its search for more feasible nodes in - // the cluster. This helps improve scheduler's performance. Scheduler always tries to find - // at least "minFeasibleNodesToFind" feasible nodes no matter what the value of this flag is. - // Example: if the cluster size is 500 nodes and the value of this flag is 30, - // then scheduler stops finding further feasible nodes once it finds 150 feasible ones. - // When the value is 0, default percentage (5%--50% based on the size of the cluster) of the - // nodes will be scored. It will override global PercentageOfNodesToScore. If it is empty, - // global PercentageOfNodesToScore will be used. - PercentageOfNodesToScore *int32 - - // Plugins specify the set of plugins that should be enabled or disabled. - // Enabled plugins are the ones that should be enabled in addition to the - // default plugins. Disabled plugins are any of the default plugins that - // should be disabled. - // When no enabled or disabled plugin is specified for an extension point, - // default plugins for that extension point will be used if there is any. 
- // If a QueueSort plugin is specified, the same QueueSort Plugin and - // PluginConfig must be specified for all profiles. - Plugins *Plugins - - // PluginConfig is an optional set of custom plugin arguments for each plugin. - // Omitting config args for a plugin is equivalent to using the default config - // for that plugin. - PluginConfig []PluginConfig -} - -// Plugins include multiple extension points. When specified, the list of plugins for -// a particular extension point are the only ones enabled. If an extension point is -// omitted from the config, then the default set of plugins is used for that extension point. -// Enabled plugins are called in the order specified here, after default plugins. If they need to -// be invoked before default plugins, default plugins must be disabled and re-enabled here in desired order. -type Plugins struct { - // PreEnqueue is a list of plugins that should be invoked before adding pods to the scheduling queue. - PreEnqueue PluginSet - - // QueueSort is a list of plugins that should be invoked when sorting pods in the scheduling queue. - QueueSort PluginSet - - // PreFilter is a list of plugins that should be invoked at "PreFilter" extension point of the scheduling framework. - PreFilter PluginSet - - // Filter is a list of plugins that should be invoked when filtering out nodes that cannot run the Pod. - Filter PluginSet - - // PostFilter is a list of plugins that are invoked after filtering phase, but only when no feasible nodes were found for the pod. - PostFilter PluginSet - - // PreScore is a list of plugins that are invoked before scoring. - PreScore PluginSet - - // Score is a list of plugins that should be invoked when ranking nodes that have passed the filtering phase. - Score PluginSet - - // Reserve is a list of plugins invoked when reserving/unreserving resources - // after a node is assigned to run the pod. - Reserve PluginSet - - // Permit is a list of plugins that control binding of a Pod. These plugins can prevent or delay binding of a Pod. - Permit PluginSet - - // PreBind is a list of plugins that should be invoked before a pod is bound. - PreBind PluginSet - - // Bind is a list of plugins that should be invoked at "Bind" extension point of the scheduling framework. - // The scheduler call these plugins in order. Scheduler skips the rest of these plugins as soon as one returns success. - Bind PluginSet - - // PostBind is a list of plugins that should be invoked after a pod is successfully bound. - PostBind PluginSet - - // MultiPoint is a simplified config field for enabling plugins for all valid extension points - MultiPoint PluginSet -} - -// PluginSet specifies enabled and disabled plugins for an extension point. -// If an array is empty, missing, or nil, default plugins at that extension point will be used. -type PluginSet struct { - // Enabled specifies plugins that should be enabled in addition to default plugins. - // These are called after default plugins and in the same order specified here. - Enabled []Plugin - // Disabled specifies default plugins that should be disabled. - // When all default plugins need to be disabled, an array containing only one "*" should be provided. - Disabled []Plugin -} - -// Plugin specifies a plugin name and its weight when applicable. Weight is used only for Score plugins. -type Plugin struct { - // Name defines the name of plugin - Name string - // Weight defines the weight of plugin, only used for Score plugins. 
- Weight int32 -} - -// PluginConfig specifies arguments that should be passed to a plugin at the time of initialization. -// A plugin that is invoked at multiple extension points is initialized once. Args can have arbitrary structure. -// It is up to the plugin to process these Args. -type PluginConfig struct { - // Name defines the name of plugin being configured - Name string - // Args defines the arguments passed to the plugins at the time of initialization. Args can have arbitrary structure. - Args runtime.Object -} - -/* - * NOTE: The following variables and methods are intentionally left out of the staging mirror. - */ -const ( - // DefaultPercentageOfNodesToScore defines the percentage of nodes of all nodes - // that once found feasible, the scheduler stops looking for more nodes. - // A value of 0 means adaptive, meaning the scheduler figures out a proper default. - DefaultPercentageOfNodesToScore = 0 - - // MaxCustomPriorityScore is the max score UtilizationShapePoint expects. - MaxCustomPriorityScore int64 = 10 - - // MaxTotalScore is the maximum total score. - MaxTotalScore int64 = math.MaxInt64 - - // MaxWeight defines the max weight value allowed for custom PriorityPolicy - MaxWeight = MaxTotalScore / MaxCustomPriorityScore -) - -// Names returns the list of enabled plugin names. -func (p *Plugins) Names() []string { - if p == nil { - return nil - } - extensions := []PluginSet{ - p.PreEnqueue, - p.PreFilter, - p.Filter, - p.PostFilter, - p.Reserve, - p.PreScore, - p.Score, - p.PreBind, - p.Bind, - p.PostBind, - p.Permit, - p.QueueSort, - } - n := sets.NewString() - for _, e := range extensions { - for _, pg := range e.Enabled { - n.Insert(pg.Name) - } - } - return n.List() -} - -// Extender holds the parameters used to communicate with the extender. If a verb is unspecified/empty, -// it is assumed that the extender chose not to provide that extension. -type Extender struct { - // URLPrefix at which the extender is available - URLPrefix string - // Verb for the filter call, empty if not supported. This verb is appended to the URLPrefix when issuing the filter call to extender. - FilterVerb string - // Verb for the preempt call, empty if not supported. This verb is appended to the URLPrefix when issuing the preempt call to extender. - PreemptVerb string - // Verb for the prioritize call, empty if not supported. This verb is appended to the URLPrefix when issuing the prioritize call to extender. - PrioritizeVerb string - // The numeric multiplier for the node scores that the prioritize call generates. - // The weight should be a positive integer - Weight int64 - // Verb for the bind call, empty if not supported. This verb is appended to the URLPrefix when issuing the bind call to extender. - // If this method is implemented by the extender, it is the extender's responsibility to bind the pod to apiserver. Only one extender - // can implement this function. - BindVerb string - // EnableHTTPS specifies whether https should be used to communicate with the extender - EnableHTTPS bool - // TLSConfig specifies the transport layer security config - TLSConfig *ExtenderTLSConfig - // HTTPTimeout specifies the timeout duration for a call to the extender. Filter timeout fails the scheduling of the pod. Prioritize - // timeout is ignored, k8s/other extenders priorities are used to select the node. 
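Pausing the Extender struct (it resumes just below): the PluginSet semantics defined above mean that disabling all default plugins at one extension point and enabling a replacement is expressed declaratively. A hedged sketch against the versioned API; MyScorer is an invented plugin name:

```go
package main

import (
	"fmt"

	schedv1 "k8s.io/kube-scheduler/config/v1"
	"k8s.io/utils/pointer"
)

func main() {
	// Disable every default Score plugin ("*") and enable a hypothetical
	// out-of-tree scorer with weight 2; all other extension points keep
	// their default plugin sets.
	plugins := &schedv1.Plugins{
		Score: schedv1.PluginSet{
			Disabled: []schedv1.Plugin{{Name: "*"}},
			Enabled:  []schedv1.Plugin{{Name: "MyScorer", Weight: pointer.Int32(2)}},
		},
	}
	fmt.Println(plugins.Score.Enabled[0].Name)
}
```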
- HTTPTimeout metav1.Duration - // NodeCacheCapable specifies that the extender is capable of caching node information, - // so the scheduler should only send minimal information about the eligible nodes - // assuming that the extender already cached full details of all nodes in the cluster - NodeCacheCapable bool - // ManagedResources is a list of extended resources that are managed by - // this extender. - // - A pod will be sent to the extender on the Filter, Prioritize and Bind - // (if the extender is the binder) phases iff the pod requests at least - // one of the extended resources in this list. If empty or unspecified, - // all pods will be sent to this extender. - // - If IgnoredByScheduler is set to true for a resource, kube-scheduler - // will skip checking the resource in predicates. - // +optional - ManagedResources []ExtenderManagedResource - // Ignorable specifies if the extender is ignorable, i.e. scheduling should not - // fail when the extender returns an error or is not reachable. - Ignorable bool -} - -// ExtenderManagedResource describes the arguments of extended resources -// managed by an extender. -type ExtenderManagedResource struct { - // Name is the extended resource name. - Name string - // IgnoredByScheduler indicates whether kube-scheduler should ignore this - // resource when applying predicates. - IgnoredByScheduler bool -} - -// ExtenderTLSConfig contains settings to enable TLS with extender -type ExtenderTLSConfig struct { - // Server should be accessed without verifying the TLS certificate. For testing only. - Insecure bool - // ServerName is passed to the server for SNI and is used in the client to check server - // certificates against. If ServerName is empty, the hostname used to contact the - // server is used. - ServerName string - - // Server requires TLS client certificate authentication - CertFile string - // Server requires TLS client certificate authentication - KeyFile string - // Trusted root certificates for server - CAFile string - - // CertData holds PEM-encoded bytes (typically read from a client certificate file). - // CertData takes precedence over CertFile - CertData []byte - // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). - // KeyData takes precedence over KeyFile - KeyData []byte `datapolicy:"security-key"` - // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). - // CAData takes precedence over CAFile - CAData []byte -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types_pluginargs.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types_pluginargs.go deleted file mode 100644 index 31bb8df02..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/types_pluginargs.go +++ /dev/null @@ -1,214 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package config - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DefaultPreemptionArgs holds arguments used to configure the -// DefaultPreemption plugin. -type DefaultPreemptionArgs struct { - metav1.TypeMeta - - // MinCandidateNodesPercentage is the minimum number of candidates to - // shortlist when dry running preemption as a percentage of number of nodes. - // Must be in the range [0, 100]. Defaults to 10% of the cluster size if - // unspecified. - MinCandidateNodesPercentage int32 - // MinCandidateNodesAbsolute is the absolute minimum number of candidates to - // shortlist. The likely number of candidates enumerated for dry running - // preemption is given by the formula: - // numCandidates = max(numNodes * minCandidateNodesPercentage, minCandidateNodesAbsolute) - // We say "likely" because there are other factors such as PDB violations - // that play a role in the number of candidates shortlisted. Must be at least - // 0 nodes. Defaults to 100 nodes if unspecified. - MinCandidateNodesAbsolute int32 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// InterPodAffinityArgs holds arguments used to configure the InterPodAffinity plugin. -type InterPodAffinityArgs struct { - metav1.TypeMeta - - // HardPodAffinityWeight is the scoring weight for existing pods with a - // matching hard affinity to the incoming pod. - HardPodAffinityWeight int32 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeResourcesFitArgs holds arguments used to configure the NodeResourcesFit plugin. -type NodeResourcesFitArgs struct { - metav1.TypeMeta - - // IgnoredResources is the list of resources that NodeResources fit filter - // should ignore. - IgnoredResources []string - // IgnoredResourceGroups defines the list of resource groups that NodeResources fit filter should ignore. - // e.g. if group is ["example.com"], it will ignore all resource names that begin - // with "example.com", such as "example.com/aaa" and "example.com/bbb". - // A resource group name can't contain '/'. - IgnoredResourceGroups []string - - // ScoringStrategy selects the node resource scoring strategy. - ScoringStrategy *ScoringStrategy -} - -// PodTopologySpreadConstraintsDefaulting defines how to set default constraints -// for the PodTopologySpread plugin. -type PodTopologySpreadConstraintsDefaulting string - -const ( - // SystemDefaulting instructs to use the kubernetes defined default. - SystemDefaulting PodTopologySpreadConstraintsDefaulting = "System" - // ListDefaulting instructs to use the config provided default. - ListDefaulting PodTopologySpreadConstraintsDefaulting = "List" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodTopologySpreadArgs holds arguments used to configure the PodTopologySpread plugin. -type PodTopologySpreadArgs struct { - metav1.TypeMeta - - // DefaultConstraints defines topology spread constraints to be applied to - // Pods that don't define any in `pod.spec.topologySpreadConstraints`. - // `.defaultConstraints[*].labelSelectors` must be empty, as they are - // deduced from the Pod's membership to Services, ReplicationControllers, - // ReplicaSets or StatefulSets. - // When not empty, .defaultingType must be "List". - DefaultConstraints []v1.TopologySpreadConstraint - - // DefaultingType determines how .defaultConstraints are deduced. Can be one - // of "System" or "List". 
- // - // - "System": Use kubernetes defined constraints that spread Pods among - // Nodes and Zones. - // - "List": Use constraints defined in .defaultConstraints. - // - // Defaults to "System". - // +optional - DefaultingType PodTopologySpreadConstraintsDefaulting -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeResourcesBalancedAllocationArgs holds arguments used to configure NodeResourcesBalancedAllocation plugin. -type NodeResourcesBalancedAllocationArgs struct { - metav1.TypeMeta - - // Resources to be considered when scoring. - // The default resource set includes "cpu" and "memory", only valid weight is 1. - Resources []ResourceSpec -} - -// UtilizationShapePoint represents a single point of a priority function shape. -type UtilizationShapePoint struct { - // Utilization (x axis). Valid values are 0 to 100. Fully utilized node maps to 100. - Utilization int32 - // Score assigned to a given utilization (y axis). Valid values are 0 to 10. - Score int32 -} - -// ResourceSpec represents single resource. -type ResourceSpec struct { - // Name of the resource. - Name string - // Weight of the resource. - Weight int64 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeBindingArgs holds arguments used to configure the VolumeBinding plugin. -type VolumeBindingArgs struct { - metav1.TypeMeta - - // BindTimeoutSeconds is the timeout in seconds in volume binding operation. - // Value must be non-negative integer. The value zero indicates no waiting. - // If this value is nil, the default value will be used. - BindTimeoutSeconds int64 - - // Shape specifies the points defining the score function shape, which is - // used to score nodes based on the utilization of statically provisioned - // PVs. The utilization is calculated by dividing the total requested - // storage of the pod by the total capacity of feasible PVs on each node. - // Each point contains utilization (ranges from 0 to 100) and its - // associated score (ranges from 0 to 10). You can turn the priority by - // specifying different scores for different utilization numbers. - // The default shape points are: - // 1) 0 for 0 utilization - // 2) 10 for 100 utilization - // All points must be sorted in increasing order by utilization. - // +featureGate=VolumeCapacityPriority - // +optional - Shape []UtilizationShapePoint -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeAffinityArgs holds arguments to configure the NodeAffinity plugin. -type NodeAffinityArgs struct { - metav1.TypeMeta - - // AddedAffinity is applied to all Pods additionally to the NodeAffinity - // specified in the PodSpec. That is, Nodes need to satisfy AddedAffinity - // AND .spec.NodeAffinity. AddedAffinity is empty by default (all Nodes - // match). - // When AddedAffinity is used, some Pods with affinity requirements that match - // a specific Node (such as Daemonset Pods) might remain unschedulable. - AddedAffinity *v1.NodeAffinity -} - -// ScoringStrategyType the type of scoring strategy used in NodeResourcesFit plugin. -type ScoringStrategyType string - -const ( - // LeastAllocated strategy prioritizes nodes with least allocated resources. - LeastAllocated ScoringStrategyType = "LeastAllocated" - // MostAllocated strategy prioritizes nodes with most allocated resources. 
- MostAllocated ScoringStrategyType = "MostAllocated" - // RequestedToCapacityRatio strategy allows specifying a custom shape function - // to score nodes based on the request to capacity ratio. - RequestedToCapacityRatio ScoringStrategyType = "RequestedToCapacityRatio" -) - -// ScoringStrategy define ScoringStrategyType for node resource plugin -type ScoringStrategy struct { - // Type selects which strategy to run. - Type ScoringStrategyType - - // Resources to consider when scoring. - // The default resource set includes "cpu" and "memory" with an equal weight. - // Allowed weights go from 1 to 100. - // Weight defaults to 1 if not specified or explicitly set to 0. - Resources []ResourceSpec - - // Arguments specific to RequestedToCapacityRatio strategy. - RequestedToCapacityRatio *RequestedToCapacityRatioParam -} - -// RequestedToCapacityRatioParam define RequestedToCapacityRatio parameters -type RequestedToCapacityRatioParam struct { - // Shape is a list of points defining the scoring function shape. - Shape []UtilizationShapePoint -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/conversion.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/conversion.go deleted file mode 100644 index 18115f93f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/conversion.go +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "fmt" - "sync" - - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - v1 "k8s.io/kube-scheduler/config/v1" - "k8s.io/kubernetes/pkg/scheduler/apis/config" -) - -var ( - // pluginArgConversionScheme is a scheme with internal and v1 registered, - // used for defaulting/converting typed PluginConfig Args. - // Access via getPluginArgConversionScheme() - pluginArgConversionScheme *runtime.Scheme - initPluginArgConversionScheme sync.Once -) - -func GetPluginArgConversionScheme() *runtime.Scheme { - initPluginArgConversionScheme.Do(func() { - // set up the scheme used for plugin arg conversion - pluginArgConversionScheme = runtime.NewScheme() - utilruntime.Must(AddToScheme(pluginArgConversionScheme)) - utilruntime.Must(config.AddToScheme(pluginArgConversionScheme)) - }) - return pluginArgConversionScheme -} - -func Convert_v1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in *v1.KubeSchedulerConfiguration, out *config.KubeSchedulerConfiguration, s conversion.Scope) error { - if err := autoConvert_v1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in, out, s); err != nil { - return err - } - return convertToInternalPluginConfigArgs(out) -} - -// convertToInternalPluginConfigArgs converts PluginConfig#Args into internal -// types using a scheme, after applying defaults. 
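Before the conversion helper whose body follows, one concrete note on the ScoringStrategy type just defined: selecting the bin-packing strategy for NodeResourcesFit looks roughly like this in the versioned API (a hedged sketch; the weights are illustrative):

```go
package main

import (
	"fmt"

	schedv1 "k8s.io/kube-scheduler/config/v1"
)

func main() {
	// Score nodes higher the fuller they already are (bin-packing),
	// weighting cpu and memory equally; allowed weights are 1 to 100.
	args := &schedv1.NodeResourcesFitArgs{
		ScoringStrategy: &schedv1.ScoringStrategy{
			Type: schedv1.MostAllocated,
			Resources: []schedv1.ResourceSpec{
				{Name: "cpu", Weight: 1},
				{Name: "memory", Weight: 1},
			},
		},
	}
	fmt.Println(args.ScoringStrategy.Type)
}
```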
-func convertToInternalPluginConfigArgs(out *config.KubeSchedulerConfiguration) error { - scheme := GetPluginArgConversionScheme() - for i := range out.Profiles { - prof := &out.Profiles[i] - for j := range prof.PluginConfig { - args := prof.PluginConfig[j].Args - if args == nil { - continue - } - if _, isUnknown := args.(*runtime.Unknown); isUnknown { - continue - } - internalArgs, err := scheme.ConvertToVersion(args, config.SchemeGroupVersion) - if err != nil { - return fmt.Errorf("converting .Profiles[%d].PluginConfig[%d].Args into internal type: %w", i, j, err) - } - prof.PluginConfig[j].Args = internalArgs - } - } - return nil -} - -func Convert_config_KubeSchedulerConfiguration_To_v1_KubeSchedulerConfiguration(in *config.KubeSchedulerConfiguration, out *v1.KubeSchedulerConfiguration, s conversion.Scope) error { - if err := autoConvert_config_KubeSchedulerConfiguration_To_v1_KubeSchedulerConfiguration(in, out, s); err != nil { - return err - } - return convertToExternalPluginConfigArgs(out) -} - -// convertToExternalPluginConfigArgs converts PluginConfig#Args into -// external (versioned) types using a scheme. -func convertToExternalPluginConfigArgs(out *v1.KubeSchedulerConfiguration) error { - scheme := GetPluginArgConversionScheme() - for i := range out.Profiles { - for j := range out.Profiles[i].PluginConfig { - args := out.Profiles[i].PluginConfig[j].Args - if args.Object == nil { - continue - } - if _, isUnknown := args.Object.(*runtime.Unknown); isUnknown { - continue - } - externalArgs, err := scheme.ConvertToVersion(args.Object, SchemeGroupVersion) - if err != nil { - return err - } - out.Profiles[i].PluginConfig[j].Args.Object = externalArgs - } - } - return nil -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/default_plugins.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/default_plugins.go deleted file mode 100644 index 3fc8c1bdf..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/default_plugins.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/apimachinery/pkg/util/sets" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/klog/v2" - v1 "k8s.io/kube-scheduler/config/v1" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - "k8s.io/utils/pointer" -) - -// getDefaultPlugins returns the default set of plugins. 
-func getDefaultPlugins() *v1.Plugins { - plugins := &v1.Plugins{ - MultiPoint: v1.PluginSet{ - Enabled: []v1.Plugin{ - {Name: names.PrioritySort}, - {Name: names.NodeUnschedulable}, - {Name: names.NodeName}, - {Name: names.TaintToleration, Weight: pointer.Int32(3)}, - {Name: names.NodeAffinity, Weight: pointer.Int32(2)}, - {Name: names.NodePorts}, - {Name: names.NodeResourcesFit, Weight: pointer.Int32(1)}, - {Name: names.VolumeRestrictions}, - {Name: names.EBSLimits}, - {Name: names.GCEPDLimits}, - {Name: names.NodeVolumeLimits}, - {Name: names.AzureDiskLimits}, - {Name: names.VolumeBinding}, - {Name: names.VolumeZone}, - {Name: names.PodTopologySpread, Weight: pointer.Int32(2)}, - {Name: names.InterPodAffinity, Weight: pointer.Int32(2)}, - {Name: names.DefaultPreemption}, - {Name: names.NodeResourcesBalancedAllocation, Weight: pointer.Int32(1)}, - {Name: names.ImageLocality, Weight: pointer.Int32(1)}, - {Name: names.DefaultBinder}, - }, - }, - } - applyFeatureGates(plugins) - - return plugins -} - -func applyFeatureGates(config *v1.Plugins) { - if utilfeature.DefaultFeatureGate.Enabled(features.PodSchedulingReadiness) { - config.MultiPoint.Enabled = append(config.MultiPoint.Enabled, v1.Plugin{Name: names.SchedulingGates}) - } - if utilfeature.DefaultFeatureGate.Enabled(features.DynamicResourceAllocation) { - // This plugin should come before DefaultPreemption because if - // there is a problem with a Pod and PostFilter gets called to - // resolve the problem, it is better to first deallocate an - // idle ResourceClaim than it is to evict some Pod that might - // be doing useful work. - for i := range config.MultiPoint.Enabled { - if config.MultiPoint.Enabled[i].Name == names.DefaultPreemption { - extended := make([]v1.Plugin, 0, len(config.MultiPoint.Enabled)+1) - extended = append(extended, config.MultiPoint.Enabled[:i]...) - extended = append(extended, v1.Plugin{Name: names.DynamicResources}) - extended = append(extended, config.MultiPoint.Enabled[i:]...) - config.MultiPoint.Enabled = extended - break - } - } - } -} - -// mergePlugins merges the custom set into the given default one, handling disabled sets. 
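The merge logic below is what makes the MultiPoint shorthand usable in practice: a plugin enabled once via MultiPoint can still have a single extension point switched off, which is exactly the PluginX case the comments mention. A hedged config sketch (PluginX is a placeholder name):

```go
package main

import (
	"fmt"

	schedv1 "k8s.io/kube-scheduler/config/v1"
)

func main() {
	// Enable a hypothetical plugin at every extension point it implements,
	// then opt out of just its Score behavior; the merge functions below
	// reconcile this against the default plugin set.
	plugins := &schedv1.Plugins{
		MultiPoint: schedv1.PluginSet{
			Enabled: []schedv1.Plugin{{Name: "PluginX"}},
		},
		Score: schedv1.PluginSet{
			Disabled: []schedv1.Plugin{{Name: "PluginX"}},
		},
	}
	fmt.Println(len(plugins.MultiPoint.Enabled))
}
```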
-func mergePlugins(defaultPlugins, customPlugins *v1.Plugins) *v1.Plugins { - if customPlugins == nil { - return defaultPlugins - } - - defaultPlugins.MultiPoint = mergePluginSet(defaultPlugins.MultiPoint, customPlugins.MultiPoint) - defaultPlugins.PreEnqueue = mergePluginSet(defaultPlugins.PreEnqueue, customPlugins.PreEnqueue) - defaultPlugins.QueueSort = mergePluginSet(defaultPlugins.QueueSort, customPlugins.QueueSort) - defaultPlugins.PreFilter = mergePluginSet(defaultPlugins.PreFilter, customPlugins.PreFilter) - defaultPlugins.Filter = mergePluginSet(defaultPlugins.Filter, customPlugins.Filter) - defaultPlugins.PostFilter = mergePluginSet(defaultPlugins.PostFilter, customPlugins.PostFilter) - defaultPlugins.PreScore = mergePluginSet(defaultPlugins.PreScore, customPlugins.PreScore) - defaultPlugins.Score = mergePluginSet(defaultPlugins.Score, customPlugins.Score) - defaultPlugins.Reserve = mergePluginSet(defaultPlugins.Reserve, customPlugins.Reserve) - defaultPlugins.Permit = mergePluginSet(defaultPlugins.Permit, customPlugins.Permit) - defaultPlugins.PreBind = mergePluginSet(defaultPlugins.PreBind, customPlugins.PreBind) - defaultPlugins.Bind = mergePluginSet(defaultPlugins.Bind, customPlugins.Bind) - defaultPlugins.PostBind = mergePluginSet(defaultPlugins.PostBind, customPlugins.PostBind) - return defaultPlugins -} - -type pluginIndex struct { - index int - plugin v1.Plugin -} - -func mergePluginSet(defaultPluginSet, customPluginSet v1.PluginSet) v1.PluginSet { - disabledPlugins := sets.NewString() - enabledCustomPlugins := make(map[string]pluginIndex) - // replacedPluginIndex is a set of index of plugins, which have replaced the default plugins. - replacedPluginIndex := sets.NewInt() - var disabled []v1.Plugin - for _, disabledPlugin := range customPluginSet.Disabled { - // if the user is manually disabling any (or all, with "*") default plugins for an extension point, - // we need to track that so that the MultiPoint extension logic in the framework can know to skip - // inserting unspecified default plugins to this point. - disabled = append(disabled, v1.Plugin{Name: disabledPlugin.Name}) - disabledPlugins.Insert(disabledPlugin.Name) - } - - // With MultiPoint, we may now have some disabledPlugins in the default registry - // For example, we enable PluginX with Filter+Score through MultiPoint but disable its Score plugin by default. - for _, disabledPlugin := range defaultPluginSet.Disabled { - disabled = append(disabled, v1.Plugin{Name: disabledPlugin.Name}) - disabledPlugins.Insert(disabledPlugin.Name) - } - - for index, enabledPlugin := range customPluginSet.Enabled { - enabledCustomPlugins[enabledPlugin.Name] = pluginIndex{index, enabledPlugin} - } - var enabledPlugins []v1.Plugin - if !disabledPlugins.Has("*") { - for _, defaultEnabledPlugin := range defaultPluginSet.Enabled { - if disabledPlugins.Has(defaultEnabledPlugin.Name) { - continue - } - // The default plugin is explicitly re-configured, update the default plugin accordingly. - if customPlugin, ok := enabledCustomPlugins[defaultEnabledPlugin.Name]; ok { - klog.InfoS("Default plugin is explicitly re-configured; overriding", "plugin", defaultEnabledPlugin.Name) - // Update the default plugin in place to preserve order. - defaultEnabledPlugin = customPlugin.plugin - replacedPluginIndex.Insert(customPlugin.index) - } - enabledPlugins = append(enabledPlugins, defaultEnabledPlugin) - } - } - - // Append all the custom plugins which haven't replaced any default plugins. 
- // Note: duplicated custom plugins will still be appended here. - // If so, the instantiation of scheduler framework will detect it and abort. - for index, plugin := range customPluginSet.Enabled { - if !replacedPluginIndex.Has(index) { - enabledPlugins = append(enabledPlugins, plugin) - } - } - return v1.PluginSet{Enabled: enabledPlugins, Disabled: disabled} -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/defaults.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/defaults.go deleted file mode 100644 index 8e86712fa..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/defaults.go +++ /dev/null @@ -1,242 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apiserver/pkg/util/feature" - componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" - configv1 "k8s.io/kube-scheduler/config/v1" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/utils/pointer" -) - -var defaultResourceSpec = []configv1.ResourceSpec{ - {Name: string(v1.ResourceCPU), Weight: 1}, - {Name: string(v1.ResourceMemory), Weight: 1}, -} - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return RegisterDefaults(scheme) -} - -func pluginsNames(p *configv1.Plugins) []string { - if p == nil { - return nil - } - extensions := []configv1.PluginSet{ - p.MultiPoint, - p.PreFilter, - p.Filter, - p.PostFilter, - p.Reserve, - p.PreScore, - p.Score, - p.PreBind, - p.Bind, - p.PostBind, - p.Permit, - p.PreEnqueue, - p.QueueSort, - } - n := sets.NewString() - for _, e := range extensions { - for _, pg := range e.Enabled { - n.Insert(pg.Name) - } - } - return n.List() -} - -func setDefaults_KubeSchedulerProfile(prof *configv1.KubeSchedulerProfile) { - // Set default plugins. - prof.Plugins = mergePlugins(getDefaultPlugins(), prof.Plugins) - // Set default plugin configs. - scheme := GetPluginArgConversionScheme() - existingConfigs := sets.NewString() - for j := range prof.PluginConfig { - existingConfigs.Insert(prof.PluginConfig[j].Name) - args := prof.PluginConfig[j].Args.Object - if _, isUnknown := args.(*runtime.Unknown); isUnknown { - continue - } - scheme.Default(args) - } - - // Append default configs for plugins that didn't have one explicitly set. - for _, name := range pluginsNames(prof.Plugins) { - if existingConfigs.Has(name) { - continue - } - gvk := configv1.SchemeGroupVersion.WithKind(name + "Args") - args, err := scheme.New(gvk) - if err != nil { - // This plugin is out-of-tree or doesn't require configuration. 
- continue - } - scheme.Default(args) - args.GetObjectKind().SetGroupVersionKind(gvk) - prof.PluginConfig = append(prof.PluginConfig, configv1.PluginConfig{ - Name: name, - Args: runtime.RawExtension{Object: args}, - }) - } -} - -// SetDefaults_KubeSchedulerConfiguration sets additional defaults -func SetDefaults_KubeSchedulerConfiguration(obj *configv1.KubeSchedulerConfiguration) { - if obj.Parallelism == nil { - obj.Parallelism = pointer.Int32(16) - } - - if len(obj.Profiles) == 0 { - obj.Profiles = append(obj.Profiles, configv1.KubeSchedulerProfile{}) - } - // Only apply a default scheduler name when there is a single profile. - // Validation will ensure that every profile has a non-empty unique name. - if len(obj.Profiles) == 1 && obj.Profiles[0].SchedulerName == nil { - obj.Profiles[0].SchedulerName = pointer.String(v1.DefaultSchedulerName) - } - - // Add the default set of plugins and apply the configuration. - for i := range obj.Profiles { - prof := &obj.Profiles[i] - setDefaults_KubeSchedulerProfile(prof) - } - - if obj.PercentageOfNodesToScore == nil { - obj.PercentageOfNodesToScore = pointer.Int32(config.DefaultPercentageOfNodesToScore) - } - - if len(obj.LeaderElection.ResourceLock) == 0 { - // Use lease-based leader election to reduce cost. - // We migrated for EndpointsLease lock in 1.17 and starting in 1.20 we - // migrated to Lease lock. - obj.LeaderElection.ResourceLock = "leases" - } - if len(obj.LeaderElection.ResourceNamespace) == 0 { - obj.LeaderElection.ResourceNamespace = configv1.SchedulerDefaultLockObjectNamespace - } - if len(obj.LeaderElection.ResourceName) == 0 { - obj.LeaderElection.ResourceName = configv1.SchedulerDefaultLockObjectName - } - - if len(obj.ClientConnection.ContentType) == 0 { - obj.ClientConnection.ContentType = "application/vnd.kubernetes.protobuf" - } - // Scheduler has an opinion about QPS/Burst, setting specific defaults for itself, instead of generic settings. 
- if obj.ClientConnection.QPS == 0.0 { - obj.ClientConnection.QPS = 50.0 - } - if obj.ClientConnection.Burst == 0 { - obj.ClientConnection.Burst = 100 - } - - // Use the default LeaderElectionConfiguration options - componentbaseconfigv1alpha1.RecommendedDefaultLeaderElectionConfiguration(&obj.LeaderElection) - - if obj.PodInitialBackoffSeconds == nil { - obj.PodInitialBackoffSeconds = pointer.Int64(1) - } - - if obj.PodMaxBackoffSeconds == nil { - obj.PodMaxBackoffSeconds = pointer.Int64(10) - } - - // Enable profiling by default in the scheduler - if obj.EnableProfiling == nil { - obj.EnableProfiling = pointer.Bool(true) - } - - // Enable contention profiling by default if profiling is enabled - if *obj.EnableProfiling && obj.EnableContentionProfiling == nil { - obj.EnableContentionProfiling = pointer.Bool(true) - } -} - -func SetDefaults_DefaultPreemptionArgs(obj *configv1.DefaultPreemptionArgs) { - if obj.MinCandidateNodesPercentage == nil { - obj.MinCandidateNodesPercentage = pointer.Int32(10) - } - if obj.MinCandidateNodesAbsolute == nil { - obj.MinCandidateNodesAbsolute = pointer.Int32(100) - } -} - -func SetDefaults_InterPodAffinityArgs(obj *configv1.InterPodAffinityArgs) { - if obj.HardPodAffinityWeight == nil { - obj.HardPodAffinityWeight = pointer.Int32(1) - } -} - -func SetDefaults_VolumeBindingArgs(obj *configv1.VolumeBindingArgs) { - if obj.BindTimeoutSeconds == nil { - obj.BindTimeoutSeconds = pointer.Int64(600) - } - if len(obj.Shape) == 0 && feature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority) { - obj.Shape = []configv1.UtilizationShapePoint{ - { - Utilization: 0, - Score: 0, - }, - { - Utilization: 100, - Score: int32(config.MaxCustomPriorityScore), - }, - } - } -} - -func SetDefaults_NodeResourcesBalancedAllocationArgs(obj *configv1.NodeResourcesBalancedAllocationArgs) { - if len(obj.Resources) == 0 { - obj.Resources = defaultResourceSpec - return - } - // If the weight is not set or it is explicitly set to 0, then apply the default weight(1) instead. - for i := range obj.Resources { - if obj.Resources[i].Weight == 0 { - obj.Resources[i].Weight = 1 - } - } -} - -func SetDefaults_PodTopologySpreadArgs(obj *configv1.PodTopologySpreadArgs) { - if obj.DefaultingType == "" { - obj.DefaultingType = configv1.SystemDefaulting - } -} - -func SetDefaults_NodeResourcesFitArgs(obj *configv1.NodeResourcesFitArgs) { - if obj.ScoringStrategy == nil { - obj.ScoringStrategy = &configv1.ScoringStrategy{ - Type: configv1.ScoringStrategyType(config.LeastAllocated), - Resources: defaultResourceSpec, - } - } - if len(obj.ScoringStrategy.Resources) == 0 { - // If no resources specified, use the default set. - obj.ScoringStrategy.Resources = append(obj.ScoringStrategy.Resources, defaultResourceSpec...) - } - for i := range obj.ScoringStrategy.Resources { - if obj.ScoringStrategy.Resources[i].Weight == 0 { - obj.ScoringStrategy.Resources[i].Weight = 1 - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/doc.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/doc.go deleted file mode 100644 index 7fa215827..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/scheduler/apis/config -// +k8s:conversion-gen-external-types=k8s.io/kube-scheduler/config/v1 -// +k8s:defaulter-gen=TypeMeta -// +k8s:defaulter-gen-input=k8s.io/kube-scheduler/config/v1 -// +groupName=kubescheduler.config.k8s.io - -package v1 // import "k8s.io/kubernetes/pkg/scheduler/apis/config/v1" diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/register.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/register.go deleted file mode 100644 index 9a32736c8..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/register.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - v1 "k8s.io/kube-scheduler/config/v1" -) - -// GroupName is the group name used in this package -const GroupName = v1.GroupName - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = v1.SchemeGroupVersion - -var ( - // localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package, - // defaulting and conversion init funcs are registered as well. - localSchemeBuilder = &v1.SchemeBuilder - // AddToScheme is a global function that registers this API group & version to a scheme - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addDefaultingFuncs) -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.conversion.go deleted file mode 100644 index d9dbc31ad..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.conversion.go +++ /dev/null @@ -1,944 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1 - -import ( - unsafe "unsafe" - - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - v1alpha1 "k8s.io/component-base/config/v1alpha1" - v1 "k8s.io/kube-scheduler/config/v1" - config "k8s.io/kubernetes/pkg/scheduler/apis/config" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*v1.DefaultPreemptionArgs)(nil), (*config.DefaultPreemptionArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(a.(*v1.DefaultPreemptionArgs), b.(*config.DefaultPreemptionArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.DefaultPreemptionArgs)(nil), (*v1.DefaultPreemptionArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_DefaultPreemptionArgs_To_v1_DefaultPreemptionArgs(a.(*config.DefaultPreemptionArgs), b.(*v1.DefaultPreemptionArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.Extender)(nil), (*config.Extender)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Extender_To_config_Extender(a.(*v1.Extender), b.(*config.Extender), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.Extender)(nil), (*v1.Extender)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_Extender_To_v1_Extender(a.(*config.Extender), b.(*v1.Extender), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.ExtenderManagedResource)(nil), (*config.ExtenderManagedResource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource(a.(*v1.ExtenderManagedResource), b.(*config.ExtenderManagedResource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.ExtenderManagedResource)(nil), (*v1.ExtenderManagedResource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource(a.(*config.ExtenderManagedResource), b.(*v1.ExtenderManagedResource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.ExtenderTLSConfig)(nil), (*config.ExtenderTLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig(a.(*v1.ExtenderTLSConfig), b.(*config.ExtenderTLSConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.ExtenderTLSConfig)(nil), (*v1.ExtenderTLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig(a.(*config.ExtenderTLSConfig), b.(*v1.ExtenderTLSConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.InterPodAffinityArgs)(nil), (*config.InterPodAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) 
error { - return Convert_v1_InterPodAffinityArgs_To_config_InterPodAffinityArgs(a.(*v1.InterPodAffinityArgs), b.(*config.InterPodAffinityArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.InterPodAffinityArgs)(nil), (*v1.InterPodAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_InterPodAffinityArgs_To_v1_InterPodAffinityArgs(a.(*config.InterPodAffinityArgs), b.(*v1.InterPodAffinityArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.KubeSchedulerProfile)(nil), (*config.KubeSchedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_KubeSchedulerProfile_To_config_KubeSchedulerProfile(a.(*v1.KubeSchedulerProfile), b.(*config.KubeSchedulerProfile), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.KubeSchedulerProfile)(nil), (*v1.KubeSchedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_KubeSchedulerProfile_To_v1_KubeSchedulerProfile(a.(*config.KubeSchedulerProfile), b.(*v1.KubeSchedulerProfile), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.NodeAffinityArgs)(nil), (*config.NodeAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeAffinityArgs_To_config_NodeAffinityArgs(a.(*v1.NodeAffinityArgs), b.(*config.NodeAffinityArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.NodeAffinityArgs)(nil), (*v1.NodeAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_NodeAffinityArgs_To_v1_NodeAffinityArgs(a.(*config.NodeAffinityArgs), b.(*v1.NodeAffinityArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.NodeResourcesBalancedAllocationArgs)(nil), (*config.NodeResourcesBalancedAllocationArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(a.(*v1.NodeResourcesBalancedAllocationArgs), b.(*config.NodeResourcesBalancedAllocationArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.NodeResourcesBalancedAllocationArgs)(nil), (*v1.NodeResourcesBalancedAllocationArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_NodeResourcesBalancedAllocationArgs_To_v1_NodeResourcesBalancedAllocationArgs(a.(*config.NodeResourcesBalancedAllocationArgs), b.(*v1.NodeResourcesBalancedAllocationArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.NodeResourcesFitArgs)(nil), (*config.NodeResourcesFitArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(a.(*v1.NodeResourcesFitArgs), b.(*config.NodeResourcesFitArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.NodeResourcesFitArgs)(nil), (*v1.NodeResourcesFitArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_NodeResourcesFitArgs_To_v1_NodeResourcesFitArgs(a.(*config.NodeResourcesFitArgs), b.(*v1.NodeResourcesFitArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.Plugin)(nil), (*config.Plugin)(nil), func(a, b interface{}, scope conversion.Scope) 
error { - return Convert_v1_Plugin_To_config_Plugin(a.(*v1.Plugin), b.(*config.Plugin), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.Plugin)(nil), (*v1.Plugin)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_Plugin_To_v1_Plugin(a.(*config.Plugin), b.(*v1.Plugin), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.PluginConfig)(nil), (*config.PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PluginConfig_To_config_PluginConfig(a.(*v1.PluginConfig), b.(*config.PluginConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.PluginConfig)(nil), (*v1.PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_PluginConfig_To_v1_PluginConfig(a.(*config.PluginConfig), b.(*v1.PluginConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.PluginSet)(nil), (*config.PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PluginSet_To_config_PluginSet(a.(*v1.PluginSet), b.(*config.PluginSet), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.PluginSet)(nil), (*v1.PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_PluginSet_To_v1_PluginSet(a.(*config.PluginSet), b.(*v1.PluginSet), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.Plugins)(nil), (*config.Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Plugins_To_config_Plugins(a.(*v1.Plugins), b.(*config.Plugins), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.Plugins)(nil), (*v1.Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_Plugins_To_v1_Plugins(a.(*config.Plugins), b.(*v1.Plugins), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.PodTopologySpreadArgs)(nil), (*config.PodTopologySpreadArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(a.(*v1.PodTopologySpreadArgs), b.(*config.PodTopologySpreadArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.PodTopologySpreadArgs)(nil), (*v1.PodTopologySpreadArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_PodTopologySpreadArgs_To_v1_PodTopologySpreadArgs(a.(*config.PodTopologySpreadArgs), b.(*v1.PodTopologySpreadArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.RequestedToCapacityRatioParam)(nil), (*config.RequestedToCapacityRatioParam)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(a.(*v1.RequestedToCapacityRatioParam), b.(*config.RequestedToCapacityRatioParam), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.RequestedToCapacityRatioParam)(nil), (*v1.RequestedToCapacityRatioParam)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_RequestedToCapacityRatioParam_To_v1_RequestedToCapacityRatioParam(a.(*config.RequestedToCapacityRatioParam), b.(*v1.RequestedToCapacityRatioParam), scope) - }); err != 
nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.ResourceSpec)(nil), (*config.ResourceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ResourceSpec_To_config_ResourceSpec(a.(*v1.ResourceSpec), b.(*config.ResourceSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.ResourceSpec)(nil), (*v1.ResourceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ResourceSpec_To_v1_ResourceSpec(a.(*config.ResourceSpec), b.(*v1.ResourceSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.ScoringStrategy)(nil), (*config.ScoringStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ScoringStrategy_To_config_ScoringStrategy(a.(*v1.ScoringStrategy), b.(*config.ScoringStrategy), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.ScoringStrategy)(nil), (*v1.ScoringStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ScoringStrategy_To_v1_ScoringStrategy(a.(*config.ScoringStrategy), b.(*v1.ScoringStrategy), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.UtilizationShapePoint)(nil), (*config.UtilizationShapePoint)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint(a.(*v1.UtilizationShapePoint), b.(*config.UtilizationShapePoint), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.UtilizationShapePoint)(nil), (*v1.UtilizationShapePoint)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint(a.(*config.UtilizationShapePoint), b.(*v1.UtilizationShapePoint), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1.VolumeBindingArgs)(nil), (*config.VolumeBindingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_VolumeBindingArgs_To_config_VolumeBindingArgs(a.(*v1.VolumeBindingArgs), b.(*config.VolumeBindingArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.VolumeBindingArgs)(nil), (*v1.VolumeBindingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_VolumeBindingArgs_To_v1_VolumeBindingArgs(a.(*config.VolumeBindingArgs), b.(*v1.VolumeBindingArgs), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*config.KubeSchedulerConfiguration)(nil), (*v1.KubeSchedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_KubeSchedulerConfiguration_To_v1_KubeSchedulerConfiguration(a.(*config.KubeSchedulerConfiguration), b.(*v1.KubeSchedulerConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1.KubeSchedulerConfiguration)(nil), (*config.KubeSchedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(a.(*v1.KubeSchedulerConfiguration), b.(*config.KubeSchedulerConfiguration), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(in *v1.DefaultPreemptionArgs, out *config.DefaultPreemptionArgs, s conversion.Scope) error { - if err := 
metav1.Convert_Pointer_int32_To_int32(&in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage, s); err != nil { - return err - } - if err := metav1.Convert_Pointer_int32_To_int32(&in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute, s); err != nil { - return err - } - return nil -} - -// Convert_v1_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs is an autogenerated conversion function. -func Convert_v1_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(in *v1.DefaultPreemptionArgs, out *config.DefaultPreemptionArgs, s conversion.Scope) error { - return autoConvert_v1_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(in, out, s) -} - -func autoConvert_config_DefaultPreemptionArgs_To_v1_DefaultPreemptionArgs(in *config.DefaultPreemptionArgs, out *v1.DefaultPreemptionArgs, s conversion.Scope) error { - if err := metav1.Convert_int32_To_Pointer_int32(&in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage, s); err != nil { - return err - } - if err := metav1.Convert_int32_To_Pointer_int32(&in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute, s); err != nil { - return err - } - return nil -} - -// Convert_config_DefaultPreemptionArgs_To_v1_DefaultPreemptionArgs is an autogenerated conversion function. -func Convert_config_DefaultPreemptionArgs_To_v1_DefaultPreemptionArgs(in *config.DefaultPreemptionArgs, out *v1.DefaultPreemptionArgs, s conversion.Scope) error { - return autoConvert_config_DefaultPreemptionArgs_To_v1_DefaultPreemptionArgs(in, out, s) -} - -func autoConvert_v1_Extender_To_config_Extender(in *v1.Extender, out *config.Extender, s conversion.Scope) error { - out.URLPrefix = in.URLPrefix - out.FilterVerb = in.FilterVerb - out.PreemptVerb = in.PreemptVerb - out.PrioritizeVerb = in.PrioritizeVerb - out.Weight = in.Weight - out.BindVerb = in.BindVerb - out.EnableHTTPS = in.EnableHTTPS - out.TLSConfig = (*config.ExtenderTLSConfig)(unsafe.Pointer(in.TLSConfig)) - out.HTTPTimeout = in.HTTPTimeout - out.NodeCacheCapable = in.NodeCacheCapable - out.ManagedResources = *(*[]config.ExtenderManagedResource)(unsafe.Pointer(&in.ManagedResources)) - out.Ignorable = in.Ignorable - return nil -} - -// Convert_v1_Extender_To_config_Extender is an autogenerated conversion function. -func Convert_v1_Extender_To_config_Extender(in *v1.Extender, out *config.Extender, s conversion.Scope) error { - return autoConvert_v1_Extender_To_config_Extender(in, out, s) -} - -func autoConvert_config_Extender_To_v1_Extender(in *config.Extender, out *v1.Extender, s conversion.Scope) error { - out.URLPrefix = in.URLPrefix - out.FilterVerb = in.FilterVerb - out.PreemptVerb = in.PreemptVerb - out.PrioritizeVerb = in.PrioritizeVerb - out.Weight = in.Weight - out.BindVerb = in.BindVerb - out.EnableHTTPS = in.EnableHTTPS - out.TLSConfig = (*v1.ExtenderTLSConfig)(unsafe.Pointer(in.TLSConfig)) - out.HTTPTimeout = in.HTTPTimeout - out.NodeCacheCapable = in.NodeCacheCapable - out.ManagedResources = *(*[]v1.ExtenderManagedResource)(unsafe.Pointer(&in.ManagedResources)) - out.Ignorable = in.Ignorable - return nil -} - -// Convert_config_Extender_To_v1_Extender is an autogenerated conversion function. 
-func Convert_config_Extender_To_v1_Extender(in *config.Extender, out *v1.Extender, s conversion.Scope) error { - return autoConvert_config_Extender_To_v1_Extender(in, out, s) -} - -func autoConvert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource(in *v1.ExtenderManagedResource, out *config.ExtenderManagedResource, s conversion.Scope) error { - out.Name = in.Name - out.IgnoredByScheduler = in.IgnoredByScheduler - return nil -} - -// Convert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource is an autogenerated conversion function. -func Convert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource(in *v1.ExtenderManagedResource, out *config.ExtenderManagedResource, s conversion.Scope) error { - return autoConvert_v1_ExtenderManagedResource_To_config_ExtenderManagedResource(in, out, s) -} - -func autoConvert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource(in *config.ExtenderManagedResource, out *v1.ExtenderManagedResource, s conversion.Scope) error { - out.Name = in.Name - out.IgnoredByScheduler = in.IgnoredByScheduler - return nil -} - -// Convert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource is an autogenerated conversion function. -func Convert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource(in *config.ExtenderManagedResource, out *v1.ExtenderManagedResource, s conversion.Scope) error { - return autoConvert_config_ExtenderManagedResource_To_v1_ExtenderManagedResource(in, out, s) -} - -func autoConvert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in *v1.ExtenderTLSConfig, out *config.ExtenderTLSConfig, s conversion.Scope) error { - out.Insecure = in.Insecure - out.ServerName = in.ServerName - out.CertFile = in.CertFile - out.KeyFile = in.KeyFile - out.CAFile = in.CAFile - out.CertData = *(*[]byte)(unsafe.Pointer(&in.CertData)) - out.KeyData = *(*[]byte)(unsafe.Pointer(&in.KeyData)) - out.CAData = *(*[]byte)(unsafe.Pointer(&in.CAData)) - return nil -} - -// Convert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig is an autogenerated conversion function. -func Convert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in *v1.ExtenderTLSConfig, out *config.ExtenderTLSConfig, s conversion.Scope) error { - return autoConvert_v1_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in, out, s) -} - -func autoConvert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig(in *config.ExtenderTLSConfig, out *v1.ExtenderTLSConfig, s conversion.Scope) error { - out.Insecure = in.Insecure - out.ServerName = in.ServerName - out.CertFile = in.CertFile - out.KeyFile = in.KeyFile - out.CAFile = in.CAFile - out.CertData = *(*[]byte)(unsafe.Pointer(&in.CertData)) - out.KeyData = *(*[]byte)(unsafe.Pointer(&in.KeyData)) - out.CAData = *(*[]byte)(unsafe.Pointer(&in.CAData)) - return nil -} - -// Convert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig is an autogenerated conversion function. -func Convert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig(in *config.ExtenderTLSConfig, out *v1.ExtenderTLSConfig, s conversion.Scope) error { - return autoConvert_config_ExtenderTLSConfig_To_v1_ExtenderTLSConfig(in, out, s) -} - -func autoConvert_v1_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in *v1.InterPodAffinityArgs, out *config.InterPodAffinityArgs, s conversion.Scope) error { - if err := metav1.Convert_Pointer_int32_To_int32(&in.HardPodAffinityWeight, &out.HardPodAffinityWeight, s); err != nil { - return err - } - return nil -} - -// Convert_v1_InterPodAffinityArgs_To_config_InterPodAffinityArgs is an autogenerated conversion function. 
-func Convert_v1_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in *v1.InterPodAffinityArgs, out *config.InterPodAffinityArgs, s conversion.Scope) error { - return autoConvert_v1_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in, out, s) -} - -func autoConvert_config_InterPodAffinityArgs_To_v1_InterPodAffinityArgs(in *config.InterPodAffinityArgs, out *v1.InterPodAffinityArgs, s conversion.Scope) error { - if err := metav1.Convert_int32_To_Pointer_int32(&in.HardPodAffinityWeight, &out.HardPodAffinityWeight, s); err != nil { - return err - } - return nil -} - -// Convert_config_InterPodAffinityArgs_To_v1_InterPodAffinityArgs is an autogenerated conversion function. -func Convert_config_InterPodAffinityArgs_To_v1_InterPodAffinityArgs(in *config.InterPodAffinityArgs, out *v1.InterPodAffinityArgs, s conversion.Scope) error { - return autoConvert_config_InterPodAffinityArgs_To_v1_InterPodAffinityArgs(in, out, s) -} - -func autoConvert_v1_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in *v1.KubeSchedulerConfiguration, out *config.KubeSchedulerConfiguration, s conversion.Scope) error { - if err := metav1.Convert_Pointer_int32_To_int32(&in.Parallelism, &out.Parallelism, s); err != nil { - return err - } - if err := v1alpha1.Convert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { - return err - } - if err := v1alpha1.Convert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { - return err - } - if err := v1alpha1.Convert_v1alpha1_DebuggingConfiguration_To_config_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil { - return err - } - out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore)) - if err := metav1.Convert_Pointer_int64_To_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil { - return err - } - if err := metav1.Convert_Pointer_int64_To_int64(&in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds, s); err != nil { - return err - } - if in.Profiles != nil { - in, out := &in.Profiles, &out.Profiles - *out = make([]config.KubeSchedulerProfile, len(*in)) - for i := range *in { - if err := Convert_v1_KubeSchedulerProfile_To_config_KubeSchedulerProfile(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Profiles = nil - } - out.Extenders = *(*[]config.Extender)(unsafe.Pointer(&in.Extenders)) - return nil -} - -func autoConvert_config_KubeSchedulerConfiguration_To_v1_KubeSchedulerConfiguration(in *config.KubeSchedulerConfiguration, out *v1.KubeSchedulerConfiguration, s conversion.Scope) error { - if err := metav1.Convert_int32_To_Pointer_int32(&in.Parallelism, &out.Parallelism, s); err != nil { - return err - } - if err := v1alpha1.Convert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { - return err - } - if err := v1alpha1.Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { - return err - } - // WARNING: in.HealthzBindAddress requires manual conversion: does not exist in peer-type - // WARNING: in.MetricsBindAddress requires manual conversion: does not exist in peer-type - if err := v1alpha1.Convert_config_DebuggingConfiguration_To_v1alpha1_DebuggingConfiguration(&in.DebuggingConfiguration, 
&out.DebuggingConfiguration, s); err != nil { - return err - } - out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore)) - if err := metav1.Convert_int64_To_Pointer_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil { - return err - } - if err := metav1.Convert_int64_To_Pointer_int64(&in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds, s); err != nil { - return err - } - if in.Profiles != nil { - in, out := &in.Profiles, &out.Profiles - *out = make([]v1.KubeSchedulerProfile, len(*in)) - for i := range *in { - if err := Convert_config_KubeSchedulerProfile_To_v1_KubeSchedulerProfile(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Profiles = nil - } - out.Extenders = *(*[]v1.Extender)(unsafe.Pointer(&in.Extenders)) - return nil -} - -func autoConvert_v1_KubeSchedulerProfile_To_config_KubeSchedulerProfile(in *v1.KubeSchedulerProfile, out *config.KubeSchedulerProfile, s conversion.Scope) error { - if err := metav1.Convert_Pointer_string_To_string(&in.SchedulerName, &out.SchedulerName, s); err != nil { - return err - } - out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore)) - if in.Plugins != nil { - in, out := &in.Plugins, &out.Plugins - *out = new(config.Plugins) - if err := Convert_v1_Plugins_To_config_Plugins(*in, *out, s); err != nil { - return err - } - } else { - out.Plugins = nil - } - if in.PluginConfig != nil { - in, out := &in.PluginConfig, &out.PluginConfig - *out = make([]config.PluginConfig, len(*in)) - for i := range *in { - if err := Convert_v1_PluginConfig_To_config_PluginConfig(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.PluginConfig = nil - } - return nil -} - -// Convert_v1_KubeSchedulerProfile_To_config_KubeSchedulerProfile is an autogenerated conversion function. -func Convert_v1_KubeSchedulerProfile_To_config_KubeSchedulerProfile(in *v1.KubeSchedulerProfile, out *config.KubeSchedulerProfile, s conversion.Scope) error { - return autoConvert_v1_KubeSchedulerProfile_To_config_KubeSchedulerProfile(in, out, s) -} - -func autoConvert_config_KubeSchedulerProfile_To_v1_KubeSchedulerProfile(in *config.KubeSchedulerProfile, out *v1.KubeSchedulerProfile, s conversion.Scope) error { - if err := metav1.Convert_string_To_Pointer_string(&in.SchedulerName, &out.SchedulerName, s); err != nil { - return err - } - out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore)) - if in.Plugins != nil { - in, out := &in.Plugins, &out.Plugins - *out = new(v1.Plugins) - if err := Convert_config_Plugins_To_v1_Plugins(*in, *out, s); err != nil { - return err - } - } else { - out.Plugins = nil - } - if in.PluginConfig != nil { - in, out := &in.PluginConfig, &out.PluginConfig - *out = make([]v1.PluginConfig, len(*in)) - for i := range *in { - if err := Convert_config_PluginConfig_To_v1_PluginConfig(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.PluginConfig = nil - } - return nil -} - -// Convert_config_KubeSchedulerProfile_To_v1_KubeSchedulerProfile is an autogenerated conversion function. 
-func Convert_config_KubeSchedulerProfile_To_v1_KubeSchedulerProfile(in *config.KubeSchedulerProfile, out *v1.KubeSchedulerProfile, s conversion.Scope) error { - return autoConvert_config_KubeSchedulerProfile_To_v1_KubeSchedulerProfile(in, out, s) -} - -func autoConvert_v1_NodeAffinityArgs_To_config_NodeAffinityArgs(in *v1.NodeAffinityArgs, out *config.NodeAffinityArgs, s conversion.Scope) error { - out.AddedAffinity = (*corev1.NodeAffinity)(unsafe.Pointer(in.AddedAffinity)) - return nil -} - -// Convert_v1_NodeAffinityArgs_To_config_NodeAffinityArgs is an autogenerated conversion function. -func Convert_v1_NodeAffinityArgs_To_config_NodeAffinityArgs(in *v1.NodeAffinityArgs, out *config.NodeAffinityArgs, s conversion.Scope) error { - return autoConvert_v1_NodeAffinityArgs_To_config_NodeAffinityArgs(in, out, s) -} - -func autoConvert_config_NodeAffinityArgs_To_v1_NodeAffinityArgs(in *config.NodeAffinityArgs, out *v1.NodeAffinityArgs, s conversion.Scope) error { - out.AddedAffinity = (*corev1.NodeAffinity)(unsafe.Pointer(in.AddedAffinity)) - return nil -} - -// Convert_config_NodeAffinityArgs_To_v1_NodeAffinityArgs is an autogenerated conversion function. -func Convert_config_NodeAffinityArgs_To_v1_NodeAffinityArgs(in *config.NodeAffinityArgs, out *v1.NodeAffinityArgs, s conversion.Scope) error { - return autoConvert_config_NodeAffinityArgs_To_v1_NodeAffinityArgs(in, out, s) -} - -func autoConvert_v1_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(in *v1.NodeResourcesBalancedAllocationArgs, out *config.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error { - out.Resources = *(*[]config.ResourceSpec)(unsafe.Pointer(&in.Resources)) - return nil -} - -// Convert_v1_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs is an autogenerated conversion function. -func Convert_v1_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(in *v1.NodeResourcesBalancedAllocationArgs, out *config.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error { - return autoConvert_v1_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(in, out, s) -} - -func autoConvert_config_NodeResourcesBalancedAllocationArgs_To_v1_NodeResourcesBalancedAllocationArgs(in *config.NodeResourcesBalancedAllocationArgs, out *v1.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error { - out.Resources = *(*[]v1.ResourceSpec)(unsafe.Pointer(&in.Resources)) - return nil -} - -// Convert_config_NodeResourcesBalancedAllocationArgs_To_v1_NodeResourcesBalancedAllocationArgs is an autogenerated conversion function. -func Convert_config_NodeResourcesBalancedAllocationArgs_To_v1_NodeResourcesBalancedAllocationArgs(in *config.NodeResourcesBalancedAllocationArgs, out *v1.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error { - return autoConvert_config_NodeResourcesBalancedAllocationArgs_To_v1_NodeResourcesBalancedAllocationArgs(in, out, s) -} - -func autoConvert_v1_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(in *v1.NodeResourcesFitArgs, out *config.NodeResourcesFitArgs, s conversion.Scope) error { - out.IgnoredResources = *(*[]string)(unsafe.Pointer(&in.IgnoredResources)) - out.IgnoredResourceGroups = *(*[]string)(unsafe.Pointer(&in.IgnoredResourceGroups)) - out.ScoringStrategy = (*config.ScoringStrategy)(unsafe.Pointer(in.ScoringStrategy)) - return nil -} - -// Convert_v1_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs is an autogenerated conversion function. 
-func Convert_v1_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(in *v1.NodeResourcesFitArgs, out *config.NodeResourcesFitArgs, s conversion.Scope) error { - return autoConvert_v1_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(in, out, s) -} - -func autoConvert_config_NodeResourcesFitArgs_To_v1_NodeResourcesFitArgs(in *config.NodeResourcesFitArgs, out *v1.NodeResourcesFitArgs, s conversion.Scope) error { - out.IgnoredResources = *(*[]string)(unsafe.Pointer(&in.IgnoredResources)) - out.IgnoredResourceGroups = *(*[]string)(unsafe.Pointer(&in.IgnoredResourceGroups)) - out.ScoringStrategy = (*v1.ScoringStrategy)(unsafe.Pointer(in.ScoringStrategy)) - return nil -} - -// Convert_config_NodeResourcesFitArgs_To_v1_NodeResourcesFitArgs is an autogenerated conversion function. -func Convert_config_NodeResourcesFitArgs_To_v1_NodeResourcesFitArgs(in *config.NodeResourcesFitArgs, out *v1.NodeResourcesFitArgs, s conversion.Scope) error { - return autoConvert_config_NodeResourcesFitArgs_To_v1_NodeResourcesFitArgs(in, out, s) -} - -func autoConvert_v1_Plugin_To_config_Plugin(in *v1.Plugin, out *config.Plugin, s conversion.Scope) error { - out.Name = in.Name - if err := metav1.Convert_Pointer_int32_To_int32(&in.Weight, &out.Weight, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Plugin_To_config_Plugin is an autogenerated conversion function. -func Convert_v1_Plugin_To_config_Plugin(in *v1.Plugin, out *config.Plugin, s conversion.Scope) error { - return autoConvert_v1_Plugin_To_config_Plugin(in, out, s) -} - -func autoConvert_config_Plugin_To_v1_Plugin(in *config.Plugin, out *v1.Plugin, s conversion.Scope) error { - out.Name = in.Name - if err := metav1.Convert_int32_To_Pointer_int32(&in.Weight, &out.Weight, s); err != nil { - return err - } - return nil -} - -// Convert_config_Plugin_To_v1_Plugin is an autogenerated conversion function. -func Convert_config_Plugin_To_v1_Plugin(in *config.Plugin, out *v1.Plugin, s conversion.Scope) error { - return autoConvert_config_Plugin_To_v1_Plugin(in, out, s) -} - -func autoConvert_v1_PluginConfig_To_config_PluginConfig(in *v1.PluginConfig, out *config.PluginConfig, s conversion.Scope) error { - out.Name = in.Name - if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Args, &out.Args, s); err != nil { - return err - } - return nil -} - -// Convert_v1_PluginConfig_To_config_PluginConfig is an autogenerated conversion function. -func Convert_v1_PluginConfig_To_config_PluginConfig(in *v1.PluginConfig, out *config.PluginConfig, s conversion.Scope) error { - return autoConvert_v1_PluginConfig_To_config_PluginConfig(in, out, s) -} - -func autoConvert_config_PluginConfig_To_v1_PluginConfig(in *config.PluginConfig, out *v1.PluginConfig, s conversion.Scope) error { - out.Name = in.Name - if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Args, &out.Args, s); err != nil { - return err - } - return nil -} - -// Convert_config_PluginConfig_To_v1_PluginConfig is an autogenerated conversion function. 
-func Convert_config_PluginConfig_To_v1_PluginConfig(in *config.PluginConfig, out *v1.PluginConfig, s conversion.Scope) error { - return autoConvert_config_PluginConfig_To_v1_PluginConfig(in, out, s) -} - -func autoConvert_v1_PluginSet_To_config_PluginSet(in *v1.PluginSet, out *config.PluginSet, s conversion.Scope) error { - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = make([]config.Plugin, len(*in)) - for i := range *in { - if err := Convert_v1_Plugin_To_config_Plugin(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Enabled = nil - } - if in.Disabled != nil { - in, out := &in.Disabled, &out.Disabled - *out = make([]config.Plugin, len(*in)) - for i := range *in { - if err := Convert_v1_Plugin_To_config_Plugin(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Disabled = nil - } - return nil -} - -// Convert_v1_PluginSet_To_config_PluginSet is an autogenerated conversion function. -func Convert_v1_PluginSet_To_config_PluginSet(in *v1.PluginSet, out *config.PluginSet, s conversion.Scope) error { - return autoConvert_v1_PluginSet_To_config_PluginSet(in, out, s) -} - -func autoConvert_config_PluginSet_To_v1_PluginSet(in *config.PluginSet, out *v1.PluginSet, s conversion.Scope) error { - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = make([]v1.Plugin, len(*in)) - for i := range *in { - if err := Convert_config_Plugin_To_v1_Plugin(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Enabled = nil - } - if in.Disabled != nil { - in, out := &in.Disabled, &out.Disabled - *out = make([]v1.Plugin, len(*in)) - for i := range *in { - if err := Convert_config_Plugin_To_v1_Plugin(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Disabled = nil - } - return nil -} - -// Convert_config_PluginSet_To_v1_PluginSet is an autogenerated conversion function. 
-func Convert_config_PluginSet_To_v1_PluginSet(in *config.PluginSet, out *v1.PluginSet, s conversion.Scope) error { - return autoConvert_config_PluginSet_To_v1_PluginSet(in, out, s) -} - -func autoConvert_v1_Plugins_To_config_Plugins(in *v1.Plugins, out *config.Plugins, s conversion.Scope) error { - if err := Convert_v1_PluginSet_To_config_PluginSet(&in.PreEnqueue, &out.PreEnqueue, s); err != nil { - return err - } - if err := Convert_v1_PluginSet_To_config_PluginSet(&in.QueueSort, &out.QueueSort, s); err != nil { - return err - } - if err := Convert_v1_PluginSet_To_config_PluginSet(&in.PreFilter, &out.PreFilter, s); err != nil { - return err - } - if err := Convert_v1_PluginSet_To_config_PluginSet(&in.Filter, &out.Filter, s); err != nil { - return err - } - if err := Convert_v1_PluginSet_To_config_PluginSet(&in.PostFilter, &out.PostFilter, s); err != nil { - return err - } - if err := Convert_v1_PluginSet_To_config_PluginSet(&in.PreScore, &out.PreScore, s); err != nil { - return err - } - if err := Convert_v1_PluginSet_To_config_PluginSet(&in.Score, &out.Score, s); err != nil { - return err - } - if err := Convert_v1_PluginSet_To_config_PluginSet(&in.Reserve, &out.Reserve, s); err != nil { - return err - } - if err := Convert_v1_PluginSet_To_config_PluginSet(&in.Permit, &out.Permit, s); err != nil { - return err - } - if err := Convert_v1_PluginSet_To_config_PluginSet(&in.PreBind, &out.PreBind, s); err != nil { - return err - } - if err := Convert_v1_PluginSet_To_config_PluginSet(&in.Bind, &out.Bind, s); err != nil { - return err - } - if err := Convert_v1_PluginSet_To_config_PluginSet(&in.PostBind, &out.PostBind, s); err != nil { - return err - } - if err := Convert_v1_PluginSet_To_config_PluginSet(&in.MultiPoint, &out.MultiPoint, s); err != nil { - return err - } - return nil -} - -// Convert_v1_Plugins_To_config_Plugins is an autogenerated conversion function. 
-func Convert_v1_Plugins_To_config_Plugins(in *v1.Plugins, out *config.Plugins, s conversion.Scope) error { - return autoConvert_v1_Plugins_To_config_Plugins(in, out, s) -} - -func autoConvert_config_Plugins_To_v1_Plugins(in *config.Plugins, out *v1.Plugins, s conversion.Scope) error { - if err := Convert_config_PluginSet_To_v1_PluginSet(&in.PreEnqueue, &out.PreEnqueue, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1_PluginSet(&in.QueueSort, &out.QueueSort, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1_PluginSet(&in.PreFilter, &out.PreFilter, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1_PluginSet(&in.Filter, &out.Filter, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1_PluginSet(&in.PostFilter, &out.PostFilter, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1_PluginSet(&in.PreScore, &out.PreScore, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1_PluginSet(&in.Score, &out.Score, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1_PluginSet(&in.Reserve, &out.Reserve, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1_PluginSet(&in.Permit, &out.Permit, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1_PluginSet(&in.PreBind, &out.PreBind, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1_PluginSet(&in.Bind, &out.Bind, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1_PluginSet(&in.PostBind, &out.PostBind, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1_PluginSet(&in.MultiPoint, &out.MultiPoint, s); err != nil { - return err - } - return nil -} - -// Convert_config_Plugins_To_v1_Plugins is an autogenerated conversion function. -func Convert_config_Plugins_To_v1_Plugins(in *config.Plugins, out *v1.Plugins, s conversion.Scope) error { - return autoConvert_config_Plugins_To_v1_Plugins(in, out, s) -} - -func autoConvert_v1_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(in *v1.PodTopologySpreadArgs, out *config.PodTopologySpreadArgs, s conversion.Scope) error { - out.DefaultConstraints = *(*[]corev1.TopologySpreadConstraint)(unsafe.Pointer(&in.DefaultConstraints)) - out.DefaultingType = config.PodTopologySpreadConstraintsDefaulting(in.DefaultingType) - return nil -} - -// Convert_v1_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs is an autogenerated conversion function. -func Convert_v1_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(in *v1.PodTopologySpreadArgs, out *config.PodTopologySpreadArgs, s conversion.Scope) error { - return autoConvert_v1_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(in, out, s) -} - -func autoConvert_config_PodTopologySpreadArgs_To_v1_PodTopologySpreadArgs(in *config.PodTopologySpreadArgs, out *v1.PodTopologySpreadArgs, s conversion.Scope) error { - out.DefaultConstraints = *(*[]corev1.TopologySpreadConstraint)(unsafe.Pointer(&in.DefaultConstraints)) - out.DefaultingType = v1.PodTopologySpreadConstraintsDefaulting(in.DefaultingType) - return nil -} - -// Convert_config_PodTopologySpreadArgs_To_v1_PodTopologySpreadArgs is an autogenerated conversion function. 
-func Convert_config_PodTopologySpreadArgs_To_v1_PodTopologySpreadArgs(in *config.PodTopologySpreadArgs, out *v1.PodTopologySpreadArgs, s conversion.Scope) error { - return autoConvert_config_PodTopologySpreadArgs_To_v1_PodTopologySpreadArgs(in, out, s) -} - -func autoConvert_v1_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(in *v1.RequestedToCapacityRatioParam, out *config.RequestedToCapacityRatioParam, s conversion.Scope) error { - out.Shape = *(*[]config.UtilizationShapePoint)(unsafe.Pointer(&in.Shape)) - return nil -} - -// Convert_v1_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam is an autogenerated conversion function. -func Convert_v1_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(in *v1.RequestedToCapacityRatioParam, out *config.RequestedToCapacityRatioParam, s conversion.Scope) error { - return autoConvert_v1_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(in, out, s) -} - -func autoConvert_config_RequestedToCapacityRatioParam_To_v1_RequestedToCapacityRatioParam(in *config.RequestedToCapacityRatioParam, out *v1.RequestedToCapacityRatioParam, s conversion.Scope) error { - out.Shape = *(*[]v1.UtilizationShapePoint)(unsafe.Pointer(&in.Shape)) - return nil -} - -// Convert_config_RequestedToCapacityRatioParam_To_v1_RequestedToCapacityRatioParam is an autogenerated conversion function. -func Convert_config_RequestedToCapacityRatioParam_To_v1_RequestedToCapacityRatioParam(in *config.RequestedToCapacityRatioParam, out *v1.RequestedToCapacityRatioParam, s conversion.Scope) error { - return autoConvert_config_RequestedToCapacityRatioParam_To_v1_RequestedToCapacityRatioParam(in, out, s) -} - -func autoConvert_v1_ResourceSpec_To_config_ResourceSpec(in *v1.ResourceSpec, out *config.ResourceSpec, s conversion.Scope) error { - out.Name = in.Name - out.Weight = in.Weight - return nil -} - -// Convert_v1_ResourceSpec_To_config_ResourceSpec is an autogenerated conversion function. -func Convert_v1_ResourceSpec_To_config_ResourceSpec(in *v1.ResourceSpec, out *config.ResourceSpec, s conversion.Scope) error { - return autoConvert_v1_ResourceSpec_To_config_ResourceSpec(in, out, s) -} - -func autoConvert_config_ResourceSpec_To_v1_ResourceSpec(in *config.ResourceSpec, out *v1.ResourceSpec, s conversion.Scope) error { - out.Name = in.Name - out.Weight = in.Weight - return nil -} - -// Convert_config_ResourceSpec_To_v1_ResourceSpec is an autogenerated conversion function. -func Convert_config_ResourceSpec_To_v1_ResourceSpec(in *config.ResourceSpec, out *v1.ResourceSpec, s conversion.Scope) error { - return autoConvert_config_ResourceSpec_To_v1_ResourceSpec(in, out, s) -} - -func autoConvert_v1_ScoringStrategy_To_config_ScoringStrategy(in *v1.ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error { - out.Type = config.ScoringStrategyType(in.Type) - out.Resources = *(*[]config.ResourceSpec)(unsafe.Pointer(&in.Resources)) - out.RequestedToCapacityRatio = (*config.RequestedToCapacityRatioParam)(unsafe.Pointer(in.RequestedToCapacityRatio)) - return nil -} - -// Convert_v1_ScoringStrategy_To_config_ScoringStrategy is an autogenerated conversion function. 
-func Convert_v1_ScoringStrategy_To_config_ScoringStrategy(in *v1.ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error { - return autoConvert_v1_ScoringStrategy_To_config_ScoringStrategy(in, out, s) -} - -func autoConvert_config_ScoringStrategy_To_v1_ScoringStrategy(in *config.ScoringStrategy, out *v1.ScoringStrategy, s conversion.Scope) error { - out.Type = v1.ScoringStrategyType(in.Type) - out.Resources = *(*[]v1.ResourceSpec)(unsafe.Pointer(&in.Resources)) - out.RequestedToCapacityRatio = (*v1.RequestedToCapacityRatioParam)(unsafe.Pointer(in.RequestedToCapacityRatio)) - return nil -} - -// Convert_config_ScoringStrategy_To_v1_ScoringStrategy is an autogenerated conversion function. -func Convert_config_ScoringStrategy_To_v1_ScoringStrategy(in *config.ScoringStrategy, out *v1.ScoringStrategy, s conversion.Scope) error { - return autoConvert_config_ScoringStrategy_To_v1_ScoringStrategy(in, out, s) -} - -func autoConvert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint(in *v1.UtilizationShapePoint, out *config.UtilizationShapePoint, s conversion.Scope) error { - out.Utilization = in.Utilization - out.Score = in.Score - return nil -} - -// Convert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint is an autogenerated conversion function. -func Convert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint(in *v1.UtilizationShapePoint, out *config.UtilizationShapePoint, s conversion.Scope) error { - return autoConvert_v1_UtilizationShapePoint_To_config_UtilizationShapePoint(in, out, s) -} - -func autoConvert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint(in *config.UtilizationShapePoint, out *v1.UtilizationShapePoint, s conversion.Scope) error { - out.Utilization = in.Utilization - out.Score = in.Score - return nil -} - -// Convert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint is an autogenerated conversion function. -func Convert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint(in *config.UtilizationShapePoint, out *v1.UtilizationShapePoint, s conversion.Scope) error { - return autoConvert_config_UtilizationShapePoint_To_v1_UtilizationShapePoint(in, out, s) -} - -func autoConvert_v1_VolumeBindingArgs_To_config_VolumeBindingArgs(in *v1.VolumeBindingArgs, out *config.VolumeBindingArgs, s conversion.Scope) error { - if err := metav1.Convert_Pointer_int64_To_int64(&in.BindTimeoutSeconds, &out.BindTimeoutSeconds, s); err != nil { - return err - } - out.Shape = *(*[]config.UtilizationShapePoint)(unsafe.Pointer(&in.Shape)) - return nil -} - -// Convert_v1_VolumeBindingArgs_To_config_VolumeBindingArgs is an autogenerated conversion function. -func Convert_v1_VolumeBindingArgs_To_config_VolumeBindingArgs(in *v1.VolumeBindingArgs, out *config.VolumeBindingArgs, s conversion.Scope) error { - return autoConvert_v1_VolumeBindingArgs_To_config_VolumeBindingArgs(in, out, s) -} - -func autoConvert_config_VolumeBindingArgs_To_v1_VolumeBindingArgs(in *config.VolumeBindingArgs, out *v1.VolumeBindingArgs, s conversion.Scope) error { - if err := metav1.Convert_int64_To_Pointer_int64(&in.BindTimeoutSeconds, &out.BindTimeoutSeconds, s); err != nil { - return err - } - out.Shape = *(*[]v1.UtilizationShapePoint)(unsafe.Pointer(&in.Shape)) - return nil -} - -// Convert_config_VolumeBindingArgs_To_v1_VolumeBindingArgs is an autogenerated conversion function. 
-func Convert_config_VolumeBindingArgs_To_v1_VolumeBindingArgs(in *config.VolumeBindingArgs, out *v1.VolumeBindingArgs, s conversion.Scope) error { - return autoConvert_config_VolumeBindingArgs_To_v1_VolumeBindingArgs(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.deepcopy.go deleted file mode 100644 index 87181b430..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.defaults.go deleted file mode 100644 index ac93d735c..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1/zz_generated.defaults.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by defaulter-gen. DO NOT EDIT. - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" - v1 "k8s.io/kube-scheduler/config/v1" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. 
-func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&v1.DefaultPreemptionArgs{}, func(obj interface{}) { SetObjectDefaults_DefaultPreemptionArgs(obj.(*v1.DefaultPreemptionArgs)) }) - scheme.AddTypeDefaultingFunc(&v1.InterPodAffinityArgs{}, func(obj interface{}) { SetObjectDefaults_InterPodAffinityArgs(obj.(*v1.InterPodAffinityArgs)) }) - scheme.AddTypeDefaultingFunc(&v1.KubeSchedulerConfiguration{}, func(obj interface{}) { - SetObjectDefaults_KubeSchedulerConfiguration(obj.(*v1.KubeSchedulerConfiguration)) - }) - scheme.AddTypeDefaultingFunc(&v1.NodeResourcesBalancedAllocationArgs{}, func(obj interface{}) { - SetObjectDefaults_NodeResourcesBalancedAllocationArgs(obj.(*v1.NodeResourcesBalancedAllocationArgs)) - }) - scheme.AddTypeDefaultingFunc(&v1.NodeResourcesFitArgs{}, func(obj interface{}) { SetObjectDefaults_NodeResourcesFitArgs(obj.(*v1.NodeResourcesFitArgs)) }) - scheme.AddTypeDefaultingFunc(&v1.PodTopologySpreadArgs{}, func(obj interface{}) { SetObjectDefaults_PodTopologySpreadArgs(obj.(*v1.PodTopologySpreadArgs)) }) - scheme.AddTypeDefaultingFunc(&v1.VolumeBindingArgs{}, func(obj interface{}) { SetObjectDefaults_VolumeBindingArgs(obj.(*v1.VolumeBindingArgs)) }) - return nil -} - -func SetObjectDefaults_DefaultPreemptionArgs(in *v1.DefaultPreemptionArgs) { - SetDefaults_DefaultPreemptionArgs(in) -} - -func SetObjectDefaults_InterPodAffinityArgs(in *v1.InterPodAffinityArgs) { - SetDefaults_InterPodAffinityArgs(in) -} - -func SetObjectDefaults_KubeSchedulerConfiguration(in *v1.KubeSchedulerConfiguration) { - SetDefaults_KubeSchedulerConfiguration(in) -} - -func SetObjectDefaults_NodeResourcesBalancedAllocationArgs(in *v1.NodeResourcesBalancedAllocationArgs) { - SetDefaults_NodeResourcesBalancedAllocationArgs(in) -} - -func SetObjectDefaults_NodeResourcesFitArgs(in *v1.NodeResourcesFitArgs) { - SetDefaults_NodeResourcesFitArgs(in) -} - -func SetObjectDefaults_PodTopologySpreadArgs(in *v1.PodTopologySpreadArgs) { - SetDefaults_PodTopologySpreadArgs(in) -} - -func SetObjectDefaults_VolumeBindingArgs(in *v1.VolumeBindingArgs) { - SetDefaults_VolumeBindingArgs(in) -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/conversion.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/conversion.go deleted file mode 100644 index c0d89d75e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/conversion.go +++ /dev/null @@ -1,117 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta2 - -import ( - "fmt" - "sync" - - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/kube-scheduler/config/v1beta2" - "k8s.io/kubernetes/pkg/scheduler/apis/config" -) - -var ( - // pluginArgConversionScheme is a scheme with internal and v1beta2 registered, - // used for defaulting/converting typed PluginConfig Args. 
- // Access via getPluginArgConversionScheme() - pluginArgConversionScheme *runtime.Scheme - initPluginArgConversionScheme sync.Once -) - -func GetPluginArgConversionScheme() *runtime.Scheme { - initPluginArgConversionScheme.Do(func() { - // set up the scheme used for plugin arg conversion - pluginArgConversionScheme = runtime.NewScheme() - utilruntime.Must(AddToScheme(pluginArgConversionScheme)) - utilruntime.Must(config.AddToScheme(pluginArgConversionScheme)) - }) - return pluginArgConversionScheme -} - -func Convert_v1beta2_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in *v1beta2.KubeSchedulerConfiguration, out *config.KubeSchedulerConfiguration, s conversion.Scope) error { - if err := autoConvert_v1beta2_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in, out, s); err != nil { - return err - } - return convertToInternalPluginConfigArgs(out) -} - -// convertToInternalPluginConfigArgs converts PluginConfig#Args into internal -// types using a scheme, after applying defaults. -func convertToInternalPluginConfigArgs(out *config.KubeSchedulerConfiguration) error { - scheme := GetPluginArgConversionScheme() - for i := range out.Profiles { - prof := &out.Profiles[i] - for j := range prof.PluginConfig { - args := prof.PluginConfig[j].Args - if args == nil { - continue - } - if _, isUnknown := args.(*runtime.Unknown); isUnknown { - continue - } - internalArgs, err := scheme.ConvertToVersion(args, config.SchemeGroupVersion) - if err != nil { - return fmt.Errorf("converting .Profiles[%d].PluginConfig[%d].Args into internal type: %w", i, j, err) - } - prof.PluginConfig[j].Args = internalArgs - } - } - return nil -} - -func Convert_config_KubeSchedulerConfiguration_To_v1beta2_KubeSchedulerConfiguration(in *config.KubeSchedulerConfiguration, out *v1beta2.KubeSchedulerConfiguration, s conversion.Scope) error { - if err := autoConvert_config_KubeSchedulerConfiguration_To_v1beta2_KubeSchedulerConfiguration(in, out, s); err != nil { - return err - } - return convertToExternalPluginConfigArgs(out) -} - -// convertToExternalPluginConfigArgs converts PluginConfig#Args into -// external (versioned) types using a scheme. -func convertToExternalPluginConfigArgs(out *v1beta2.KubeSchedulerConfiguration) error { - scheme := GetPluginArgConversionScheme() - for i := range out.Profiles { - for j := range out.Profiles[i].PluginConfig { - args := out.Profiles[i].PluginConfig[j].Args - if args.Object == nil { - continue - } - if _, isUnknown := args.Object.(*runtime.Unknown); isUnknown { - continue - } - externalArgs, err := scheme.ConvertToVersion(args.Object, SchemeGroupVersion) - if err != nil { - return err - } - out.Profiles[i].PluginConfig[j].Args.Object = externalArgs - } - } - return nil -} - -// Convert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile uses auto coversion by -// ignoring per profile PercentageOfNodesToScore. 
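The hand-written conversion.go removed here exists mainly to round-trip typed PluginConfig args through a scheme that knows both the internal and the v1beta2 group-versions. The shape of that loop, reduced to a hedged sketch (argsToInternal and its signature are illustrative, not the deleted API):

```go
import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// argsToInternal mirrors convertToInternalPluginConfigArgs: typed args are
// converted into the internal group-version, while *runtime.Unknown args
// (out-of-tree plugins) pass through untouched.
func argsToInternal(scheme *runtime.Scheme, args []runtime.Object, internalGV schema.GroupVersion) ([]runtime.Object, error) {
	out := make([]runtime.Object, len(args))
	for i, a := range args {
		if a == nil {
			continue
		}
		if _, unknown := a.(*runtime.Unknown); unknown {
			out[i] = a
			continue
		}
		converted, err := scheme.ConvertToVersion(a, internalGV)
		if err != nil {
			return nil, fmt.Errorf("converting args[%d] into internal type: %w", i, err)
		}
		out[i] = converted
	}
	return out, nil
}
```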
-func Convert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile(in *config.KubeSchedulerProfile, out *v1beta2.KubeSchedulerProfile, s conversion.Scope) error { - return autoConvert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile(in, out, s) -} - -func Convert_config_Plugins_To_v1beta2_Plugins(in *config.Plugins, out *v1beta2.Plugins, s conversion.Scope) error { - return autoConvert_config_Plugins_To_v1beta2_Plugins(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/default_plugins.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/default_plugins.go deleted file mode 100644 index 37341a423..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/default_plugins.go +++ /dev/null @@ -1,186 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta2 - -import ( - "k8s.io/apimachinery/pkg/util/sets" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/klog/v2" - "k8s.io/kube-scheduler/config/v1beta2" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - "k8s.io/utils/pointer" -) - -// getDefaultPlugins returns the default set of plugins. -func getDefaultPlugins() *v1beta2.Plugins { - plugins := &v1beta2.Plugins{ - QueueSort: v1beta2.PluginSet{ - Enabled: []v1beta2.Plugin{ - {Name: names.PrioritySort}, - }, - }, - PreFilter: v1beta2.PluginSet{ - Enabled: []v1beta2.Plugin{ - {Name: names.NodeResourcesFit}, - {Name: names.NodePorts}, - {Name: names.VolumeRestrictions}, - {Name: names.PodTopologySpread}, - {Name: names.InterPodAffinity}, - {Name: names.VolumeBinding}, - {Name: names.NodeAffinity}, - }, - }, - Filter: v1beta2.PluginSet{ - Enabled: []v1beta2.Plugin{ - {Name: names.NodeUnschedulable}, - {Name: names.NodeName}, - {Name: names.TaintToleration}, - {Name: names.NodeAffinity}, - {Name: names.NodePorts}, - {Name: names.NodeResourcesFit}, - {Name: names.VolumeRestrictions}, - {Name: names.EBSLimits}, - {Name: names.GCEPDLimits}, - {Name: names.NodeVolumeLimits}, - {Name: names.AzureDiskLimits}, - {Name: names.VolumeBinding}, - {Name: names.VolumeZone}, - {Name: names.PodTopologySpread}, - {Name: names.InterPodAffinity}, - }, - }, - PostFilter: v1beta2.PluginSet{ - Enabled: []v1beta2.Plugin{ - {Name: names.DefaultPreemption}, - }, - }, - PreScore: v1beta2.PluginSet{ - Enabled: []v1beta2.Plugin{ - {Name: names.InterPodAffinity}, - {Name: names.PodTopologySpread}, - {Name: names.TaintToleration}, - {Name: names.NodeAffinity}, - }, - }, - Score: v1beta2.PluginSet{ - Enabled: []v1beta2.Plugin{ - {Name: names.NodeResourcesBalancedAllocation, Weight: pointer.Int32(1)}, - {Name: names.ImageLocality, Weight: pointer.Int32(1)}, - {Name: names.InterPodAffinity, Weight: pointer.Int32(1)}, - {Name: names.NodeResourcesFit, Weight: pointer.Int32(1)}, - {Name: names.NodeAffinity, Weight: pointer.Int32(1)}, - // Weight is doubled because: - // - This is a score coming from user preference. 
- // - It makes its signal comparable to NodeResourcesFit.LeastAllocated. - {Name: names.PodTopologySpread, Weight: pointer.Int32(2)}, - {Name: names.TaintToleration, Weight: pointer.Int32(1)}, - }, - }, - Reserve: v1beta2.PluginSet{ - Enabled: []v1beta2.Plugin{ - {Name: names.VolumeBinding}, - }, - }, - PreBind: v1beta2.PluginSet{ - Enabled: []v1beta2.Plugin{ - {Name: names.VolumeBinding}, - }, - }, - Bind: v1beta2.PluginSet{ - Enabled: []v1beta2.Plugin{ - {Name: names.DefaultBinder}, - }, - }, - } - applyFeatureGates(plugins) - - return plugins -} - -func applyFeatureGates(config *v1beta2.Plugins) { - if utilfeature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority) { - config.Score.Enabled = append(config.Score.Enabled, v1beta2.Plugin{Name: names.VolumeBinding, Weight: pointer.Int32(1)}) - } - if utilfeature.DefaultFeatureGate.Enabled(features.PodSchedulingReadiness) { - config.PreEnqueue.Enabled = append(config.PreEnqueue.Enabled, v1beta2.Plugin{Name: names.SchedulingGates}) - } -} - -// mergePlugins merges the custom set into the given default one, handling disabled sets. -func mergePlugins(defaultPlugins, customPlugins *v1beta2.Plugins) *v1beta2.Plugins { - if customPlugins == nil { - return defaultPlugins - } - - defaultPlugins.QueueSort = mergePluginSet(defaultPlugins.QueueSort, customPlugins.QueueSort) - defaultPlugins.PreFilter = mergePluginSet(defaultPlugins.PreFilter, customPlugins.PreFilter) - defaultPlugins.Filter = mergePluginSet(defaultPlugins.Filter, customPlugins.Filter) - defaultPlugins.PostFilter = mergePluginSet(defaultPlugins.PostFilter, customPlugins.PostFilter) - defaultPlugins.PreScore = mergePluginSet(defaultPlugins.PreScore, customPlugins.PreScore) - defaultPlugins.Score = mergePluginSet(defaultPlugins.Score, customPlugins.Score) - defaultPlugins.Reserve = mergePluginSet(defaultPlugins.Reserve, customPlugins.Reserve) - defaultPlugins.Permit = mergePluginSet(defaultPlugins.Permit, customPlugins.Permit) - defaultPlugins.PreBind = mergePluginSet(defaultPlugins.PreBind, customPlugins.PreBind) - defaultPlugins.Bind = mergePluginSet(defaultPlugins.Bind, customPlugins.Bind) - defaultPlugins.PostBind = mergePluginSet(defaultPlugins.PostBind, customPlugins.PostBind) - return defaultPlugins -} - -type pluginIndex struct { - index int - plugin v1beta2.Plugin -} - -func mergePluginSet(defaultPluginSet, customPluginSet v1beta2.PluginSet) v1beta2.PluginSet { - disabledPlugins := sets.NewString() - enabledCustomPlugins := make(map[string]pluginIndex) - // replacedPluginIndex is a set of index of plugins, which have replaced the default plugins. - replacedPluginIndex := sets.NewInt() - for _, disabledPlugin := range customPluginSet.Disabled { - disabledPlugins.Insert(disabledPlugin.Name) - } - for index, enabledPlugin := range customPluginSet.Enabled { - enabledCustomPlugins[enabledPlugin.Name] = pluginIndex{index, enabledPlugin} - } - var enabledPlugins []v1beta2.Plugin - if !disabledPlugins.Has("*") { - for _, defaultEnabledPlugin := range defaultPluginSet.Enabled { - if disabledPlugins.Has(defaultEnabledPlugin.Name) { - continue - } - // The default plugin is explicitly re-configured, update the default plugin accordingly. - if customPlugin, ok := enabledCustomPlugins[defaultEnabledPlugin.Name]; ok { - klog.InfoS("Default plugin is explicitly re-configured; overriding", "plugin", defaultEnabledPlugin.Name) - // Update the default plugin in place to preserve order. 
- defaultEnabledPlugin = customPlugin.plugin - replacedPluginIndex.Insert(customPlugin.index) - } - enabledPlugins = append(enabledPlugins, defaultEnabledPlugin) - } - } - - // Append all the custom plugins which haven't replaced any default plugins. - // Note: duplicated custom plugins will still be appended here. - // If so, the instantiation of scheduler framework will detect it and abort. - for index, plugin := range customPluginSet.Enabled { - if !replacedPluginIndex.Has(index) { - enabledPlugins = append(enabledPlugins, plugin) - } - } - return v1beta2.PluginSet{Enabled: enabledPlugins} -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/defaults.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/defaults.go deleted file mode 100644 index 1bf120806..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/defaults.go +++ /dev/null @@ -1,241 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta2 - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apiserver/pkg/util/feature" - componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" - "k8s.io/kube-scheduler/config/v1beta2" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/utils/pointer" -) - -var defaultResourceSpec = []v1beta2.ResourceSpec{ - {Name: string(v1.ResourceCPU), Weight: 1}, - {Name: string(v1.ResourceMemory), Weight: 1}, -} - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return RegisterDefaults(scheme) -} - -func pluginsNames(p *v1beta2.Plugins) []string { - if p == nil { - return nil - } - extensions := []v1beta2.PluginSet{ - p.PreFilter, - p.Filter, - p.PostFilter, - p.Reserve, - p.PreScore, - p.Score, - p.PreBind, - p.Bind, - p.PostBind, - p.Permit, - p.QueueSort, - } - n := sets.NewString() - for _, e := range extensions { - for _, pg := range e.Enabled { - n.Insert(pg.Name) - } - } - return n.List() -} - -func setDefaults_KubeSchedulerProfile(prof *v1beta2.KubeSchedulerProfile) { - // Set default plugins. - prof.Plugins = mergePlugins(getDefaultPlugins(), prof.Plugins) - - // Set default plugin configs. - scheme := GetPluginArgConversionScheme() - existingConfigs := sets.NewString() - for j := range prof.PluginConfig { - existingConfigs.Insert(prof.PluginConfig[j].Name) - args := prof.PluginConfig[j].Args.Object - if _, isUnknown := args.(*runtime.Unknown); isUnknown { - continue - } - scheme.Default(args) - } - - // Append default configs for plugins that didn't have one explicitly set. - for _, name := range pluginsNames(prof.Plugins) { - if existingConfigs.Has(name) { - continue - } - gvk := v1beta2.SchemeGroupVersion.WithKind(name + "Args") - args, err := scheme.New(gvk) - if err != nil { - // This plugin is out-of-tree or doesn't require configuration. 
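mergePluginSet, deleted just above, carries the whole profile-merge semantics: a "*" entry in Disabled drops every default, a custom Enabled entry that names a default replaces it in place (preserving default order), and the remaining customs are appended. A compressed restatement over simplified stand-in types:

```go
type plugin struct {
	Name   string
	Weight int32
}

// mergeEnabled reproduces the mergePluginSet algorithm over plain slices.
func mergeEnabled(defaults, enabled, disabled []plugin) []plugin {
	off := map[string]bool{}
	for _, p := range disabled {
		off[p.Name] = true
	}
	override := map[string]int{} // plugin name -> index into enabled
	for i, p := range enabled {
		override[p.Name] = i
	}
	used := map[int]bool{}
	var out []plugin
	if !off["*"] {
		for _, d := range defaults {
			if off[d.Name] {
				continue
			}
			if i, ok := override[d.Name]; ok {
				d = enabled[i] // replace in place to preserve default order
				used[i] = true
			}
			out = append(out, d)
		}
	}
	// Customs that replaced nothing are appended; duplicates are left for
	// the framework to reject at instantiation time.
	for i, p := range enabled {
		if !used[i] {
			out = append(out, p)
		}
	}
	return out
}
```

For example, merging defaults A,B with enabled B(weight 5),C and no disabled entries yields A, B(5), C.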
- continue - } - scheme.Default(args) - args.GetObjectKind().SetGroupVersionKind(gvk) - prof.PluginConfig = append(prof.PluginConfig, v1beta2.PluginConfig{ - Name: name, - Args: runtime.RawExtension{Object: args}, - }) - } -} - -// SetDefaults_KubeSchedulerConfiguration sets additional defaults -func SetDefaults_KubeSchedulerConfiguration(obj *v1beta2.KubeSchedulerConfiguration) { - if obj.Parallelism == nil { - obj.Parallelism = pointer.Int32(16) - } - - if len(obj.Profiles) == 0 { - obj.Profiles = append(obj.Profiles, v1beta2.KubeSchedulerProfile{}) - } - // Only apply a default scheduler name when there is a single profile. - // Validation will ensure that every profile has a non-empty unique name. - if len(obj.Profiles) == 1 && obj.Profiles[0].SchedulerName == nil { - obj.Profiles[0].SchedulerName = pointer.String(v1.DefaultSchedulerName) - } - - // Add the default set of plugins and apply the configuration. - for i := range obj.Profiles { - prof := &obj.Profiles[i] - setDefaults_KubeSchedulerProfile(prof) - } - - if obj.PercentageOfNodesToScore == nil { - obj.PercentageOfNodesToScore = pointer.Int32(config.DefaultPercentageOfNodesToScore) - } - - if len(obj.LeaderElection.ResourceLock) == 0 { - // Use lease-based leader election to reduce cost. - // We migrated for EndpointsLease lock in 1.17 and starting in 1.20 we - // migrated to Lease lock. - obj.LeaderElection.ResourceLock = "leases" - } - if len(obj.LeaderElection.ResourceNamespace) == 0 { - obj.LeaderElection.ResourceNamespace = v1beta2.SchedulerDefaultLockObjectNamespace - } - if len(obj.LeaderElection.ResourceName) == 0 { - obj.LeaderElection.ResourceName = v1beta2.SchedulerDefaultLockObjectName - } - - if len(obj.ClientConnection.ContentType) == 0 { - obj.ClientConnection.ContentType = "application/vnd.kubernetes.protobuf" - } - // Scheduler has an opinion about QPS/Burst, setting specific defaults for itself, instead of generic settings. 
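The deleted defaults.go follows a single idiom throughout: optional fields are pointers, and defaulting only fills fields that are still nil, so an explicit user-supplied zero survives. Compressed to a sketch with illustrative field names (not the real KubeSchedulerConfiguration):

```go
import "k8s.io/utils/pointer"

// schedConfig is a stand-in with the same optionality pattern.
type schedConfig struct {
	Parallelism     *int32
	EnableProfiling *bool
	ResourceLock    string
}

func setDefaults(c *schedConfig) {
	if c.Parallelism == nil {
		c.Parallelism = pointer.Int32(16) // matches the deleted default
	}
	if c.EnableProfiling == nil {
		c.EnableProfiling = pointer.Bool(true)
	}
	if c.ResourceLock == "" {
		c.ResourceLock = "leases" // lease-based leader election
	}
}
```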
- if obj.ClientConnection.QPS == 0.0 { - obj.ClientConnection.QPS = 50.0 - } - if obj.ClientConnection.Burst == 0 { - obj.ClientConnection.Burst = 100 - } - - // Use the default LeaderElectionConfiguration options - componentbaseconfigv1alpha1.RecommendedDefaultLeaderElectionConfiguration(&obj.LeaderElection) - - if obj.PodInitialBackoffSeconds == nil { - obj.PodInitialBackoffSeconds = pointer.Int64(1) - } - - if obj.PodMaxBackoffSeconds == nil { - obj.PodMaxBackoffSeconds = pointer.Int64(10) - } - - // Enable profiling by default in the scheduler - if obj.EnableProfiling == nil { - obj.EnableProfiling = pointer.Bool(true) - } - - // Enable contention profiling by default if profiling is enabled - if *obj.EnableProfiling && obj.EnableContentionProfiling == nil { - obj.EnableContentionProfiling = pointer.Bool(true) - } -} - -func SetDefaults_DefaultPreemptionArgs(obj *v1beta2.DefaultPreemptionArgs) { - if obj.MinCandidateNodesPercentage == nil { - obj.MinCandidateNodesPercentage = pointer.Int32(10) - } - if obj.MinCandidateNodesAbsolute == nil { - obj.MinCandidateNodesAbsolute = pointer.Int32(100) - } -} - -func SetDefaults_InterPodAffinityArgs(obj *v1beta2.InterPodAffinityArgs) { - if obj.HardPodAffinityWeight == nil { - obj.HardPodAffinityWeight = pointer.Int32(1) - } -} - -func SetDefaults_VolumeBindingArgs(obj *v1beta2.VolumeBindingArgs) { - if obj.BindTimeoutSeconds == nil { - obj.BindTimeoutSeconds = pointer.Int64(600) - } - if len(obj.Shape) == 0 && feature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority) { - obj.Shape = []v1beta2.UtilizationShapePoint{ - { - Utilization: 0, - Score: 0, - }, - { - Utilization: 100, - Score: int32(config.MaxCustomPriorityScore), - }, - } - } -} - -func SetDefaults_NodeResourcesBalancedAllocationArgs(obj *v1beta2.NodeResourcesBalancedAllocationArgs) { - if len(obj.Resources) == 0 { - obj.Resources = defaultResourceSpec - return - } - // If the weight is not set or it is explicitly set to 0, then apply the default weight(1) instead. - for i := range obj.Resources { - if obj.Resources[i].Weight == 0 { - obj.Resources[i].Weight = 1 - } - } -} - -func SetDefaults_PodTopologySpreadArgs(obj *v1beta2.PodTopologySpreadArgs) { - if obj.DefaultingType == "" { - obj.DefaultingType = v1beta2.SystemDefaulting - } -} - -func SetDefaults_NodeResourcesFitArgs(obj *v1beta2.NodeResourcesFitArgs) { - if obj.ScoringStrategy == nil { - obj.ScoringStrategy = &v1beta2.ScoringStrategy{ - Type: v1beta2.ScoringStrategyType(config.LeastAllocated), - Resources: defaultResourceSpec, - } - } - if len(obj.ScoringStrategy.Resources) == 0 { - // If no resources specified, use the default set. - obj.ScoringStrategy.Resources = append(obj.ScoringStrategy.Resources, defaultResourceSpec...) - } - for i := range obj.ScoringStrategy.Resources { - if obj.ScoringStrategy.Resources[i].Weight == 0 { - obj.ScoringStrategy.Resources[i].Weight = 1 - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/doc.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/doc.go deleted file mode 100644 index 0c8b9e17e..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/scheduler/apis/config -// +k8s:conversion-gen-external-types=k8s.io/kube-scheduler/config/v1beta2 -// +k8s:defaulter-gen=TypeMeta -// +k8s:defaulter-gen-input=k8s.io/kube-scheduler/config/v1beta2 -// +groupName=kubescheduler.config.k8s.io - -package v1beta2 // import "k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2" diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/register.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/register.go deleted file mode 100644 index b8ca76de5..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/register.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta2 - -import ( - "k8s.io/kube-scheduler/config/v1beta2" -) - -// GroupName is the group name used in this package -const GroupName = v1beta2.GroupName - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = v1beta2.SchemeGroupVersion - -var ( - // localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package, - // defaulting and conversion init funcs are registered as well. - localSchemeBuilder = &v1beta2.SchemeBuilder - // AddToScheme is a global function that registers this API group & version to a scheme - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addDefaultingFuncs) -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.conversion.go deleted file mode 100644 index 642729e38..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.conversion.go +++ /dev/null @@ -1,943 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1beta2 - -import ( - unsafe "unsafe" - - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - v1alpha1 "k8s.io/component-base/config/v1alpha1" - v1beta2 "k8s.io/kube-scheduler/config/v1beta2" - config "k8s.io/kubernetes/pkg/scheduler/apis/config" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*v1beta2.DefaultPreemptionArgs)(nil), (*config.DefaultPreemptionArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(a.(*v1beta2.DefaultPreemptionArgs), b.(*config.DefaultPreemptionArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.DefaultPreemptionArgs)(nil), (*v1beta2.DefaultPreemptionArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_DefaultPreemptionArgs_To_v1beta2_DefaultPreemptionArgs(a.(*config.DefaultPreemptionArgs), b.(*v1beta2.DefaultPreemptionArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.Extender)(nil), (*config.Extender)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_Extender_To_config_Extender(a.(*v1beta2.Extender), b.(*config.Extender), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.Extender)(nil), (*v1beta2.Extender)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_Extender_To_v1beta2_Extender(a.(*config.Extender), b.(*v1beta2.Extender), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.ExtenderManagedResource)(nil), (*config.ExtenderManagedResource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ExtenderManagedResource_To_config_ExtenderManagedResource(a.(*v1beta2.ExtenderManagedResource), b.(*config.ExtenderManagedResource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.ExtenderManagedResource)(nil), (*v1beta2.ExtenderManagedResource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ExtenderManagedResource_To_v1beta2_ExtenderManagedResource(a.(*config.ExtenderManagedResource), b.(*v1beta2.ExtenderManagedResource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.ExtenderTLSConfig)(nil), (*config.ExtenderTLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ExtenderTLSConfig_To_config_ExtenderTLSConfig(a.(*v1beta2.ExtenderTLSConfig), b.(*config.ExtenderTLSConfig), scope) - }); err != nil { - return err - } - if err := 
s.AddGeneratedConversionFunc((*config.ExtenderTLSConfig)(nil), (*v1beta2.ExtenderTLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ExtenderTLSConfig_To_v1beta2_ExtenderTLSConfig(a.(*config.ExtenderTLSConfig), b.(*v1beta2.ExtenderTLSConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.InterPodAffinityArgs)(nil), (*config.InterPodAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_InterPodAffinityArgs_To_config_InterPodAffinityArgs(a.(*v1beta2.InterPodAffinityArgs), b.(*config.InterPodAffinityArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.InterPodAffinityArgs)(nil), (*v1beta2.InterPodAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_InterPodAffinityArgs_To_v1beta2_InterPodAffinityArgs(a.(*config.InterPodAffinityArgs), b.(*v1beta2.InterPodAffinityArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.KubeSchedulerProfile)(nil), (*config.KubeSchedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_KubeSchedulerProfile_To_config_KubeSchedulerProfile(a.(*v1beta2.KubeSchedulerProfile), b.(*config.KubeSchedulerProfile), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.NodeAffinityArgs)(nil), (*config.NodeAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_NodeAffinityArgs_To_config_NodeAffinityArgs(a.(*v1beta2.NodeAffinityArgs), b.(*config.NodeAffinityArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.NodeAffinityArgs)(nil), (*v1beta2.NodeAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_NodeAffinityArgs_To_v1beta2_NodeAffinityArgs(a.(*config.NodeAffinityArgs), b.(*v1beta2.NodeAffinityArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.NodeResourcesBalancedAllocationArgs)(nil), (*config.NodeResourcesBalancedAllocationArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(a.(*v1beta2.NodeResourcesBalancedAllocationArgs), b.(*config.NodeResourcesBalancedAllocationArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.NodeResourcesBalancedAllocationArgs)(nil), (*v1beta2.NodeResourcesBalancedAllocationArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_NodeResourcesBalancedAllocationArgs_To_v1beta2_NodeResourcesBalancedAllocationArgs(a.(*config.NodeResourcesBalancedAllocationArgs), b.(*v1beta2.NodeResourcesBalancedAllocationArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.NodeResourcesFitArgs)(nil), (*config.NodeResourcesFitArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(a.(*v1beta2.NodeResourcesFitArgs), b.(*config.NodeResourcesFitArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.NodeResourcesFitArgs)(nil), (*v1beta2.NodeResourcesFitArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_config_NodeResourcesFitArgs_To_v1beta2_NodeResourcesFitArgs(a.(*config.NodeResourcesFitArgs), b.(*v1beta2.NodeResourcesFitArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.Plugin)(nil), (*config.Plugin)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_Plugin_To_config_Plugin(a.(*v1beta2.Plugin), b.(*config.Plugin), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.Plugin)(nil), (*v1beta2.Plugin)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_Plugin_To_v1beta2_Plugin(a.(*config.Plugin), b.(*v1beta2.Plugin), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.PluginConfig)(nil), (*config.PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_PluginConfig_To_config_PluginConfig(a.(*v1beta2.PluginConfig), b.(*config.PluginConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.PluginConfig)(nil), (*v1beta2.PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_PluginConfig_To_v1beta2_PluginConfig(a.(*config.PluginConfig), b.(*v1beta2.PluginConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.PluginSet)(nil), (*config.PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_PluginSet_To_config_PluginSet(a.(*v1beta2.PluginSet), b.(*config.PluginSet), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.PluginSet)(nil), (*v1beta2.PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_PluginSet_To_v1beta2_PluginSet(a.(*config.PluginSet), b.(*v1beta2.PluginSet), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.Plugins)(nil), (*config.Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_Plugins_To_config_Plugins(a.(*v1beta2.Plugins), b.(*config.Plugins), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.PodTopologySpreadArgs)(nil), (*config.PodTopologySpreadArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(a.(*v1beta2.PodTopologySpreadArgs), b.(*config.PodTopologySpreadArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.PodTopologySpreadArgs)(nil), (*v1beta2.PodTopologySpreadArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_PodTopologySpreadArgs_To_v1beta2_PodTopologySpreadArgs(a.(*config.PodTopologySpreadArgs), b.(*v1beta2.PodTopologySpreadArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.RequestedToCapacityRatioParam)(nil), (*config.RequestedToCapacityRatioParam)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(a.(*v1beta2.RequestedToCapacityRatioParam), b.(*config.RequestedToCapacityRatioParam), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.RequestedToCapacityRatioParam)(nil), (*v1beta2.RequestedToCapacityRatioParam)(nil), func(a, b interface{}, scope conversion.Scope) 
error { - return Convert_config_RequestedToCapacityRatioParam_To_v1beta2_RequestedToCapacityRatioParam(a.(*config.RequestedToCapacityRatioParam), b.(*v1beta2.RequestedToCapacityRatioParam), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.ResourceSpec)(nil), (*config.ResourceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ResourceSpec_To_config_ResourceSpec(a.(*v1beta2.ResourceSpec), b.(*config.ResourceSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.ResourceSpec)(nil), (*v1beta2.ResourceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ResourceSpec_To_v1beta2_ResourceSpec(a.(*config.ResourceSpec), b.(*v1beta2.ResourceSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.ScoringStrategy)(nil), (*config.ScoringStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ScoringStrategy_To_config_ScoringStrategy(a.(*v1beta2.ScoringStrategy), b.(*config.ScoringStrategy), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.ScoringStrategy)(nil), (*v1beta2.ScoringStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ScoringStrategy_To_v1beta2_ScoringStrategy(a.(*config.ScoringStrategy), b.(*v1beta2.ScoringStrategy), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.UtilizationShapePoint)(nil), (*config.UtilizationShapePoint)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_UtilizationShapePoint_To_config_UtilizationShapePoint(a.(*v1beta2.UtilizationShapePoint), b.(*config.UtilizationShapePoint), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.UtilizationShapePoint)(nil), (*v1beta2.UtilizationShapePoint)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_UtilizationShapePoint_To_v1beta2_UtilizationShapePoint(a.(*config.UtilizationShapePoint), b.(*v1beta2.UtilizationShapePoint), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta2.VolumeBindingArgs)(nil), (*config.VolumeBindingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_VolumeBindingArgs_To_config_VolumeBindingArgs(a.(*v1beta2.VolumeBindingArgs), b.(*config.VolumeBindingArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.VolumeBindingArgs)(nil), (*v1beta2.VolumeBindingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_VolumeBindingArgs_To_v1beta2_VolumeBindingArgs(a.(*config.VolumeBindingArgs), b.(*v1beta2.VolumeBindingArgs), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*config.KubeSchedulerConfiguration)(nil), (*v1beta2.KubeSchedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_KubeSchedulerConfiguration_To_v1beta2_KubeSchedulerConfiguration(a.(*config.KubeSchedulerConfiguration), b.(*v1beta2.KubeSchedulerConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*config.KubeSchedulerProfile)(nil), (*v1beta2.KubeSchedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile(a.(*config.KubeSchedulerProfile), b.(*v1beta2.KubeSchedulerProfile), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*config.Plugins)(nil), (*v1beta2.Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_Plugins_To_v1beta2_Plugins(a.(*config.Plugins), b.(*v1beta2.Plugins), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta2.KubeSchedulerConfiguration)(nil), (*config.KubeSchedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(a.(*v1beta2.KubeSchedulerConfiguration), b.(*config.KubeSchedulerConfiguration), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta2_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(in *v1beta2.DefaultPreemptionArgs, out *config.DefaultPreemptionArgs, s conversion.Scope) error { - if err := v1.Convert_Pointer_int32_To_int32(&in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage, s); err != nil { - return err - } - if err := v1.Convert_Pointer_int32_To_int32(&in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs is an autogenerated conversion function. -func Convert_v1beta2_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(in *v1beta2.DefaultPreemptionArgs, out *config.DefaultPreemptionArgs, s conversion.Scope) error { - return autoConvert_v1beta2_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(in, out, s) -} - -func autoConvert_config_DefaultPreemptionArgs_To_v1beta2_DefaultPreemptionArgs(in *config.DefaultPreemptionArgs, out *v1beta2.DefaultPreemptionArgs, s conversion.Scope) error { - if err := v1.Convert_int32_To_Pointer_int32(&in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage, s); err != nil { - return err - } - if err := v1.Convert_int32_To_Pointer_int32(&in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute, s); err != nil { - return err - } - return nil -} - -// Convert_config_DefaultPreemptionArgs_To_v1beta2_DefaultPreemptionArgs is an autogenerated conversion function. -func Convert_config_DefaultPreemptionArgs_To_v1beta2_DefaultPreemptionArgs(in *config.DefaultPreemptionArgs, out *v1beta2.DefaultPreemptionArgs, s conversion.Scope) error { - return autoConvert_config_DefaultPreemptionArgs_To_v1beta2_DefaultPreemptionArgs(in, out, s) -} - -func autoConvert_v1beta2_Extender_To_config_Extender(in *v1beta2.Extender, out *config.Extender, s conversion.Scope) error { - out.URLPrefix = in.URLPrefix - out.FilterVerb = in.FilterVerb - out.PreemptVerb = in.PreemptVerb - out.PrioritizeVerb = in.PrioritizeVerb - out.Weight = in.Weight - out.BindVerb = in.BindVerb - out.EnableHTTPS = in.EnableHTTPS - out.TLSConfig = (*config.ExtenderTLSConfig)(unsafe.Pointer(in.TLSConfig)) - out.HTTPTimeout = in.HTTPTimeout - out.NodeCacheCapable = in.NodeCacheCapable - out.ManagedResources = *(*[]config.ExtenderManagedResource)(unsafe.Pointer(&in.ManagedResources)) - out.Ignorable = in.Ignorable - return nil -} - -// Convert_v1beta2_Extender_To_config_Extender is an autogenerated conversion function. 
-func Convert_v1beta2_Extender_To_config_Extender(in *v1beta2.Extender, out *config.Extender, s conversion.Scope) error { - return autoConvert_v1beta2_Extender_To_config_Extender(in, out, s) -} - -func autoConvert_config_Extender_To_v1beta2_Extender(in *config.Extender, out *v1beta2.Extender, s conversion.Scope) error { - out.URLPrefix = in.URLPrefix - out.FilterVerb = in.FilterVerb - out.PreemptVerb = in.PreemptVerb - out.PrioritizeVerb = in.PrioritizeVerb - out.Weight = in.Weight - out.BindVerb = in.BindVerb - out.EnableHTTPS = in.EnableHTTPS - out.TLSConfig = (*v1beta2.ExtenderTLSConfig)(unsafe.Pointer(in.TLSConfig)) - out.HTTPTimeout = in.HTTPTimeout - out.NodeCacheCapable = in.NodeCacheCapable - out.ManagedResources = *(*[]v1beta2.ExtenderManagedResource)(unsafe.Pointer(&in.ManagedResources)) - out.Ignorable = in.Ignorable - return nil -} - -// Convert_config_Extender_To_v1beta2_Extender is an autogenerated conversion function. -func Convert_config_Extender_To_v1beta2_Extender(in *config.Extender, out *v1beta2.Extender, s conversion.Scope) error { - return autoConvert_config_Extender_To_v1beta2_Extender(in, out, s) -} - -func autoConvert_v1beta2_ExtenderManagedResource_To_config_ExtenderManagedResource(in *v1beta2.ExtenderManagedResource, out *config.ExtenderManagedResource, s conversion.Scope) error { - out.Name = in.Name - out.IgnoredByScheduler = in.IgnoredByScheduler - return nil -} - -// Convert_v1beta2_ExtenderManagedResource_To_config_ExtenderManagedResource is an autogenerated conversion function. -func Convert_v1beta2_ExtenderManagedResource_To_config_ExtenderManagedResource(in *v1beta2.ExtenderManagedResource, out *config.ExtenderManagedResource, s conversion.Scope) error { - return autoConvert_v1beta2_ExtenderManagedResource_To_config_ExtenderManagedResource(in, out, s) -} - -func autoConvert_config_ExtenderManagedResource_To_v1beta2_ExtenderManagedResource(in *config.ExtenderManagedResource, out *v1beta2.ExtenderManagedResource, s conversion.Scope) error { - out.Name = in.Name - out.IgnoredByScheduler = in.IgnoredByScheduler - return nil -} - -// Convert_config_ExtenderManagedResource_To_v1beta2_ExtenderManagedResource is an autogenerated conversion function. -func Convert_config_ExtenderManagedResource_To_v1beta2_ExtenderManagedResource(in *config.ExtenderManagedResource, out *v1beta2.ExtenderManagedResource, s conversion.Scope) error { - return autoConvert_config_ExtenderManagedResource_To_v1beta2_ExtenderManagedResource(in, out, s) -} - -func autoConvert_v1beta2_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in *v1beta2.ExtenderTLSConfig, out *config.ExtenderTLSConfig, s conversion.Scope) error { - out.Insecure = in.Insecure - out.ServerName = in.ServerName - out.CertFile = in.CertFile - out.KeyFile = in.KeyFile - out.CAFile = in.CAFile - out.CertData = *(*[]byte)(unsafe.Pointer(&in.CertData)) - out.KeyData = *(*[]byte)(unsafe.Pointer(&in.KeyData)) - out.CAData = *(*[]byte)(unsafe.Pointer(&in.CAData)) - return nil -} - -// Convert_v1beta2_ExtenderTLSConfig_To_config_ExtenderTLSConfig is an autogenerated conversion function. 
-func Convert_v1beta2_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in *v1beta2.ExtenderTLSConfig, out *config.ExtenderTLSConfig, s conversion.Scope) error { - return autoConvert_v1beta2_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in, out, s) -} - -func autoConvert_config_ExtenderTLSConfig_To_v1beta2_ExtenderTLSConfig(in *config.ExtenderTLSConfig, out *v1beta2.ExtenderTLSConfig, s conversion.Scope) error { - out.Insecure = in.Insecure - out.ServerName = in.ServerName - out.CertFile = in.CertFile - out.KeyFile = in.KeyFile - out.CAFile = in.CAFile - out.CertData = *(*[]byte)(unsafe.Pointer(&in.CertData)) - out.KeyData = *(*[]byte)(unsafe.Pointer(&in.KeyData)) - out.CAData = *(*[]byte)(unsafe.Pointer(&in.CAData)) - return nil -} - -// Convert_config_ExtenderTLSConfig_To_v1beta2_ExtenderTLSConfig is an autogenerated conversion function. -func Convert_config_ExtenderTLSConfig_To_v1beta2_ExtenderTLSConfig(in *config.ExtenderTLSConfig, out *v1beta2.ExtenderTLSConfig, s conversion.Scope) error { - return autoConvert_config_ExtenderTLSConfig_To_v1beta2_ExtenderTLSConfig(in, out, s) -} - -func autoConvert_v1beta2_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in *v1beta2.InterPodAffinityArgs, out *config.InterPodAffinityArgs, s conversion.Scope) error { - if err := v1.Convert_Pointer_int32_To_int32(&in.HardPodAffinityWeight, &out.HardPodAffinityWeight, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_InterPodAffinityArgs_To_config_InterPodAffinityArgs is an autogenerated conversion function. -func Convert_v1beta2_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in *v1beta2.InterPodAffinityArgs, out *config.InterPodAffinityArgs, s conversion.Scope) error { - return autoConvert_v1beta2_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in, out, s) -} - -func autoConvert_config_InterPodAffinityArgs_To_v1beta2_InterPodAffinityArgs(in *config.InterPodAffinityArgs, out *v1beta2.InterPodAffinityArgs, s conversion.Scope) error { - if err := v1.Convert_int32_To_Pointer_int32(&in.HardPodAffinityWeight, &out.HardPodAffinityWeight, s); err != nil { - return err - } - return nil -} - -// Convert_config_InterPodAffinityArgs_To_v1beta2_InterPodAffinityArgs is an autogenerated conversion function. 
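A pattern that recurs throughout these generated conversions: when conversion-gen can prove two types share an identical memory layout, it emits a direct unsafe.Pointer cast instead of a per-element copy. With illustrative stand-in types:

```go
import "unsafe"

type externalSpec struct {
	Name   string
	Weight int64
}

type internalSpec struct {
	Name   string
	Weight int64
}

// castSpecs reinterprets the slice header in place; this is sound only
// because both structs have identical field types in identical order.
func castSpecs(in []externalSpec) []internalSpec {
	return *(*[]internalSpec)(unsafe.Pointer(&in))
}
```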
-func Convert_config_InterPodAffinityArgs_To_v1beta2_InterPodAffinityArgs(in *config.InterPodAffinityArgs, out *v1beta2.InterPodAffinityArgs, s conversion.Scope) error { - return autoConvert_config_InterPodAffinityArgs_To_v1beta2_InterPodAffinityArgs(in, out, s) -} - -func autoConvert_v1beta2_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in *v1beta2.KubeSchedulerConfiguration, out *config.KubeSchedulerConfiguration, s conversion.Scope) error { - if err := v1.Convert_Pointer_int32_To_int32(&in.Parallelism, &out.Parallelism, s); err != nil { - return err - } - if err := v1alpha1.Convert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { - return err - } - if err := v1alpha1.Convert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { - return err - } - if err := v1.Convert_Pointer_string_To_string(&in.HealthzBindAddress, &out.HealthzBindAddress, s); err != nil { - return err - } - if err := v1.Convert_Pointer_string_To_string(&in.MetricsBindAddress, &out.MetricsBindAddress, s); err != nil { - return err - } - if err := v1alpha1.Convert_v1alpha1_DebuggingConfiguration_To_config_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil { - return err - } - out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore)) - if err := v1.Convert_Pointer_int64_To_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil { - return err - } - if err := v1.Convert_Pointer_int64_To_int64(&in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds, s); err != nil { - return err - } - if in.Profiles != nil { - in, out := &in.Profiles, &out.Profiles - *out = make([]config.KubeSchedulerProfile, len(*in)) - for i := range *in { - if err := Convert_v1beta2_KubeSchedulerProfile_To_config_KubeSchedulerProfile(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Profiles = nil - } - out.Extenders = *(*[]config.Extender)(unsafe.Pointer(&in.Extenders)) - return nil -} - -func autoConvert_config_KubeSchedulerConfiguration_To_v1beta2_KubeSchedulerConfiguration(in *config.KubeSchedulerConfiguration, out *v1beta2.KubeSchedulerConfiguration, s conversion.Scope) error { - if err := v1.Convert_int32_To_Pointer_int32(&in.Parallelism, &out.Parallelism, s); err != nil { - return err - } - if err := v1alpha1.Convert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil { - return err - } - if err := v1alpha1.Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil { - return err - } - if err := v1.Convert_string_To_Pointer_string(&in.HealthzBindAddress, &out.HealthzBindAddress, s); err != nil { - return err - } - if err := v1.Convert_string_To_Pointer_string(&in.MetricsBindAddress, &out.MetricsBindAddress, s); err != nil { - return err - } - if err := v1alpha1.Convert_config_DebuggingConfiguration_To_v1alpha1_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil { - return err - } - out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore)) - if err := v1.Convert_int64_To_Pointer_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil { - return err - } - if err := 
v1.Convert_int64_To_Pointer_int64(&in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds, s); err != nil { - return err - } - if in.Profiles != nil { - in, out := &in.Profiles, &out.Profiles - *out = make([]v1beta2.KubeSchedulerProfile, len(*in)) - for i := range *in { - if err := Convert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Profiles = nil - } - out.Extenders = *(*[]v1beta2.Extender)(unsafe.Pointer(&in.Extenders)) - return nil -} - -func autoConvert_v1beta2_KubeSchedulerProfile_To_config_KubeSchedulerProfile(in *v1beta2.KubeSchedulerProfile, out *config.KubeSchedulerProfile, s conversion.Scope) error { - if err := v1.Convert_Pointer_string_To_string(&in.SchedulerName, &out.SchedulerName, s); err != nil { - return err - } - if in.Plugins != nil { - in, out := &in.Plugins, &out.Plugins - *out = new(config.Plugins) - if err := Convert_v1beta2_Plugins_To_config_Plugins(*in, *out, s); err != nil { - return err - } - } else { - out.Plugins = nil - } - if in.PluginConfig != nil { - in, out := &in.PluginConfig, &out.PluginConfig - *out = make([]config.PluginConfig, len(*in)) - for i := range *in { - if err := Convert_v1beta2_PluginConfig_To_config_PluginConfig(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.PluginConfig = nil - } - return nil -} - -// Convert_v1beta2_KubeSchedulerProfile_To_config_KubeSchedulerProfile is an autogenerated conversion function. -func Convert_v1beta2_KubeSchedulerProfile_To_config_KubeSchedulerProfile(in *v1beta2.KubeSchedulerProfile, out *config.KubeSchedulerProfile, s conversion.Scope) error { - return autoConvert_v1beta2_KubeSchedulerProfile_To_config_KubeSchedulerProfile(in, out, s) -} - -func autoConvert_config_KubeSchedulerProfile_To_v1beta2_KubeSchedulerProfile(in *config.KubeSchedulerProfile, out *v1beta2.KubeSchedulerProfile, s conversion.Scope) error { - if err := v1.Convert_string_To_Pointer_string(&in.SchedulerName, &out.SchedulerName, s); err != nil { - return err - } - // WARNING: in.PercentageOfNodesToScore requires manual conversion: does not exist in peer-type - if in.Plugins != nil { - in, out := &in.Plugins, &out.Plugins - *out = new(v1beta2.Plugins) - if err := Convert_config_Plugins_To_v1beta2_Plugins(*in, *out, s); err != nil { - return err - } - } else { - out.Plugins = nil - } - if in.PluginConfig != nil { - in, out := &in.PluginConfig, &out.PluginConfig - *out = make([]v1beta2.PluginConfig, len(*in)) - for i := range *in { - if err := Convert_config_PluginConfig_To_v1beta2_PluginConfig(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.PluginConfig = nil - } - return nil -} - -func autoConvert_v1beta2_NodeAffinityArgs_To_config_NodeAffinityArgs(in *v1beta2.NodeAffinityArgs, out *config.NodeAffinityArgs, s conversion.Scope) error { - out.AddedAffinity = (*corev1.NodeAffinity)(unsafe.Pointer(in.AddedAffinity)) - return nil -} - -// Convert_v1beta2_NodeAffinityArgs_To_config_NodeAffinityArgs is an autogenerated conversion function. 
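Another recurring helper family here: v1.Convert_Pointer_int32_To_int32 and friends collapse optional versioned fields into required internal ones and back. Their behavior reduces to roughly:

```go
// deref mirrors Convert_Pointer_int32_To_int32: nil collapses to zero.
func deref(in *int32) int32 {
	if in == nil {
		return 0
	}
	return *in
}

// ref mirrors Convert_int32_To_Pointer_int32: the value round-trips
// through a fresh pointer.
func ref(in int32) *int32 {
	out := in
	return &out
}
```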
-func Convert_v1beta2_NodeAffinityArgs_To_config_NodeAffinityArgs(in *v1beta2.NodeAffinityArgs, out *config.NodeAffinityArgs, s conversion.Scope) error { - return autoConvert_v1beta2_NodeAffinityArgs_To_config_NodeAffinityArgs(in, out, s) -} - -func autoConvert_config_NodeAffinityArgs_To_v1beta2_NodeAffinityArgs(in *config.NodeAffinityArgs, out *v1beta2.NodeAffinityArgs, s conversion.Scope) error { - out.AddedAffinity = (*corev1.NodeAffinity)(unsafe.Pointer(in.AddedAffinity)) - return nil -} - -// Convert_config_NodeAffinityArgs_To_v1beta2_NodeAffinityArgs is an autogenerated conversion function. -func Convert_config_NodeAffinityArgs_To_v1beta2_NodeAffinityArgs(in *config.NodeAffinityArgs, out *v1beta2.NodeAffinityArgs, s conversion.Scope) error { - return autoConvert_config_NodeAffinityArgs_To_v1beta2_NodeAffinityArgs(in, out, s) -} - -func autoConvert_v1beta2_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(in *v1beta2.NodeResourcesBalancedAllocationArgs, out *config.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error { - out.Resources = *(*[]config.ResourceSpec)(unsafe.Pointer(&in.Resources)) - return nil -} - -// Convert_v1beta2_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs is an autogenerated conversion function. -func Convert_v1beta2_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(in *v1beta2.NodeResourcesBalancedAllocationArgs, out *config.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error { - return autoConvert_v1beta2_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(in, out, s) -} - -func autoConvert_config_NodeResourcesBalancedAllocationArgs_To_v1beta2_NodeResourcesBalancedAllocationArgs(in *config.NodeResourcesBalancedAllocationArgs, out *v1beta2.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error { - out.Resources = *(*[]v1beta2.ResourceSpec)(unsafe.Pointer(&in.Resources)) - return nil -} - -// Convert_config_NodeResourcesBalancedAllocationArgs_To_v1beta2_NodeResourcesBalancedAllocationArgs is an autogenerated conversion function. -func Convert_config_NodeResourcesBalancedAllocationArgs_To_v1beta2_NodeResourcesBalancedAllocationArgs(in *config.NodeResourcesBalancedAllocationArgs, out *v1beta2.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error { - return autoConvert_config_NodeResourcesBalancedAllocationArgs_To_v1beta2_NodeResourcesBalancedAllocationArgs(in, out, s) -} - -func autoConvert_v1beta2_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(in *v1beta2.NodeResourcesFitArgs, out *config.NodeResourcesFitArgs, s conversion.Scope) error { - out.IgnoredResources = *(*[]string)(unsafe.Pointer(&in.IgnoredResources)) - out.IgnoredResourceGroups = *(*[]string)(unsafe.Pointer(&in.IgnoredResourceGroups)) - out.ScoringStrategy = (*config.ScoringStrategy)(unsafe.Pointer(in.ScoringStrategy)) - return nil -} - -// Convert_v1beta2_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs is an autogenerated conversion function. 
-func Convert_v1beta2_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(in *v1beta2.NodeResourcesFitArgs, out *config.NodeResourcesFitArgs, s conversion.Scope) error { - return autoConvert_v1beta2_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(in, out, s) -} - -func autoConvert_config_NodeResourcesFitArgs_To_v1beta2_NodeResourcesFitArgs(in *config.NodeResourcesFitArgs, out *v1beta2.NodeResourcesFitArgs, s conversion.Scope) error { - out.IgnoredResources = *(*[]string)(unsafe.Pointer(&in.IgnoredResources)) - out.IgnoredResourceGroups = *(*[]string)(unsafe.Pointer(&in.IgnoredResourceGroups)) - out.ScoringStrategy = (*v1beta2.ScoringStrategy)(unsafe.Pointer(in.ScoringStrategy)) - return nil -} - -// Convert_config_NodeResourcesFitArgs_To_v1beta2_NodeResourcesFitArgs is an autogenerated conversion function. -func Convert_config_NodeResourcesFitArgs_To_v1beta2_NodeResourcesFitArgs(in *config.NodeResourcesFitArgs, out *v1beta2.NodeResourcesFitArgs, s conversion.Scope) error { - return autoConvert_config_NodeResourcesFitArgs_To_v1beta2_NodeResourcesFitArgs(in, out, s) -} - -func autoConvert_v1beta2_Plugin_To_config_Plugin(in *v1beta2.Plugin, out *config.Plugin, s conversion.Scope) error { - out.Name = in.Name - if err := v1.Convert_Pointer_int32_To_int32(&in.Weight, &out.Weight, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_Plugin_To_config_Plugin is an autogenerated conversion function. -func Convert_v1beta2_Plugin_To_config_Plugin(in *v1beta2.Plugin, out *config.Plugin, s conversion.Scope) error { - return autoConvert_v1beta2_Plugin_To_config_Plugin(in, out, s) -} - -func autoConvert_config_Plugin_To_v1beta2_Plugin(in *config.Plugin, out *v1beta2.Plugin, s conversion.Scope) error { - out.Name = in.Name - if err := v1.Convert_int32_To_Pointer_int32(&in.Weight, &out.Weight, s); err != nil { - return err - } - return nil -} - -// Convert_config_Plugin_To_v1beta2_Plugin is an autogenerated conversion function. -func Convert_config_Plugin_To_v1beta2_Plugin(in *config.Plugin, out *v1beta2.Plugin, s conversion.Scope) error { - return autoConvert_config_Plugin_To_v1beta2_Plugin(in, out, s) -} - -func autoConvert_v1beta2_PluginConfig_To_config_PluginConfig(in *v1beta2.PluginConfig, out *config.PluginConfig, s conversion.Scope) error { - out.Name = in.Name - if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Args, &out.Args, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_PluginConfig_To_config_PluginConfig is an autogenerated conversion function. -func Convert_v1beta2_PluginConfig_To_config_PluginConfig(in *v1beta2.PluginConfig, out *config.PluginConfig, s conversion.Scope) error { - return autoConvert_v1beta2_PluginConfig_To_config_PluginConfig(in, out, s) -} - -func autoConvert_config_PluginConfig_To_v1beta2_PluginConfig(in *config.PluginConfig, out *v1beta2.PluginConfig, s conversion.Scope) error { - out.Name = in.Name - if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Args, &out.Args, s); err != nil { - return err - } - return nil -} - -// Convert_config_PluginConfig_To_v1beta2_PluginConfig is an autogenerated conversion function. 
-func Convert_config_PluginConfig_To_v1beta2_PluginConfig(in *config.PluginConfig, out *v1beta2.PluginConfig, s conversion.Scope) error { - return autoConvert_config_PluginConfig_To_v1beta2_PluginConfig(in, out, s) -} - -func autoConvert_v1beta2_PluginSet_To_config_PluginSet(in *v1beta2.PluginSet, out *config.PluginSet, s conversion.Scope) error { - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = make([]config.Plugin, len(*in)) - for i := range *in { - if err := Convert_v1beta2_Plugin_To_config_Plugin(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Enabled = nil - } - if in.Disabled != nil { - in, out := &in.Disabled, &out.Disabled - *out = make([]config.Plugin, len(*in)) - for i := range *in { - if err := Convert_v1beta2_Plugin_To_config_Plugin(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Disabled = nil - } - return nil -} - -// Convert_v1beta2_PluginSet_To_config_PluginSet is an autogenerated conversion function. -func Convert_v1beta2_PluginSet_To_config_PluginSet(in *v1beta2.PluginSet, out *config.PluginSet, s conversion.Scope) error { - return autoConvert_v1beta2_PluginSet_To_config_PluginSet(in, out, s) -} - -func autoConvert_config_PluginSet_To_v1beta2_PluginSet(in *config.PluginSet, out *v1beta2.PluginSet, s conversion.Scope) error { - if in.Enabled != nil { - in, out := &in.Enabled, &out.Enabled - *out = make([]v1beta2.Plugin, len(*in)) - for i := range *in { - if err := Convert_config_Plugin_To_v1beta2_Plugin(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Enabled = nil - } - if in.Disabled != nil { - in, out := &in.Disabled, &out.Disabled - *out = make([]v1beta2.Plugin, len(*in)) - for i := range *in { - if err := Convert_config_Plugin_To_v1beta2_Plugin(&(*in)[i], &(*out)[i], s); err != nil { - return err - } - } - } else { - out.Disabled = nil - } - return nil -} - -// Convert_config_PluginSet_To_v1beta2_PluginSet is an autogenerated conversion function. 
-func Convert_config_PluginSet_To_v1beta2_PluginSet(in *config.PluginSet, out *v1beta2.PluginSet, s conversion.Scope) error { - return autoConvert_config_PluginSet_To_v1beta2_PluginSet(in, out, s) -} - -func autoConvert_v1beta2_Plugins_To_config_Plugins(in *v1beta2.Plugins, out *config.Plugins, s conversion.Scope) error { - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.PreEnqueue, &out.PreEnqueue, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.QueueSort, &out.QueueSort, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.PreFilter, &out.PreFilter, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.Filter, &out.Filter, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.PostFilter, &out.PostFilter, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.PreScore, &out.PreScore, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.Score, &out.Score, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.Reserve, &out.Reserve, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.Permit, &out.Permit, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.PreBind, &out.PreBind, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.Bind, &out.Bind, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.PostBind, &out.PostBind, s); err != nil { - return err - } - if err := Convert_v1beta2_PluginSet_To_config_PluginSet(&in.MultiPoint, &out.MultiPoint, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_Plugins_To_config_Plugins is an autogenerated conversion function. 
-func Convert_v1beta2_Plugins_To_config_Plugins(in *v1beta2.Plugins, out *config.Plugins, s conversion.Scope) error { - return autoConvert_v1beta2_Plugins_To_config_Plugins(in, out, s) -} - -func autoConvert_config_Plugins_To_v1beta2_Plugins(in *config.Plugins, out *v1beta2.Plugins, s conversion.Scope) error { - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.PreEnqueue, &out.PreEnqueue, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.QueueSort, &out.QueueSort, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.PreFilter, &out.PreFilter, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.Filter, &out.Filter, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.PostFilter, &out.PostFilter, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.PreScore, &out.PreScore, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.Score, &out.Score, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.Reserve, &out.Reserve, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.Permit, &out.Permit, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.PreBind, &out.PreBind, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.Bind, &out.Bind, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.PostBind, &out.PostBind, s); err != nil { - return err - } - if err := Convert_config_PluginSet_To_v1beta2_PluginSet(&in.MultiPoint, &out.MultiPoint, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta2_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(in *v1beta2.PodTopologySpreadArgs, out *config.PodTopologySpreadArgs, s conversion.Scope) error { - out.DefaultConstraints = *(*[]corev1.TopologySpreadConstraint)(unsafe.Pointer(&in.DefaultConstraints)) - out.DefaultingType = config.PodTopologySpreadConstraintsDefaulting(in.DefaultingType) - return nil -} - -// Convert_v1beta2_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs is an autogenerated conversion function. -func Convert_v1beta2_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(in *v1beta2.PodTopologySpreadArgs, out *config.PodTopologySpreadArgs, s conversion.Scope) error { - return autoConvert_v1beta2_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(in, out, s) -} - -func autoConvert_config_PodTopologySpreadArgs_To_v1beta2_PodTopologySpreadArgs(in *config.PodTopologySpreadArgs, out *v1beta2.PodTopologySpreadArgs, s conversion.Scope) error { - out.DefaultConstraints = *(*[]corev1.TopologySpreadConstraint)(unsafe.Pointer(&in.DefaultConstraints)) - out.DefaultingType = v1beta2.PodTopologySpreadConstraintsDefaulting(in.DefaultingType) - return nil -} - -// Convert_config_PodTopologySpreadArgs_To_v1beta2_PodTopologySpreadArgs is an autogenerated conversion function. 
-func Convert_config_PodTopologySpreadArgs_To_v1beta2_PodTopologySpreadArgs(in *config.PodTopologySpreadArgs, out *v1beta2.PodTopologySpreadArgs, s conversion.Scope) error { - return autoConvert_config_PodTopologySpreadArgs_To_v1beta2_PodTopologySpreadArgs(in, out, s) -} - -func autoConvert_v1beta2_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(in *v1beta2.RequestedToCapacityRatioParam, out *config.RequestedToCapacityRatioParam, s conversion.Scope) error { - out.Shape = *(*[]config.UtilizationShapePoint)(unsafe.Pointer(&in.Shape)) - return nil -} - -// Convert_v1beta2_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam is an autogenerated conversion function. -func Convert_v1beta2_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(in *v1beta2.RequestedToCapacityRatioParam, out *config.RequestedToCapacityRatioParam, s conversion.Scope) error { - return autoConvert_v1beta2_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(in, out, s) -} - -func autoConvert_config_RequestedToCapacityRatioParam_To_v1beta2_RequestedToCapacityRatioParam(in *config.RequestedToCapacityRatioParam, out *v1beta2.RequestedToCapacityRatioParam, s conversion.Scope) error { - out.Shape = *(*[]v1beta2.UtilizationShapePoint)(unsafe.Pointer(&in.Shape)) - return nil -} - -// Convert_config_RequestedToCapacityRatioParam_To_v1beta2_RequestedToCapacityRatioParam is an autogenerated conversion function. -func Convert_config_RequestedToCapacityRatioParam_To_v1beta2_RequestedToCapacityRatioParam(in *config.RequestedToCapacityRatioParam, out *v1beta2.RequestedToCapacityRatioParam, s conversion.Scope) error { - return autoConvert_config_RequestedToCapacityRatioParam_To_v1beta2_RequestedToCapacityRatioParam(in, out, s) -} - -func autoConvert_v1beta2_ResourceSpec_To_config_ResourceSpec(in *v1beta2.ResourceSpec, out *config.ResourceSpec, s conversion.Scope) error { - out.Name = in.Name - out.Weight = in.Weight - return nil -} - -// Convert_v1beta2_ResourceSpec_To_config_ResourceSpec is an autogenerated conversion function. -func Convert_v1beta2_ResourceSpec_To_config_ResourceSpec(in *v1beta2.ResourceSpec, out *config.ResourceSpec, s conversion.Scope) error { - return autoConvert_v1beta2_ResourceSpec_To_config_ResourceSpec(in, out, s) -} - -func autoConvert_config_ResourceSpec_To_v1beta2_ResourceSpec(in *config.ResourceSpec, out *v1beta2.ResourceSpec, s conversion.Scope) error { - out.Name = in.Name - out.Weight = in.Weight - return nil -} - -// Convert_config_ResourceSpec_To_v1beta2_ResourceSpec is an autogenerated conversion function. -func Convert_config_ResourceSpec_To_v1beta2_ResourceSpec(in *config.ResourceSpec, out *v1beta2.ResourceSpec, s conversion.Scope) error { - return autoConvert_config_ResourceSpec_To_v1beta2_ResourceSpec(in, out, s) -} - -func autoConvert_v1beta2_ScoringStrategy_To_config_ScoringStrategy(in *v1beta2.ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error { - out.Type = config.ScoringStrategyType(in.Type) - out.Resources = *(*[]config.ResourceSpec)(unsafe.Pointer(&in.Resources)) - out.RequestedToCapacityRatio = (*config.RequestedToCapacityRatioParam)(unsafe.Pointer(in.RequestedToCapacityRatio)) - return nil -} - -// Convert_v1beta2_ScoringStrategy_To_config_ScoringStrategy is an autogenerated conversion function. 
-func Convert_v1beta2_ScoringStrategy_To_config_ScoringStrategy(in *v1beta2.ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error { - return autoConvert_v1beta2_ScoringStrategy_To_config_ScoringStrategy(in, out, s) -} - -func autoConvert_config_ScoringStrategy_To_v1beta2_ScoringStrategy(in *config.ScoringStrategy, out *v1beta2.ScoringStrategy, s conversion.Scope) error { - out.Type = v1beta2.ScoringStrategyType(in.Type) - out.Resources = *(*[]v1beta2.ResourceSpec)(unsafe.Pointer(&in.Resources)) - out.RequestedToCapacityRatio = (*v1beta2.RequestedToCapacityRatioParam)(unsafe.Pointer(in.RequestedToCapacityRatio)) - return nil -} - -// Convert_config_ScoringStrategy_To_v1beta2_ScoringStrategy is an autogenerated conversion function. -func Convert_config_ScoringStrategy_To_v1beta2_ScoringStrategy(in *config.ScoringStrategy, out *v1beta2.ScoringStrategy, s conversion.Scope) error { - return autoConvert_config_ScoringStrategy_To_v1beta2_ScoringStrategy(in, out, s) -} - -func autoConvert_v1beta2_UtilizationShapePoint_To_config_UtilizationShapePoint(in *v1beta2.UtilizationShapePoint, out *config.UtilizationShapePoint, s conversion.Scope) error { - out.Utilization = in.Utilization - out.Score = in.Score - return nil -} - -// Convert_v1beta2_UtilizationShapePoint_To_config_UtilizationShapePoint is an autogenerated conversion function. -func Convert_v1beta2_UtilizationShapePoint_To_config_UtilizationShapePoint(in *v1beta2.UtilizationShapePoint, out *config.UtilizationShapePoint, s conversion.Scope) error { - return autoConvert_v1beta2_UtilizationShapePoint_To_config_UtilizationShapePoint(in, out, s) -} - -func autoConvert_config_UtilizationShapePoint_To_v1beta2_UtilizationShapePoint(in *config.UtilizationShapePoint, out *v1beta2.UtilizationShapePoint, s conversion.Scope) error { - out.Utilization = in.Utilization - out.Score = in.Score - return nil -} - -// Convert_config_UtilizationShapePoint_To_v1beta2_UtilizationShapePoint is an autogenerated conversion function. -func Convert_config_UtilizationShapePoint_To_v1beta2_UtilizationShapePoint(in *config.UtilizationShapePoint, out *v1beta2.UtilizationShapePoint, s conversion.Scope) error { - return autoConvert_config_UtilizationShapePoint_To_v1beta2_UtilizationShapePoint(in, out, s) -} - -func autoConvert_v1beta2_VolumeBindingArgs_To_config_VolumeBindingArgs(in *v1beta2.VolumeBindingArgs, out *config.VolumeBindingArgs, s conversion.Scope) error { - if err := v1.Convert_Pointer_int64_To_int64(&in.BindTimeoutSeconds, &out.BindTimeoutSeconds, s); err != nil { - return err - } - out.Shape = *(*[]config.UtilizationShapePoint)(unsafe.Pointer(&in.Shape)) - return nil -} - -// Convert_v1beta2_VolumeBindingArgs_To_config_VolumeBindingArgs is an autogenerated conversion function. -func Convert_v1beta2_VolumeBindingArgs_To_config_VolumeBindingArgs(in *v1beta2.VolumeBindingArgs, out *config.VolumeBindingArgs, s conversion.Scope) error { - return autoConvert_v1beta2_VolumeBindingArgs_To_config_VolumeBindingArgs(in, out, s) -} - -func autoConvert_config_VolumeBindingArgs_To_v1beta2_VolumeBindingArgs(in *config.VolumeBindingArgs, out *v1beta2.VolumeBindingArgs, s conversion.Scope) error { - if err := v1.Convert_int64_To_Pointer_int64(&in.BindTimeoutSeconds, &out.BindTimeoutSeconds, s); err != nil { - return err - } - out.Shape = *(*[]v1beta2.UtilizationShapePoint)(unsafe.Pointer(&in.Shape)) - return nil -} - -// Convert_config_VolumeBindingArgs_To_v1beta2_VolumeBindingArgs is an autogenerated conversion function. 
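Throughout these generated converters, slices and pointers are translated with a single unsafe.Pointer cast rather than an element-by-element copy. conversion-gen emits this zero-copy form only when the external and internal types have identical memory layouts. A toy, runnable sketch of the pattern (the two struct types are hypothetical stand-ins for a versioned/internal pair such as ResourceSpec):

package main

import (
	"fmt"
	"unsafe"
)

// externalSpec and internalSpec are hypothetical stand-ins for a versioned
// type and its internal counterpart with identical field order and types.
type externalSpec struct {
	Name   string
	Weight int64
}

type internalSpec struct {
	Name   string
	Weight int64
}

func main() {
	in := []externalSpec{{Name: "cpu", Weight: 1}, {Name: "memory", Weight: 1}}
	// Reinterpret the slice header in place; no elements are copied.
	// This is only sound because the two layouts are identical.
	out := *(*[]internalSpec)(unsafe.Pointer(&in))
	fmt.Println(out[0].Name, out[1].Weight) // cpu 1
}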
-func Convert_config_VolumeBindingArgs_To_v1beta2_VolumeBindingArgs(in *config.VolumeBindingArgs, out *v1beta2.VolumeBindingArgs, s conversion.Scope) error { - return autoConvert_config_VolumeBindingArgs_To_v1beta2_VolumeBindingArgs(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.deepcopy.go deleted file mode 100644 index e38909ba4..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.deepcopy.go +++ /dev/null @@ -1,22 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta2 diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.defaults.go deleted file mode 100644 index 359ee6437..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2/zz_generated.defaults.go +++ /dev/null @@ -1,73 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by defaulter-gen. DO NOT EDIT. - -package v1beta2 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" - v1beta2 "k8s.io/kube-scheduler/config/v1beta2" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. 
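For context, a scheme populated by RegisterDefaults is normally consumed through scheme.Default. The following is a hypothetical usage sketch, not code from this repository; it assumes the deleted package were still vendored, where register.go wires the defaulting funcs into AddToScheme:

// Hypothetical usage sketch: building the scheme through this package's
// AddToScheme registers everything RegisterDefaults provides, so
// scheme.Default runs every defaulting function for the object's concrete
// type, including nested defaulters.
scheme := runtime.NewScheme()
utilruntime.Must(AddToScheme(scheme)) // assumed: this package's AddToScheme
cfg := &v1beta2.KubeSchedulerConfiguration{}
scheme.Default(cfg) // cfg now carries the defaults applied below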
-func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&v1beta2.DefaultPreemptionArgs{}, func(obj interface{}) { SetObjectDefaults_DefaultPreemptionArgs(obj.(*v1beta2.DefaultPreemptionArgs)) }) - scheme.AddTypeDefaultingFunc(&v1beta2.InterPodAffinityArgs{}, func(obj interface{}) { SetObjectDefaults_InterPodAffinityArgs(obj.(*v1beta2.InterPodAffinityArgs)) }) - scheme.AddTypeDefaultingFunc(&v1beta2.KubeSchedulerConfiguration{}, func(obj interface{}) { - SetObjectDefaults_KubeSchedulerConfiguration(obj.(*v1beta2.KubeSchedulerConfiguration)) - }) - scheme.AddTypeDefaultingFunc(&v1beta2.NodeResourcesBalancedAllocationArgs{}, func(obj interface{}) { - SetObjectDefaults_NodeResourcesBalancedAllocationArgs(obj.(*v1beta2.NodeResourcesBalancedAllocationArgs)) - }) - scheme.AddTypeDefaultingFunc(&v1beta2.NodeResourcesFitArgs{}, func(obj interface{}) { SetObjectDefaults_NodeResourcesFitArgs(obj.(*v1beta2.NodeResourcesFitArgs)) }) - scheme.AddTypeDefaultingFunc(&v1beta2.PodTopologySpreadArgs{}, func(obj interface{}) { SetObjectDefaults_PodTopologySpreadArgs(obj.(*v1beta2.PodTopologySpreadArgs)) }) - scheme.AddTypeDefaultingFunc(&v1beta2.VolumeBindingArgs{}, func(obj interface{}) { SetObjectDefaults_VolumeBindingArgs(obj.(*v1beta2.VolumeBindingArgs)) }) - return nil -} - -func SetObjectDefaults_DefaultPreemptionArgs(in *v1beta2.DefaultPreemptionArgs) { - SetDefaults_DefaultPreemptionArgs(in) -} - -func SetObjectDefaults_InterPodAffinityArgs(in *v1beta2.InterPodAffinityArgs) { - SetDefaults_InterPodAffinityArgs(in) -} - -func SetObjectDefaults_KubeSchedulerConfiguration(in *v1beta2.KubeSchedulerConfiguration) { - SetDefaults_KubeSchedulerConfiguration(in) -} - -func SetObjectDefaults_NodeResourcesBalancedAllocationArgs(in *v1beta2.NodeResourcesBalancedAllocationArgs) { - SetDefaults_NodeResourcesBalancedAllocationArgs(in) -} - -func SetObjectDefaults_NodeResourcesFitArgs(in *v1beta2.NodeResourcesFitArgs) { - SetDefaults_NodeResourcesFitArgs(in) -} - -func SetObjectDefaults_PodTopologySpreadArgs(in *v1beta2.PodTopologySpreadArgs) { - SetDefaults_PodTopologySpreadArgs(in) -} - -func SetObjectDefaults_VolumeBindingArgs(in *v1beta2.VolumeBindingArgs) { - SetDefaults_VolumeBindingArgs(in) -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/conversion.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/conversion.go deleted file mode 100644 index bbb8ff4f2..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/conversion.go +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta3 - -import ( - "fmt" - "sync" - - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/kube-scheduler/config/v1beta3" - "k8s.io/kubernetes/pkg/scheduler/apis/config" -) - -var ( - // pluginArgConversionScheme is a scheme with internal and v1beta3 registered, - // used for defaulting/converting typed PluginConfig Args. - // Access via getPluginArgConversionScheme() - pluginArgConversionScheme *runtime.Scheme - initPluginArgConversionScheme sync.Once -) - -func GetPluginArgConversionScheme() *runtime.Scheme { - initPluginArgConversionScheme.Do(func() { - // set up the scheme used for plugin arg conversion - pluginArgConversionScheme = runtime.NewScheme() - utilruntime.Must(AddToScheme(pluginArgConversionScheme)) - utilruntime.Must(config.AddToScheme(pluginArgConversionScheme)) - }) - return pluginArgConversionScheme -} - -func Convert_v1beta3_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in *v1beta3.KubeSchedulerConfiguration, out *config.KubeSchedulerConfiguration, s conversion.Scope) error { - if err := autoConvert_v1beta3_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in, out, s); err != nil { - return err - } - return convertToInternalPluginConfigArgs(out) -} - -// convertToInternalPluginConfigArgs converts PluginConfig#Args into internal -// types using a scheme, after applying defaults. -func convertToInternalPluginConfigArgs(out *config.KubeSchedulerConfiguration) error { - scheme := GetPluginArgConversionScheme() - for i := range out.Profiles { - prof := &out.Profiles[i] - for j := range prof.PluginConfig { - args := prof.PluginConfig[j].Args - if args == nil { - continue - } - if _, isUnknown := args.(*runtime.Unknown); isUnknown { - continue - } - internalArgs, err := scheme.ConvertToVersion(args, config.SchemeGroupVersion) - if err != nil { - return fmt.Errorf("converting .Profiles[%d].PluginConfig[%d].Args into internal type: %w", i, j, err) - } - prof.PluginConfig[j].Args = internalArgs - } - } - return nil -} - -func Convert_config_KubeSchedulerConfiguration_To_v1beta3_KubeSchedulerConfiguration(in *config.KubeSchedulerConfiguration, out *v1beta3.KubeSchedulerConfiguration, s conversion.Scope) error { - if err := autoConvert_config_KubeSchedulerConfiguration_To_v1beta3_KubeSchedulerConfiguration(in, out, s); err != nil { - return err - } - return convertToExternalPluginConfigArgs(out) -} - -// convertToExternalPluginConfigArgs converts PluginConfig#Args into -// external (versioned) types using a scheme. -func convertToExternalPluginConfigArgs(out *v1beta3.KubeSchedulerConfiguration) error { - scheme := GetPluginArgConversionScheme() - for i := range out.Profiles { - for j := range out.Profiles[i].PluginConfig { - args := out.Profiles[i].PluginConfig[j].Args - if args.Object == nil { - continue - } - if _, isUnknown := args.Object.(*runtime.Unknown); isUnknown { - continue - } - externalArgs, err := scheme.ConvertToVersion(args.Object, SchemeGroupVersion) - if err != nil { - return err - } - out.Profiles[i].PluginConfig[j].Args.Object = externalArgs - } - } - return nil -} - -// Convert_config_KubeSchedulerProfile_To_v1beta3_KubeSchedulerProfile called auto coversion by -// ignoring per profile PercentageOfNodesToScore. 
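The two helpers above are the core of this deleted file: typed PluginConfig args embedded in a profile must cross between the versioned and internal GroupVersions through a dedicated conversion scheme. Condensed from convertToInternalPluginConfigArgs into a sketch (assuming the deleted packages were still vendored):

// The per-plugin args round trip, condensed from the code above.
scheme := GetPluginArgConversionScheme()
args := &v1beta3.NodeResourcesFitArgs{} // a versioned, typed args object
obj, err := scheme.ConvertToVersion(args, config.SchemeGroupVersion)
if err != nil {
	// a failure surfaces as "converting .Profiles[i].PluginConfig[j].Args ..."
	panic(err)
}
internal := obj.(*config.NodeResourcesFitArgs) // the internal representation
_ = internal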
-func Convert_config_KubeSchedulerProfile_To_v1beta3_KubeSchedulerProfile(in *config.KubeSchedulerProfile, out *v1beta3.KubeSchedulerProfile, s conversion.Scope) error { - return autoConvert_config_KubeSchedulerProfile_To_v1beta3_KubeSchedulerProfile(in, out, s) -} - -// Convert_config_Plugins_To_v1beta3_Plugins is an autogenerated conversion function. -func Convert_config_Plugins_To_v1beta3_Plugins(in *config.Plugins, out *v1beta3.Plugins, s conversion.Scope) error { - return autoConvert_config_Plugins_To_v1beta3_Plugins(in, out, s) -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/default_plugins.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/default_plugins.go deleted file mode 100644 index 13d479fec..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/default_plugins.go +++ /dev/null @@ -1,144 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta3 - -import ( - "k8s.io/apimachinery/pkg/util/sets" - utilfeature "k8s.io/apiserver/pkg/util/feature" - "k8s.io/klog/v2" - "k8s.io/kube-scheduler/config/v1beta3" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/scheduler/framework/plugins/names" - "k8s.io/utils/pointer" -) - -// getDefaultPlugins returns the default set of plugins. -func getDefaultPlugins() *v1beta3.Plugins { - plugins := &v1beta3.Plugins{ - MultiPoint: v1beta3.PluginSet{ - Enabled: []v1beta3.Plugin{ - {Name: names.PrioritySort}, - {Name: names.NodeUnschedulable}, - {Name: names.NodeName}, - {Name: names.TaintToleration, Weight: pointer.Int32(3)}, - {Name: names.NodeAffinity, Weight: pointer.Int32(2)}, - {Name: names.NodePorts}, - {Name: names.NodeResourcesFit, Weight: pointer.Int32(1)}, - {Name: names.VolumeRestrictions}, - {Name: names.EBSLimits}, - {Name: names.GCEPDLimits}, - {Name: names.NodeVolumeLimits}, - {Name: names.AzureDiskLimits}, - {Name: names.VolumeBinding}, - {Name: names.VolumeZone}, - {Name: names.PodTopologySpread, Weight: pointer.Int32(2)}, - {Name: names.InterPodAffinity, Weight: pointer.Int32(2)}, - {Name: names.DefaultPreemption}, - {Name: names.NodeResourcesBalancedAllocation, Weight: pointer.Int32(1)}, - {Name: names.ImageLocality, Weight: pointer.Int32(1)}, - {Name: names.DefaultBinder}, - }, - }, - } - applyFeatureGates(plugins) - - return plugins -} - -func applyFeatureGates(config *v1beta3.Plugins) { - if utilfeature.DefaultFeatureGate.Enabled(features.PodSchedulingReadiness) { - config.MultiPoint.Enabled = append(config.MultiPoint.Enabled, v1beta3.Plugin{Name: names.SchedulingGates}) - } -} - -// mergePlugins merges the custom set into the given default one, handling disabled sets. 
-func mergePlugins(defaultPlugins, customPlugins *v1beta3.Plugins) *v1beta3.Plugins { - if customPlugins == nil { - return defaultPlugins - } - - defaultPlugins.MultiPoint = mergePluginSet(defaultPlugins.MultiPoint, customPlugins.MultiPoint) - defaultPlugins.QueueSort = mergePluginSet(defaultPlugins.QueueSort, customPlugins.QueueSort) - defaultPlugins.PreFilter = mergePluginSet(defaultPlugins.PreFilter, customPlugins.PreFilter) - defaultPlugins.Filter = mergePluginSet(defaultPlugins.Filter, customPlugins.Filter) - defaultPlugins.PostFilter = mergePluginSet(defaultPlugins.PostFilter, customPlugins.PostFilter) - defaultPlugins.PreScore = mergePluginSet(defaultPlugins.PreScore, customPlugins.PreScore) - defaultPlugins.Score = mergePluginSet(defaultPlugins.Score, customPlugins.Score) - defaultPlugins.Reserve = mergePluginSet(defaultPlugins.Reserve, customPlugins.Reserve) - defaultPlugins.Permit = mergePluginSet(defaultPlugins.Permit, customPlugins.Permit) - defaultPlugins.PreBind = mergePluginSet(defaultPlugins.PreBind, customPlugins.PreBind) - defaultPlugins.Bind = mergePluginSet(defaultPlugins.Bind, customPlugins.Bind) - defaultPlugins.PostBind = mergePluginSet(defaultPlugins.PostBind, customPlugins.PostBind) - return defaultPlugins -} - -type pluginIndex struct { - index int - plugin v1beta3.Plugin -} - -func mergePluginSet(defaultPluginSet, customPluginSet v1beta3.PluginSet) v1beta3.PluginSet { - disabledPlugins := sets.NewString() - enabledCustomPlugins := make(map[string]pluginIndex) - // replacedPluginIndex is a set of index of plugins, which have replaced the default plugins. - replacedPluginIndex := sets.NewInt() - var disabled []v1beta3.Plugin - for _, disabledPlugin := range customPluginSet.Disabled { - // if the user is manually disabling any (or all, with "*") default plugins for an extension point, - // we need to track that so that the MultiPoint extension logic in the framework can know to skip - // inserting unspecified default plugins to this point. - disabled = append(disabled, v1beta3.Plugin{Name: disabledPlugin.Name}) - disabledPlugins.Insert(disabledPlugin.Name) - } - - // With MultiPoint, we may now have some disabledPlugins in the default registry - // For example, we enable PluginX with Filter+Score through MultiPoint but disable its Score plugin by default. - for _, disabledPlugin := range defaultPluginSet.Disabled { - disabled = append(disabled, v1beta3.Plugin{Name: disabledPlugin.Name}) - disabledPlugins.Insert(disabledPlugin.Name) - } - - for index, enabledPlugin := range customPluginSet.Enabled { - enabledCustomPlugins[enabledPlugin.Name] = pluginIndex{index, enabledPlugin} - } - var enabledPlugins []v1beta3.Plugin - if !disabledPlugins.Has("*") { - for _, defaultEnabledPlugin := range defaultPluginSet.Enabled { - if disabledPlugins.Has(defaultEnabledPlugin.Name) { - continue - } - // The default plugin is explicitly re-configured, update the default plugin accordingly. - if customPlugin, ok := enabledCustomPlugins[defaultEnabledPlugin.Name]; ok { - klog.InfoS("Default plugin is explicitly re-configured; overriding", "plugin", defaultEnabledPlugin.Name) - // Update the default plugin in place to preserve order. - defaultEnabledPlugin = customPlugin.plugin - replacedPluginIndex.Insert(customPlugin.index) - } - enabledPlugins = append(enabledPlugins, defaultEnabledPlugin) - } - } - - // Append all the custom plugins which haven't replaced any default plugins. - // Note: duplicated custom plugins will still be appended here. 
- // If so, the instantiation of scheduler framework will detect it and abort. - for index, plugin := range customPluginSet.Enabled { - if !replacedPluginIndex.Has(index) { - enabledPlugins = append(enabledPlugins, plugin) - } - } - return v1beta3.PluginSet{Enabled: enabledPlugins, Disabled: disabled} -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/defaults.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/defaults.go deleted file mode 100644 index b3816d36a..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/defaults.go +++ /dev/null @@ -1,242 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta3 - -import ( - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/sets" - "k8s.io/apiserver/pkg/util/feature" - componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" - "k8s.io/kube-scheduler/config/v1beta3" - "k8s.io/kubernetes/pkg/features" - "k8s.io/kubernetes/pkg/scheduler/apis/config" - "k8s.io/utils/pointer" -) - -var defaultResourceSpec = []v1beta3.ResourceSpec{ - {Name: string(v1.ResourceCPU), Weight: 1}, - {Name: string(v1.ResourceMemory), Weight: 1}, -} - -func addDefaultingFuncs(scheme *runtime.Scheme) error { - return RegisterDefaults(scheme) -} - -func pluginsNames(p *v1beta3.Plugins) []string { - if p == nil { - return nil - } - extensions := []v1beta3.PluginSet{ - p.MultiPoint, - p.PreFilter, - p.Filter, - p.PostFilter, - p.Reserve, - p.PreScore, - p.Score, - p.PreBind, - p.Bind, - p.PostBind, - p.Permit, - p.QueueSort, - } - n := sets.NewString() - for _, e := range extensions { - for _, pg := range e.Enabled { - n.Insert(pg.Name) - } - } - return n.List() -} - -func setDefaults_KubeSchedulerProfile(prof *v1beta3.KubeSchedulerProfile) { - // Set default plugins. - prof.Plugins = mergePlugins(getDefaultPlugins(), prof.Plugins) - // Set default plugin configs. - scheme := GetPluginArgConversionScheme() - existingConfigs := sets.NewString() - for j := range prof.PluginConfig { - existingConfigs.Insert(prof.PluginConfig[j].Name) - args := prof.PluginConfig[j].Args.Object - if _, isUnknown := args.(*runtime.Unknown); isUnknown { - continue - } - scheme.Default(args) - } - - // Append default configs for plugins that didn't have one explicitly set. - for _, name := range pluginsNames(prof.Plugins) { - if existingConfigs.Has(name) { - continue - } - gvk := v1beta3.SchemeGroupVersion.WithKind(name + "Args") - args, err := scheme.New(gvk) - if err != nil { - // This plugin is out-of-tree or doesn't require configuration. 
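The merge semantics implemented by mergePluginSet above are easy to misread, so here is a runnable restatement of the same rules with simplified, hypothetical types: disabled names (or "*") drop default plugins, an explicitly re-configured default is replaced in place to preserve order, and custom plugins that did not replace a default are appended at the end.

package main

import "fmt"

type plugin struct {
	Name   string
	Weight int32
}

type pluginSet struct {
	Enabled  []plugin
	Disabled []plugin
}

// mergeSketch restates the deleted mergePluginSet rules with simplified types.
func mergeSketch(def, custom pluginSet) []plugin {
	disabled := map[string]bool{}
	for _, p := range custom.Disabled {
		disabled[p.Name] = true
	}
	override := map[string]plugin{}
	for _, p := range custom.Enabled {
		override[p.Name] = p
	}
	replaced := map[string]bool{}
	var out []plugin
	if !disabled["*"] {
		for _, p := range def.Enabled {
			if disabled[p.Name] {
				continue
			}
			if c, ok := override[p.Name]; ok {
				p = c // update in place to preserve the default ordering
				replaced[p.Name] = true
			}
			out = append(out, p)
		}
	}
	for _, p := range custom.Enabled {
		if !replaced[p.Name] {
			out = append(out, p) // customs that replaced nothing go at the end
		}
	}
	return out
}

func main() {
	defaults := pluginSet{Enabled: []plugin{{"A", 1}, {"B", 1}, {"C", 1}}}
	custom := pluginSet{
		Enabled:  []plugin{{"B", 5}, {"D", 1}},
		Disabled: []plugin{{Name: "C"}},
	}
	fmt.Println(mergeSketch(defaults, custom)) // [{A 1} {B 5} {D 1}]
}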
- continue - } - scheme.Default(args) - args.GetObjectKind().SetGroupVersionKind(gvk) - prof.PluginConfig = append(prof.PluginConfig, v1beta3.PluginConfig{ - Name: name, - Args: runtime.RawExtension{Object: args}, - }) - } -} - -// SetDefaults_KubeSchedulerConfiguration sets additional defaults -func SetDefaults_KubeSchedulerConfiguration(obj *v1beta3.KubeSchedulerConfiguration) { - if obj.Parallelism == nil { - obj.Parallelism = pointer.Int32(16) - } - - if len(obj.Profiles) == 0 { - obj.Profiles = append(obj.Profiles, v1beta3.KubeSchedulerProfile{}) - } - // Only apply a default scheduler name when there is a single profile. - // Validation will ensure that every profile has a non-empty unique name. - if len(obj.Profiles) == 1 && obj.Profiles[0].SchedulerName == nil { - obj.Profiles[0].SchedulerName = pointer.String(v1.DefaultSchedulerName) - } - - // Add the default set of plugins and apply the configuration. - for i := range obj.Profiles { - prof := &obj.Profiles[i] - setDefaults_KubeSchedulerProfile(prof) - } - - if obj.PercentageOfNodesToScore == nil { - percentageOfNodesToScore := int32(config.DefaultPercentageOfNodesToScore) - obj.PercentageOfNodesToScore = &percentageOfNodesToScore - } - - if len(obj.LeaderElection.ResourceLock) == 0 { - // Use lease-based leader election to reduce cost. - // We migrated for EndpointsLease lock in 1.17 and starting in 1.20 we - // migrated to Lease lock. - obj.LeaderElection.ResourceLock = "leases" - } - if len(obj.LeaderElection.ResourceNamespace) == 0 { - obj.LeaderElection.ResourceNamespace = v1beta3.SchedulerDefaultLockObjectNamespace - } - if len(obj.LeaderElection.ResourceName) == 0 { - obj.LeaderElection.ResourceName = v1beta3.SchedulerDefaultLockObjectName - } - - if len(obj.ClientConnection.ContentType) == 0 { - obj.ClientConnection.ContentType = "application/vnd.kubernetes.protobuf" - } - // Scheduler has an opinion about QPS/Burst, setting specific defaults for itself, instead of generic settings. 
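The net effect of SetDefaults_KubeSchedulerConfiguration above is easiest to see against an empty object. A sketch, assuming the deleted v1beta3 package were still vendored; the expected values are read directly from the code above:

// Hypothetical: defaulting an empty configuration.
obj := &v1beta3.KubeSchedulerConfiguration{}
SetDefaults_KubeSchedulerConfiguration(obj)
// Per the code above, one would then observe:
//   *obj.Parallelism == 16
//   len(obj.Profiles) == 1 && *obj.Profiles[0].SchedulerName == "default-scheduler"
//   obj.LeaderElection.ResourceLock == "leases"
//   obj.ClientConnection.ContentType == "application/vnd.kubernetes.protobuf"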
- if obj.ClientConnection.QPS == 0.0 { - obj.ClientConnection.QPS = 50.0 - } - if obj.ClientConnection.Burst == 0 { - obj.ClientConnection.Burst = 100 - } - - // Use the default LeaderElectionConfiguration options - componentbaseconfigv1alpha1.RecommendedDefaultLeaderElectionConfiguration(&obj.LeaderElection) - - if obj.PodInitialBackoffSeconds == nil { - obj.PodInitialBackoffSeconds = pointer.Int64(1) - } - - if obj.PodMaxBackoffSeconds == nil { - obj.PodMaxBackoffSeconds = pointer.Int64(10) - } - - // Enable profiling by default in the scheduler - if obj.EnableProfiling == nil { - obj.EnableProfiling = pointer.Bool(true) - } - - // Enable contention profiling by default if profiling is enabled - if *obj.EnableProfiling && obj.EnableContentionProfiling == nil { - obj.EnableContentionProfiling = pointer.Bool(true) - } -} - -func SetDefaults_DefaultPreemptionArgs(obj *v1beta3.DefaultPreemptionArgs) { - if obj.MinCandidateNodesPercentage == nil { - obj.MinCandidateNodesPercentage = pointer.Int32(10) - } - if obj.MinCandidateNodesAbsolute == nil { - obj.MinCandidateNodesAbsolute = pointer.Int32(100) - } -} - -func SetDefaults_InterPodAffinityArgs(obj *v1beta3.InterPodAffinityArgs) { - if obj.HardPodAffinityWeight == nil { - obj.HardPodAffinityWeight = pointer.Int32(1) - } -} - -func SetDefaults_VolumeBindingArgs(obj *v1beta3.VolumeBindingArgs) { - if obj.BindTimeoutSeconds == nil { - obj.BindTimeoutSeconds = pointer.Int64(600) - } - if len(obj.Shape) == 0 && feature.DefaultFeatureGate.Enabled(features.VolumeCapacityPriority) { - obj.Shape = []v1beta3.UtilizationShapePoint{ - { - Utilization: 0, - Score: 0, - }, - { - Utilization: 100, - Score: int32(config.MaxCustomPriorityScore), - }, - } - } -} - -func SetDefaults_NodeResourcesBalancedAllocationArgs(obj *v1beta3.NodeResourcesBalancedAllocationArgs) { - if len(obj.Resources) == 0 { - obj.Resources = defaultResourceSpec - return - } - // If the weight is not set or it is explicitly set to 0, then apply the default weight(1) instead. - for i := range obj.Resources { - if obj.Resources[i].Weight == 0 { - obj.Resources[i].Weight = 1 - } - } -} - -func SetDefaults_PodTopologySpreadArgs(obj *v1beta3.PodTopologySpreadArgs) { - if obj.DefaultingType == "" { - obj.DefaultingType = v1beta3.SystemDefaulting - } -} - -func SetDefaults_NodeResourcesFitArgs(obj *v1beta3.NodeResourcesFitArgs) { - if obj.ScoringStrategy == nil { - obj.ScoringStrategy = &v1beta3.ScoringStrategy{ - Type: v1beta3.ScoringStrategyType(config.LeastAllocated), - Resources: defaultResourceSpec, - } - } - if len(obj.ScoringStrategy.Resources) == 0 { - // If no resources specified, use the default set. - obj.ScoringStrategy.Resources = append(obj.ScoringStrategy.Resources, defaultResourceSpec...) - } - for i := range obj.ScoringStrategy.Resources { - if obj.ScoringStrategy.Resources[i].Weight == 0 { - obj.ScoringStrategy.Resources[i].Weight = 1 - } - } -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/doc.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/doc.go deleted file mode 100644 index e9976588f..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:conversion-gen=k8s.io/kubernetes/pkg/scheduler/apis/config -// +k8s:conversion-gen-external-types=k8s.io/kube-scheduler/config/v1beta3 -// +k8s:defaulter-gen=TypeMeta -// +k8s:defaulter-gen-input=k8s.io/kube-scheduler/config/v1beta3 -// +groupName=kubescheduler.config.k8s.io - -package v1beta3 // import "k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3" diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/register.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/register.go deleted file mode 100644 index 2af0952e6..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/register.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta3 - -import ( - "k8s.io/kube-scheduler/config/v1beta3" -) - -// GroupName is the group name used in this package -const GroupName = v1beta3.GroupName - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = v1beta3.SchemeGroupVersion - -var ( - // localSchemeBuilder extends the SchemeBuilder instance with the external types. In this package, - // defaulting and conversion init funcs are registered as well. - localSchemeBuilder = &v1beta3.SchemeBuilder - // AddToScheme is a global function that registers this API group & version to a scheme - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addDefaultingFuncs) -} diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/zz_generated.conversion.go deleted file mode 100644 index 1e8fd12ba..000000000 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/zz_generated.conversion.go +++ /dev/null @@ -1,933 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1beta3 - -import ( - unsafe "unsafe" - - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - v1alpha1 "k8s.io/component-base/config/v1alpha1" - v1beta3 "k8s.io/kube-scheduler/config/v1beta3" - config "k8s.io/kubernetes/pkg/scheduler/apis/config" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*v1beta3.DefaultPreemptionArgs)(nil), (*config.DefaultPreemptionArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(a.(*v1beta3.DefaultPreemptionArgs), b.(*config.DefaultPreemptionArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.DefaultPreemptionArgs)(nil), (*v1beta3.DefaultPreemptionArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_DefaultPreemptionArgs_To_v1beta3_DefaultPreemptionArgs(a.(*config.DefaultPreemptionArgs), b.(*v1beta3.DefaultPreemptionArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta3.Extender)(nil), (*config.Extender)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_Extender_To_config_Extender(a.(*v1beta3.Extender), b.(*config.Extender), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.Extender)(nil), (*v1beta3.Extender)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_Extender_To_v1beta3_Extender(a.(*config.Extender), b.(*v1beta3.Extender), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta3.ExtenderManagedResource)(nil), (*config.ExtenderManagedResource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_ExtenderManagedResource_To_config_ExtenderManagedResource(a.(*v1beta3.ExtenderManagedResource), b.(*config.ExtenderManagedResource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.ExtenderManagedResource)(nil), (*v1beta3.ExtenderManagedResource)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ExtenderManagedResource_To_v1beta3_ExtenderManagedResource(a.(*config.ExtenderManagedResource), b.(*v1beta3.ExtenderManagedResource), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta3.ExtenderTLSConfig)(nil), (*config.ExtenderTLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_ExtenderTLSConfig_To_config_ExtenderTLSConfig(a.(*v1beta3.ExtenderTLSConfig), b.(*config.ExtenderTLSConfig), scope) - }); err != nil { - return err - } - if err := 
s.AddGeneratedConversionFunc((*config.ExtenderTLSConfig)(nil), (*v1beta3.ExtenderTLSConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ExtenderTLSConfig_To_v1beta3_ExtenderTLSConfig(a.(*config.ExtenderTLSConfig), b.(*v1beta3.ExtenderTLSConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta3.InterPodAffinityArgs)(nil), (*config.InterPodAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_InterPodAffinityArgs_To_config_InterPodAffinityArgs(a.(*v1beta3.InterPodAffinityArgs), b.(*config.InterPodAffinityArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.InterPodAffinityArgs)(nil), (*v1beta3.InterPodAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_InterPodAffinityArgs_To_v1beta3_InterPodAffinityArgs(a.(*config.InterPodAffinityArgs), b.(*v1beta3.InterPodAffinityArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta3.KubeSchedulerProfile)(nil), (*config.KubeSchedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_KubeSchedulerProfile_To_config_KubeSchedulerProfile(a.(*v1beta3.KubeSchedulerProfile), b.(*config.KubeSchedulerProfile), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta3.NodeAffinityArgs)(nil), (*config.NodeAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_NodeAffinityArgs_To_config_NodeAffinityArgs(a.(*v1beta3.NodeAffinityArgs), b.(*config.NodeAffinityArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.NodeAffinityArgs)(nil), (*v1beta3.NodeAffinityArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_NodeAffinityArgs_To_v1beta3_NodeAffinityArgs(a.(*config.NodeAffinityArgs), b.(*v1beta3.NodeAffinityArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta3.NodeResourcesBalancedAllocationArgs)(nil), (*config.NodeResourcesBalancedAllocationArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(a.(*v1beta3.NodeResourcesBalancedAllocationArgs), b.(*config.NodeResourcesBalancedAllocationArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.NodeResourcesBalancedAllocationArgs)(nil), (*v1beta3.NodeResourcesBalancedAllocationArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_NodeResourcesBalancedAllocationArgs_To_v1beta3_NodeResourcesBalancedAllocationArgs(a.(*config.NodeResourcesBalancedAllocationArgs), b.(*v1beta3.NodeResourcesBalancedAllocationArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta3.NodeResourcesFitArgs)(nil), (*config.NodeResourcesFitArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(a.(*v1beta3.NodeResourcesFitArgs), b.(*config.NodeResourcesFitArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.NodeResourcesFitArgs)(nil), (*v1beta3.NodeResourcesFitArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_config_NodeResourcesFitArgs_To_v1beta3_NodeResourcesFitArgs(a.(*config.NodeResourcesFitArgs), b.(*v1beta3.NodeResourcesFitArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta3.Plugin)(nil), (*config.Plugin)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_Plugin_To_config_Plugin(a.(*v1beta3.Plugin), b.(*config.Plugin), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.Plugin)(nil), (*v1beta3.Plugin)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_Plugin_To_v1beta3_Plugin(a.(*config.Plugin), b.(*v1beta3.Plugin), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta3.PluginConfig)(nil), (*config.PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_PluginConfig_To_config_PluginConfig(a.(*v1beta3.PluginConfig), b.(*config.PluginConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.PluginConfig)(nil), (*v1beta3.PluginConfig)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_PluginConfig_To_v1beta3_PluginConfig(a.(*config.PluginConfig), b.(*v1beta3.PluginConfig), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta3.PluginSet)(nil), (*config.PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_PluginSet_To_config_PluginSet(a.(*v1beta3.PluginSet), b.(*config.PluginSet), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.PluginSet)(nil), (*v1beta3.PluginSet)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_PluginSet_To_v1beta3_PluginSet(a.(*config.PluginSet), b.(*v1beta3.PluginSet), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta3.Plugins)(nil), (*config.Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_Plugins_To_config_Plugins(a.(*v1beta3.Plugins), b.(*config.Plugins), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta3.PodTopologySpreadArgs)(nil), (*config.PodTopologySpreadArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(a.(*v1beta3.PodTopologySpreadArgs), b.(*config.PodTopologySpreadArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.PodTopologySpreadArgs)(nil), (*v1beta3.PodTopologySpreadArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_PodTopologySpreadArgs_To_v1beta3_PodTopologySpreadArgs(a.(*config.PodTopologySpreadArgs), b.(*v1beta3.PodTopologySpreadArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta3.RequestedToCapacityRatioParam)(nil), (*config.RequestedToCapacityRatioParam)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(a.(*v1beta3.RequestedToCapacityRatioParam), b.(*config.RequestedToCapacityRatioParam), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.RequestedToCapacityRatioParam)(nil), (*v1beta3.RequestedToCapacityRatioParam)(nil), func(a, b interface{}, scope conversion.Scope) 
error { - return Convert_config_RequestedToCapacityRatioParam_To_v1beta3_RequestedToCapacityRatioParam(a.(*config.RequestedToCapacityRatioParam), b.(*v1beta3.RequestedToCapacityRatioParam), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta3.ResourceSpec)(nil), (*config.ResourceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_ResourceSpec_To_config_ResourceSpec(a.(*v1beta3.ResourceSpec), b.(*config.ResourceSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.ResourceSpec)(nil), (*v1beta3.ResourceSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ResourceSpec_To_v1beta3_ResourceSpec(a.(*config.ResourceSpec), b.(*v1beta3.ResourceSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta3.ScoringStrategy)(nil), (*config.ScoringStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_ScoringStrategy_To_config_ScoringStrategy(a.(*v1beta3.ScoringStrategy), b.(*config.ScoringStrategy), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.ScoringStrategy)(nil), (*v1beta3.ScoringStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ScoringStrategy_To_v1beta3_ScoringStrategy(a.(*config.ScoringStrategy), b.(*v1beta3.ScoringStrategy), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta3.UtilizationShapePoint)(nil), (*config.UtilizationShapePoint)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_UtilizationShapePoint_To_config_UtilizationShapePoint(a.(*v1beta3.UtilizationShapePoint), b.(*config.UtilizationShapePoint), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.UtilizationShapePoint)(nil), (*v1beta3.UtilizationShapePoint)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_UtilizationShapePoint_To_v1beta3_UtilizationShapePoint(a.(*config.UtilizationShapePoint), b.(*v1beta3.UtilizationShapePoint), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*v1beta3.VolumeBindingArgs)(nil), (*config.VolumeBindingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_VolumeBindingArgs_To_config_VolumeBindingArgs(a.(*v1beta3.VolumeBindingArgs), b.(*config.VolumeBindingArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.VolumeBindingArgs)(nil), (*v1beta3.VolumeBindingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_VolumeBindingArgs_To_v1beta3_VolumeBindingArgs(a.(*config.VolumeBindingArgs), b.(*v1beta3.VolumeBindingArgs), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*config.KubeSchedulerConfiguration)(nil), (*v1beta3.KubeSchedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_KubeSchedulerConfiguration_To_v1beta3_KubeSchedulerConfiguration(a.(*config.KubeSchedulerConfiguration), b.(*v1beta3.KubeSchedulerConfiguration), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*config.KubeSchedulerProfile)(nil), (*v1beta3.KubeSchedulerProfile)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_config_KubeSchedulerProfile_To_v1beta3_KubeSchedulerProfile(a.(*config.KubeSchedulerProfile), b.(*v1beta3.KubeSchedulerProfile), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*config.Plugins)(nil), (*v1beta3.Plugins)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_Plugins_To_v1beta3_Plugins(a.(*config.Plugins), b.(*v1beta3.Plugins), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*v1beta3.KubeSchedulerConfiguration)(nil), (*config.KubeSchedulerConfiguration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(a.(*v1beta3.KubeSchedulerConfiguration), b.(*config.KubeSchedulerConfiguration), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta3_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(in *v1beta3.DefaultPreemptionArgs, out *config.DefaultPreemptionArgs, s conversion.Scope) error { - if err := v1.Convert_Pointer_int32_To_int32(&in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage, s); err != nil { - return err - } - if err := v1.Convert_Pointer_int32_To_int32(&in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta3_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs is an autogenerated conversion function. -func Convert_v1beta3_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(in *v1beta3.DefaultPreemptionArgs, out *config.DefaultPreemptionArgs, s conversion.Scope) error { - return autoConvert_v1beta3_DefaultPreemptionArgs_To_config_DefaultPreemptionArgs(in, out, s) -} - -func autoConvert_config_DefaultPreemptionArgs_To_v1beta3_DefaultPreemptionArgs(in *config.DefaultPreemptionArgs, out *v1beta3.DefaultPreemptionArgs, s conversion.Scope) error { - if err := v1.Convert_int32_To_Pointer_int32(&in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage, s); err != nil { - return err - } - if err := v1.Convert_int32_To_Pointer_int32(&in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute, s); err != nil { - return err - } - return nil -} - -// Convert_config_DefaultPreemptionArgs_To_v1beta3_DefaultPreemptionArgs is an autogenerated conversion function. -func Convert_config_DefaultPreemptionArgs_To_v1beta3_DefaultPreemptionArgs(in *config.DefaultPreemptionArgs, out *v1beta3.DefaultPreemptionArgs, s conversion.Scope) error { - return autoConvert_config_DefaultPreemptionArgs_To_v1beta3_DefaultPreemptionArgs(in, out, s) -} - -func autoConvert_v1beta3_Extender_To_config_Extender(in *v1beta3.Extender, out *config.Extender, s conversion.Scope) error { - out.URLPrefix = in.URLPrefix - out.FilterVerb = in.FilterVerb - out.PreemptVerb = in.PreemptVerb - out.PrioritizeVerb = in.PrioritizeVerb - out.Weight = in.Weight - out.BindVerb = in.BindVerb - out.EnableHTTPS = in.EnableHTTPS - out.TLSConfig = (*config.ExtenderTLSConfig)(unsafe.Pointer(in.TLSConfig)) - out.HTTPTimeout = in.HTTPTimeout - out.NodeCacheCapable = in.NodeCacheCapable - out.ManagedResources = *(*[]config.ExtenderManagedResource)(unsafe.Pointer(&in.ManagedResources)) - out.Ignorable = in.Ignorable - return nil -} - -// Convert_v1beta3_Extender_To_config_Extender is an autogenerated conversion function. 
-func Convert_v1beta3_Extender_To_config_Extender(in *v1beta3.Extender, out *config.Extender, s conversion.Scope) error {
-	return autoConvert_v1beta3_Extender_To_config_Extender(in, out, s)
-}
-
-func autoConvert_config_Extender_To_v1beta3_Extender(in *config.Extender, out *v1beta3.Extender, s conversion.Scope) error {
-	out.URLPrefix = in.URLPrefix
-	out.FilterVerb = in.FilterVerb
-	out.PreemptVerb = in.PreemptVerb
-	out.PrioritizeVerb = in.PrioritizeVerb
-	out.Weight = in.Weight
-	out.BindVerb = in.BindVerb
-	out.EnableHTTPS = in.EnableHTTPS
-	out.TLSConfig = (*v1beta3.ExtenderTLSConfig)(unsafe.Pointer(in.TLSConfig))
-	out.HTTPTimeout = in.HTTPTimeout
-	out.NodeCacheCapable = in.NodeCacheCapable
-	out.ManagedResources = *(*[]v1beta3.ExtenderManagedResource)(unsafe.Pointer(&in.ManagedResources))
-	out.Ignorable = in.Ignorable
-	return nil
-}
-
-// Convert_config_Extender_To_v1beta3_Extender is an autogenerated conversion function.
-func Convert_config_Extender_To_v1beta3_Extender(in *config.Extender, out *v1beta3.Extender, s conversion.Scope) error {
-	return autoConvert_config_Extender_To_v1beta3_Extender(in, out, s)
-}
-
-func autoConvert_v1beta3_ExtenderManagedResource_To_config_ExtenderManagedResource(in *v1beta3.ExtenderManagedResource, out *config.ExtenderManagedResource, s conversion.Scope) error {
-	out.Name = in.Name
-	out.IgnoredByScheduler = in.IgnoredByScheduler
-	return nil
-}
-
-// Convert_v1beta3_ExtenderManagedResource_To_config_ExtenderManagedResource is an autogenerated conversion function.
-func Convert_v1beta3_ExtenderManagedResource_To_config_ExtenderManagedResource(in *v1beta3.ExtenderManagedResource, out *config.ExtenderManagedResource, s conversion.Scope) error {
-	return autoConvert_v1beta3_ExtenderManagedResource_To_config_ExtenderManagedResource(in, out, s)
-}
-
-func autoConvert_config_ExtenderManagedResource_To_v1beta3_ExtenderManagedResource(in *config.ExtenderManagedResource, out *v1beta3.ExtenderManagedResource, s conversion.Scope) error {
-	out.Name = in.Name
-	out.IgnoredByScheduler = in.IgnoredByScheduler
-	return nil
-}
-
-// Convert_config_ExtenderManagedResource_To_v1beta3_ExtenderManagedResource is an autogenerated conversion function.
-func Convert_config_ExtenderManagedResource_To_v1beta3_ExtenderManagedResource(in *config.ExtenderManagedResource, out *v1beta3.ExtenderManagedResource, s conversion.Scope) error {
-	return autoConvert_config_ExtenderManagedResource_To_v1beta3_ExtenderManagedResource(in, out, s)
-}
-
-func autoConvert_v1beta3_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in *v1beta3.ExtenderTLSConfig, out *config.ExtenderTLSConfig, s conversion.Scope) error {
-	out.Insecure = in.Insecure
-	out.ServerName = in.ServerName
-	out.CertFile = in.CertFile
-	out.KeyFile = in.KeyFile
-	out.CAFile = in.CAFile
-	out.CertData = *(*[]byte)(unsafe.Pointer(&in.CertData))
-	out.KeyData = *(*[]byte)(unsafe.Pointer(&in.KeyData))
-	out.CAData = *(*[]byte)(unsafe.Pointer(&in.CAData))
-	return nil
-}
-
-// Convert_v1beta3_ExtenderTLSConfig_To_config_ExtenderTLSConfig is an autogenerated conversion function.
-func Convert_v1beta3_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in *v1beta3.ExtenderTLSConfig, out *config.ExtenderTLSConfig, s conversion.Scope) error {
-	return autoConvert_v1beta3_ExtenderTLSConfig_To_config_ExtenderTLSConfig(in, out, s)
-}
-
-func autoConvert_config_ExtenderTLSConfig_To_v1beta3_ExtenderTLSConfig(in *config.ExtenderTLSConfig, out *v1beta3.ExtenderTLSConfig, s conversion.Scope) error {
-	out.Insecure = in.Insecure
-	out.ServerName = in.ServerName
-	out.CertFile = in.CertFile
-	out.KeyFile = in.KeyFile
-	out.CAFile = in.CAFile
-	out.CertData = *(*[]byte)(unsafe.Pointer(&in.CertData))
-	out.KeyData = *(*[]byte)(unsafe.Pointer(&in.KeyData))
-	out.CAData = *(*[]byte)(unsafe.Pointer(&in.CAData))
-	return nil
-}
-
-// Convert_config_ExtenderTLSConfig_To_v1beta3_ExtenderTLSConfig is an autogenerated conversion function.
-func Convert_config_ExtenderTLSConfig_To_v1beta3_ExtenderTLSConfig(in *config.ExtenderTLSConfig, out *v1beta3.ExtenderTLSConfig, s conversion.Scope) error {
-	return autoConvert_config_ExtenderTLSConfig_To_v1beta3_ExtenderTLSConfig(in, out, s)
-}
-
-func autoConvert_v1beta3_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in *v1beta3.InterPodAffinityArgs, out *config.InterPodAffinityArgs, s conversion.Scope) error {
-	if err := v1.Convert_Pointer_int32_To_int32(&in.HardPodAffinityWeight, &out.HardPodAffinityWeight, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1beta3_InterPodAffinityArgs_To_config_InterPodAffinityArgs is an autogenerated conversion function.
-func Convert_v1beta3_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in *v1beta3.InterPodAffinityArgs, out *config.InterPodAffinityArgs, s conversion.Scope) error {
-	return autoConvert_v1beta3_InterPodAffinityArgs_To_config_InterPodAffinityArgs(in, out, s)
-}
-
-func autoConvert_config_InterPodAffinityArgs_To_v1beta3_InterPodAffinityArgs(in *config.InterPodAffinityArgs, out *v1beta3.InterPodAffinityArgs, s conversion.Scope) error {
-	if err := v1.Convert_int32_To_Pointer_int32(&in.HardPodAffinityWeight, &out.HardPodAffinityWeight, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_config_InterPodAffinityArgs_To_v1beta3_InterPodAffinityArgs is an autogenerated conversion function.
-func Convert_config_InterPodAffinityArgs_To_v1beta3_InterPodAffinityArgs(in *config.InterPodAffinityArgs, out *v1beta3.InterPodAffinityArgs, s conversion.Scope) error {
-	return autoConvert_config_InterPodAffinityArgs_To_v1beta3_InterPodAffinityArgs(in, out, s)
-}
-
-func autoConvert_v1beta3_KubeSchedulerConfiguration_To_config_KubeSchedulerConfiguration(in *v1beta3.KubeSchedulerConfiguration, out *config.KubeSchedulerConfiguration, s conversion.Scope) error {
-	if err := v1.Convert_Pointer_int32_To_int32(&in.Parallelism, &out.Parallelism, s); err != nil {
-		return err
-	}
-	if err := v1alpha1.Convert_v1alpha1_LeaderElectionConfiguration_To_config_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil {
-		return err
-	}
-	if err := v1alpha1.Convert_v1alpha1_ClientConnectionConfiguration_To_config_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil {
-		return err
-	}
-	if err := v1alpha1.Convert_v1alpha1_DebuggingConfiguration_To_config_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil {
-		return err
-	}
-	out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore))
-	if err := v1.Convert_Pointer_int64_To_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil {
-		return err
-	}
-	if err := v1.Convert_Pointer_int64_To_int64(&in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds, s); err != nil {
-		return err
-	}
-	if in.Profiles != nil {
-		in, out := &in.Profiles, &out.Profiles
-		*out = make([]config.KubeSchedulerProfile, len(*in))
-		for i := range *in {
-			if err := Convert_v1beta3_KubeSchedulerProfile_To_config_KubeSchedulerProfile(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Profiles = nil
-	}
-	out.Extenders = *(*[]config.Extender)(unsafe.Pointer(&in.Extenders))
-	return nil
-}
-
-func autoConvert_config_KubeSchedulerConfiguration_To_v1beta3_KubeSchedulerConfiguration(in *config.KubeSchedulerConfiguration, out *v1beta3.KubeSchedulerConfiguration, s conversion.Scope) error {
-	if err := v1.Convert_int32_To_Pointer_int32(&in.Parallelism, &out.Parallelism, s); err != nil {
-		return err
-	}
-	if err := v1alpha1.Convert_config_LeaderElectionConfiguration_To_v1alpha1_LeaderElectionConfiguration(&in.LeaderElection, &out.LeaderElection, s); err != nil {
-		return err
-	}
-	if err := v1alpha1.Convert_config_ClientConnectionConfiguration_To_v1alpha1_ClientConnectionConfiguration(&in.ClientConnection, &out.ClientConnection, s); err != nil {
-		return err
-	}
-	// WARNING: in.HealthzBindAddress requires manual conversion: does not exist in peer-type
-	// WARNING: in.MetricsBindAddress requires manual conversion: does not exist in peer-type
-	if err := v1alpha1.Convert_config_DebuggingConfiguration_To_v1alpha1_DebuggingConfiguration(&in.DebuggingConfiguration, &out.DebuggingConfiguration, s); err != nil {
-		return err
-	}
-	out.PercentageOfNodesToScore = (*int32)(unsafe.Pointer(in.PercentageOfNodesToScore))
-	if err := v1.Convert_int64_To_Pointer_int64(&in.PodInitialBackoffSeconds, &out.PodInitialBackoffSeconds, s); err != nil {
-		return err
-	}
-	if err := v1.Convert_int64_To_Pointer_int64(&in.PodMaxBackoffSeconds, &out.PodMaxBackoffSeconds, s); err != nil {
-		return err
-	}
-	if in.Profiles != nil {
-		in, out := &in.Profiles, &out.Profiles
-		*out = make([]v1beta3.KubeSchedulerProfile, len(*in))
-		for i := range *in {
-			if err := Convert_config_KubeSchedulerProfile_To_v1beta3_KubeSchedulerProfile(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Profiles = nil
-	}
-	out.Extenders = *(*[]v1beta3.Extender)(unsafe.Pointer(&in.Extenders))
-	return nil
-}
-
-func autoConvert_v1beta3_KubeSchedulerProfile_To_config_KubeSchedulerProfile(in *v1beta3.KubeSchedulerProfile, out *config.KubeSchedulerProfile, s conversion.Scope) error {
-	if err := v1.Convert_Pointer_string_To_string(&in.SchedulerName, &out.SchedulerName, s); err != nil {
-		return err
-	}
-	if in.Plugins != nil {
-		in, out := &in.Plugins, &out.Plugins
-		*out = new(config.Plugins)
-		if err := Convert_v1beta3_Plugins_To_config_Plugins(*in, *out, s); err != nil {
-			return err
-		}
-	} else {
-		out.Plugins = nil
-	}
-	if in.PluginConfig != nil {
-		in, out := &in.PluginConfig, &out.PluginConfig
-		*out = make([]config.PluginConfig, len(*in))
-		for i := range *in {
-			if err := Convert_v1beta3_PluginConfig_To_config_PluginConfig(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.PluginConfig = nil
-	}
-	return nil
-}
-
-// Convert_v1beta3_KubeSchedulerProfile_To_config_KubeSchedulerProfile is an autogenerated conversion function.
-func Convert_v1beta3_KubeSchedulerProfile_To_config_KubeSchedulerProfile(in *v1beta3.KubeSchedulerProfile, out *config.KubeSchedulerProfile, s conversion.Scope) error {
-	return autoConvert_v1beta3_KubeSchedulerProfile_To_config_KubeSchedulerProfile(in, out, s)
-}
-
-func autoConvert_config_KubeSchedulerProfile_To_v1beta3_KubeSchedulerProfile(in *config.KubeSchedulerProfile, out *v1beta3.KubeSchedulerProfile, s conversion.Scope) error {
-	if err := v1.Convert_string_To_Pointer_string(&in.SchedulerName, &out.SchedulerName, s); err != nil {
-		return err
-	}
-	// WARNING: in.PercentageOfNodesToScore requires manual conversion: does not exist in peer-type
-	if in.Plugins != nil {
-		in, out := &in.Plugins, &out.Plugins
-		*out = new(v1beta3.Plugins)
-		if err := Convert_config_Plugins_To_v1beta3_Plugins(*in, *out, s); err != nil {
-			return err
-		}
-	} else {
-		out.Plugins = nil
-	}
-	if in.PluginConfig != nil {
-		in, out := &in.PluginConfig, &out.PluginConfig
-		*out = make([]v1beta3.PluginConfig, len(*in))
-		for i := range *in {
-			if err := Convert_config_PluginConfig_To_v1beta3_PluginConfig(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.PluginConfig = nil
-	}
-	return nil
-}
-
-func autoConvert_v1beta3_NodeAffinityArgs_To_config_NodeAffinityArgs(in *v1beta3.NodeAffinityArgs, out *config.NodeAffinityArgs, s conversion.Scope) error {
-	out.AddedAffinity = (*corev1.NodeAffinity)(unsafe.Pointer(in.AddedAffinity))
-	return nil
-}
-
-// Convert_v1beta3_NodeAffinityArgs_To_config_NodeAffinityArgs is an autogenerated conversion function.
-func Convert_v1beta3_NodeAffinityArgs_To_config_NodeAffinityArgs(in *v1beta3.NodeAffinityArgs, out *config.NodeAffinityArgs, s conversion.Scope) error {
-	return autoConvert_v1beta3_NodeAffinityArgs_To_config_NodeAffinityArgs(in, out, s)
-}
-
-func autoConvert_config_NodeAffinityArgs_To_v1beta3_NodeAffinityArgs(in *config.NodeAffinityArgs, out *v1beta3.NodeAffinityArgs, s conversion.Scope) error {
-	out.AddedAffinity = (*corev1.NodeAffinity)(unsafe.Pointer(in.AddedAffinity))
-	return nil
-}
-
-// Convert_config_NodeAffinityArgs_To_v1beta3_NodeAffinityArgs is an autogenerated conversion function.
-func Convert_config_NodeAffinityArgs_To_v1beta3_NodeAffinityArgs(in *config.NodeAffinityArgs, out *v1beta3.NodeAffinityArgs, s conversion.Scope) error {
-	return autoConvert_config_NodeAffinityArgs_To_v1beta3_NodeAffinityArgs(in, out, s)
-}
-
-func autoConvert_v1beta3_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(in *v1beta3.NodeResourcesBalancedAllocationArgs, out *config.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error {
-	out.Resources = *(*[]config.ResourceSpec)(unsafe.Pointer(&in.Resources))
-	return nil
-}
-
-// Convert_v1beta3_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs is an autogenerated conversion function.
-func Convert_v1beta3_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(in *v1beta3.NodeResourcesBalancedAllocationArgs, out *config.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error {
-	return autoConvert_v1beta3_NodeResourcesBalancedAllocationArgs_To_config_NodeResourcesBalancedAllocationArgs(in, out, s)
-}
-
-func autoConvert_config_NodeResourcesBalancedAllocationArgs_To_v1beta3_NodeResourcesBalancedAllocationArgs(in *config.NodeResourcesBalancedAllocationArgs, out *v1beta3.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error {
-	out.Resources = *(*[]v1beta3.ResourceSpec)(unsafe.Pointer(&in.Resources))
-	return nil
-}
-
-// Convert_config_NodeResourcesBalancedAllocationArgs_To_v1beta3_NodeResourcesBalancedAllocationArgs is an autogenerated conversion function.
-func Convert_config_NodeResourcesBalancedAllocationArgs_To_v1beta3_NodeResourcesBalancedAllocationArgs(in *config.NodeResourcesBalancedAllocationArgs, out *v1beta3.NodeResourcesBalancedAllocationArgs, s conversion.Scope) error {
-	return autoConvert_config_NodeResourcesBalancedAllocationArgs_To_v1beta3_NodeResourcesBalancedAllocationArgs(in, out, s)
-}
-
-func autoConvert_v1beta3_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(in *v1beta3.NodeResourcesFitArgs, out *config.NodeResourcesFitArgs, s conversion.Scope) error {
-	out.IgnoredResources = *(*[]string)(unsafe.Pointer(&in.IgnoredResources))
-	out.IgnoredResourceGroups = *(*[]string)(unsafe.Pointer(&in.IgnoredResourceGroups))
-	out.ScoringStrategy = (*config.ScoringStrategy)(unsafe.Pointer(in.ScoringStrategy))
-	return nil
-}
-
-// Convert_v1beta3_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs is an autogenerated conversion function.
-func Convert_v1beta3_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(in *v1beta3.NodeResourcesFitArgs, out *config.NodeResourcesFitArgs, s conversion.Scope) error {
-	return autoConvert_v1beta3_NodeResourcesFitArgs_To_config_NodeResourcesFitArgs(in, out, s)
-}
-
-func autoConvert_config_NodeResourcesFitArgs_To_v1beta3_NodeResourcesFitArgs(in *config.NodeResourcesFitArgs, out *v1beta3.NodeResourcesFitArgs, s conversion.Scope) error {
-	out.IgnoredResources = *(*[]string)(unsafe.Pointer(&in.IgnoredResources))
-	out.IgnoredResourceGroups = *(*[]string)(unsafe.Pointer(&in.IgnoredResourceGroups))
-	out.ScoringStrategy = (*v1beta3.ScoringStrategy)(unsafe.Pointer(in.ScoringStrategy))
-	return nil
-}
-
-// Convert_config_NodeResourcesFitArgs_To_v1beta3_NodeResourcesFitArgs is an autogenerated conversion function.
-func Convert_config_NodeResourcesFitArgs_To_v1beta3_NodeResourcesFitArgs(in *config.NodeResourcesFitArgs, out *v1beta3.NodeResourcesFitArgs, s conversion.Scope) error {
-	return autoConvert_config_NodeResourcesFitArgs_To_v1beta3_NodeResourcesFitArgs(in, out, s)
-}
-
-func autoConvert_v1beta3_Plugin_To_config_Plugin(in *v1beta3.Plugin, out *config.Plugin, s conversion.Scope) error {
-	out.Name = in.Name
-	if err := v1.Convert_Pointer_int32_To_int32(&in.Weight, &out.Weight, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1beta3_Plugin_To_config_Plugin is an autogenerated conversion function.
-func Convert_v1beta3_Plugin_To_config_Plugin(in *v1beta3.Plugin, out *config.Plugin, s conversion.Scope) error {
-	return autoConvert_v1beta3_Plugin_To_config_Plugin(in, out, s)
-}
-
-func autoConvert_config_Plugin_To_v1beta3_Plugin(in *config.Plugin, out *v1beta3.Plugin, s conversion.Scope) error {
-	out.Name = in.Name
-	if err := v1.Convert_int32_To_Pointer_int32(&in.Weight, &out.Weight, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_config_Plugin_To_v1beta3_Plugin is an autogenerated conversion function.
-func Convert_config_Plugin_To_v1beta3_Plugin(in *config.Plugin, out *v1beta3.Plugin, s conversion.Scope) error {
-	return autoConvert_config_Plugin_To_v1beta3_Plugin(in, out, s)
-}
-
-func autoConvert_v1beta3_PluginConfig_To_config_PluginConfig(in *v1beta3.PluginConfig, out *config.PluginConfig, s conversion.Scope) error {
-	out.Name = in.Name
-	if err := runtime.Convert_runtime_RawExtension_To_runtime_Object(&in.Args, &out.Args, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1beta3_PluginConfig_To_config_PluginConfig is an autogenerated conversion function.
-func Convert_v1beta3_PluginConfig_To_config_PluginConfig(in *v1beta3.PluginConfig, out *config.PluginConfig, s conversion.Scope) error {
-	return autoConvert_v1beta3_PluginConfig_To_config_PluginConfig(in, out, s)
-}
-
-func autoConvert_config_PluginConfig_To_v1beta3_PluginConfig(in *config.PluginConfig, out *v1beta3.PluginConfig, s conversion.Scope) error {
-	out.Name = in.Name
-	if err := runtime.Convert_runtime_Object_To_runtime_RawExtension(&in.Args, &out.Args, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_config_PluginConfig_To_v1beta3_PluginConfig is an autogenerated conversion function.
-func Convert_config_PluginConfig_To_v1beta3_PluginConfig(in *config.PluginConfig, out *v1beta3.PluginConfig, s conversion.Scope) error {
-	return autoConvert_config_PluginConfig_To_v1beta3_PluginConfig(in, out, s)
-}
-
-func autoConvert_v1beta3_PluginSet_To_config_PluginSet(in *v1beta3.PluginSet, out *config.PluginSet, s conversion.Scope) error {
-	if in.Enabled != nil {
-		in, out := &in.Enabled, &out.Enabled
-		*out = make([]config.Plugin, len(*in))
-		for i := range *in {
-			if err := Convert_v1beta3_Plugin_To_config_Plugin(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Enabled = nil
-	}
-	if in.Disabled != nil {
-		in, out := &in.Disabled, &out.Disabled
-		*out = make([]config.Plugin, len(*in))
-		for i := range *in {
-			if err := Convert_v1beta3_Plugin_To_config_Plugin(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Disabled = nil
-	}
-	return nil
-}
-
-// Convert_v1beta3_PluginSet_To_config_PluginSet is an autogenerated conversion function.
-func Convert_v1beta3_PluginSet_To_config_PluginSet(in *v1beta3.PluginSet, out *config.PluginSet, s conversion.Scope) error {
-	return autoConvert_v1beta3_PluginSet_To_config_PluginSet(in, out, s)
-}
-
-func autoConvert_config_PluginSet_To_v1beta3_PluginSet(in *config.PluginSet, out *v1beta3.PluginSet, s conversion.Scope) error {
-	if in.Enabled != nil {
-		in, out := &in.Enabled, &out.Enabled
-		*out = make([]v1beta3.Plugin, len(*in))
-		for i := range *in {
-			if err := Convert_config_Plugin_To_v1beta3_Plugin(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Enabled = nil
-	}
-	if in.Disabled != nil {
-		in, out := &in.Disabled, &out.Disabled
-		*out = make([]v1beta3.Plugin, len(*in))
-		for i := range *in {
-			if err := Convert_config_Plugin_To_v1beta3_Plugin(&(*in)[i], &(*out)[i], s); err != nil {
-				return err
-			}
-		}
-	} else {
-		out.Disabled = nil
-	}
-	return nil
-}
-
-// Convert_config_PluginSet_To_v1beta3_PluginSet is an autogenerated conversion function.
-func Convert_config_PluginSet_To_v1beta3_PluginSet(in *config.PluginSet, out *v1beta3.PluginSet, s conversion.Scope) error {
-	return autoConvert_config_PluginSet_To_v1beta3_PluginSet(in, out, s)
-}
-
-func autoConvert_v1beta3_Plugins_To_config_Plugins(in *v1beta3.Plugins, out *config.Plugins, s conversion.Scope) error {
-	if err := Convert_v1beta3_PluginSet_To_config_PluginSet(&in.PreEnqueue, &out.PreEnqueue, s); err != nil {
-		return err
-	}
-	if err := Convert_v1beta3_PluginSet_To_config_PluginSet(&in.QueueSort, &out.QueueSort, s); err != nil {
-		return err
-	}
-	if err := Convert_v1beta3_PluginSet_To_config_PluginSet(&in.PreFilter, &out.PreFilter, s); err != nil {
-		return err
-	}
-	if err := Convert_v1beta3_PluginSet_To_config_PluginSet(&in.Filter, &out.Filter, s); err != nil {
-		return err
-	}
-	if err := Convert_v1beta3_PluginSet_To_config_PluginSet(&in.PostFilter, &out.PostFilter, s); err != nil {
-		return err
-	}
-	if err := Convert_v1beta3_PluginSet_To_config_PluginSet(&in.PreScore, &out.PreScore, s); err != nil {
-		return err
-	}
-	if err := Convert_v1beta3_PluginSet_To_config_PluginSet(&in.Score, &out.Score, s); err != nil {
-		return err
-	}
-	if err := Convert_v1beta3_PluginSet_To_config_PluginSet(&in.Reserve, &out.Reserve, s); err != nil {
-		return err
-	}
-	if err := Convert_v1beta3_PluginSet_To_config_PluginSet(&in.Permit, &out.Permit, s); err != nil {
-		return err
-	}
-	if err := Convert_v1beta3_PluginSet_To_config_PluginSet(&in.PreBind, &out.PreBind, s); err != nil {
-		return err
-	}
-	if err := Convert_v1beta3_PluginSet_To_config_PluginSet(&in.Bind, &out.Bind, s); err != nil {
-		return err
-	}
-	if err := Convert_v1beta3_PluginSet_To_config_PluginSet(&in.PostBind, &out.PostBind, s); err != nil {
-		return err
-	}
-	if err := Convert_v1beta3_PluginSet_To_config_PluginSet(&in.MultiPoint, &out.MultiPoint, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1beta3_Plugins_To_config_Plugins is an autogenerated conversion function.
-func Convert_v1beta3_Plugins_To_config_Plugins(in *v1beta3.Plugins, out *config.Plugins, s conversion.Scope) error {
-	return autoConvert_v1beta3_Plugins_To_config_Plugins(in, out, s)
-}
-
-func autoConvert_config_Plugins_To_v1beta3_Plugins(in *config.Plugins, out *v1beta3.Plugins, s conversion.Scope) error {
-	if err := Convert_config_PluginSet_To_v1beta3_PluginSet(&in.PreEnqueue, &out.PreEnqueue, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1beta3_PluginSet(&in.QueueSort, &out.QueueSort, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1beta3_PluginSet(&in.PreFilter, &out.PreFilter, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1beta3_PluginSet(&in.Filter, &out.Filter, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1beta3_PluginSet(&in.PostFilter, &out.PostFilter, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1beta3_PluginSet(&in.PreScore, &out.PreScore, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1beta3_PluginSet(&in.Score, &out.Score, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1beta3_PluginSet(&in.Reserve, &out.Reserve, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1beta3_PluginSet(&in.Permit, &out.Permit, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1beta3_PluginSet(&in.PreBind, &out.PreBind, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1beta3_PluginSet(&in.Bind, &out.Bind, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1beta3_PluginSet(&in.PostBind, &out.PostBind, s); err != nil {
-		return err
-	}
-	if err := Convert_config_PluginSet_To_v1beta3_PluginSet(&in.MultiPoint, &out.MultiPoint, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func autoConvert_v1beta3_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(in *v1beta3.PodTopologySpreadArgs, out *config.PodTopologySpreadArgs, s conversion.Scope) error {
-	out.DefaultConstraints = *(*[]corev1.TopologySpreadConstraint)(unsafe.Pointer(&in.DefaultConstraints))
-	out.DefaultingType = config.PodTopologySpreadConstraintsDefaulting(in.DefaultingType)
-	return nil
-}
-
-// Convert_v1beta3_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs is an autogenerated conversion function.
-func Convert_v1beta3_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(in *v1beta3.PodTopologySpreadArgs, out *config.PodTopologySpreadArgs, s conversion.Scope) error {
-	return autoConvert_v1beta3_PodTopologySpreadArgs_To_config_PodTopologySpreadArgs(in, out, s)
-}
-
-func autoConvert_config_PodTopologySpreadArgs_To_v1beta3_PodTopologySpreadArgs(in *config.PodTopologySpreadArgs, out *v1beta3.PodTopologySpreadArgs, s conversion.Scope) error {
-	out.DefaultConstraints = *(*[]corev1.TopologySpreadConstraint)(unsafe.Pointer(&in.DefaultConstraints))
-	out.DefaultingType = v1beta3.PodTopologySpreadConstraintsDefaulting(in.DefaultingType)
-	return nil
-}
-
-// Convert_config_PodTopologySpreadArgs_To_v1beta3_PodTopologySpreadArgs is an autogenerated conversion function.
-func Convert_config_PodTopologySpreadArgs_To_v1beta3_PodTopologySpreadArgs(in *config.PodTopologySpreadArgs, out *v1beta3.PodTopologySpreadArgs, s conversion.Scope) error {
-	return autoConvert_config_PodTopologySpreadArgs_To_v1beta3_PodTopologySpreadArgs(in, out, s)
-}
-
-func autoConvert_v1beta3_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(in *v1beta3.RequestedToCapacityRatioParam, out *config.RequestedToCapacityRatioParam, s conversion.Scope) error {
-	out.Shape = *(*[]config.UtilizationShapePoint)(unsafe.Pointer(&in.Shape))
-	return nil
-}
-
-// Convert_v1beta3_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam is an autogenerated conversion function.
-func Convert_v1beta3_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(in *v1beta3.RequestedToCapacityRatioParam, out *config.RequestedToCapacityRatioParam, s conversion.Scope) error {
-	return autoConvert_v1beta3_RequestedToCapacityRatioParam_To_config_RequestedToCapacityRatioParam(in, out, s)
-}
-
-func autoConvert_config_RequestedToCapacityRatioParam_To_v1beta3_RequestedToCapacityRatioParam(in *config.RequestedToCapacityRatioParam, out *v1beta3.RequestedToCapacityRatioParam, s conversion.Scope) error {
-	out.Shape = *(*[]v1beta3.UtilizationShapePoint)(unsafe.Pointer(&in.Shape))
-	return nil
-}
-
-// Convert_config_RequestedToCapacityRatioParam_To_v1beta3_RequestedToCapacityRatioParam is an autogenerated conversion function.
-func Convert_config_RequestedToCapacityRatioParam_To_v1beta3_RequestedToCapacityRatioParam(in *config.RequestedToCapacityRatioParam, out *v1beta3.RequestedToCapacityRatioParam, s conversion.Scope) error {
-	return autoConvert_config_RequestedToCapacityRatioParam_To_v1beta3_RequestedToCapacityRatioParam(in, out, s)
-}
-
-func autoConvert_v1beta3_ResourceSpec_To_config_ResourceSpec(in *v1beta3.ResourceSpec, out *config.ResourceSpec, s conversion.Scope) error {
-	out.Name = in.Name
-	out.Weight = in.Weight
-	return nil
-}
-
-// Convert_v1beta3_ResourceSpec_To_config_ResourceSpec is an autogenerated conversion function.
-func Convert_v1beta3_ResourceSpec_To_config_ResourceSpec(in *v1beta3.ResourceSpec, out *config.ResourceSpec, s conversion.Scope) error {
-	return autoConvert_v1beta3_ResourceSpec_To_config_ResourceSpec(in, out, s)
-}
-
-func autoConvert_config_ResourceSpec_To_v1beta3_ResourceSpec(in *config.ResourceSpec, out *v1beta3.ResourceSpec, s conversion.Scope) error {
-	out.Name = in.Name
-	out.Weight = in.Weight
-	return nil
-}
-
-// Convert_config_ResourceSpec_To_v1beta3_ResourceSpec is an autogenerated conversion function.
-func Convert_config_ResourceSpec_To_v1beta3_ResourceSpec(in *config.ResourceSpec, out *v1beta3.ResourceSpec, s conversion.Scope) error {
-	return autoConvert_config_ResourceSpec_To_v1beta3_ResourceSpec(in, out, s)
-}
-
-func autoConvert_v1beta3_ScoringStrategy_To_config_ScoringStrategy(in *v1beta3.ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error {
-	out.Type = config.ScoringStrategyType(in.Type)
-	out.Resources = *(*[]config.ResourceSpec)(unsafe.Pointer(&in.Resources))
-	out.RequestedToCapacityRatio = (*config.RequestedToCapacityRatioParam)(unsafe.Pointer(in.RequestedToCapacityRatio))
-	return nil
-}
-
-// Convert_v1beta3_ScoringStrategy_To_config_ScoringStrategy is an autogenerated conversion function.
-func Convert_v1beta3_ScoringStrategy_To_config_ScoringStrategy(in *v1beta3.ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error {
-	return autoConvert_v1beta3_ScoringStrategy_To_config_ScoringStrategy(in, out, s)
-}
-
-func autoConvert_config_ScoringStrategy_To_v1beta3_ScoringStrategy(in *config.ScoringStrategy, out *v1beta3.ScoringStrategy, s conversion.Scope) error {
-	out.Type = v1beta3.ScoringStrategyType(in.Type)
-	out.Resources = *(*[]v1beta3.ResourceSpec)(unsafe.Pointer(&in.Resources))
-	out.RequestedToCapacityRatio = (*v1beta3.RequestedToCapacityRatioParam)(unsafe.Pointer(in.RequestedToCapacityRatio))
-	return nil
-}
-
-// Convert_config_ScoringStrategy_To_v1beta3_ScoringStrategy is an autogenerated conversion function.
-func Convert_config_ScoringStrategy_To_v1beta3_ScoringStrategy(in *config.ScoringStrategy, out *v1beta3.ScoringStrategy, s conversion.Scope) error {
-	return autoConvert_config_ScoringStrategy_To_v1beta3_ScoringStrategy(in, out, s)
-}
-
-func autoConvert_v1beta3_UtilizationShapePoint_To_config_UtilizationShapePoint(in *v1beta3.UtilizationShapePoint, out *config.UtilizationShapePoint, s conversion.Scope) error {
-	out.Utilization = in.Utilization
-	out.Score = in.Score
-	return nil
-}
-
-// Convert_v1beta3_UtilizationShapePoint_To_config_UtilizationShapePoint is an autogenerated conversion function.
-func Convert_v1beta3_UtilizationShapePoint_To_config_UtilizationShapePoint(in *v1beta3.UtilizationShapePoint, out *config.UtilizationShapePoint, s conversion.Scope) error {
-	return autoConvert_v1beta3_UtilizationShapePoint_To_config_UtilizationShapePoint(in, out, s)
-}
-
-func autoConvert_config_UtilizationShapePoint_To_v1beta3_UtilizationShapePoint(in *config.UtilizationShapePoint, out *v1beta3.UtilizationShapePoint, s conversion.Scope) error {
-	out.Utilization = in.Utilization
-	out.Score = in.Score
-	return nil
-}
-
-// Convert_config_UtilizationShapePoint_To_v1beta3_UtilizationShapePoint is an autogenerated conversion function.
-func Convert_config_UtilizationShapePoint_To_v1beta3_UtilizationShapePoint(in *config.UtilizationShapePoint, out *v1beta3.UtilizationShapePoint, s conversion.Scope) error {
-	return autoConvert_config_UtilizationShapePoint_To_v1beta3_UtilizationShapePoint(in, out, s)
-}
-
-func autoConvert_v1beta3_VolumeBindingArgs_To_config_VolumeBindingArgs(in *v1beta3.VolumeBindingArgs, out *config.VolumeBindingArgs, s conversion.Scope) error {
-	if err := v1.Convert_Pointer_int64_To_int64(&in.BindTimeoutSeconds, &out.BindTimeoutSeconds, s); err != nil {
-		return err
-	}
-	out.Shape = *(*[]config.UtilizationShapePoint)(unsafe.Pointer(&in.Shape))
-	return nil
-}
-
-// Convert_v1beta3_VolumeBindingArgs_To_config_VolumeBindingArgs is an autogenerated conversion function.
-func Convert_v1beta3_VolumeBindingArgs_To_config_VolumeBindingArgs(in *v1beta3.VolumeBindingArgs, out *config.VolumeBindingArgs, s conversion.Scope) error {
-	return autoConvert_v1beta3_VolumeBindingArgs_To_config_VolumeBindingArgs(in, out, s)
-}
-
-func autoConvert_config_VolumeBindingArgs_To_v1beta3_VolumeBindingArgs(in *config.VolumeBindingArgs, out *v1beta3.VolumeBindingArgs, s conversion.Scope) error {
-	if err := v1.Convert_int64_To_Pointer_int64(&in.BindTimeoutSeconds, &out.BindTimeoutSeconds, s); err != nil {
-		return err
-	}
-	out.Shape = *(*[]v1beta3.UtilizationShapePoint)(unsafe.Pointer(&in.Shape))
-	return nil
-}
-
-// Convert_config_VolumeBindingArgs_To_v1beta3_VolumeBindingArgs is an autogenerated conversion function.
-func Convert_config_VolumeBindingArgs_To_v1beta3_VolumeBindingArgs(in *config.VolumeBindingArgs, out *v1beta3.VolumeBindingArgs, s conversion.Scope) error {
-	return autoConvert_config_VolumeBindingArgs_To_v1beta3_VolumeBindingArgs(in, out, s)
-}
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/zz_generated.deepcopy.go
deleted file mode 100644
index e735e37db..000000000
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,22 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package v1beta3
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/zz_generated.defaults.go
deleted file mode 100644
index 3c28679f6..000000000
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3/zz_generated.defaults.go
+++ /dev/null
@@ -1,73 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by defaulter-gen. DO NOT EDIT.
-
-package v1beta3
-
-import (
-	runtime "k8s.io/apimachinery/pkg/runtime"
-	v1beta3 "k8s.io/kube-scheduler/config/v1beta3"
-)
-
-// RegisterDefaults adds defaulters functions to the given scheme.
-// Public to allow building arbitrary schemes.
-// All generated defaulters are covering - they call all nested defaulters.
-func RegisterDefaults(scheme *runtime.Scheme) error {
-	scheme.AddTypeDefaultingFunc(&v1beta3.DefaultPreemptionArgs{}, func(obj interface{}) { SetObjectDefaults_DefaultPreemptionArgs(obj.(*v1beta3.DefaultPreemptionArgs)) })
-	scheme.AddTypeDefaultingFunc(&v1beta3.InterPodAffinityArgs{}, func(obj interface{}) { SetObjectDefaults_InterPodAffinityArgs(obj.(*v1beta3.InterPodAffinityArgs)) })
-	scheme.AddTypeDefaultingFunc(&v1beta3.KubeSchedulerConfiguration{}, func(obj interface{}) {
-		SetObjectDefaults_KubeSchedulerConfiguration(obj.(*v1beta3.KubeSchedulerConfiguration))
-	})
-	scheme.AddTypeDefaultingFunc(&v1beta3.NodeResourcesBalancedAllocationArgs{}, func(obj interface{}) {
-		SetObjectDefaults_NodeResourcesBalancedAllocationArgs(obj.(*v1beta3.NodeResourcesBalancedAllocationArgs))
-	})
-	scheme.AddTypeDefaultingFunc(&v1beta3.NodeResourcesFitArgs{}, func(obj interface{}) { SetObjectDefaults_NodeResourcesFitArgs(obj.(*v1beta3.NodeResourcesFitArgs)) })
-	scheme.AddTypeDefaultingFunc(&v1beta3.PodTopologySpreadArgs{}, func(obj interface{}) { SetObjectDefaults_PodTopologySpreadArgs(obj.(*v1beta3.PodTopologySpreadArgs)) })
-	scheme.AddTypeDefaultingFunc(&v1beta3.VolumeBindingArgs{}, func(obj interface{}) { SetObjectDefaults_VolumeBindingArgs(obj.(*v1beta3.VolumeBindingArgs)) })
-	return nil
-}
-
-func SetObjectDefaults_DefaultPreemptionArgs(in *v1beta3.DefaultPreemptionArgs) {
-	SetDefaults_DefaultPreemptionArgs(in)
-}
-
-func SetObjectDefaults_InterPodAffinityArgs(in *v1beta3.InterPodAffinityArgs) {
-	SetDefaults_InterPodAffinityArgs(in)
-}
-
-func SetObjectDefaults_KubeSchedulerConfiguration(in *v1beta3.KubeSchedulerConfiguration) {
-	SetDefaults_KubeSchedulerConfiguration(in)
-}
-
-func SetObjectDefaults_NodeResourcesBalancedAllocationArgs(in *v1beta3.NodeResourcesBalancedAllocationArgs) {
-	SetDefaults_NodeResourcesBalancedAllocationArgs(in)
-}
-
-func SetObjectDefaults_NodeResourcesFitArgs(in *v1beta3.NodeResourcesFitArgs) {
-	SetDefaults_NodeResourcesFitArgs(in)
-}
-
-func SetObjectDefaults_PodTopologySpreadArgs(in *v1beta3.PodTopologySpreadArgs) {
-	SetDefaults_PodTopologySpreadArgs(in)
-}
-
-func SetObjectDefaults_VolumeBindingArgs(in *v1beta3.VolumeBindingArgs) {
-	SetDefaults_VolumeBindingArgs(in)
-}
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/zz_generated.deepcopy.go
deleted file mode 100644
index f5baa6221..000000000
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/apis/config/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,562 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package config
-
-import (
-	v1 "k8s.io/api/core/v1"
-	runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DefaultPreemptionArgs) DeepCopyInto(out *DefaultPreemptionArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DefaultPreemptionArgs.
-func (in *DefaultPreemptionArgs) DeepCopy() *DefaultPreemptionArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(DefaultPreemptionArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *DefaultPreemptionArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Extender) DeepCopyInto(out *Extender) {
-	*out = *in
-	if in.TLSConfig != nil {
-		in, out := &in.TLSConfig, &out.TLSConfig
-		*out = new(ExtenderTLSConfig)
-		(*in).DeepCopyInto(*out)
-	}
-	out.HTTPTimeout = in.HTTPTimeout
-	if in.ManagedResources != nil {
-		in, out := &in.ManagedResources, &out.ManagedResources
-		*out = make([]ExtenderManagedResource, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Extender.
-func (in *Extender) DeepCopy() *Extender {
-	if in == nil {
-		return nil
-	}
-	out := new(Extender)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExtenderManagedResource) DeepCopyInto(out *ExtenderManagedResource) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderManagedResource.
-func (in *ExtenderManagedResource) DeepCopy() *ExtenderManagedResource {
-	if in == nil {
-		return nil
-	}
-	out := new(ExtenderManagedResource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ExtenderTLSConfig) DeepCopyInto(out *ExtenderTLSConfig) {
-	*out = *in
-	if in.CertData != nil {
-		in, out := &in.CertData, &out.CertData
-		*out = make([]byte, len(*in))
-		copy(*out, *in)
-	}
-	if in.KeyData != nil {
-		in, out := &in.KeyData, &out.KeyData
-		*out = make([]byte, len(*in))
-		copy(*out, *in)
-	}
-	if in.CAData != nil {
-		in, out := &in.CAData, &out.CAData
-		*out = make([]byte, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtenderTLSConfig.
-func (in *ExtenderTLSConfig) DeepCopy() *ExtenderTLSConfig {
-	if in == nil {
-		return nil
-	}
-	out := new(ExtenderTLSConfig)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *InterPodAffinityArgs) DeepCopyInto(out *InterPodAffinityArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InterPodAffinityArgs.
-func (in *InterPodAffinityArgs) DeepCopy() *InterPodAffinityArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(InterPodAffinityArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *InterPodAffinityArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *KubeSchedulerConfiguration) DeepCopyInto(out *KubeSchedulerConfiguration) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	out.LeaderElection = in.LeaderElection
-	out.ClientConnection = in.ClientConnection
-	out.DebuggingConfiguration = in.DebuggingConfiguration
-	if in.PercentageOfNodesToScore != nil {
-		in, out := &in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore
-		*out = new(int32)
-		**out = **in
-	}
-	if in.Profiles != nil {
-		in, out := &in.Profiles, &out.Profiles
-		*out = make([]KubeSchedulerProfile, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.Extenders != nil {
-		in, out := &in.Extenders, &out.Extenders
-		*out = make([]Extender, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerConfiguration.
-func (in *KubeSchedulerConfiguration) DeepCopy() *KubeSchedulerConfiguration {
-	if in == nil {
-		return nil
-	}
-	out := new(KubeSchedulerConfiguration)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *KubeSchedulerConfiguration) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *KubeSchedulerProfile) DeepCopyInto(out *KubeSchedulerProfile) {
-	*out = *in
-	if in.PercentageOfNodesToScore != nil {
-		in, out := &in.PercentageOfNodesToScore, &out.PercentageOfNodesToScore
-		*out = new(int32)
-		**out = **in
-	}
-	if in.Plugins != nil {
-		in, out := &in.Plugins, &out.Plugins
-		*out = new(Plugins)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.PluginConfig != nil {
-		in, out := &in.PluginConfig, &out.PluginConfig
-		*out = make([]PluginConfig, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KubeSchedulerProfile.
-func (in *KubeSchedulerProfile) DeepCopy() *KubeSchedulerProfile {
-	if in == nil {
-		return nil
-	}
-	out := new(KubeSchedulerProfile)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeAffinityArgs) DeepCopyInto(out *NodeAffinityArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	if in.AddedAffinity != nil {
-		in, out := &in.AddedAffinity, &out.AddedAffinity
-		*out = new(v1.NodeAffinity)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinityArgs.
-func (in *NodeAffinityArgs) DeepCopy() *NodeAffinityArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeAffinityArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *NodeAffinityArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeResourcesBalancedAllocationArgs) DeepCopyInto(out *NodeResourcesBalancedAllocationArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	if in.Resources != nil {
-		in, out := &in.Resources, &out.Resources
-		*out = make([]ResourceSpec, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesBalancedAllocationArgs.
-func (in *NodeResourcesBalancedAllocationArgs) DeepCopy() *NodeResourcesBalancedAllocationArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeResourcesBalancedAllocationArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *NodeResourcesBalancedAllocationArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeResourcesFitArgs) DeepCopyInto(out *NodeResourcesFitArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	if in.IgnoredResources != nil {
-		in, out := &in.IgnoredResources, &out.IgnoredResources
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.IgnoredResourceGroups != nil {
-		in, out := &in.IgnoredResourceGroups, &out.IgnoredResourceGroups
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.ScoringStrategy != nil {
-		in, out := &in.ScoringStrategy, &out.ScoringStrategy
-		*out = new(ScoringStrategy)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesFitArgs.
-func (in *NodeResourcesFitArgs) DeepCopy() *NodeResourcesFitArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeResourcesFitArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *NodeResourcesFitArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Plugin) DeepCopyInto(out *Plugin) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugin.
-func (in *Plugin) DeepCopy() *Plugin {
-	if in == nil {
-		return nil
-	}
-	out := new(Plugin)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PluginConfig) DeepCopyInto(out *PluginConfig) {
-	*out = *in
-	if in.Args != nil {
-		out.Args = in.Args.DeepCopyObject()
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginConfig.
-func (in *PluginConfig) DeepCopy() *PluginConfig {
-	if in == nil {
-		return nil
-	}
-	out := new(PluginConfig)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PluginSet) DeepCopyInto(out *PluginSet) {
-	*out = *in
-	if in.Enabled != nil {
-		in, out := &in.Enabled, &out.Enabled
-		*out = make([]Plugin, len(*in))
-		copy(*out, *in)
-	}
-	if in.Disabled != nil {
-		in, out := &in.Disabled, &out.Disabled
-		*out = make([]Plugin, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PluginSet.
-func (in *PluginSet) DeepCopy() *PluginSet {
-	if in == nil {
-		return nil
-	}
-	out := new(PluginSet)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Plugins) DeepCopyInto(out *Plugins) {
-	*out = *in
-	in.PreEnqueue.DeepCopyInto(&out.PreEnqueue)
-	in.QueueSort.DeepCopyInto(&out.QueueSort)
-	in.PreFilter.DeepCopyInto(&out.PreFilter)
-	in.Filter.DeepCopyInto(&out.Filter)
-	in.PostFilter.DeepCopyInto(&out.PostFilter)
-	in.PreScore.DeepCopyInto(&out.PreScore)
-	in.Score.DeepCopyInto(&out.Score)
-	in.Reserve.DeepCopyInto(&out.Reserve)
-	in.Permit.DeepCopyInto(&out.Permit)
-	in.PreBind.DeepCopyInto(&out.PreBind)
-	in.Bind.DeepCopyInto(&out.Bind)
-	in.PostBind.DeepCopyInto(&out.PostBind)
-	in.MultiPoint.DeepCopyInto(&out.MultiPoint)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Plugins.
-func (in *Plugins) DeepCopy() *Plugins {
-	if in == nil {
-		return nil
-	}
-	out := new(Plugins)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodTopologySpreadArgs) DeepCopyInto(out *PodTopologySpreadArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	if in.DefaultConstraints != nil {
-		in, out := &in.DefaultConstraints, &out.DefaultConstraints
-		*out = make([]v1.TopologySpreadConstraint, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTopologySpreadArgs.
-func (in *PodTopologySpreadArgs) DeepCopy() *PodTopologySpreadArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(PodTopologySpreadArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PodTopologySpreadArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *RequestedToCapacityRatioParam) DeepCopyInto(out *RequestedToCapacityRatioParam) {
-	*out = *in
-	if in.Shape != nil {
-		in, out := &in.Shape, &out.Shape
-		*out = make([]UtilizationShapePoint, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestedToCapacityRatioParam.
-func (in *RequestedToCapacityRatioParam) DeepCopy() *RequestedToCapacityRatioParam {
-	if in == nil {
-		return nil
-	}
-	out := new(RequestedToCapacityRatioParam)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec.
-func (in *ResourceSpec) DeepCopy() *ResourceSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(ResourceSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ScoringStrategy) DeepCopyInto(out *ScoringStrategy) {
-	*out = *in
-	if in.Resources != nil {
-		in, out := &in.Resources, &out.Resources
-		*out = make([]ResourceSpec, len(*in))
-		copy(*out, *in)
-	}
-	if in.RequestedToCapacityRatio != nil {
-		in, out := &in.RequestedToCapacityRatio, &out.RequestedToCapacityRatio
-		*out = new(RequestedToCapacityRatioParam)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScoringStrategy.
-func (in *ScoringStrategy) DeepCopy() *ScoringStrategy {
-	if in == nil {
-		return nil
-	}
-	out := new(ScoringStrategy)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *UtilizationShapePoint) DeepCopyInto(out *UtilizationShapePoint) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UtilizationShapePoint.
-func (in *UtilizationShapePoint) DeepCopy() *UtilizationShapePoint {
-	if in == nil {
-		return nil
-	}
-	out := new(UtilizationShapePoint)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *VolumeBindingArgs) DeepCopyInto(out *VolumeBindingArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	if in.Shape != nil {
-		in, out := &in.Shape, &out.Shape
-		*out = make([]UtilizationShapePoint, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeBindingArgs.
-func (in *VolumeBindingArgs) DeepCopy() *VolumeBindingArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(VolumeBindingArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *VolumeBindingArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/names/names.go b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/names/names.go
deleted file mode 100644
index 3741f0a10..000000000
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/names/names.go
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package names
-
-const (
-	PrioritySort                    = "PrioritySort"
-	DefaultBinder                   = "DefaultBinder"
-	DefaultPreemption               = "DefaultPreemption"
-	DynamicResources                = "DynamicResources"
-	ImageLocality                   = "ImageLocality"
-	InterPodAffinity                = "InterPodAffinity"
-	NodeAffinity                    = "NodeAffinity"
-	NodeName                        = "NodeName"
-	NodePorts                       = "NodePorts"
-	NodeResourcesBalancedAllocation = "NodeResourcesBalancedAllocation"
-	NodeResourcesFit                = "NodeResourcesFit"
-	NodeUnschedulable               = "NodeUnschedulable"
-	NodeVolumeLimits                = "NodeVolumeLimits"
-	AzureDiskLimits                 = "AzureDiskLimits"
-	CinderLimits                    = "CinderLimits"
-	EBSLimits                       = "EBSLimits"
-	GCEPDLimits                     = "GCEPDLimits"
-	PodTopologySpread               = "PodTopologySpread"
-	SchedulingGates                 = "SchedulingGates"
-	SelectorSpread                  = "SelectorSpread"
-	TaintToleration                 = "TaintToleration"
-	VolumeBinding                   = "VolumeBinding"
-	VolumeRestrictions              = "VolumeRestrictions"
-	VolumeZone                      = "VolumeZone"
-)
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 8b3e23603..8673951f8 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -86,6 +86,9 @@ github.com/go-openapi/jsonreference/internal
 # github.com/go-openapi/swag v0.22.3
 ## explicit; go 1.18
 github.com/go-openapi/swag
+# github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572
+## explicit; go 1.13
+github.com/go-task/slim-sprig
 # github.com/gogo/protobuf v1.3.2 => github.com/gogo/protobuf v1.3.2
 ## explicit; go 1.15
 github.com/gogo/protobuf/gogoproto
@@ -127,6 +130,9 @@ github.com/google/go-cmp/cmp/internal/value
 ## explicit; go 1.12
 github.com/google/gofuzz
 github.com/google/gofuzz/bytesource
+# github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1
+## explicit; go 1.14
+github.com/google/pprof/profile
 # github.com/google/uuid v1.3.0
 ## explicit
 github.com/google/uuid
@@ -168,7 +174,7 @@ github.com/josharian/intern
 # github.com/json-iterator/go v1.1.12
 ## explicit; go 1.12
 github.com/json-iterator/go
-# github.com/k8stopologyawareschedwg/deployer v0.12.1-0.20230322120411-111a4d4522b1
+# github.com/k8stopologyawareschedwg/deployer v0.12.2
 ## explicit; go 1.19
 github.com/k8stopologyawareschedwg/deployer/pkg/assets/rte
 github.com/k8stopologyawareschedwg/deployer/pkg/clientutil
@@ -260,11 +266,21 @@ github.com/modern-go/reflect2
 # github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822
 ## explicit
 github.com/munnerz/goautoneg
-# github.com/onsi/ginkgo/v2 v2.9.5 => github.com/onsi/ginkgo/v2 v2.4.0
+# github.com/onsi/ginkgo/v2 v2.9.5
 ## explicit; go 1.18
 github.com/onsi/ginkgo/v2
 github.com/onsi/ginkgo/v2/config
 github.com/onsi/ginkgo/v2/formatter
+github.com/onsi/ginkgo/v2/ginkgo
+github.com/onsi/ginkgo/v2/ginkgo/build
+github.com/onsi/ginkgo/v2/ginkgo/command
+github.com/onsi/ginkgo/v2/ginkgo/generators
+github.com/onsi/ginkgo/v2/ginkgo/internal
+github.com/onsi/ginkgo/v2/ginkgo/labels
+github.com/onsi/ginkgo/v2/ginkgo/outline
+github.com/onsi/ginkgo/v2/ginkgo/run
+github.com/onsi/ginkgo/v2/ginkgo/unfocus
+github.com/onsi/ginkgo/v2/ginkgo/watch
 github.com/onsi/ginkgo/v2/internal
 github.com/onsi/ginkgo/v2/internal/global
 github.com/onsi/ginkgo/v2/internal/interrupt_handler
@@ -272,7 +288,7 @@ github.com/onsi/ginkgo/v2/internal/parallel_support
 github.com/onsi/ginkgo/v2/internal/testingtproxy
 github.com/onsi/ginkgo/v2/reporters
 github.com/onsi/ginkgo/v2/types
-# github.com/onsi/gomega v1.27.7 => github.com/onsi/gomega v1.23.0
+# github.com/onsi/gomega v1.27.7
 ## explicit; go 1.18
 github.com/onsi/gomega
 github.com/onsi/gomega/format
@@ -296,7 +312,7 @@ github.com/openshift/client-go/config/applyconfigurations/config/v1
 github.com/openshift/client-go/config/applyconfigurations/internal
 github.com/openshift/client-go/config/clientset/versioned/scheme
 github.com/openshift/client-go/config/clientset/versioned/typed/config/v1
-# github.com/openshift/machine-config-operator v0.0.1-0.20230724174830-7b54f1dcce4e => github.com/openshift/machine-config-operator v0.0.1-0.20230724174830-7b54f1dcce4e
+# github.com/openshift/machine-config-operator v0.0.1-0.20230724174830-7b54f1dcce4e
 ## explicit; go 1.18
 github.com/openshift/machine-config-operator/pkg/apis/machineconfiguration.openshift.io/v1
 # github.com/pkg/errors v0.9.1
@@ -352,7 +368,7 @@ go.uber.org/ratelimit
 golang.org/x/mod/internal/lazyregexp
 golang.org/x/mod/module
 golang.org/x/mod/semver
-# golang.org/x/net v0.12.0 => golang.org/x/net v0.17.0
+# golang.org/x/net v0.17.0
 ## explicit; go 1.17
 golang.org/x/net/context
 golang.org/x/net/html
@@ -370,7 +386,7 @@ golang.org/x/net/trace
 ## explicit; go 1.17
 golang.org/x/oauth2
 golang.org/x/oauth2/internal
-# golang.org/x/sys v0.13.0 => golang.org/x/sys v0.13.0
+# golang.org/x/sys v0.13.0
 ## explicit; go 1.17
 golang.org/x/sys/execabs
 golang.org/x/sys/plan9
@@ -407,6 +423,7 @@ golang.org/x/time/rate
 # golang.org/x/tools v0.10.0
 ## explicit; go 1.18
 golang.org/x/tools/go/ast/astutil
+golang.org/x/tools/go/ast/inspector
 golang.org/x/tools/imports
 golang.org/x/tools/internal/event
 golang.org/x/tools/internal/event/core
@@ -534,7 +551,7 @@ gopkg.in/yaml.v3
 # howett.net/plist v1.0.0
 ## explicit; go 1.12
 howett.net/plist
-# k8s.io/api v0.27.2 => k8s.io/api v0.26.7
+# k8s.io/api v0.27.2 => k8s.io/api v0.26.10
 ## explicit; go 1.19
 k8s.io/api/admission/v1
 k8s.io/api/admission/v1beta1
@@ -589,7 +606,7 @@ k8s.io/api/scheduling/v1beta1
 k8s.io/api/storage/v1
 k8s.io/api/storage/v1alpha1
 k8s.io/api/storage/v1beta1
-# k8s.io/apiextensions-apiserver v0.27.2 => k8s.io/apiextensions-apiserver v0.26.7
+# k8s.io/apiextensions-apiserver v0.27.2 => k8s.io/apiextensions-apiserver v0.26.10
 ## explicit; go 1.19
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions
 k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1
@@ -598,7 +615,7 @@ k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/scheme
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1
 k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1beta1
-# k8s.io/apimachinery v0.27.2 => k8s.io/apimachinery v0.26.7
+# k8s.io/apimachinery v0.27.2 => k8s.io/apimachinery v0.26.10
 ## explicit; go 1.19
 k8s.io/apimachinery/pkg/api/equality
 k8s.io/apimachinery/pkg/api/errors
@@ -650,11 +667,7 @@ k8s.io/apimachinery/pkg/watch
 k8s.io/apimachinery/third_party/forked/golang/json
 k8s.io/apimachinery/third_party/forked/golang/netutil
 k8s.io/apimachinery/third_party/forked/golang/reflect
-# k8s.io/apiserver v0.26.7 => k8s.io/apiserver v0.26.7
-## explicit; go 1.19
-k8s.io/apiserver/pkg/features
-k8s.io/apiserver/pkg/util/feature
-# k8s.io/client-go v0.27.2 => k8s.io/client-go v0.26.7
+# k8s.io/client-go v0.27.2 => k8s.io/client-go v0.26.10
 ## explicit; go 1.19
 k8s.io/client-go/applyconfigurations/admissionregistration/v1
 k8s.io/client-go/applyconfigurations/admissionregistration/v1alpha1
@@ -909,7 +922,7 @@ k8s.io/client-go/util/homedir
 k8s.io/client-go/util/keyutil
 k8s.io/client-go/util/retry
 k8s.io/client-go/util/workqueue
-# k8s.io/code-generator v0.26.7 => k8s.io/code-generator v0.26.7
+# k8s.io/code-generator v0.26.10 => k8s.io/code-generator v0.26.10
 ## explicit; go 1.19
 k8s.io/code-generator
 k8s.io/code-generator/cmd/client-gen
@@ -944,7 +957,7 @@ k8s.io/code-generator/cmd/set-gen
 k8s.io/code-generator/pkg/namer
 k8s.io/code-generator/pkg/util
 k8s.io/code-generator/third_party/forked/golang/reflect
-# k8s.io/component-base v0.27.2 => k8s.io/component-base v0.26.7
+# k8s.io/component-base v0.27.2 => k8s.io/component-base v0.26.10
 ## explicit; go 1.19
 k8s.io/component-base/cli/flag
 k8s.io/component-base/config
@@ -997,29 +1010,15 @@ k8s.io/kube-openapi/pkg/spec3
 k8s.io/kube-openapi/pkg/util/proto
 k8s.io/kube-openapi/pkg/util/sets
 k8s.io/kube-openapi/pkg/validation/spec
-# k8s.io/kube-scheduler v0.26.7 => k8s.io/kube-scheduler v0.26.7
-## explicit; go 1.19
-k8s.io/kube-scheduler/config/v1
-k8s.io/kube-scheduler/config/v1beta2
-k8s.io/kube-scheduler/config/v1beta3
-# k8s.io/kubectl v0.25.1 => k8s.io/kubectl v0.26.7
+# k8s.io/kubectl v0.25.1 => k8s.io/kubectl v0.26.10
 ## explicit; go 1.19
 k8s.io/kubectl/pkg/util/qos
 k8s.io/kubectl/pkg/util/resource
-# k8s.io/kubelet v0.26.7 => k8s.io/kubelet v0.26.7
+# k8s.io/kubelet v0.26.10 => k8s.io/kubelet v0.26.10
 ## explicit; go 1.19
 k8s.io/kubelet/config/v1beta1
 k8s.io/kubelet/pkg/apis/deviceplugin/v1beta1
 k8s.io/kubelet/pkg/apis/podresources/v1
-# k8s.io/kubernetes v1.26.7
-## explicit; go 1.19
-k8s.io/kubernetes/pkg/features
-k8s.io/kubernetes/pkg/scheduler/apis/config
-k8s.io/kubernetes/pkg/scheduler/apis/config/scheme
-k8s.io/kubernetes/pkg/scheduler/apis/config/v1
-k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2
-k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3
-k8s.io/kubernetes/pkg/scheduler/framework/plugins/names
 # k8s.io/utils v0.0.0-20230209194617-a36077c30491
 ## explicit; go 1.18
 k8s.io/utils/buffer
@@ -1090,12 +1089,6 @@ sigs.k8s.io/controller-runtime/pkg/webhook/internal/metrics
 ## explicit; go 1.18
 sigs.k8s.io/json
 sigs.k8s.io/json/internal/golang/encoding/json
-# sigs.k8s.io/scheduler-plugins v0.24.9
-## explicit; go 1.17
-sigs.k8s.io/scheduler-plugins/apis/config
-sigs.k8s.io/scheduler-plugins/apis/config/scheme
-sigs.k8s.io/scheduler-plugins/apis/config/v1beta2
-sigs.k8s.io/scheduler-plugins/apis/config/v1beta3
 # sigs.k8s.io/structured-merge-diff/v4 v4.2.3
 ## explicit; go 1.13
 sigs.k8s.io/structured-merge-diff/v4/fieldpath
@@ -1107,36 +1100,31 @@ sigs.k8s.io/structured-merge-diff/v4/value
 sigs.k8s.io/yaml
 # github.com/gogo/protobuf => github.com/gogo/protobuf v1.3.2
 # golang.org/x/text => golang.org/x/text v0.3.8
-# k8s.io/api => k8s.io/api v0.26.7
-# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.7
-# k8s.io/apimachinery => k8s.io/apimachinery v0.26.7
-# k8s.io/apiserver => k8s.io/apiserver v0.26.7
-# k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.7
-# k8s.io/client-go => k8s.io/client-go v0.26.7
-# k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.7
-# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.7
-# k8s.io/code-generator => k8s.io/code-generator v0.26.7
-# k8s.io/component-base => k8s.io/component-base v0.26.7
-# k8s.io/component-helpers => k8s.io/component-helpers v0.26.7
-# k8s.io/controller-manager => k8s.io/controller-manager v0.26.7
-# k8s.io/cri-api => k8s.io/cri-api v0.26.7
-# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.7
-# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.7
-# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.7
-# k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.7
-# k8s.io/kube-scheduler =>
k8s.io/kube-scheduler v0.26.7 -# k8s.io/kubectl => k8s.io/kubectl v0.26.7 -# k8s.io/kubelet => k8s.io/kubelet v0.26.7 -# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.7 -# k8s.io/metrics => k8s.io/metrics v0.26.7 -# k8s.io/mount-utils => k8s.io/mount-utils v0.26.7 -# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.26.7 -# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.7 +# k8s.io/api => k8s.io/api v0.26.10 +# k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.26.10 +# k8s.io/apimachinery => k8s.io/apimachinery v0.26.10 +# k8s.io/apiserver => k8s.io/apiserver v0.26.10 +# k8s.io/cli-runtime => k8s.io/cli-runtime v0.26.10 +# k8s.io/client-go => k8s.io/client-go v0.26.10 +# k8s.io/cloud-provider => k8s.io/cloud-provider v0.26.10 +# k8s.io/cluster-bootstrap => k8s.io/cluster-bootstrap v0.26.10 +# k8s.io/code-generator => k8s.io/code-generator v0.26.10 +# k8s.io/component-base => k8s.io/component-base v0.26.10 +# k8s.io/component-helpers => k8s.io/component-helpers v0.26.10 +# k8s.io/controller-manager => k8s.io/controller-manager v0.26.10 +# k8s.io/cri-api => k8s.io/cri-api v0.26.10 +# k8s.io/csi-translation-lib => k8s.io/csi-translation-lib v0.26.10 +# k8s.io/kube-aggregator => k8s.io/kube-aggregator v0.26.10 +# k8s.io/kube-controller-manager => k8s.io/kube-controller-manager v0.26.10 +# k8s.io/kube-proxy => k8s.io/kube-proxy v0.26.10 +# k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.26.10 +# k8s.io/kubectl => k8s.io/kubectl v0.26.10 +# k8s.io/kubelet => k8s.io/kubelet v0.26.10 +# k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.26.10 +# k8s.io/metrics => k8s.io/metrics v0.26.10 +# k8s.io/mount-utils => k8s.io/mount-utils v0.26.10 +# k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.26.10 +# k8s.io/sample-apiserver => k8s.io/sample-apiserver v0.26.10 # sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.14.6 # github.com/containerd/containerd => github.com/containerd/containerd v1.4.11 -# github.com/onsi/ginkgo/v2 => github.com/onsi/ginkgo/v2 v2.4.0 -# github.com/onsi/gomega => github.com/onsi/gomega v1.23.0 -# github.com/openshift/machine-config-operator => github.com/openshift/machine-config-operator v0.0.1-0.20230724174830-7b54f1dcce4e -# golang.org/x/net => golang.org/x/net v0.17.0 -# golang.org/x/sys => golang.org/x/sys v0.13.0 # google.golang.org/grpc => google.golang.org/grpc v1.58.3 diff --git a/vendor/sigs.k8s.io/scheduler-plugins/LICENSE b/vendor/sigs.k8s.io/scheduler-plugins/LICENSE deleted file mode 100644 index 8dada3eda..000000000 --- a/vendor/sigs.k8s.io/scheduler-plugins/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
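The chunks below drop the vendored `sigs.k8s.io/scheduler-plugins/apis/config` package (`doc.go`, `register.go`, `scheme.go`, `types.go`, and the v1beta2 versions). Its `register.go` uses the usual apimachinery registration pattern: declare a `SchemeGroupVersion`, register the args types via `AddKnownTypes`, and expose `AddToScheme` through a scheme builder. A self-contained sketch of that pattern with a hypothetical `ExampleArgs` type (real args types get `DeepCopyObject` from deepcopy-gen rather than by hand):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// ExampleArgs is a hypothetical plugin-args type; embedding TypeMeta
// provides GetObjectKind, so only DeepCopyObject is written by hand.
type ExampleArgs struct {
	metav1.TypeMeta
	PermitWaitingTimeSeconds int64
}

func (in *ExampleArgs) DeepCopyObject() runtime.Object {
	if in == nil {
		return nil
	}
	out := *in // value fields only, so a shallow copy is a deep copy here
	return &out
}

// SchemeGroupVersion mirrors the internal-version registration in the
// deleted register.go.
var SchemeGroupVersion = schema.GroupVersion{
	Group:   "kubescheduler.config.k8s.io",
	Version: runtime.APIVersionInternal,
}

var (
	schemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
	AddToScheme   = schemeBuilder.AddToScheme
)

// addKnownTypes registers known types to the given scheme.
func addKnownTypes(scheme *runtime.Scheme) error {
	scheme.AddKnownTypes(SchemeGroupVersion, &ExampleArgs{})
	return nil
}

func main() {
	scheme := runtime.NewScheme()
	if err := AddToScheme(scheme); err != nil {
		panic(err)
	}
	fmt.Println(scheme.Recognizes(SchemeGroupVersion.WithKind("ExampleArgs"))) // true
}
```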
diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/doc.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/doc.go deleted file mode 100644 index babf36114..000000000 --- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +groupName=kubescheduler.config.k8s.io - -// Package config of the API. -package config // import "sigs.k8s.io/scheduler-plugins/apis/config" diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/register.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/register.go deleted file mode 100644 index 513f83470..000000000 --- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/register.go +++ /dev/null @@ -1,54 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - schedconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" -) - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: schedconfig.GroupName, Version: runtime.APIVersionInternal} - -var ( - localSchemeBuilder = &schedconfig.SchemeBuilder - // AddToScheme is a global function that registers this API group & version to a scheme - AddToScheme = localSchemeBuilder.AddToScheme -) - -// addKnownTypes registers known types to the given scheme -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &CoschedulingArgs{}, - &NodeResourcesAllocatableArgs{}, - &TargetLoadPackingArgs{}, - &LoadVariationRiskBalancingArgs{}, - &NodeResourceTopologyMatchArgs{}, - &PreemptionTolerationArgs{}, - &TopologicalSortArgs{}, - &NetworkOverheadArgs{}, - ) - return nil -} - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) -} diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/scheme/scheme.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/scheme/scheme.go deleted file mode 100644 index 0f108d606..000000000 --- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/scheme/scheme.go +++ /dev/null @@ -1,47 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package scheme - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - kubeschedulerscheme "k8s.io/kubernetes/pkg/scheduler/apis/config/scheme" - - "sigs.k8s.io/scheduler-plugins/apis/config" - "sigs.k8s.io/scheduler-plugins/apis/config/v1beta2" - "sigs.k8s.io/scheduler-plugins/apis/config/v1beta3" -) - -var ( - // Re-use the in-tree Scheme. - Scheme = kubeschedulerscheme.Scheme - - // Codecs provides access to encoding and decoding for the scheme. - Codecs = serializer.NewCodecFactory(Scheme, serializer.EnableStrict) -) - -func init() { - AddToScheme(Scheme) -} - -// AddToScheme builds the kubescheduler scheme using all known versions of the kubescheduler api. -func AddToScheme(scheme *runtime.Scheme) { - utilruntime.Must(config.AddToScheme(scheme)) - utilruntime.Must(v1beta2.AddToScheme(scheme)) - utilruntime.Must(v1beta3.AddToScheme(scheme)) -} diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/types.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/types.go deleted file mode 100644 index 348f543d9..000000000 --- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/types.go +++ /dev/null @@ -1,182 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package config - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CoschedulingArgs defines the parameters for Coscheduling plugin. -type CoschedulingArgs struct { - metav1.TypeMeta - - // PermitWaitingTimeSeconds is the waiting timeout in seconds. - PermitWaitingTimeSeconds int64 -} - -// ModeType is a "string" type. -type ModeType string - -const ( - // Least is the string "Least". - Least ModeType = "Least" - // Most is the string "Most". - Most ModeType = "Most" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeResourcesAllocatableArgs holds arguments used to configure NodeResourcesAllocatable plugin. -type NodeResourcesAllocatableArgs struct { - metav1.TypeMeta `json:",inline"` - - // Resources to be considered when scoring. - // Allowed weights start from 1. - // An example resource set might include "cpu" (millicores) and "memory" (bytes) - // with weights of 1<<20 and 1 respectfully. That would mean 1 MiB has equivalent - // weight as 1 millicore. 
- Resources []schedconfig.ResourceSpec `json:"resources,omitempty"` - - // Whether to prioritize nodes with least or most allocatable resources. - Mode ModeType `json:"mode,omitempty"` -} - -// MetricProviderType is a "string" type. -type MetricProviderType string - -const ( - KubernetesMetricsServer MetricProviderType = "KubernetesMetricsServer" - Prometheus MetricProviderType = "Prometheus" - SignalFx MetricProviderType = "SignalFx" -) - -// Denote the spec of the metric provider -type MetricProviderSpec struct { - // Types of the metric provider - Type MetricProviderType - // The address of the metric provider - Address string - // The authentication token of the metric provider - Token string - // Whether to enable the InsureSkipVerify options for https requests on Metric Providers. - InsecureSkipVerify bool -} - -// TrimaranSpec holds common parameters for trimaran plugins -type TrimaranSpec struct { - // Metric Provider to use when using load watcher as a library - MetricProvider MetricProviderSpec - // Address of load watcher service - WatcherAddress string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// TargetLoadPackingArgs holds arguments used to configure TargetLoadPacking plugin. -type TargetLoadPackingArgs struct { - metav1.TypeMeta - - // Common parameters for trimaran plugins - TrimaranSpec - // Default requests to use for best effort QoS - DefaultRequests v1.ResourceList - // Default requests multiplier for busrtable QoS - DefaultRequestsMultiplier string - // Node target CPU Utilization for bin packing - TargetUtilization int64 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LoadVariationRiskBalancingArgs holds arguments used to configure LoadVariationRiskBalancing plugin. -type LoadVariationRiskBalancingArgs struct { - metav1.TypeMeta - - // Common parameters for trimaran plugins - TrimaranSpec - // Multiplier of standard deviation in risk value - SafeVarianceMargin float64 - // Root power of standard deviation in risk value - SafeVarianceSensitivity float64 -} - -// ScoringStrategyType is a "string" type. -type ScoringStrategyType string - -const ( - // MostAllocated strategy favors node with the least amount of available resource - MostAllocated ScoringStrategyType = "MostAllocated" - // BalancedAllocation strategy favors nodes with balanced resource usage rate - BalancedAllocation ScoringStrategyType = "BalancedAllocation" - // LeastAllocated strategy favors node with the most amount of available resource - LeastAllocated ScoringStrategyType = "LeastAllocated" -) - -// ScoringStrategy define ScoringStrategyType for node resource topology plugin -type ScoringStrategy struct { - // Type selects which strategy to run. - Type ScoringStrategyType - - // Resources a list of pairs to be considered while scoring - // allowed weights start from 1. - Resources []schedconfig.ResourceSpec -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeResourceTopologyMatchArgs holds arguments used to configure the NodeResourceTopologyMatch plugin -type NodeResourceTopologyMatchArgs struct { - metav1.TypeMeta - - // ScoringStrategy a scoring model that determine how the plugin will score the nodes. - ScoringStrategy ScoringStrategy - // If > 0, enables the caching facilities of the reserve plugin - which must be enabled - CacheResyncPeriodSeconds int64 -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PreemptionTolerationArgs reuses DefaultPluginArgs. 
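`NodeResourceTopologyMatchArgs` above, with its `ScoringStrategy` and `CacheResyncPeriodSeconds` fields, is the part of the vendored scheduler-plugins config API most relevant to this operator, and this diff removes it from `vendor/`. For reference, a standalone sketch that mirrors the removed type with local stand-in structs, populated only with field names shown above and illustrative values:

```go
package main

import "fmt"

// Local mirrors of the removed internal types, reduced to the fields
// shown above; the real definitions lived in
// sigs.k8s.io/scheduler-plugins/apis/config.
type resourceSpec struct {
	Name   string
	Weight int64
}

type scoringStrategy struct {
	Type      string // e.g. "LeastAllocated"
	Resources []resourceSpec
}

type nodeResourceTopologyMatchArgs struct {
	ScoringStrategy          scoringStrategy
	CacheResyncPeriodSeconds int64 // > 0 enables the reserve plugin's cache
}

func main() {
	args := nodeResourceTopologyMatchArgs{
		ScoringStrategy: scoringStrategy{
			Type: "LeastAllocated",
			Resources: []resourceSpec{
				{Name: "cpu", Weight: 1},
				{Name: "memory", Weight: 1},
			},
		},
		CacheResyncPeriodSeconds: 5, // illustrative value
	}
	fmt.Printf("%+v\n", args)
}
```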
-type PreemptionTolerationArgs schedconfig.DefaultPreemptionArgs - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type TopologicalSortArgs struct { - metav1.TypeMeta - - // Namespaces to be considered by TopologySort plugin - Namespaces []string -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type NetworkOverheadArgs struct { - metav1.TypeMeta - - // Namespaces to be considered by NetworkMinCost plugin - Namespaces []string - - // Preferred weights (Default: UserDefined) - WeightsName string - - // The NetworkTopology CRD name - NetworkTopologyName string -} diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/conversion.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/conversion.go deleted file mode 100644 index 11fbf5234..000000000 --- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/conversion.go +++ /dev/null @@ -1,94 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta2 - -import ( - "unsafe" - - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/conversion" - - "sigs.k8s.io/scheduler-plugins/apis/config" -) - -// This file stores all necessary manual conversion bits, to leave zz_generated*.go intact after code generation. - -func Convert_v1beta2_CoschedulingArgs_To_config_CoschedulingArgs(in *CoschedulingArgs, out *config.CoschedulingArgs, s conversion.Scope) error { - return autoConvert_v1beta2_CoschedulingArgs_To_config_CoschedulingArgs(in, out, s) -} - -func Convert_v1beta2_LoadVariationRiskBalancingArgs_To_config_LoadVariationRiskBalancingArgs(in *LoadVariationRiskBalancingArgs, out *config.LoadVariationRiskBalancingArgs, s conversion.Scope) error { - if err := autoConvert_v1beta2_LoadVariationRiskBalancingArgs_To_config_LoadVariationRiskBalancingArgs(in, out, s); err != nil { - return err - } - // Manual conversions. - if err := Convert_v1beta2_MetricProviderSpec_To_config_MetricProviderSpec(&in.MetricProvider, &out.TrimaranSpec.MetricProvider, s); err != nil { - return err - } - return v1.Convert_Pointer_string_To_string(&in.WatcherAddress, &out.TrimaranSpec.WatcherAddress, s) -} - -func Convert_config_LoadVariationRiskBalancingArgs_To_v1beta2_LoadVariationRiskBalancingArgs(in *config.LoadVariationRiskBalancingArgs, out *LoadVariationRiskBalancingArgs, s conversion.Scope) error { - if err := autoConvert_config_LoadVariationRiskBalancingArgs_To_v1beta2_LoadVariationRiskBalancingArgs(in, out, s); err != nil { - return err - } - // Manual conversions. 
- if err := Convert_config_MetricProviderSpec_To_v1beta2_MetricProviderSpec(&in.TrimaranSpec.MetricProvider, &out.MetricProvider, s); err != nil { - return err - } - return v1.Convert_string_To_Pointer_string(&in.TrimaranSpec.WatcherAddress, &out.WatcherAddress, s) -} - -func Convert_v1beta2_NodeResourceTopologyMatchArgs_To_config_NodeResourceTopologyMatchArgs(in *NodeResourceTopologyMatchArgs, out *config.NodeResourceTopologyMatchArgs, s conversion.Scope) error { - if err := autoConvert_v1beta2_NodeResourceTopologyMatchArgs_To_config_NodeResourceTopologyMatchArgs(in, out, s); err != nil { - return err - } - // Manual conversions. - out.ScoringStrategy = *(*config.ScoringStrategy)(unsafe.Pointer(in.ScoringStrategy)) - return nil -} - -func Convert_config_NodeResourceTopologyMatchArgs_To_v1beta2_NodeResourceTopologyMatchArgs(in *config.NodeResourceTopologyMatchArgs, out *NodeResourceTopologyMatchArgs, s conversion.Scope) error { - if err := autoConvert_config_NodeResourceTopologyMatchArgs_To_v1beta2_NodeResourceTopologyMatchArgs(in, out, s); err != nil { - return err - } - // Manual conversions. - out.ScoringStrategy = (*ScoringStrategy)(unsafe.Pointer(&in.ScoringStrategy)) - return nil -} - -func Convert_v1beta2_TargetLoadPackingArgs_To_config_TargetLoadPackingArgs(in *TargetLoadPackingArgs, out *config.TargetLoadPackingArgs, s conversion.Scope) error { - if err := autoConvert_v1beta2_TargetLoadPackingArgs_To_config_TargetLoadPackingArgs(in, out, s); err != nil { - return err - } - // Manual conversions. - if err := Convert_v1beta2_MetricProviderSpec_To_config_MetricProviderSpec(&in.MetricProvider, &out.TrimaranSpec.MetricProvider, s); err != nil { - return err - } - return v1.Convert_Pointer_string_To_string(&in.WatcherAddress, &out.TrimaranSpec.WatcherAddress, s) -} - -func Convert_config_TargetLoadPackingArgs_To_v1beta2_TargetLoadPackingArgs(in *config.TargetLoadPackingArgs, out *TargetLoadPackingArgs, s conversion.Scope) error { - if err := autoConvert_config_TargetLoadPackingArgs_To_v1beta2_TargetLoadPackingArgs(in, out, s); err != nil { - return err - } - // Manual conversions. - if err := Convert_config_MetricProviderSpec_To_v1beta2_MetricProviderSpec(&in.TrimaranSpec.MetricProvider, &out.MetricProvider, s); err != nil { - return err - } - return v1.Convert_string_To_Pointer_string(&in.TrimaranSpec.WatcherAddress, &out.WatcherAddress, s) -} diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/defaults.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/defaults.go deleted file mode 100644 index 2f917fdb3..000000000 --- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/defaults.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
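The manual conversions in the deleted `conversion.go` above avoid field-by-field copies between the versioned and internal types by reinterpreting pointers, e.g. `out.ScoringStrategy = *(*config.ScoringStrategy)(unsafe.Pointer(in.ScoringStrategy))`. That cast is only valid because the two struct layouts are identical. A minimal standalone sketch of the idiom with two hypothetical, layout-identical structs:

```go
package main

import (
	"fmt"
	"unsafe"
)

// Two types with identical memory layout, as the versioned and internal
// ScoringStrategy types were (same fields, same order, same types).
type v1beta2Strategy struct {
	Type      string
	Resources []string
}

type internalStrategy struct {
	Type      string
	Resources []string
}

func main() {
	in := &v1beta2Strategy{Type: "LeastAllocated", Resources: []string{"cpu", "memory"}}

	// Reinterpret the pointer instead of copying field by field.
	// This is only safe when the two layouts are truly identical,
	// which is what conversion-gen relies on when it emits such casts.
	out := *(*internalStrategy)(unsafe.Pointer(in))
	fmt.Println(out.Type, out.Resources)
}
```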
-*/ - -package v1beta2 - -import ( - "strconv" - - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - schedulerconfigv1beta2 "k8s.io/kube-scheduler/config/v1beta2" - k8sschedulerconfigv1beta2 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta2" -) - -var ( - defaultPermitWaitingTimeSeconds int64 = 60 - defaultDeniedPGExpirationTimeSeconds int64 = 20 - - defaultNodeResourcesAllocatableMode = Least - - // defaultResourcesToWeightMap is used to set the default resourceToWeight map for CPU and memory - // used by the NodeResourcesAllocatable scoring plugin. - // The base unit for CPU is millicore, while the base using for memory is a byte. - // The default CPU weight is 1<<20 and default memory weight is 1. That means a millicore - // has a weighted score equivalent to 1 MiB. - defaultNodeResourcesAllocatableResourcesToWeightMap = []schedulerconfigv1beta2.ResourceSpec{ - {Name: "cpu", Weight: 1 << 20}, {Name: "memory", Weight: 1}, - } - - // Defaults for TargetLoadPacking plugin - - // Default 1 core CPU usage for containers without requests and limits i.e. Best Effort QoS. - DefaultRequestsMilliCores int64 = 1000 - // DefaultRequestsMultiplier for containers without limits predicted as 1.5*requests i.e. Burstable QoS class - DefaultRequestsMultiplier = "1.5" - // DefaultTargetUtilizationPercent Recommended to keep -10 than desired limit. - DefaultTargetUtilizationPercent int64 = 40 - - // Defaults for LoadVariationRiskBalancing plugin - - // Risk is usually calculated as average (aka. mu) plus standard deviation (aka. sigma). - // In order to allow customization in the calculation of risk, two parameters are provided: - // Margin and Sensitivity. Margin is a multiplier of sigma, and Sensitivity is a root power of sigma. - // For example, Margin=3 and Sensitivity=2 leads to a risk evaluated as: mu + 3 sqrt(sigma). - // The default value for both parameters is 1, leading to: mu + sigma. - // DefaultSafeVarianceMargin is one - DefaultSafeVarianceMargin = 1.0 - // DefaultSafeVarianceSensitivity is one - DefaultSafeVarianceSensitivity = 1.0 - - // Defaults for MetricProviderSpec - // DefaultMetricProviderType is the Kubernetes metrics server - DefaultMetricProviderType = KubernetesMetricsServer - // DefaultInsecureSkipVerify is whether to skip the certificate verification - DefaultInsecureSkipVerify = true - - defaultResourceSpec = []schedulerconfigv1beta2.ResourceSpec{ - {Name: string(v1.ResourceCPU), Weight: 1}, - {Name: string(v1.ResourceMemory), Weight: 1}, - } -) - -// SetDefaults_CoschedulingArgs sets the default parameters for Coscheduling plugin. -func SetDefaults_CoschedulingArgs(obj *CoschedulingArgs) { - if obj.PermitWaitingTimeSeconds == nil { - obj.PermitWaitingTimeSeconds = &defaultPermitWaitingTimeSeconds - } - if obj.DeniedPGExpirationTimeSeconds == nil { - obj.DeniedPGExpirationTimeSeconds = &defaultDeniedPGExpirationTimeSeconds - } -} - -// SetDefaults_NodeResourcesAllocatableArgs sets the defaults parameters for NodeResourceAllocatable. 
-func SetDefaults_NodeResourcesAllocatableArgs(obj *NodeResourcesAllocatableArgs) { - if len(obj.Resources) == 0 { - obj.Resources = defaultNodeResourcesAllocatableResourcesToWeightMap - } - - if obj.Mode == "" { - obj.Mode = defaultNodeResourcesAllocatableMode - } -} - -// SetDefaults_TargetLoadPackingArgs sets the default parameters for TargetLoadPacking plugin -func SetDefaults_TargetLoadPackingArgs(args *TargetLoadPackingArgs) { - if args.DefaultRequests == nil { - args.DefaultRequests = v1.ResourceList{v1.ResourceCPU: resource.MustParse( - strconv.FormatInt(DefaultRequestsMilliCores, 10) + "m")} - } - if args.DefaultRequestsMultiplier == nil { - args.DefaultRequestsMultiplier = &DefaultRequestsMultiplier - } - if args.TargetUtilization == nil || *args.TargetUtilization <= 0 { - args.TargetUtilization = &DefaultTargetUtilizationPercent - } - if args.WatcherAddress == nil && args.MetricProvider.Type == "" { - args.MetricProvider.Type = MetricProviderType(DefaultMetricProviderType) - } - if args.MetricProvider.Type == Prometheus && args.MetricProvider.InsecureSkipVerify == nil { - args.MetricProvider.InsecureSkipVerify = &DefaultInsecureSkipVerify - } -} - -// SetDefaults_LoadVariationRiskBalancingArgs sets the default parameters for LoadVariationRiskBalancing plugin -func SetDefaults_LoadVariationRiskBalancingArgs(args *LoadVariationRiskBalancingArgs) { - if args.WatcherAddress == nil && args.MetricProvider.Type == "" { - args.MetricProvider.Type = MetricProviderType(DefaultMetricProviderType) - } - if args.SafeVarianceMargin == nil || *args.SafeVarianceMargin < 0 { - args.SafeVarianceMargin = &DefaultSafeVarianceMargin - } - if args.SafeVarianceSensitivity == nil || *args.SafeVarianceSensitivity < 0 { - args.SafeVarianceSensitivity = &DefaultSafeVarianceSensitivity - } - if args.MetricProvider.Type == Prometheus && args.MetricProvider.InsecureSkipVerify == nil { - args.MetricProvider.InsecureSkipVerify = &DefaultInsecureSkipVerify - } -} - -// SetDefaults_NodeResourceTopologyMatchArgs sets the default parameters for NodeResourceTopologyMatch plugin. -func SetDefaults_NodeResourceTopologyMatchArgs(obj *NodeResourceTopologyMatchArgs) { - if obj.ScoringStrategy == nil { - obj.ScoringStrategy = &ScoringStrategy{ - Type: LeastAllocated, - Resources: defaultResourceSpec, - } - } - - if len(obj.ScoringStrategy.Resources) == 0 { - // If no resources specified, use the default set. - obj.ScoringStrategy.Resources = append(obj.ScoringStrategy.Resources, defaultResourceSpec...) - } - - for i := range obj.ScoringStrategy.Resources { - if obj.ScoringStrategy.Resources[i].Weight == 0 { - obj.ScoringStrategy.Resources[i].Weight = 1 - } - } -} - -// SetDefaults_PreemptionTolerationArgs reuses SetDefaults_DefaultPreemptionArgs -func SetDefaults_PreemptionTolerationArgs(obj *PreemptionTolerationArgs) { - k8sschedulerconfigv1beta2.SetDefaults_DefaultPreemptionArgs((*schedulerconfigv1beta2.DefaultPreemptionArgs)(obj)) -} diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/doc.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/doc.go deleted file mode 100644 index 3bd1bbe8e..000000000 --- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/doc.go +++ /dev/null @@ -1,24 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
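The `SetDefaults_*` functions in the deleted `defaults.go` above follow the standard defaulting shape for versioned config: optional fields are pointers, and nil (or out-of-range) values are replaced with package-level defaults. A standalone sketch of that shape, using a hypothetical cut-down args struct whose default values are borrowed from the constants above:

```go
package main

import "fmt"

// Hypothetical versioned args: optional fields are pointers so that
// "unset" (nil) is distinguishable from an explicit zero value.
type targetLoadPackingArgs struct {
	DefaultRequestsMultiplier *string
	TargetUtilization         *int64
}

var (
	defaultRequestsMultiplier       = "1.5" // mirrors DefaultRequestsMultiplier above
	defaultTargetUtilization  int64 = 40    // mirrors DefaultTargetUtilizationPercent above
)

// setDefaultsTargetLoadPackingArgs fills nil or invalid fields,
// following the SetDefaults_TargetLoadPackingArgs logic being removed.
func setDefaultsTargetLoadPackingArgs(args *targetLoadPackingArgs) {
	if args.DefaultRequestsMultiplier == nil {
		args.DefaultRequestsMultiplier = &defaultRequestsMultiplier
	}
	if args.TargetUtilization == nil || *args.TargetUtilization <= 0 {
		args.TargetUtilization = &defaultTargetUtilization
	}
}

func main() {
	var args targetLoadPackingArgs
	setDefaultsTargetLoadPackingArgs(&args)
	fmt.Println(*args.DefaultRequestsMultiplier, *args.TargetUtilization) // 1.5 40
}
```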
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:conversion-gen=sigs.k8s.io/scheduler-plugins/apis/config -// +k8s:defaulter-gen=TypeMeta -// +k8s:defaulter-gen-input=sigs.k8s.io/scheduler-plugins/apis/config/v1beta2 -// +groupName=kubescheduler.config.k8s.io - -// Package v1beta2 is the v1beta2 version of the API. -package v1beta2 // import "sigs.k8s.io/scheduler-plugins/apis/config/v1beta2" diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/register.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/register.go deleted file mode 100644 index 4fd0e4c56..000000000 --- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/register.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta2 - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - schedschemev1beta2 "k8s.io/kube-scheduler/config/v1beta2" - schedconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" -) - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: schedconfig.GroupName, Version: "v1beta2"} - -var ( - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - localSchemeBuilder = &schedschemev1beta2.SchemeBuilder - // AddToScheme is a global function that registers this API group & version to a scheme - AddToScheme = localSchemeBuilder.AddToScheme -) - -// addKnownTypes registers known types to the given scheme -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &CoschedulingArgs{}, - &NodeResourcesAllocatableArgs{}, - &TargetLoadPackingArgs{}, - &LoadVariationRiskBalancingArgs{}, - &NodeResourceTopologyMatchArgs{}, - &PreemptionTolerationArgs{}, - ) - return nil -} - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) - localSchemeBuilder.Register(RegisterDefaults) - localSchemeBuilder.Register(RegisterConversions) -} diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/types.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/types.go deleted file mode 100644 index c1f19ae1d..000000000 --- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/types.go +++ /dev/null @@ -1,151 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta2 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedulerconfigv1beta2 "k8s.io/kube-scheduler/config/v1beta2" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CoschedulingArgs defines the scheduling parameters for Coscheduling plugin. -type CoschedulingArgs struct { - metav1.TypeMeta `json:",inline"` - - // PermitWaitingTimeSeconds is the waiting timeout in seconds. - PermitWaitingTimeSeconds *int64 `json:"permitWaitingTimeSeconds,omitempty"` - // DeniedPGExpirationTimeSeconds is the expiration time of the denied podgroup store. - DeniedPGExpirationTimeSeconds *int64 `json:"deniedPGExpirationTimeSeconds,omitempty"` -} - -// ModeType is a type "string". -type ModeType string - -const ( - // Least is the string "Least". - Least ModeType = "Least" - // Most is the string "Most". - Most ModeType = "Most" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeResourcesAllocatableArgs holds arguments used to configure NodeResourcesAllocatable plugin. -type NodeResourcesAllocatableArgs struct { - metav1.TypeMeta `json:",inline"` - - // Resources to be considered when scoring. - // Allowed weights start from 1. - // An example resource set might include "cpu" (millicores) and "memory" (bytes) - // with weights of 1<<20 and 1 respectfully. That would mean 1 MiB has equivalent - // weight as 1 millicore. - Resources []schedulerconfigv1beta2.ResourceSpec `json:"resources,omitempty"` - - // Whether to prioritize nodes with least or most allocatable resources. - Mode ModeType `json:"mode,omitempty"` -} - -// MetricProviderType is a "string" type. -type MetricProviderType string - -const ( - KubernetesMetricsServer MetricProviderType = "KubernetesMetricsServer" - Prometheus MetricProviderType = "Prometheus" - SignalFx MetricProviderType = "SignalFx" -) - -// Denote the spec of the metric provider -type MetricProviderSpec struct { - // Types of the metric provider - Type MetricProviderType `json:"type,omitempty"` - // The address of the metric provider - Address *string `json:"address,omitempty"` - // The authentication token of the metric provider - Token *string `json:"token,omitempty"` - // Whether to enable the InsureSkipVerify options for https requests on Prometheus Metric Provider. - InsecureSkipVerify *bool `json:"insecureSkipVerify,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// TargetLoadPackingArgs holds arguments used to configure TargetLoadPacking plugin. 
-type TargetLoadPackingArgs struct { - metav1.TypeMeta `json:",inline"` - - // Default requests to use for best effort QoS - DefaultRequests v1.ResourceList `json:"defaultRequests,omitempty"` - // Default requests multiplier for busrtable QoS - DefaultRequestsMultiplier *string `json:"defaultRequestsMultiplier,omitempty"` - // Node target CPU Utilization for bin packing - TargetUtilization *int64 `json:"targetUtilization,omitempty"` - // Specify the metric provider type, address and token using MetricProviderSpec - MetricProvider MetricProviderSpec `json:"metricProvider,omitempty"` - // Address of load watcher service - WatcherAddress *string `json:"watcherAddress,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LoadVariationRiskBalancingArgs holds arguments used to configure LoadVariationRiskBalancing plugin. -type LoadVariationRiskBalancingArgs struct { - metav1.TypeMeta `json:",inline"` - - // Metric Provider specification when using load watcher as library - MetricProvider MetricProviderSpec `json:"metricProvider,omitempty"` - // Address of load watcher service - WatcherAddress *string `json:"watcherAddress,omitempty"` - // Multiplier of standard deviation in risk value - SafeVarianceMargin *float64 `json:"safeVarianceMargin,omitempty"` - // Root power of standard deviation in risk value - SafeVarianceSensitivity *float64 `json:"safeVarianceSensitivity,omitempty"` -} - -// ScoringStrategyType is a "string" type. -type ScoringStrategyType string - -const ( - // MostAllocated strategy favors node with the least amount of available resource - MostAllocated ScoringStrategyType = "MostAllocated" - // BalancedAllocation strategy favors nodes with balanced resource usage rate - BalancedAllocation ScoringStrategyType = "BalancedAllocation" - // LeastAllocated strategy favors node with the most amount of available resource - LeastAllocated ScoringStrategyType = "LeastAllocated" -) - -type ScoringStrategy struct { - Type ScoringStrategyType `json:"type,omitempty"` - Resources []schedulerconfigv1beta2.ResourceSpec `json:"resources,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeResourceTopologyMatchArgs holds arguments used to configure the NodeResourceTopologyMatch plugin -type NodeResourceTopologyMatchArgs struct { - metav1.TypeMeta `json:",inline"` - - // ScoringStrategy a scoring model that determine how the plugin will score the nodes. - ScoringStrategy *ScoringStrategy `json:"scoringStrategy,omitempty"` - // If > 0, enables the caching facilities of the reserve plugin - which must be enabled - CacheResyncPeriodSeconds *int64 `json:"cacheResyncPeriodSeconds,omitempty"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PreemptionTolerationArgs reuses DefaultPluginArgs. -type PreemptionTolerationArgs schedulerconfigv1beta2.DefaultPreemptionArgs diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/zz_generated.conversion.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/zz_generated.conversion.go deleted file mode 100644 index 5775d2665..000000000 --- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/zz_generated.conversion.go +++ /dev/null @@ -1,320 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. - -package v1beta2 - -import ( - unsafe "unsafe" - - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - configv1beta2 "k8s.io/kube-scheduler/config/v1beta2" - apisconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" - config "sigs.k8s.io/scheduler-plugins/apis/config" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*config.CoschedulingArgs)(nil), (*CoschedulingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_CoschedulingArgs_To_v1beta2_CoschedulingArgs(a.(*config.CoschedulingArgs), b.(*CoschedulingArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*MetricProviderSpec)(nil), (*config.MetricProviderSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_MetricProviderSpec_To_config_MetricProviderSpec(a.(*MetricProviderSpec), b.(*config.MetricProviderSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.MetricProviderSpec)(nil), (*MetricProviderSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_MetricProviderSpec_To_v1beta2_MetricProviderSpec(a.(*config.MetricProviderSpec), b.(*MetricProviderSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NodeResourcesAllocatableArgs)(nil), (*config.NodeResourcesAllocatableArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_NodeResourcesAllocatableArgs_To_config_NodeResourcesAllocatableArgs(a.(*NodeResourcesAllocatableArgs), b.(*config.NodeResourcesAllocatableArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.NodeResourcesAllocatableArgs)(nil), (*NodeResourcesAllocatableArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_NodeResourcesAllocatableArgs_To_v1beta2_NodeResourcesAllocatableArgs(a.(*config.NodeResourcesAllocatableArgs), b.(*NodeResourcesAllocatableArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*PreemptionTolerationArgs)(nil), (*config.PreemptionTolerationArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_PreemptionTolerationArgs_To_config_PreemptionTolerationArgs(a.(*PreemptionTolerationArgs), b.(*config.PreemptionTolerationArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.PreemptionTolerationArgs)(nil), (*PreemptionTolerationArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_PreemptionTolerationArgs_To_v1beta2_PreemptionTolerationArgs(a.(*config.PreemptionTolerationArgs), b.(*PreemptionTolerationArgs), 
scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ScoringStrategy)(nil), (*config.ScoringStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_ScoringStrategy_To_config_ScoringStrategy(a.(*ScoringStrategy), b.(*config.ScoringStrategy), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.ScoringStrategy)(nil), (*ScoringStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ScoringStrategy_To_v1beta2_ScoringStrategy(a.(*config.ScoringStrategy), b.(*ScoringStrategy), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*config.LoadVariationRiskBalancingArgs)(nil), (*LoadVariationRiskBalancingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_LoadVariationRiskBalancingArgs_To_v1beta2_LoadVariationRiskBalancingArgs(a.(*config.LoadVariationRiskBalancingArgs), b.(*LoadVariationRiskBalancingArgs), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*config.NodeResourceTopologyMatchArgs)(nil), (*NodeResourceTopologyMatchArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_NodeResourceTopologyMatchArgs_To_v1beta2_NodeResourceTopologyMatchArgs(a.(*config.NodeResourceTopologyMatchArgs), b.(*NodeResourceTopologyMatchArgs), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*config.TargetLoadPackingArgs)(nil), (*TargetLoadPackingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_TargetLoadPackingArgs_To_v1beta2_TargetLoadPackingArgs(a.(*config.TargetLoadPackingArgs), b.(*TargetLoadPackingArgs), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*CoschedulingArgs)(nil), (*config.CoschedulingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_CoschedulingArgs_To_config_CoschedulingArgs(a.(*CoschedulingArgs), b.(*config.CoschedulingArgs), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*LoadVariationRiskBalancingArgs)(nil), (*config.LoadVariationRiskBalancingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_LoadVariationRiskBalancingArgs_To_config_LoadVariationRiskBalancingArgs(a.(*LoadVariationRiskBalancingArgs), b.(*config.LoadVariationRiskBalancingArgs), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*NodeResourceTopologyMatchArgs)(nil), (*config.NodeResourceTopologyMatchArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_NodeResourceTopologyMatchArgs_To_config_NodeResourceTopologyMatchArgs(a.(*NodeResourceTopologyMatchArgs), b.(*config.NodeResourceTopologyMatchArgs), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*TargetLoadPackingArgs)(nil), (*config.TargetLoadPackingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta2_TargetLoadPackingArgs_To_config_TargetLoadPackingArgs(a.(*TargetLoadPackingArgs), b.(*config.TargetLoadPackingArgs), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta2_CoschedulingArgs_To_config_CoschedulingArgs(in *CoschedulingArgs, out *config.CoschedulingArgs, s conversion.Scope) error { - if err := v1.Convert_Pointer_int64_To_int64(&in.PermitWaitingTimeSeconds, &out.PermitWaitingTimeSeconds, s); err != nil { - return err - } - // WARNING: 
in.DeniedPGExpirationTimeSeconds requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_config_CoschedulingArgs_To_v1beta2_CoschedulingArgs(in *config.CoschedulingArgs, out *CoschedulingArgs, s conversion.Scope) error { - if err := v1.Convert_int64_To_Pointer_int64(&in.PermitWaitingTimeSeconds, &out.PermitWaitingTimeSeconds, s); err != nil { - return err - } - return nil -} - -// Convert_config_CoschedulingArgs_To_v1beta2_CoschedulingArgs is an autogenerated conversion function. -func Convert_config_CoschedulingArgs_To_v1beta2_CoschedulingArgs(in *config.CoschedulingArgs, out *CoschedulingArgs, s conversion.Scope) error { - return autoConvert_config_CoschedulingArgs_To_v1beta2_CoschedulingArgs(in, out, s) -} - -func autoConvert_v1beta2_LoadVariationRiskBalancingArgs_To_config_LoadVariationRiskBalancingArgs(in *LoadVariationRiskBalancingArgs, out *config.LoadVariationRiskBalancingArgs, s conversion.Scope) error { - // WARNING: in.MetricProvider requires manual conversion: does not exist in peer-type - // WARNING: in.WatcherAddress requires manual conversion: does not exist in peer-type - if err := v1.Convert_Pointer_float64_To_float64(&in.SafeVarianceMargin, &out.SafeVarianceMargin, s); err != nil { - return err - } - if err := v1.Convert_Pointer_float64_To_float64(&in.SafeVarianceSensitivity, &out.SafeVarianceSensitivity, s); err != nil { - return err - } - return nil -} - -func autoConvert_config_LoadVariationRiskBalancingArgs_To_v1beta2_LoadVariationRiskBalancingArgs(in *config.LoadVariationRiskBalancingArgs, out *LoadVariationRiskBalancingArgs, s conversion.Scope) error { - // WARNING: in.TrimaranSpec requires manual conversion: does not exist in peer-type - if err := v1.Convert_float64_To_Pointer_float64(&in.SafeVarianceMargin, &out.SafeVarianceMargin, s); err != nil { - return err - } - if err := v1.Convert_float64_To_Pointer_float64(&in.SafeVarianceSensitivity, &out.SafeVarianceSensitivity, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta2_MetricProviderSpec_To_config_MetricProviderSpec(in *MetricProviderSpec, out *config.MetricProviderSpec, s conversion.Scope) error { - out.Type = config.MetricProviderType(in.Type) - if err := v1.Convert_Pointer_string_To_string(&in.Address, &out.Address, s); err != nil { - return err - } - if err := v1.Convert_Pointer_string_To_string(&in.Token, &out.Token, s); err != nil { - return err - } - if err := v1.Convert_Pointer_bool_To_bool(&in.InsecureSkipVerify, &out.InsecureSkipVerify, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_MetricProviderSpec_To_config_MetricProviderSpec is an autogenerated conversion function. 
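
The WARNING lines emitted by conversion-gen above mark fields that have no counterpart in the peer type; such fields must be handled by a hand-written wrapper around the autoConvert_* function. A minimal, self-contained sketch of that pattern, using hypothetical types rather than the vendored, generated ones:

package main

import "fmt"

// Hypothetical versioned type; ExpirySeconds has no counterpart in the
// internal type, mirroring the WARNING above.
type v1Args struct {
	PermitSeconds *int64
	ExpirySeconds *int64
}

type internalArgs struct {
	PermitSeconds int64
}

// convert mirrors the generated shape: map what converts cleanly, then
// decide by hand what to do with the leftover field.
func convert(in *v1Args, out *internalArgs) error {
	if in.PermitSeconds != nil {
		out.PermitSeconds = *in.PermitSeconds
	}
	// ExpirySeconds is deliberately dropped here; a real wrapper could
	// instead map it onto a differently-shaped field.
	return nil
}

func main() {
	sec := int64(60)
	in := v1Args{PermitSeconds: &sec}
	var out internalArgs
	_ = convert(&in, &out)
	fmt.Println(out.PermitSeconds) // 60
}
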
-func Convert_v1beta2_MetricProviderSpec_To_config_MetricProviderSpec(in *MetricProviderSpec, out *config.MetricProviderSpec, s conversion.Scope) error { - return autoConvert_v1beta2_MetricProviderSpec_To_config_MetricProviderSpec(in, out, s) -} - -func autoConvert_config_MetricProviderSpec_To_v1beta2_MetricProviderSpec(in *config.MetricProviderSpec, out *MetricProviderSpec, s conversion.Scope) error { - out.Type = MetricProviderType(in.Type) - if err := v1.Convert_string_To_Pointer_string(&in.Address, &out.Address, s); err != nil { - return err - } - if err := v1.Convert_string_To_Pointer_string(&in.Token, &out.Token, s); err != nil { - return err - } - if err := v1.Convert_bool_To_Pointer_bool(&in.InsecureSkipVerify, &out.InsecureSkipVerify, s); err != nil { - return err - } - return nil -} - -// Convert_config_MetricProviderSpec_To_v1beta2_MetricProviderSpec is an autogenerated conversion function. -func Convert_config_MetricProviderSpec_To_v1beta2_MetricProviderSpec(in *config.MetricProviderSpec, out *MetricProviderSpec, s conversion.Scope) error { - return autoConvert_config_MetricProviderSpec_To_v1beta2_MetricProviderSpec(in, out, s) -} - -func autoConvert_v1beta2_NodeResourceTopologyMatchArgs_To_config_NodeResourceTopologyMatchArgs(in *NodeResourceTopologyMatchArgs, out *config.NodeResourceTopologyMatchArgs, s conversion.Scope) error { - // WARNING: in.ScoringStrategy requires manual conversion: inconvertible types (*sigs.k8s.io/scheduler-plugins/apis/config/v1beta2.ScoringStrategy vs sigs.k8s.io/scheduler-plugins/apis/config.ScoringStrategy) - if err := v1.Convert_Pointer_int64_To_int64(&in.CacheResyncPeriodSeconds, &out.CacheResyncPeriodSeconds, s); err != nil { - return err - } - return nil -} - -func autoConvert_config_NodeResourceTopologyMatchArgs_To_v1beta2_NodeResourceTopologyMatchArgs(in *config.NodeResourceTopologyMatchArgs, out *NodeResourceTopologyMatchArgs, s conversion.Scope) error { - // WARNING: in.ScoringStrategy requires manual conversion: inconvertible types (sigs.k8s.io/scheduler-plugins/apis/config.ScoringStrategy vs *sigs.k8s.io/scheduler-plugins/apis/config/v1beta2.ScoringStrategy) - if err := v1.Convert_int64_To_Pointer_int64(&in.CacheResyncPeriodSeconds, &out.CacheResyncPeriodSeconds, s); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta2_NodeResourcesAllocatableArgs_To_config_NodeResourcesAllocatableArgs(in *NodeResourcesAllocatableArgs, out *config.NodeResourcesAllocatableArgs, s conversion.Scope) error { - out.Resources = *(*[]apisconfig.ResourceSpec)(unsafe.Pointer(&in.Resources)) - out.Mode = config.ModeType(in.Mode) - return nil -} - -// Convert_v1beta2_NodeResourcesAllocatableArgs_To_config_NodeResourcesAllocatableArgs is an autogenerated conversion function. -func Convert_v1beta2_NodeResourcesAllocatableArgs_To_config_NodeResourcesAllocatableArgs(in *NodeResourcesAllocatableArgs, out *config.NodeResourcesAllocatableArgs, s conversion.Scope) error { - return autoConvert_v1beta2_NodeResourcesAllocatableArgs_To_config_NodeResourcesAllocatableArgs(in, out, s) -} - -func autoConvert_config_NodeResourcesAllocatableArgs_To_v1beta2_NodeResourcesAllocatableArgs(in *config.NodeResourcesAllocatableArgs, out *NodeResourcesAllocatableArgs, s conversion.Scope) error { - out.Resources = *(*[]configv1beta2.ResourceSpec)(unsafe.Pointer(&in.Resources)) - out.Mode = ModeType(in.Mode) - return nil -} - -// Convert_config_NodeResourcesAllocatableArgs_To_v1beta2_NodeResourcesAllocatableArgs is an autogenerated conversion function. 
-func Convert_config_NodeResourcesAllocatableArgs_To_v1beta2_NodeResourcesAllocatableArgs(in *config.NodeResourcesAllocatableArgs, out *NodeResourcesAllocatableArgs, s conversion.Scope) error { - return autoConvert_config_NodeResourcesAllocatableArgs_To_v1beta2_NodeResourcesAllocatableArgs(in, out, s) -} - -func autoConvert_v1beta2_PreemptionTolerationArgs_To_config_PreemptionTolerationArgs(in *PreemptionTolerationArgs, out *config.PreemptionTolerationArgs, s conversion.Scope) error { - if err := v1.Convert_Pointer_int32_To_int32(&in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage, s); err != nil { - return err - } - if err := v1.Convert_Pointer_int32_To_int32(&in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta2_PreemptionTolerationArgs_To_config_PreemptionTolerationArgs is an autogenerated conversion function. -func Convert_v1beta2_PreemptionTolerationArgs_To_config_PreemptionTolerationArgs(in *PreemptionTolerationArgs, out *config.PreemptionTolerationArgs, s conversion.Scope) error { - return autoConvert_v1beta2_PreemptionTolerationArgs_To_config_PreemptionTolerationArgs(in, out, s) -} - -func autoConvert_config_PreemptionTolerationArgs_To_v1beta2_PreemptionTolerationArgs(in *config.PreemptionTolerationArgs, out *PreemptionTolerationArgs, s conversion.Scope) error { - if err := v1.Convert_int32_To_Pointer_int32(&in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage, s); err != nil { - return err - } - if err := v1.Convert_int32_To_Pointer_int32(&in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute, s); err != nil { - return err - } - return nil -} - -// Convert_config_PreemptionTolerationArgs_To_v1beta2_PreemptionTolerationArgs is an autogenerated conversion function. -func Convert_config_PreemptionTolerationArgs_To_v1beta2_PreemptionTolerationArgs(in *config.PreemptionTolerationArgs, out *PreemptionTolerationArgs, s conversion.Scope) error { - return autoConvert_config_PreemptionTolerationArgs_To_v1beta2_PreemptionTolerationArgs(in, out, s) -} - -func autoConvert_v1beta2_ScoringStrategy_To_config_ScoringStrategy(in *ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error { - out.Type = config.ScoringStrategyType(in.Type) - out.Resources = *(*[]apisconfig.ResourceSpec)(unsafe.Pointer(&in.Resources)) - return nil -} - -// Convert_v1beta2_ScoringStrategy_To_config_ScoringStrategy is an autogenerated conversion function. -func Convert_v1beta2_ScoringStrategy_To_config_ScoringStrategy(in *ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error { - return autoConvert_v1beta2_ScoringStrategy_To_config_ScoringStrategy(in, out, s) -} - -func autoConvert_config_ScoringStrategy_To_v1beta2_ScoringStrategy(in *config.ScoringStrategy, out *ScoringStrategy, s conversion.Scope) error { - out.Type = ScoringStrategyType(in.Type) - out.Resources = *(*[]configv1beta2.ResourceSpec)(unsafe.Pointer(&in.Resources)) - return nil -} - -// Convert_config_ScoringStrategy_To_v1beta2_ScoringStrategy is an autogenerated conversion function. 
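
The slice conversions above avoid per-element copies: because the versioned and internal ResourceSpec types have identical memory layouts, conversion-gen emits a single unsafe.Pointer cast over the whole slice. A standalone sketch of the idiom, with illustrative stand-in types:

package main

import (
	"fmt"
	"unsafe"
)

// Two structurally identical types, standing in for the internal and
// versioned ResourceSpec types.
type internalSpec struct {
	Name   string
	Weight int64
}

type v1beta2Spec struct {
	Name   string
	Weight int64
}

func main() {
	in := []v1beta2Spec{{Name: "cpu", Weight: 1 << 20}}
	// Identical layouts make a pointer cast of the slice header safe,
	// so no element-by-element copy is needed.
	out := *(*[]internalSpec)(unsafe.Pointer(&in))
	fmt.Println(out[0].Name, out[0].Weight) // cpu 1048576
}
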
-func Convert_config_ScoringStrategy_To_v1beta2_ScoringStrategy(in *config.ScoringStrategy, out *ScoringStrategy, s conversion.Scope) error { - return autoConvert_config_ScoringStrategy_To_v1beta2_ScoringStrategy(in, out, s) -} - -func autoConvert_v1beta2_TargetLoadPackingArgs_To_config_TargetLoadPackingArgs(in *TargetLoadPackingArgs, out *config.TargetLoadPackingArgs, s conversion.Scope) error { - out.DefaultRequests = *(*corev1.ResourceList)(unsafe.Pointer(&in.DefaultRequests)) - if err := v1.Convert_Pointer_string_To_string(&in.DefaultRequestsMultiplier, &out.DefaultRequestsMultiplier, s); err != nil { - return err - } - if err := v1.Convert_Pointer_int64_To_int64(&in.TargetUtilization, &out.TargetUtilization, s); err != nil { - return err - } - // WARNING: in.MetricProvider requires manual conversion: does not exist in peer-type - // WARNING: in.WatcherAddress requires manual conversion: does not exist in peer-type - return nil -} - -func autoConvert_config_TargetLoadPackingArgs_To_v1beta2_TargetLoadPackingArgs(in *config.TargetLoadPackingArgs, out *TargetLoadPackingArgs, s conversion.Scope) error { - // WARNING: in.TrimaranSpec requires manual conversion: does not exist in peer-type - out.DefaultRequests = *(*corev1.ResourceList)(unsafe.Pointer(&in.DefaultRequests)) - if err := v1.Convert_string_To_Pointer_string(&in.DefaultRequestsMultiplier, &out.DefaultRequestsMultiplier, s); err != nil { - return err - } - if err := v1.Convert_int64_To_Pointer_int64(&in.TargetUtilization, &out.TargetUtilization, s); err != nil { - return err - } - return nil -} diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/zz_generated.deepcopy.go deleted file mode 100644 index e8325dd56..000000000 --- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/zz_generated.deepcopy.go +++ /dev/null @@ -1,304 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta2 - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - configv1beta2 "k8s.io/kube-scheduler/config/v1beta2" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CoschedulingArgs) DeepCopyInto(out *CoschedulingArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.PermitWaitingTimeSeconds != nil { - in, out := &in.PermitWaitingTimeSeconds, &out.PermitWaitingTimeSeconds - *out = new(int64) - **out = **in - } - if in.DeniedPGExpirationTimeSeconds != nil { - in, out := &in.DeniedPGExpirationTimeSeconds, &out.DeniedPGExpirationTimeSeconds - *out = new(int64) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoschedulingArgs. 
-func (in *CoschedulingArgs) DeepCopy() *CoschedulingArgs { - if in == nil { - return nil - } - out := new(CoschedulingArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CoschedulingArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LoadVariationRiskBalancingArgs) DeepCopyInto(out *LoadVariationRiskBalancingArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - in.MetricProvider.DeepCopyInto(&out.MetricProvider) - if in.WatcherAddress != nil { - in, out := &in.WatcherAddress, &out.WatcherAddress - *out = new(string) - **out = **in - } - if in.SafeVarianceMargin != nil { - in, out := &in.SafeVarianceMargin, &out.SafeVarianceMargin - *out = new(float64) - **out = **in - } - if in.SafeVarianceSensitivity != nil { - in, out := &in.SafeVarianceSensitivity, &out.SafeVarianceSensitivity - *out = new(float64) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadVariationRiskBalancingArgs. -func (in *LoadVariationRiskBalancingArgs) DeepCopy() *LoadVariationRiskBalancingArgs { - if in == nil { - return nil - } - out := new(LoadVariationRiskBalancingArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LoadVariationRiskBalancingArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MetricProviderSpec) DeepCopyInto(out *MetricProviderSpec) { - *out = *in - if in.Address != nil { - in, out := &in.Address, &out.Address - *out = new(string) - **out = **in - } - if in.Token != nil { - in, out := &in.Token, &out.Token - *out = new(string) - **out = **in - } - if in.InsecureSkipVerify != nil { - in, out := &in.InsecureSkipVerify, &out.InsecureSkipVerify - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricProviderSpec. -func (in *MetricProviderSpec) DeepCopy() *MetricProviderSpec { - if in == nil { - return nil - } - out := new(MetricProviderSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeResourceTopologyMatchArgs) DeepCopyInto(out *NodeResourceTopologyMatchArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.ScoringStrategy != nil { - in, out := &in.ScoringStrategy, &out.ScoringStrategy - *out = new(ScoringStrategy) - (*in).DeepCopyInto(*out) - } - if in.CacheResyncPeriodSeconds != nil { - in, out := &in.CacheResyncPeriodSeconds, &out.CacheResyncPeriodSeconds - *out = new(int64) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceTopologyMatchArgs. 
-func (in *NodeResourceTopologyMatchArgs) DeepCopy() *NodeResourceTopologyMatchArgs { - if in == nil { - return nil - } - out := new(NodeResourceTopologyMatchArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeResourceTopologyMatchArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NodeResourcesAllocatableArgs) DeepCopyInto(out *NodeResourcesAllocatableArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]configv1beta2.ResourceSpec, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesAllocatableArgs. -func (in *NodeResourcesAllocatableArgs) DeepCopy() *NodeResourcesAllocatableArgs { - if in == nil { - return nil - } - out := new(NodeResourcesAllocatableArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NodeResourcesAllocatableArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PreemptionTolerationArgs) DeepCopyInto(out *PreemptionTolerationArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.MinCandidateNodesPercentage != nil { - in, out := &in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage - *out = new(int32) - **out = **in - } - if in.MinCandidateNodesAbsolute != nil { - in, out := &in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreemptionTolerationArgs. -func (in *PreemptionTolerationArgs) DeepCopy() *PreemptionTolerationArgs { - if in == nil { - return nil - } - out := new(PreemptionTolerationArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PreemptionTolerationArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScoringStrategy) DeepCopyInto(out *ScoringStrategy) { - *out = *in - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]configv1beta2.ResourceSpec, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScoringStrategy. -func (in *ScoringStrategy) DeepCopy() *ScoringStrategy { - if in == nil { - return nil - } - out := new(ScoringStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
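
Every DeepCopyInto body in this file follows the same deepcopy-gen idiom: copy the value wholesale, then re-allocate each pointer field so the copy shares no memory with the original. A runnable sketch of the pattern on a simplified type (not the generated code itself):

package main

import "fmt"

type Args struct {
	Period *int64
}

// DeepCopyInto mirrors the generated pattern: pointer fields get a fresh
// allocation so mutating the copy never aliases the original.
func (in *Args) DeepCopyInto(out *Args) {
	*out = *in
	if in.Period != nil {
		in, out := &in.Period, &out.Period // shadowing idiom used by deepcopy-gen
		*out = new(int64)
		**out = **in
	}
}

func main() {
	p := int64(5)
	a := Args{Period: &p}
	var b Args
	a.DeepCopyInto(&b)
	*b.Period = 7
	fmt.Println(*a.Period, *b.Period) // 5 7 - no shared pointer
}
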
-func (in *TargetLoadPackingArgs) DeepCopyInto(out *TargetLoadPackingArgs) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.DefaultRequests != nil { - in, out := &in.DefaultRequests, &out.DefaultRequests - *out = make(v1.ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.DefaultRequestsMultiplier != nil { - in, out := &in.DefaultRequestsMultiplier, &out.DefaultRequestsMultiplier - *out = new(string) - **out = **in - } - if in.TargetUtilization != nil { - in, out := &in.TargetUtilization, &out.TargetUtilization - *out = new(int64) - **out = **in - } - in.MetricProvider.DeepCopyInto(&out.MetricProvider) - if in.WatcherAddress != nil { - in, out := &in.WatcherAddress, &out.WatcherAddress - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetLoadPackingArgs. -func (in *TargetLoadPackingArgs) DeepCopy() *TargetLoadPackingArgs { - if in == nil { - return nil - } - out := new(TargetLoadPackingArgs) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TargetLoadPackingArgs) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/zz_generated.defaults.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/zz_generated.defaults.go deleted file mode 100644 index a0b61820f..000000000 --- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta2/zz_generated.defaults.go +++ /dev/null @@ -1,69 +0,0 @@ -//go:build !ignore_autogenerated -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by defaulter-gen. DO NOT EDIT. - -package v1beta2 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. 
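
RegisterDefaults below wires each SetObjectDefaults_* function into a runtime.Scheme, which is how callers applied these defaults without invoking the setters directly. A sketch of typical consumer-side usage while this package was still vendored, assuming v1beta2's register.go wires RegisterDefaults into AddToScheme the same way the v1beta3 register.go later in this diff does:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"sigs.k8s.io/scheduler-plugins/apis/config/v1beta2"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme registers the types plus their defaulting functions.
	utilruntime.Must(v1beta2.AddToScheme(scheme))
	args := &v1beta2.TargetLoadPackingArgs{}
	// Default dispatches to SetObjectDefaults_TargetLoadPackingArgs.
	scheme.Default(args)
	fmt.Println(*args.TargetUtilization) // 40 with the stock defaults
}
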
-func RegisterDefaults(scheme *runtime.Scheme) error { - scheme.AddTypeDefaultingFunc(&CoschedulingArgs{}, func(obj interface{}) { SetObjectDefaults_CoschedulingArgs(obj.(*CoschedulingArgs)) }) - scheme.AddTypeDefaultingFunc(&LoadVariationRiskBalancingArgs{}, func(obj interface{}) { - SetObjectDefaults_LoadVariationRiskBalancingArgs(obj.(*LoadVariationRiskBalancingArgs)) - }) - scheme.AddTypeDefaultingFunc(&NodeResourceTopologyMatchArgs{}, func(obj interface{}) { - SetObjectDefaults_NodeResourceTopologyMatchArgs(obj.(*NodeResourceTopologyMatchArgs)) - }) - scheme.AddTypeDefaultingFunc(&NodeResourcesAllocatableArgs{}, func(obj interface{}) { - SetObjectDefaults_NodeResourcesAllocatableArgs(obj.(*NodeResourcesAllocatableArgs)) - }) - scheme.AddTypeDefaultingFunc(&PreemptionTolerationArgs{}, func(obj interface{}) { SetObjectDefaults_PreemptionTolerationArgs(obj.(*PreemptionTolerationArgs)) }) - scheme.AddTypeDefaultingFunc(&TargetLoadPackingArgs{}, func(obj interface{}) { SetObjectDefaults_TargetLoadPackingArgs(obj.(*TargetLoadPackingArgs)) }) - return nil -} - -func SetObjectDefaults_CoschedulingArgs(in *CoschedulingArgs) { - SetDefaults_CoschedulingArgs(in) -} - -func SetObjectDefaults_LoadVariationRiskBalancingArgs(in *LoadVariationRiskBalancingArgs) { - SetDefaults_LoadVariationRiskBalancingArgs(in) -} - -func SetObjectDefaults_NodeResourceTopologyMatchArgs(in *NodeResourceTopologyMatchArgs) { - SetDefaults_NodeResourceTopologyMatchArgs(in) -} - -func SetObjectDefaults_NodeResourcesAllocatableArgs(in *NodeResourcesAllocatableArgs) { - SetDefaults_NodeResourcesAllocatableArgs(in) -} - -func SetObjectDefaults_PreemptionTolerationArgs(in *PreemptionTolerationArgs) { - SetDefaults_PreemptionTolerationArgs(in) -} - -func SetObjectDefaults_TargetLoadPackingArgs(in *TargetLoadPackingArgs) { - SetDefaults_TargetLoadPackingArgs(in) -} diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/conversion.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/conversion.go deleted file mode 100644 index 5df73573a..000000000 --- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/conversion.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta3 - -import ( - "unsafe" - - "k8s.io/apimachinery/pkg/conversion" - - "sigs.k8s.io/scheduler-plugins/apis/config" -) - -// This file stores all necessary manual conversion bits, to leave zz_generated*.go intact after code generation. - -func Convert_v1beta3_NodeResourceTopologyMatchArgs_To_config_NodeResourceTopologyMatchArgs(in *NodeResourceTopologyMatchArgs, out *config.NodeResourceTopologyMatchArgs, s conversion.Scope) error { - if err := autoConvert_v1beta3_NodeResourceTopologyMatchArgs_To_config_NodeResourceTopologyMatchArgs(in, out, s); err != nil { - return err - } - // Manual conversions. 
-	out.ScoringStrategy = *(*config.ScoringStrategy)(unsafe.Pointer(in.ScoringStrategy))
-	return nil
-}
-
-func Convert_config_NodeResourceTopologyMatchArgs_To_v1beta3_NodeResourceTopologyMatchArgs(in *config.NodeResourceTopologyMatchArgs, out *NodeResourceTopologyMatchArgs, s conversion.Scope) error {
-	if err := autoConvert_config_NodeResourceTopologyMatchArgs_To_v1beta3_NodeResourceTopologyMatchArgs(in, out, s); err != nil {
-		return err
-	}
-	out.ScoringStrategy = (*ScoringStrategy)(unsafe.Pointer(&in.ScoringStrategy))
-	return nil
-}
diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/defaults.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/defaults.go
deleted file mode 100644
index 637beaf61..000000000
--- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/defaults.go
+++ /dev/null
@@ -1,181 +0,0 @@
-/*
-Copyright 2021 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta3
-
-import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"strconv"
-
-	v1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/api/resource"
-	schedulerconfigv1beta3 "k8s.io/kube-scheduler/config/v1beta3"
-	k8sschedulerconfigv1beta3 "k8s.io/kubernetes/pkg/scheduler/apis/config/v1beta3"
-)
-
-var (
-	defaultPermitWaitingTimeSeconds int64 = 60
-
-	defaultNodeResourcesAllocatableMode = Least
-
-	// defaultResourcesToWeightMap is used to set the default resourceToWeight map for CPU and memory
-	// used by the NodeResourcesAllocatable scoring plugin.
-	// The base unit for CPU is millicore, while the base unit for memory is a byte.
-	// The default CPU weight is 1<<20 and default memory weight is 1. That means a millicore
-	// has a weighted score equivalent to 1 MiB.
-	defaultNodeResourcesAllocatableResourcesToWeightMap = []schedulerconfigv1beta3.ResourceSpec{
-		{Name: "cpu", Weight: 1 << 20}, {Name: "memory", Weight: 1},
-	}
-
-	// Defaults for TargetLoadPacking plugin
-
-	// Default 1 core CPU usage for containers without requests and limits i.e. Best Effort QoS.
-	DefaultRequestsMilliCores int64 = 1000
-	// DefaultRequestsMultiplier for containers without limits predicted as 1.5*requests i.e. Burstable QoS class
-	DefaultRequestsMultiplier = "1.5"
-	// DefaultTargetUtilizationPercent is recommended to be kept about 10 below the desired limit.
-	DefaultTargetUtilizationPercent int64 = 40
-
-	// Defaults for LoadVariationRiskBalancing plugin
-
-	// Risk is usually calculated as average (aka. mu) plus standard deviation (aka. sigma).
-	// In order to allow customization in the calculation of risk, two parameters are provided:
-	// Margin and Sensitivity. Margin is a multiplier of sigma, and Sensitivity is a root power of sigma.
-	// For example, Margin=3 and Sensitivity=2 leads to a risk evaluated as: mu + 3 sqrt(sigma).
-	// The default value for both parameters is 1, leading to: mu + sigma.
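
The two comment blocks above pin the defaults down numerically: risk is mu + margin * sigma^(1/sensitivity), and TargetLoadPacking predicts CPU from the request-related defaults. A small runnable illustration of both formulas (plain arithmetic standing in for the plugins' actual scoring code):

package main

import (
	"fmt"
	"math"
)

// risk mirrors the formula documented above:
// mu + margin * sigma^(1/sensitivity).
func risk(mu, sigma, margin, sensitivity float64) float64 {
	return mu + margin*math.Pow(sigma, 1/sensitivity)
}

// predictedMilliCPU shows the role of the TargetLoadPacking defaults for a
// hypothetical container: 1000m for best effort, 1.5x the CPU request for
// burstable workloads.
func predictedMilliCPU(requestMilli int64, hasLimit bool) int64 {
	switch {
	case requestMilli == 0:
		return 1000 // DefaultRequestsMilliCores
	case !hasLimit:
		return requestMilli * 3 / 2 // DefaultRequestsMultiplier = 1.5
	default:
		return requestMilli
	}
}

func main() {
	fmt.Println(risk(0.5, 0.2, 1, 1))  // defaults: mu + sigma = 0.7
	fmt.Println(risk(0.5, 0.04, 3, 2)) // mu + 3*sqrt(sigma) = 1.1
	fmt.Println(predictedMilliCPU(0, false), predictedMilliCPU(400, false)) // 1000 600
}
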
-	// DefaultSafeVarianceMargin is one
-	DefaultSafeVarianceMargin = 1.0
-	// DefaultSafeVarianceSensitivity is one
-	DefaultSafeVarianceSensitivity = 1.0
-
-	// DefaultMetricProviderType is the Kubernetes metrics server
-	DefaultMetricProviderType = KubernetesMetricsServer
-	// DefaultInsecureSkipVerify is whether to skip the certificate verification
-	DefaultInsecureSkipVerify = true
-
-	defaultResourceSpec = []schedulerconfigv1beta3.ResourceSpec{
-		{Name: string(v1.ResourceCPU), Weight: 1},
-		{Name: string(v1.ResourceMemory), Weight: 1},
-	}
-
-	// Defaults for NetworkOverhead
-	// DefaultWeightsName contains the default costs to be used by networkAware plugins
-	DefaultWeightsName = "UserDefined"
-	// DefaultNetworkTopologyName contains the networkTopology CR name to be used by networkAware plugins
-	DefaultNetworkTopologyName = "nt-default"
-)
-
-// SetDefaults_CoschedulingArgs sets the default parameters for Coscheduling plugin.
-func SetDefaults_CoschedulingArgs(obj *CoschedulingArgs) {
-	if obj.PermitWaitingTimeSeconds == nil {
-		obj.PermitWaitingTimeSeconds = &defaultPermitWaitingTimeSeconds
-	}
-}
-
-// SetDefaults_NodeResourcesAllocatableArgs sets the default parameters for NodeResourceAllocatable.
-func SetDefaults_NodeResourcesAllocatableArgs(obj *NodeResourcesAllocatableArgs) {
-	if len(obj.Resources) == 0 {
-		obj.Resources = defaultNodeResourcesAllocatableResourcesToWeightMap
-	}
-
-	if obj.Mode == "" {
-		obj.Mode = defaultNodeResourcesAllocatableMode
-	}
-}
-
-// SetDefaultTrimaranSpec sets the default parameters for common Trimaran plugins
-func SetDefaultTrimaranSpec(args *TrimaranSpec) {
-	if args.WatcherAddress == nil && args.MetricProvider.Type == "" {
-		args.MetricProvider.Type = MetricProviderType(DefaultMetricProviderType)
-	}
-	if args.MetricProvider.Type == Prometheus && args.MetricProvider.InsecureSkipVerify == nil {
-		args.MetricProvider.InsecureSkipVerify = &DefaultInsecureSkipVerify
-	}
-}
-
-// SetDefaults_TargetLoadPackingArgs sets the default parameters for TargetLoadPacking plugin
-func SetDefaults_TargetLoadPackingArgs(args *TargetLoadPackingArgs) {
-	SetDefaultTrimaranSpec(&args.TrimaranSpec)
-	if args.DefaultRequests == nil {
-		args.DefaultRequests = v1.ResourceList{v1.ResourceCPU: resource.MustParse(
-			strconv.FormatInt(DefaultRequestsMilliCores, 10) + "m")}
-	}
-	if args.DefaultRequestsMultiplier == nil {
-		args.DefaultRequestsMultiplier = &DefaultRequestsMultiplier
-	}
-	if args.TargetUtilization == nil || *args.TargetUtilization <= 0 {
-		args.TargetUtilization = &DefaultTargetUtilizationPercent
-	}
-}
-
-// SetDefaults_LoadVariationRiskBalancingArgs sets the default parameters for LoadVariationRiskBalancing plugin
-func SetDefaults_LoadVariationRiskBalancingArgs(args *LoadVariationRiskBalancingArgs) {
-	SetDefaultTrimaranSpec(&args.TrimaranSpec)
-	if args.SafeVarianceMargin == nil || *args.SafeVarianceMargin < 0 {
-		args.SafeVarianceMargin = &DefaultSafeVarianceMargin
-	}
-	if args.SafeVarianceSensitivity == nil || *args.SafeVarianceSensitivity < 0 {
-		args.SafeVarianceSensitivity = &DefaultSafeVarianceSensitivity
-	}
-}
-
-// SetDefaults_NodeResourceTopologyMatchArgs sets the default parameters for NodeResourceTopologyMatch plugin.
-func SetDefaults_NodeResourceTopologyMatchArgs(obj *NodeResourceTopologyMatchArgs) { - if obj.ScoringStrategy == nil { - obj.ScoringStrategy = &ScoringStrategy{ - Type: LeastAllocated, - Resources: defaultResourceSpec, - } - } - - if len(obj.ScoringStrategy.Resources) == 0 { - // If no resources specified, use the default set. - obj.ScoringStrategy.Resources = append(obj.ScoringStrategy.Resources, defaultResourceSpec...) - } - - for i := range obj.ScoringStrategy.Resources { - if obj.ScoringStrategy.Resources[i].Weight == 0 { - obj.ScoringStrategy.Resources[i].Weight = 1 - } - } -} - -// SetDefaults_PreemptionTolerationArgs reuses SetDefaults_DefaultPreemptionArgs -func SetDefaults_PreemptionTolerationArgs(obj *PreemptionTolerationArgs) { - k8sschedulerconfigv1beta3.SetDefaults_DefaultPreemptionArgs((*schedulerconfigv1beta3.DefaultPreemptionArgs)(obj)) -} - -// SetDefaults_TopologicalSortArgs sets the default parameters for TopologicalSortArgs plugin. -func SetDefaults_TopologicalSortArgs(obj *TopologicalSortArgs) { - if len(obj.Namespaces) == 0 { - obj.Namespaces = []string{metav1.NamespaceDefault} - } -} - -// SetDefaults_NetworkOverheadArgs sets the default parameters for NetworkMinCostArgs plugin. -func SetDefaults_NetworkOverheadArgs(obj *NetworkOverheadArgs) { - if len(obj.Namespaces) == 0 { - obj.Namespaces = []string{metav1.NamespaceDefault} - } - - if obj.WeightsName == nil { - obj.WeightsName = &DefaultWeightsName - } - - if obj.NetworkTopologyName == nil { - obj.NetworkTopologyName = &DefaultNetworkTopologyName - } -} diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/doc.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/doc.go deleted file mode 100644 index 609f3a33b..000000000 --- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:conversion-gen=sigs.k8s.io/scheduler-plugins/apis/config -// +k8s:defaulter-gen=TypeMeta -// +k8s:defaulter-gen-input=sigs.k8s.io/scheduler-plugins/apis/config/v1beta3 - -// +groupName=kubescheduler.config.k8s.io - -// Package v1beta3 is the v1beta3 version of the API. -package v1beta3 // import "sigs.k8s.io/scheduler-plugins/apis/config/v1beta3" diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/register.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/register.go deleted file mode 100644 index 639421130..000000000 --- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/register.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta3 - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - schedschemev1beta3 "k8s.io/kube-scheduler/config/v1beta3" - schedconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" -) - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: schedconfig.GroupName, Version: "v1beta3"} - -var ( - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - localSchemeBuilder = &schedschemev1beta3.SchemeBuilder - // AddToScheme is a global function that registers this API group & version to a scheme - AddToScheme = localSchemeBuilder.AddToScheme -) - -// addKnownTypes registers known types to the given scheme -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &CoschedulingArgs{}, - &NodeResourcesAllocatableArgs{}, - &TargetLoadPackingArgs{}, - &LoadVariationRiskBalancingArgs{}, - &NodeResourceTopologyMatchArgs{}, - &PreemptionTolerationArgs{}, - &TopologicalSortArgs{}, - &NetworkOverheadArgs{}, - ) - return nil -} - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) - localSchemeBuilder.Register(RegisterDefaults) - localSchemeBuilder.Register(RegisterConversions) -} diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/types.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/types.go deleted file mode 100644 index de1bcce5f..000000000 --- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/types.go +++ /dev/null @@ -1,179 +0,0 @@ -/* -Copyright 2022 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta3 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - schedulerconfigv1beta3 "k8s.io/kube-scheduler/config/v1beta3" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CoschedulingArgs defines the scheduling parameters for Coscheduling plugin. -type CoschedulingArgs struct { - metav1.TypeMeta `json:",inline"` - - // PermitWaitingTimeSeconds is the waiting timeout in seconds. - PermitWaitingTimeSeconds *int64 `json:"permitWaitingTimeSeconds,omitempty"` -} - -// ModeType is a type "string". -type ModeType string - -const ( - // Least is the string "Least". - Least ModeType = "Least" - // Most is the string "Most". 
-	Most ModeType = "Most"
-)
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// NodeResourcesAllocatableArgs holds arguments used to configure NodeResourcesAllocatable plugin.
-type NodeResourcesAllocatableArgs struct {
-	metav1.TypeMeta `json:",inline"`
-
-	// Resources to be considered when scoring.
-	// Allowed weights start from 1.
-	// An example resource set might include "cpu" (millicores) and "memory" (bytes)
-	// with weights of 1<<20 and 1 respectively. That would mean 1 MiB has equivalent
-	// weight as 1 millicore.
-	Resources []schedulerconfigv1beta3.ResourceSpec `json:"resources,omitempty"`
-
-	// Whether to prioritize nodes with least or most allocatable resources.
-	Mode ModeType `json:"mode,omitempty"`
-}
-
-// MetricProviderType is a "string" type.
-type MetricProviderType string
-
-const (
-	KubernetesMetricsServer MetricProviderType = "KubernetesMetricsServer"
-	Prometheus              MetricProviderType = "Prometheus"
-	SignalFx                MetricProviderType = "SignalFx"
-)
-
-// MetricProviderSpec denotes the spec of the metric provider.
-type MetricProviderSpec struct {
-	// Type of the metric provider
-	Type MetricProviderType `json:"type,omitempty"`
-	// The address of the metric provider
-	Address *string `json:"address,omitempty"`
-	// The authentication token of the metric provider
-	Token *string `json:"token,omitempty"`
-	// Whether to enable the InsecureSkipVerify option for HTTPS requests on the Prometheus metric provider.
-	InsecureSkipVerify *bool `json:"insecureSkipVerify,omitempty"`
-}
-
-// TrimaranSpec holds common parameters for trimaran plugins
-type TrimaranSpec struct {
-	// Metric Provider specification when using load watcher as library
-	MetricProvider MetricProviderSpec `json:"metricProvider,omitempty"`
-	// Address of load watcher service
-	WatcherAddress *string `json:"watcherAddress,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:defaulter-gen=true
-
-// TargetLoadPackingArgs holds arguments used to configure TargetLoadPacking plugin.
-type TargetLoadPackingArgs struct {
-	metav1.TypeMeta `json:",inline"`
-
-	// Common parameters for trimaran plugins
-	TrimaranSpec `json:",inline"`
-	// Default requests to use for best effort QoS
-	DefaultRequests v1.ResourceList `json:"defaultRequests,omitempty"`
-	// Default requests multiplier for burstable QoS
-	DefaultRequestsMultiplier *string `json:"defaultRequestsMultiplier,omitempty"`
-	// Node target CPU Utilization for bin packing
-	TargetUtilization *int64 `json:"targetUtilization,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-// +k8s:defaulter-gen=true
-
-// LoadVariationRiskBalancingArgs holds arguments used to configure LoadVariationRiskBalancing plugin.
-type LoadVariationRiskBalancingArgs struct {
-	metav1.TypeMeta `json:",inline"`
-
-	// Common parameters for trimaran plugins
-	TrimaranSpec `json:",inline"`
-	// Multiplier of standard deviation in risk value
-	SafeVarianceMargin *float64 `json:"safeVarianceMargin,omitempty"`
-	// Root power of standard deviation in risk value
-	SafeVarianceSensitivity *float64 `json:"safeVarianceSensitivity,omitempty"`
-}
-
-// ScoringStrategyType is a "string" type.
-type ScoringStrategyType string
-
-const (
-	// MostAllocated strategy favors nodes with the least amount of available resource
-	MostAllocated ScoringStrategyType = "MostAllocated"
-	// BalancedAllocation strategy favors nodes with balanced resource usage rate
-	BalancedAllocation ScoringStrategyType = "BalancedAllocation"
-	// LeastAllocated strategy favors nodes with the most amount of available resource
-	LeastAllocated ScoringStrategyType = "LeastAllocated"
-)
-
-type ScoringStrategy struct {
-	Type      ScoringStrategyType                   `json:"type,omitempty"`
-	Resources []schedulerconfigv1beta3.ResourceSpec `json:"resources,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// NodeResourceTopologyMatchArgs holds arguments used to configure the NodeResourceTopologyMatch plugin
-type NodeResourceTopologyMatchArgs struct {
-	metav1.TypeMeta `json:",inline"`
-
-	// ScoringStrategy is the scoring model that determines how the plugin will score the nodes.
-	ScoringStrategy *ScoringStrategy `json:"scoringStrategy,omitempty"`
-	// If > 0, enables the caching facilities of the reserve plugin - which must be enabled
-	CacheResyncPeriodSeconds *int64 `json:"cacheResyncPeriodSeconds,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-// PreemptionTolerationArgs reuses DefaultPluginArgs.
-type PreemptionTolerationArgs schedulerconfigv1beta3.DefaultPreemptionArgs
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type TopologicalSortArgs struct {
-	metav1.TypeMeta `json:",inline"`
-
-	// Namespaces to be considered by TopologySort plugin
-	Namespaces []string `json:"namespaces,omitempty"`
-}
-
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
-
-type NetworkOverheadArgs struct {
-	metav1.TypeMeta `json:",inline"`
-
-	// Namespaces to be considered by NetworkMinCost plugin
-	Namespaces []string `json:"namespaces,omitempty"`
-
-	// Preferred weights (Default: UserDefined)
-	WeightsName *string `json:"weightsName,omitempty"`
-
-	// The NetworkTopology CRD name
-	NetworkTopologyName *string `json:"networkTopologyName,omitempty"`
-}
diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/zz_generated.conversion.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/zz_generated.conversion.go
deleted file mode 100644
index ae97af823..000000000
--- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/zz_generated.conversion.go
+++ /dev/null
@@ -1,462 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by conversion-gen. DO NOT EDIT.
- -package v1beta3 - -import ( - unsafe "unsafe" - - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - conversion "k8s.io/apimachinery/pkg/conversion" - runtime "k8s.io/apimachinery/pkg/runtime" - configv1beta3 "k8s.io/kube-scheduler/config/v1beta3" - apisconfig "k8s.io/kubernetes/pkg/scheduler/apis/config" - config "sigs.k8s.io/scheduler-plugins/apis/config" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*CoschedulingArgs)(nil), (*config.CoschedulingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_CoschedulingArgs_To_config_CoschedulingArgs(a.(*CoschedulingArgs), b.(*config.CoschedulingArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.CoschedulingArgs)(nil), (*CoschedulingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_CoschedulingArgs_To_v1beta3_CoschedulingArgs(a.(*config.CoschedulingArgs), b.(*CoschedulingArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*LoadVariationRiskBalancingArgs)(nil), (*config.LoadVariationRiskBalancingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_LoadVariationRiskBalancingArgs_To_config_LoadVariationRiskBalancingArgs(a.(*LoadVariationRiskBalancingArgs), b.(*config.LoadVariationRiskBalancingArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.LoadVariationRiskBalancingArgs)(nil), (*LoadVariationRiskBalancingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_LoadVariationRiskBalancingArgs_To_v1beta3_LoadVariationRiskBalancingArgs(a.(*config.LoadVariationRiskBalancingArgs), b.(*LoadVariationRiskBalancingArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*MetricProviderSpec)(nil), (*config.MetricProviderSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_MetricProviderSpec_To_config_MetricProviderSpec(a.(*MetricProviderSpec), b.(*config.MetricProviderSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.MetricProviderSpec)(nil), (*MetricProviderSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_MetricProviderSpec_To_v1beta3_MetricProviderSpec(a.(*config.MetricProviderSpec), b.(*MetricProviderSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NetworkOverheadArgs)(nil), (*config.NetworkOverheadArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_NetworkOverheadArgs_To_config_NetworkOverheadArgs(a.(*NetworkOverheadArgs), b.(*config.NetworkOverheadArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.NetworkOverheadArgs)(nil), (*NetworkOverheadArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_NetworkOverheadArgs_To_v1beta3_NetworkOverheadArgs(a.(*config.NetworkOverheadArgs), b.(*NetworkOverheadArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*NodeResourcesAllocatableArgs)(nil), (*config.NodeResourcesAllocatableArgs)(nil), func(a, 
b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_NodeResourcesAllocatableArgs_To_config_NodeResourcesAllocatableArgs(a.(*NodeResourcesAllocatableArgs), b.(*config.NodeResourcesAllocatableArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.NodeResourcesAllocatableArgs)(nil), (*NodeResourcesAllocatableArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_NodeResourcesAllocatableArgs_To_v1beta3_NodeResourcesAllocatableArgs(a.(*config.NodeResourcesAllocatableArgs), b.(*NodeResourcesAllocatableArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*PreemptionTolerationArgs)(nil), (*config.PreemptionTolerationArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_PreemptionTolerationArgs_To_config_PreemptionTolerationArgs(a.(*PreemptionTolerationArgs), b.(*config.PreemptionTolerationArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.PreemptionTolerationArgs)(nil), (*PreemptionTolerationArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_PreemptionTolerationArgs_To_v1beta3_PreemptionTolerationArgs(a.(*config.PreemptionTolerationArgs), b.(*PreemptionTolerationArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*ScoringStrategy)(nil), (*config.ScoringStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_ScoringStrategy_To_config_ScoringStrategy(a.(*ScoringStrategy), b.(*config.ScoringStrategy), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.ScoringStrategy)(nil), (*ScoringStrategy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_ScoringStrategy_To_v1beta3_ScoringStrategy(a.(*config.ScoringStrategy), b.(*ScoringStrategy), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*TargetLoadPackingArgs)(nil), (*config.TargetLoadPackingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_TargetLoadPackingArgs_To_config_TargetLoadPackingArgs(a.(*TargetLoadPackingArgs), b.(*config.TargetLoadPackingArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.TargetLoadPackingArgs)(nil), (*TargetLoadPackingArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_TargetLoadPackingArgs_To_v1beta3_TargetLoadPackingArgs(a.(*config.TargetLoadPackingArgs), b.(*TargetLoadPackingArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*TopologicalSortArgs)(nil), (*config.TopologicalSortArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_TopologicalSortArgs_To_config_TopologicalSortArgs(a.(*TopologicalSortArgs), b.(*config.TopologicalSortArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.TopologicalSortArgs)(nil), (*TopologicalSortArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_TopologicalSortArgs_To_v1beta3_TopologicalSortArgs(a.(*config.TopologicalSortArgs), b.(*TopologicalSortArgs), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*TrimaranSpec)(nil), (*config.TrimaranSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1beta3_TrimaranSpec_To_config_TrimaranSpec(a.(*TrimaranSpec), b.(*config.TrimaranSpec), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*config.TrimaranSpec)(nil), (*TrimaranSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_TrimaranSpec_To_v1beta3_TrimaranSpec(a.(*config.TrimaranSpec), b.(*TrimaranSpec), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*config.NodeResourceTopologyMatchArgs)(nil), (*NodeResourceTopologyMatchArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_config_NodeResourceTopologyMatchArgs_To_v1beta3_NodeResourceTopologyMatchArgs(a.(*config.NodeResourceTopologyMatchArgs), b.(*NodeResourceTopologyMatchArgs), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*NodeResourceTopologyMatchArgs)(nil), (*config.NodeResourceTopologyMatchArgs)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1beta3_NodeResourceTopologyMatchArgs_To_config_NodeResourceTopologyMatchArgs(a.(*NodeResourceTopologyMatchArgs), b.(*config.NodeResourceTopologyMatchArgs), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_v1beta3_CoschedulingArgs_To_config_CoschedulingArgs(in *CoschedulingArgs, out *config.CoschedulingArgs, s conversion.Scope) error { - if err := v1.Convert_Pointer_int64_To_int64(&in.PermitWaitingTimeSeconds, &out.PermitWaitingTimeSeconds, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta3_CoschedulingArgs_To_config_CoschedulingArgs is an autogenerated conversion function. -func Convert_v1beta3_CoschedulingArgs_To_config_CoschedulingArgs(in *CoschedulingArgs, out *config.CoschedulingArgs, s conversion.Scope) error { - return autoConvert_v1beta3_CoschedulingArgs_To_config_CoschedulingArgs(in, out, s) -} - -func autoConvert_config_CoschedulingArgs_To_v1beta3_CoschedulingArgs(in *config.CoschedulingArgs, out *CoschedulingArgs, s conversion.Scope) error { - if err := v1.Convert_int64_To_Pointer_int64(&in.PermitWaitingTimeSeconds, &out.PermitWaitingTimeSeconds, s); err != nil { - return err - } - return nil -} - -// Convert_config_CoschedulingArgs_To_v1beta3_CoschedulingArgs is an autogenerated conversion function. -func Convert_config_CoschedulingArgs_To_v1beta3_CoschedulingArgs(in *config.CoschedulingArgs, out *CoschedulingArgs, s conversion.Scope) error { - return autoConvert_config_CoschedulingArgs_To_v1beta3_CoschedulingArgs(in, out, s) -} - -func autoConvert_v1beta3_LoadVariationRiskBalancingArgs_To_config_LoadVariationRiskBalancingArgs(in *LoadVariationRiskBalancingArgs, out *config.LoadVariationRiskBalancingArgs, s conversion.Scope) error { - if err := Convert_v1beta3_TrimaranSpec_To_config_TrimaranSpec(&in.TrimaranSpec, &out.TrimaranSpec, s); err != nil { - return err - } - if err := v1.Convert_Pointer_float64_To_float64(&in.SafeVarianceMargin, &out.SafeVarianceMargin, s); err != nil { - return err - } - if err := v1.Convert_Pointer_float64_To_float64(&in.SafeVarianceSensitivity, &out.SafeVarianceSensitivity, s); err != nil { - return err - } - return nil -} - -// Convert_v1beta3_LoadVariationRiskBalancingArgs_To_config_LoadVariationRiskBalancingArgs is an autogenerated conversion function. 
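
Unlike the v1beta2 file earlier in this diff, where MetricProvider and WatcherAddress surfaced as manual-conversion warnings, v1beta3 hoists them into the embedded TrimaranSpec, so each trimaran Args conversion simply delegates to one shared spec helper. An illustrative, self-contained sketch of that delegation (all types here are stand-ins, not the vendored ones):

package main

import "fmt"

// Shared fields live once in the embedded spec.
type trimaranSpec struct{ WatcherAddress *string }

type v1beta3Args struct {
	trimaranSpec
	SafeVarianceMargin *float64
}

type internalSpec struct{ WatcherAddress string }

type internalArgs struct {
	internalSpec
	SafeVarianceMargin float64
}

func convertSpec(in *trimaranSpec, out *internalSpec) {
	if in.WatcherAddress != nil {
		out.WatcherAddress = *in.WatcherAddress
	}
}

func convertArgs(in *v1beta3Args, out *internalArgs) {
	convertSpec(&in.trimaranSpec, &out.internalSpec) // one shared helper
	if in.SafeVarianceMargin != nil {
		out.SafeVarianceMargin = *in.SafeVarianceMargin
	}
}

func main() {
	addr := "load-watcher:2020"
	m := 1.0
	in := v1beta3Args{trimaranSpec{&addr}, &m}
	var out internalArgs
	convertArgs(&in, &out)
	fmt.Println(out.WatcherAddress, out.SafeVarianceMargin)
}
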
-func Convert_v1beta3_LoadVariationRiskBalancingArgs_To_config_LoadVariationRiskBalancingArgs(in *LoadVariationRiskBalancingArgs, out *config.LoadVariationRiskBalancingArgs, s conversion.Scope) error {
-	return autoConvert_v1beta3_LoadVariationRiskBalancingArgs_To_config_LoadVariationRiskBalancingArgs(in, out, s)
-}
-
-func autoConvert_config_LoadVariationRiskBalancingArgs_To_v1beta3_LoadVariationRiskBalancingArgs(in *config.LoadVariationRiskBalancingArgs, out *LoadVariationRiskBalancingArgs, s conversion.Scope) error {
-	if err := Convert_config_TrimaranSpec_To_v1beta3_TrimaranSpec(&in.TrimaranSpec, &out.TrimaranSpec, s); err != nil {
-		return err
-	}
-	if err := v1.Convert_float64_To_Pointer_float64(&in.SafeVarianceMargin, &out.SafeVarianceMargin, s); err != nil {
-		return err
-	}
-	if err := v1.Convert_float64_To_Pointer_float64(&in.SafeVarianceSensitivity, &out.SafeVarianceSensitivity, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_config_LoadVariationRiskBalancingArgs_To_v1beta3_LoadVariationRiskBalancingArgs is an autogenerated conversion function.
-func Convert_config_LoadVariationRiskBalancingArgs_To_v1beta3_LoadVariationRiskBalancingArgs(in *config.LoadVariationRiskBalancingArgs, out *LoadVariationRiskBalancingArgs, s conversion.Scope) error {
-	return autoConvert_config_LoadVariationRiskBalancingArgs_To_v1beta3_LoadVariationRiskBalancingArgs(in, out, s)
-}
-
-func autoConvert_v1beta3_MetricProviderSpec_To_config_MetricProviderSpec(in *MetricProviderSpec, out *config.MetricProviderSpec, s conversion.Scope) error {
-	out.Type = config.MetricProviderType(in.Type)
-	if err := v1.Convert_Pointer_string_To_string(&in.Address, &out.Address, s); err != nil {
-		return err
-	}
-	if err := v1.Convert_Pointer_string_To_string(&in.Token, &out.Token, s); err != nil {
-		return err
-	}
-	if err := v1.Convert_Pointer_bool_To_bool(&in.InsecureSkipVerify, &out.InsecureSkipVerify, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1beta3_MetricProviderSpec_To_config_MetricProviderSpec is an autogenerated conversion function.
-func Convert_v1beta3_MetricProviderSpec_To_config_MetricProviderSpec(in *MetricProviderSpec, out *config.MetricProviderSpec, s conversion.Scope) error {
-	return autoConvert_v1beta3_MetricProviderSpec_To_config_MetricProviderSpec(in, out, s)
-}
-
-func autoConvert_config_MetricProviderSpec_To_v1beta3_MetricProviderSpec(in *config.MetricProviderSpec, out *MetricProviderSpec, s conversion.Scope) error {
-	out.Type = MetricProviderType(in.Type)
-	if err := v1.Convert_string_To_Pointer_string(&in.Address, &out.Address, s); err != nil {
-		return err
-	}
-	if err := v1.Convert_string_To_Pointer_string(&in.Token, &out.Token, s); err != nil {
-		return err
-	}
-	if err := v1.Convert_bool_To_Pointer_bool(&in.InsecureSkipVerify, &out.InsecureSkipVerify, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_config_MetricProviderSpec_To_v1beta3_MetricProviderSpec is an autogenerated conversion function.
-func Convert_config_MetricProviderSpec_To_v1beta3_MetricProviderSpec(in *config.MetricProviderSpec, out *MetricProviderSpec, s conversion.Scope) error {
-	return autoConvert_config_MetricProviderSpec_To_v1beta3_MetricProviderSpec(in, out, s)
-}
-
-func autoConvert_v1beta3_NetworkOverheadArgs_To_config_NetworkOverheadArgs(in *NetworkOverheadArgs, out *config.NetworkOverheadArgs, s conversion.Scope) error {
-	out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces))
-	if err := v1.Convert_Pointer_string_To_string(&in.WeightsName, &out.WeightsName, s); err != nil {
-		return err
-	}
-	if err := v1.Convert_Pointer_string_To_string(&in.NetworkTopologyName, &out.NetworkTopologyName, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1beta3_NetworkOverheadArgs_To_config_NetworkOverheadArgs is an autogenerated conversion function.
-func Convert_v1beta3_NetworkOverheadArgs_To_config_NetworkOverheadArgs(in *NetworkOverheadArgs, out *config.NetworkOverheadArgs, s conversion.Scope) error {
-	return autoConvert_v1beta3_NetworkOverheadArgs_To_config_NetworkOverheadArgs(in, out, s)
-}
-
-func autoConvert_config_NetworkOverheadArgs_To_v1beta3_NetworkOverheadArgs(in *config.NetworkOverheadArgs, out *NetworkOverheadArgs, s conversion.Scope) error {
-	out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces))
-	if err := v1.Convert_string_To_Pointer_string(&in.WeightsName, &out.WeightsName, s); err != nil {
-		return err
-	}
-	if err := v1.Convert_string_To_Pointer_string(&in.NetworkTopologyName, &out.NetworkTopologyName, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_config_NetworkOverheadArgs_To_v1beta3_NetworkOverheadArgs is an autogenerated conversion function.
-func Convert_config_NetworkOverheadArgs_To_v1beta3_NetworkOverheadArgs(in *config.NetworkOverheadArgs, out *NetworkOverheadArgs, s conversion.Scope) error {
-	return autoConvert_config_NetworkOverheadArgs_To_v1beta3_NetworkOverheadArgs(in, out, s)
-}
-
-func autoConvert_v1beta3_NodeResourceTopologyMatchArgs_To_config_NodeResourceTopologyMatchArgs(in *NodeResourceTopologyMatchArgs, out *config.NodeResourceTopologyMatchArgs, s conversion.Scope) error {
-	// WARNING: in.ScoringStrategy requires manual conversion: inconvertible types (*sigs.k8s.io/scheduler-plugins/apis/config/v1beta3.ScoringStrategy vs sigs.k8s.io/scheduler-plugins/apis/config.ScoringStrategy)
-	if err := v1.Convert_Pointer_int64_To_int64(&in.CacheResyncPeriodSeconds, &out.CacheResyncPeriodSeconds, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func autoConvert_config_NodeResourceTopologyMatchArgs_To_v1beta3_NodeResourceTopologyMatchArgs(in *config.NodeResourceTopologyMatchArgs, out *NodeResourceTopologyMatchArgs, s conversion.Scope) error {
-	// WARNING: in.ScoringStrategy requires manual conversion: inconvertible types (sigs.k8s.io/scheduler-plugins/apis/config.ScoringStrategy vs *sigs.k8s.io/scheduler-plugins/apis/config/v1beta3.ScoringStrategy)
-	if err := v1.Convert_int64_To_Pointer_int64(&in.CacheResyncPeriodSeconds, &out.CacheResyncPeriodSeconds, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-func autoConvert_v1beta3_NodeResourcesAllocatableArgs_To_config_NodeResourcesAllocatableArgs(in *NodeResourcesAllocatableArgs, out *config.NodeResourcesAllocatableArgs, s conversion.Scope) error {
-	out.Resources = *(*[]apisconfig.ResourceSpec)(unsafe.Pointer(&in.Resources))
-	out.Mode = config.ModeType(in.Mode)
-	return nil
-}
-
-// Convert_v1beta3_NodeResourcesAllocatableArgs_To_config_NodeResourcesAllocatableArgs is an autogenerated conversion function.
-func Convert_v1beta3_NodeResourcesAllocatableArgs_To_config_NodeResourcesAllocatableArgs(in *NodeResourcesAllocatableArgs, out *config.NodeResourcesAllocatableArgs, s conversion.Scope) error {
-	return autoConvert_v1beta3_NodeResourcesAllocatableArgs_To_config_NodeResourcesAllocatableArgs(in, out, s)
-}
-
-func autoConvert_config_NodeResourcesAllocatableArgs_To_v1beta3_NodeResourcesAllocatableArgs(in *config.NodeResourcesAllocatableArgs, out *NodeResourcesAllocatableArgs, s conversion.Scope) error {
-	out.Resources = *(*[]configv1beta3.ResourceSpec)(unsafe.Pointer(&in.Resources))
-	out.Mode = ModeType(in.Mode)
-	return nil
-}
-
-// Convert_config_NodeResourcesAllocatableArgs_To_v1beta3_NodeResourcesAllocatableArgs is an autogenerated conversion function.
-func Convert_config_NodeResourcesAllocatableArgs_To_v1beta3_NodeResourcesAllocatableArgs(in *config.NodeResourcesAllocatableArgs, out *NodeResourcesAllocatableArgs, s conversion.Scope) error {
-	return autoConvert_config_NodeResourcesAllocatableArgs_To_v1beta3_NodeResourcesAllocatableArgs(in, out, s)
-}
-
-func autoConvert_v1beta3_PreemptionTolerationArgs_To_config_PreemptionTolerationArgs(in *PreemptionTolerationArgs, out *config.PreemptionTolerationArgs, s conversion.Scope) error {
-	if err := v1.Convert_Pointer_int32_To_int32(&in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage, s); err != nil {
-		return err
-	}
-	if err := v1.Convert_Pointer_int32_To_int32(&in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1beta3_PreemptionTolerationArgs_To_config_PreemptionTolerationArgs is an autogenerated conversion function.
-func Convert_v1beta3_PreemptionTolerationArgs_To_config_PreemptionTolerationArgs(in *PreemptionTolerationArgs, out *config.PreemptionTolerationArgs, s conversion.Scope) error {
-	return autoConvert_v1beta3_PreemptionTolerationArgs_To_config_PreemptionTolerationArgs(in, out, s)
-}
-
-func autoConvert_config_PreemptionTolerationArgs_To_v1beta3_PreemptionTolerationArgs(in *config.PreemptionTolerationArgs, out *PreemptionTolerationArgs, s conversion.Scope) error {
-	if err := v1.Convert_int32_To_Pointer_int32(&in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage, s); err != nil {
-		return err
-	}
-	if err := v1.Convert_int32_To_Pointer_int32(&in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_config_PreemptionTolerationArgs_To_v1beta3_PreemptionTolerationArgs is an autogenerated conversion function.
-func Convert_config_PreemptionTolerationArgs_To_v1beta3_PreemptionTolerationArgs(in *config.PreemptionTolerationArgs, out *PreemptionTolerationArgs, s conversion.Scope) error {
-	return autoConvert_config_PreemptionTolerationArgs_To_v1beta3_PreemptionTolerationArgs(in, out, s)
-}
-
-func autoConvert_v1beta3_ScoringStrategy_To_config_ScoringStrategy(in *ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error {
-	out.Type = config.ScoringStrategyType(in.Type)
-	out.Resources = *(*[]apisconfig.ResourceSpec)(unsafe.Pointer(&in.Resources))
-	return nil
-}
-
-// Convert_v1beta3_ScoringStrategy_To_config_ScoringStrategy is an autogenerated conversion function.
-func Convert_v1beta3_ScoringStrategy_To_config_ScoringStrategy(in *ScoringStrategy, out *config.ScoringStrategy, s conversion.Scope) error {
-	return autoConvert_v1beta3_ScoringStrategy_To_config_ScoringStrategy(in, out, s)
-}
-
-func autoConvert_config_ScoringStrategy_To_v1beta3_ScoringStrategy(in *config.ScoringStrategy, out *ScoringStrategy, s conversion.Scope) error {
-	out.Type = ScoringStrategyType(in.Type)
-	out.Resources = *(*[]configv1beta3.ResourceSpec)(unsafe.Pointer(&in.Resources))
-	return nil
-}
-
-// Convert_config_ScoringStrategy_To_v1beta3_ScoringStrategy is an autogenerated conversion function.
-func Convert_config_ScoringStrategy_To_v1beta3_ScoringStrategy(in *config.ScoringStrategy, out *ScoringStrategy, s conversion.Scope) error {
-	return autoConvert_config_ScoringStrategy_To_v1beta3_ScoringStrategy(in, out, s)
-}
-
-func autoConvert_v1beta3_TargetLoadPackingArgs_To_config_TargetLoadPackingArgs(in *TargetLoadPackingArgs, out *config.TargetLoadPackingArgs, s conversion.Scope) error {
-	if err := Convert_v1beta3_TrimaranSpec_To_config_TrimaranSpec(&in.TrimaranSpec, &out.TrimaranSpec, s); err != nil {
-		return err
-	}
-	out.DefaultRequests = *(*corev1.ResourceList)(unsafe.Pointer(&in.DefaultRequests))
-	if err := v1.Convert_Pointer_string_To_string(&in.DefaultRequestsMultiplier, &out.DefaultRequestsMultiplier, s); err != nil {
-		return err
-	}
-	if err := v1.Convert_Pointer_int64_To_int64(&in.TargetUtilization, &out.TargetUtilization, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1beta3_TargetLoadPackingArgs_To_config_TargetLoadPackingArgs is an autogenerated conversion function.
-func Convert_v1beta3_TargetLoadPackingArgs_To_config_TargetLoadPackingArgs(in *TargetLoadPackingArgs, out *config.TargetLoadPackingArgs, s conversion.Scope) error {
-	return autoConvert_v1beta3_TargetLoadPackingArgs_To_config_TargetLoadPackingArgs(in, out, s)
-}
-
-func autoConvert_config_TargetLoadPackingArgs_To_v1beta3_TargetLoadPackingArgs(in *config.TargetLoadPackingArgs, out *TargetLoadPackingArgs, s conversion.Scope) error {
-	if err := Convert_config_TrimaranSpec_To_v1beta3_TrimaranSpec(&in.TrimaranSpec, &out.TrimaranSpec, s); err != nil {
-		return err
-	}
-	out.DefaultRequests = *(*corev1.ResourceList)(unsafe.Pointer(&in.DefaultRequests))
-	if err := v1.Convert_string_To_Pointer_string(&in.DefaultRequestsMultiplier, &out.DefaultRequestsMultiplier, s); err != nil {
-		return err
-	}
-	if err := v1.Convert_int64_To_Pointer_int64(&in.TargetUtilization, &out.TargetUtilization, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_config_TargetLoadPackingArgs_To_v1beta3_TargetLoadPackingArgs is an autogenerated conversion function.
-func Convert_config_TargetLoadPackingArgs_To_v1beta3_TargetLoadPackingArgs(in *config.TargetLoadPackingArgs, out *TargetLoadPackingArgs, s conversion.Scope) error {
-	return autoConvert_config_TargetLoadPackingArgs_To_v1beta3_TargetLoadPackingArgs(in, out, s)
-}
-
-func autoConvert_v1beta3_TopologicalSortArgs_To_config_TopologicalSortArgs(in *TopologicalSortArgs, out *config.TopologicalSortArgs, s conversion.Scope) error {
-	out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces))
-	return nil
-}
-
-// Convert_v1beta3_TopologicalSortArgs_To_config_TopologicalSortArgs is an autogenerated conversion function.
-func Convert_v1beta3_TopologicalSortArgs_To_config_TopologicalSortArgs(in *TopologicalSortArgs, out *config.TopologicalSortArgs, s conversion.Scope) error {
-	return autoConvert_v1beta3_TopologicalSortArgs_To_config_TopologicalSortArgs(in, out, s)
-}
-
-func autoConvert_config_TopologicalSortArgs_To_v1beta3_TopologicalSortArgs(in *config.TopologicalSortArgs, out *TopologicalSortArgs, s conversion.Scope) error {
-	out.Namespaces = *(*[]string)(unsafe.Pointer(&in.Namespaces))
-	return nil
-}
-
-// Convert_config_TopologicalSortArgs_To_v1beta3_TopologicalSortArgs is an autogenerated conversion function.
-func Convert_config_TopologicalSortArgs_To_v1beta3_TopologicalSortArgs(in *config.TopologicalSortArgs, out *TopologicalSortArgs, s conversion.Scope) error {
-	return autoConvert_config_TopologicalSortArgs_To_v1beta3_TopologicalSortArgs(in, out, s)
-}
-
-func autoConvert_v1beta3_TrimaranSpec_To_config_TrimaranSpec(in *TrimaranSpec, out *config.TrimaranSpec, s conversion.Scope) error {
-	if err := Convert_v1beta3_MetricProviderSpec_To_config_MetricProviderSpec(&in.MetricProvider, &out.MetricProvider, s); err != nil {
-		return err
-	}
-	if err := v1.Convert_Pointer_string_To_string(&in.WatcherAddress, &out.WatcherAddress, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_v1beta3_TrimaranSpec_To_config_TrimaranSpec is an autogenerated conversion function.
-func Convert_v1beta3_TrimaranSpec_To_config_TrimaranSpec(in *TrimaranSpec, out *config.TrimaranSpec, s conversion.Scope) error {
-	return autoConvert_v1beta3_TrimaranSpec_To_config_TrimaranSpec(in, out, s)
-}
-
-func autoConvert_config_TrimaranSpec_To_v1beta3_TrimaranSpec(in *config.TrimaranSpec, out *TrimaranSpec, s conversion.Scope) error {
-	if err := Convert_config_MetricProviderSpec_To_v1beta3_MetricProviderSpec(&in.MetricProvider, &out.MetricProvider, s); err != nil {
-		return err
-	}
-	if err := v1.Convert_string_To_Pointer_string(&in.WatcherAddress, &out.WatcherAddress, s); err != nil {
-		return err
-	}
-	return nil
-}
-
-// Convert_config_TrimaranSpec_To_v1beta3_TrimaranSpec is an autogenerated conversion function.
-func Convert_config_TrimaranSpec_To_v1beta3_TrimaranSpec(in *config.TrimaranSpec, out *TrimaranSpec, s conversion.Scope) error {
-	return autoConvert_config_TrimaranSpec_To_v1beta3_TrimaranSpec(in, out, s)
-}
diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/zz_generated.deepcopy.go
deleted file mode 100644
index 1e194a4d3..000000000
--- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,381 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package v1beta3
-
-import (
-	v1 "k8s.io/api/core/v1"
-	runtime "k8s.io/apimachinery/pkg/runtime"
-	configv1beta3 "k8s.io/kube-scheduler/config/v1beta3"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CoschedulingArgs) DeepCopyInto(out *CoschedulingArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	if in.PermitWaitingTimeSeconds != nil {
-		in, out := &in.PermitWaitingTimeSeconds, &out.PermitWaitingTimeSeconds
-		*out = new(int64)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoschedulingArgs.
-func (in *CoschedulingArgs) DeepCopy() *CoschedulingArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(CoschedulingArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *CoschedulingArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LoadVariationRiskBalancingArgs) DeepCopyInto(out *LoadVariationRiskBalancingArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.TrimaranSpec.DeepCopyInto(&out.TrimaranSpec)
-	if in.SafeVarianceMargin != nil {
-		in, out := &in.SafeVarianceMargin, &out.SafeVarianceMargin
-		*out = new(float64)
-		**out = **in
-	}
-	if in.SafeVarianceSensitivity != nil {
-		in, out := &in.SafeVarianceSensitivity, &out.SafeVarianceSensitivity
-		*out = new(float64)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadVariationRiskBalancingArgs.
-func (in *LoadVariationRiskBalancingArgs) DeepCopy() *LoadVariationRiskBalancingArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(LoadVariationRiskBalancingArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *LoadVariationRiskBalancingArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *MetricProviderSpec) DeepCopyInto(out *MetricProviderSpec) {
-	*out = *in
-	if in.Address != nil {
-		in, out := &in.Address, &out.Address
-		*out = new(string)
-		**out = **in
-	}
-	if in.Token != nil {
-		in, out := &in.Token, &out.Token
-		*out = new(string)
-		**out = **in
-	}
-	if in.InsecureSkipVerify != nil {
-		in, out := &in.InsecureSkipVerify, &out.InsecureSkipVerify
-		*out = new(bool)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricProviderSpec.
-func (in *MetricProviderSpec) DeepCopy() *MetricProviderSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(MetricProviderSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkOverheadArgs) DeepCopyInto(out *NetworkOverheadArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	if in.Namespaces != nil {
-		in, out := &in.Namespaces, &out.Namespaces
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	if in.WeightsName != nil {
-		in, out := &in.WeightsName, &out.WeightsName
-		*out = new(string)
-		**out = **in
-	}
-	if in.NetworkTopologyName != nil {
-		in, out := &in.NetworkTopologyName, &out.NetworkTopologyName
-		*out = new(string)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkOverheadArgs.
-func (in *NetworkOverheadArgs) DeepCopy() *NetworkOverheadArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(NetworkOverheadArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *NetworkOverheadArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeResourceTopologyMatchArgs) DeepCopyInto(out *NodeResourceTopologyMatchArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	if in.ScoringStrategy != nil {
-		in, out := &in.ScoringStrategy, &out.ScoringStrategy
-		*out = new(ScoringStrategy)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.CacheResyncPeriodSeconds != nil {
-		in, out := &in.CacheResyncPeriodSeconds, &out.CacheResyncPeriodSeconds
-		*out = new(int64)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceTopologyMatchArgs.
-func (in *NodeResourceTopologyMatchArgs) DeepCopy() *NodeResourceTopologyMatchArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeResourceTopologyMatchArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *NodeResourceTopologyMatchArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeResourcesAllocatableArgs) DeepCopyInto(out *NodeResourcesAllocatableArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	if in.Resources != nil {
-		in, out := &in.Resources, &out.Resources
-		*out = make([]configv1beta3.ResourceSpec, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesAllocatableArgs.
-func (in *NodeResourcesAllocatableArgs) DeepCopy() *NodeResourcesAllocatableArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeResourcesAllocatableArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *NodeResourcesAllocatableArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PreemptionTolerationArgs) DeepCopyInto(out *PreemptionTolerationArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	if in.MinCandidateNodesPercentage != nil {
-		in, out := &in.MinCandidateNodesPercentage, &out.MinCandidateNodesPercentage
-		*out = new(int32)
-		**out = **in
-	}
-	if in.MinCandidateNodesAbsolute != nil {
-		in, out := &in.MinCandidateNodesAbsolute, &out.MinCandidateNodesAbsolute
-		*out = new(int32)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreemptionTolerationArgs.
-func (in *PreemptionTolerationArgs) DeepCopy() *PreemptionTolerationArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(PreemptionTolerationArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PreemptionTolerationArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ScoringStrategy) DeepCopyInto(out *ScoringStrategy) {
-	*out = *in
-	if in.Resources != nil {
-		in, out := &in.Resources, &out.Resources
-		*out = make([]configv1beta3.ResourceSpec, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScoringStrategy.
-func (in *ScoringStrategy) DeepCopy() *ScoringStrategy {
-	if in == nil {
-		return nil
-	}
-	out := new(ScoringStrategy)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TargetLoadPackingArgs) DeepCopyInto(out *TargetLoadPackingArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.TrimaranSpec.DeepCopyInto(&out.TrimaranSpec)
-	if in.DefaultRequests != nil {
-		in, out := &in.DefaultRequests, &out.DefaultRequests
-		*out = make(v1.ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	if in.DefaultRequestsMultiplier != nil {
-		in, out := &in.DefaultRequestsMultiplier, &out.DefaultRequestsMultiplier
-		*out = new(string)
-		**out = **in
-	}
-	if in.TargetUtilization != nil {
-		in, out := &in.TargetUtilization, &out.TargetUtilization
-		*out = new(int64)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetLoadPackingArgs.
-func (in *TargetLoadPackingArgs) DeepCopy() *TargetLoadPackingArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(TargetLoadPackingArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *TargetLoadPackingArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TopologicalSortArgs) DeepCopyInto(out *TopologicalSortArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	if in.Namespaces != nil {
-		in, out := &in.Namespaces, &out.Namespaces
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologicalSortArgs.
-func (in *TopologicalSortArgs) DeepCopy() *TopologicalSortArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(TopologicalSortArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *TopologicalSortArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TrimaranSpec) DeepCopyInto(out *TrimaranSpec) {
-	*out = *in
-	in.MetricProvider.DeepCopyInto(&out.MetricProvider)
-	if in.WatcherAddress != nil {
-		in, out := &in.WatcherAddress, &out.WatcherAddress
-		*out = new(string)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrimaranSpec.
-func (in *TrimaranSpec) DeepCopy() *TrimaranSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(TrimaranSpec)
-	in.DeepCopyInto(out)
-	return out
-}
diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/zz_generated.defaults.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/zz_generated.defaults.go
deleted file mode 100644
index b4ea5ec32..000000000
--- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/v1beta3/zz_generated.defaults.go
+++ /dev/null
@@ -1,83 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by defaulter-gen. DO NOT EDIT.
-
-package v1beta3
-
-import (
-	runtime "k8s.io/apimachinery/pkg/runtime"
-)
-
-// RegisterDefaults adds defaulters functions to the given scheme.
-// Public to allow building arbitrary schemes.
-// All generated defaulters are covering - they call all nested defaulters.
-func RegisterDefaults(scheme *runtime.Scheme) error {
-	scheme.AddTypeDefaultingFunc(&CoschedulingArgs{}, func(obj interface{}) { SetObjectDefaults_CoschedulingArgs(obj.(*CoschedulingArgs)) })
-	scheme.AddTypeDefaultingFunc(&LoadVariationRiskBalancingArgs{}, func(obj interface{}) {
-		SetObjectDefaults_LoadVariationRiskBalancingArgs(obj.(*LoadVariationRiskBalancingArgs))
-	})
-	scheme.AddTypeDefaultingFunc(&NodeResourceTopologyMatchArgs{}, func(obj interface{}) {
-		SetObjectDefaults_NodeResourceTopologyMatchArgs(obj.(*NodeResourceTopologyMatchArgs))
-	})
-	scheme.AddTypeDefaultingFunc(&NodeResourcesAllocatableArgs{}, func(obj interface{}) {
-		SetObjectDefaults_NodeResourcesAllocatableArgs(obj.(*NodeResourcesAllocatableArgs))
-	})
-	scheme.AddTypeDefaultingFunc(&PreemptionTolerationArgs{}, func(obj interface{}) { SetObjectDefaults_PreemptionTolerationArgs(obj.(*PreemptionTolerationArgs)) })
-	scheme.AddTypeDefaultingFunc(&TargetLoadPackingArgs{}, func(obj interface{}) { SetObjectDefaults_TargetLoadPackingArgs(obj.(*TargetLoadPackingArgs)) })
-	scheme.AddTypeDefaultingFunc(&TopologicalSortArgs{}, func(obj interface{}) {
-		SetObjectDefaults_TopologicalSortArgs(obj.(*TopologicalSortArgs))
-	})
-	scheme.AddTypeDefaultingFunc(&NetworkOverheadArgs{}, func(obj interface{}) {
-		SetObjectDefaults_NetworkOverheadArgs(obj.(*NetworkOverheadArgs))
-	})
-	return nil
-}
-
-func SetObjectDefaults_CoschedulingArgs(in *CoschedulingArgs) {
-	SetDefaults_CoschedulingArgs(in)
-}
-
-func SetObjectDefaults_LoadVariationRiskBalancingArgs(in *LoadVariationRiskBalancingArgs) {
-	SetDefaults_LoadVariationRiskBalancingArgs(in)
-}
-
-func SetObjectDefaults_NodeResourceTopologyMatchArgs(in *NodeResourceTopologyMatchArgs) {
-	SetDefaults_NodeResourceTopologyMatchArgs(in)
-}
-
-func SetObjectDefaults_NodeResourcesAllocatableArgs(in *NodeResourcesAllocatableArgs) {
-	SetDefaults_NodeResourcesAllocatableArgs(in)
-}
-
-func SetObjectDefaults_PreemptionTolerationArgs(in *PreemptionTolerationArgs) {
-	SetDefaults_PreemptionTolerationArgs(in)
-}
-
-func SetObjectDefaults_TargetLoadPackingArgs(in *TargetLoadPackingArgs) {
-	SetDefaults_TargetLoadPackingArgs(in)
-}
-
-func SetObjectDefaults_TopologicalSortArgs(in *TopologicalSortArgs) {
-	SetDefaults_TopologicalSortArgs(in)
-}
-
-func SetObjectDefaults_NetworkOverheadArgs(in *NetworkOverheadArgs) {
-	SetDefaults_NetworkOverheadArgs(in)
-}
diff --git a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/zz_generated.deepcopy.go b/vendor/sigs.k8s.io/scheduler-plugins/apis/config/zz_generated.deepcopy.go
deleted file mode 100644
index bbe1c1689..000000000
--- a/vendor/sigs.k8s.io/scheduler-plugins/apis/config/zz_generated.deepcopy.go
+++ /dev/null
@@ -1,307 +0,0 @@
-//go:build !ignore_autogenerated
-// +build !ignore_autogenerated
-
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by deepcopy-gen. DO NOT EDIT.
-
-package config
-
-import (
-	v1 "k8s.io/api/core/v1"
-	runtime "k8s.io/apimachinery/pkg/runtime"
-	apisconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
-)
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *CoschedulingArgs) DeepCopyInto(out *CoschedulingArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoschedulingArgs.
-func (in *CoschedulingArgs) DeepCopy() *CoschedulingArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(CoschedulingArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *CoschedulingArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *LoadVariationRiskBalancingArgs) DeepCopyInto(out *LoadVariationRiskBalancingArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	out.TrimaranSpec = in.TrimaranSpec
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadVariationRiskBalancingArgs.
-func (in *LoadVariationRiskBalancingArgs) DeepCopy() *LoadVariationRiskBalancingArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(LoadVariationRiskBalancingArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *LoadVariationRiskBalancingArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *MetricProviderSpec) DeepCopyInto(out *MetricProviderSpec) {
-	*out = *in
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricProviderSpec.
-func (in *MetricProviderSpec) DeepCopy() *MetricProviderSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(MetricProviderSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NetworkOverheadArgs) DeepCopyInto(out *NetworkOverheadArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	if in.Namespaces != nil {
-		in, out := &in.Namespaces, &out.Namespaces
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkOverheadArgs.
-func (in *NetworkOverheadArgs) DeepCopy() *NetworkOverheadArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(NetworkOverheadArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *NetworkOverheadArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeResourceTopologyMatchArgs) DeepCopyInto(out *NodeResourceTopologyMatchArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ScoringStrategy.DeepCopyInto(&out.ScoringStrategy)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourceTopologyMatchArgs.
-func (in *NodeResourceTopologyMatchArgs) DeepCopy() *NodeResourceTopologyMatchArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeResourceTopologyMatchArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *NodeResourceTopologyMatchArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *NodeResourcesAllocatableArgs) DeepCopyInto(out *NodeResourcesAllocatableArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	if in.Resources != nil {
-		in, out := &in.Resources, &out.Resources
-		*out = make([]apisconfig.ResourceSpec, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResourcesAllocatableArgs.
-func (in *NodeResourcesAllocatableArgs) DeepCopy() *NodeResourcesAllocatableArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(NodeResourcesAllocatableArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *NodeResourcesAllocatableArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PreemptionTolerationArgs) DeepCopyInto(out *PreemptionTolerationArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreemptionTolerationArgs.
-func (in *PreemptionTolerationArgs) DeepCopy() *PreemptionTolerationArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(PreemptionTolerationArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *PreemptionTolerationArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ScoringStrategy) DeepCopyInto(out *ScoringStrategy) {
-	*out = *in
-	if in.Resources != nil {
-		in, out := &in.Resources, &out.Resources
-		*out = make([]apisconfig.ResourceSpec, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScoringStrategy.
-func (in *ScoringStrategy) DeepCopy() *ScoringStrategy {
-	if in == nil {
-		return nil
-	}
-	out := new(ScoringStrategy)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TargetLoadPackingArgs) DeepCopyInto(out *TargetLoadPackingArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	out.TrimaranSpec = in.TrimaranSpec
-	if in.DefaultRequests != nil {
-		in, out := &in.DefaultRequests, &out.DefaultRequests
-		*out = make(v1.ResourceList, len(*in))
-		for key, val := range *in {
-			(*out)[key] = val.DeepCopy()
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TargetLoadPackingArgs.
-func (in *TargetLoadPackingArgs) DeepCopy() *TargetLoadPackingArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(TargetLoadPackingArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *TargetLoadPackingArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TrimaranSpec) DeepCopyInto(out *TrimaranSpec) {
-	*out = *in
-	out.MetricProvider = in.MetricProvider
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TrimaranSpec.
-func (in *TrimaranSpec) DeepCopy() *TrimaranSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(TrimaranSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *TopologicalSortArgs) DeepCopyInto(out *TopologicalSortArgs) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	if in.Namespaces != nil {
-		in, out := &in.Namespaces, &out.Namespaces
-		*out = make([]string, len(*in))
-		copy(*out, *in)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologicalSortArgs.
-func (in *TopologicalSortArgs) DeepCopy() *TopologicalSortArgs {
-	if in == nil {
-		return nil
-	}
-	out := new(TopologicalSortArgs)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *TopologicalSortArgs) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}