diff --git a/api/openapi-spec/swagger.json b/api/openapi-spec/swagger.json index db20fc5c5d78..2428f4a9cecc 100644 --- a/api/openapi-spec/swagger.json +++ b/api/openapi-spec/swagger.json @@ -14960,6 +14960,14 @@ "description": "MigrateOptions may be provided on migrate request.", "type": "object", "properties": { + "addedNodeSelector": { + "description": "AddedNodeSelector is an additional selector that can be used to complement a NodeSelector or NodeAffinity as set on the VM to restrict the set of allowed target nodes for a migration. In case of key collisions, values set on the VM objects are going to be preserved to ensure that addedNodeSelector can only restrict but not bypass constraints already set on the VM object.", + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + }, "apiVersion": { "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", "type": "string" @@ -16728,6 +16736,14 @@ "v1.VirtualMachineInstanceMigrationSpec": { "type": "object", "properties": { + "addedNodeSelector": { + "description": "AddedNodeSelector is an additional selector that can be used to complement a NodeSelector or NodeAffinity as set on the VM to restrict the set of allowed target nodes for a migration. In case of key collisions, values set on the VM objects are going to be preserved to ensure that addedNodeSelector can only restrict but not bypass constraints already set on the VM object.", + "type": "object", + "additionalProperties": { + "type": "string", + "default": "" + } + }, "vmiName": { "description": "The name of the VMI to perform the migration on. 
VMI must exist in the migration objects namespace", "type": "string" diff --git a/pkg/virt-api/rest/subresource.go b/pkg/virt-api/rest/subresource.go index 973b267683b6..6a4105c62cdc 100644 --- a/pkg/virt-api/rest/subresource.go +++ b/pkg/virt-api/rest/subresource.go @@ -301,7 +301,8 @@ func (app *SubresourceAPIApp) MigrateVMRequestHandler(request *restful.Request, GenerateName: "kubevirt-migrate-vm-", }, Spec: v1.VirtualMachineInstanceMigrationSpec{ - VMIName: name, + VMIName: name, + AddedNodeSelector: bodyStruct.AddedNodeSelector, }, }, k8smetav1.CreateOptions{DryRun: bodyStruct.DryRun}) if err != nil { diff --git a/pkg/virt-controller/watch/migration/migration.go b/pkg/virt-controller/watch/migration/migration.go index dc6f75cf7dfe..e7a5e72dbdb6 100644 --- a/pkg/virt-controller/watch/migration/migration.go +++ b/pkg/virt-controller/watch/migration/migration.go @@ -23,6 +23,7 @@ import ( "context" "errors" "fmt" + "maps" "sort" "strconv" "strings" @@ -740,6 +741,11 @@ func (c *Controller) createTargetPod(migration *virtv1.VirtualMachineInstanceMig templatePod.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(templatePod.Spec.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, antiAffinityTerm) } + nodeSelector := make(map[string]string) + maps.Copy(nodeSelector, migration.Spec.AddedNodeSelector) + maps.Copy(nodeSelector, templatePod.Spec.NodeSelector) + templatePod.Spec.NodeSelector = nodeSelector + templatePod.ObjectMeta.Labels[virtv1.MigrationJobLabel] = string(migration.UID) templatePod.ObjectMeta.Annotations[virtv1.MigrationJobNameAnnotation] = migration.Name diff --git a/pkg/virt-controller/watch/migration/migration_test.go b/pkg/virt-controller/watch/migration/migration_test.go index 725114a2dcb1..4fbb3f1bbdb6 100644 --- a/pkg/virt-controller/watch/migration/migration_test.go +++ b/pkg/virt-controller/watch/migration/migration_test.go @@ -21,6 +21,7 @@ package migration import ( "context" + "errors" "fmt" "strings" "time" @@ -99,6 +100,19 @@ var _ = Describe("Migration watcher", func() { } } + getTargetPod := func(namespace string, uid types.UID, migrationUid types.UID) (*k8sv1.Pod, error) { + pods, err := kubeClient.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{ + LabelSelector: fmt.Sprintf("%s=%s,%s=%s", virtv1.MigrationJobLabel, string(migrationUid), virtv1.CreatedByLabel, string(uid)), + }) + if err != nil { + return nil, err + } + if len(pods.Items) == 1 { + return &pods.Items[0], nil + } + return nil, errors.New("Failed identifying target pod") + } + expectPodDoesNotExist := func(namespace, uid, migrationUid string) { pods, err := kubeClient.CoreV1().Pods(namespace).List(context.Background(), metav1.ListOptions{ LabelSelector: fmt.Sprintf("%s=%s,%s=%s", virtv1.MigrationJobLabel, migrationUid, virtv1.CreatedByLabel, uid), @@ -884,6 +898,86 @@ var _ = Describe("Migration watcher", func() { expectPodCreation(vmi.Namespace, vmi.UID, migration.UID, 2, 1, 1) }) + It("should create target pod merging addedNodeSelector and preserving the labels in the existing NodeSelector and NodeAffinity", func() { + vmi := newVirtualMachine("testvmi", virtv1.Running) + + vmiNodeSelector := map[string]string{ + "topology.kubernetes.io/region": "us-east-1", + "vmiLabel1": "vmiValue1", + "vmiLabel2": "vmiValue2", + } + nodeAffinityRule := &k8sv1.NodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &k8sv1.NodeSelector{ + NodeSelectorTerms: []k8sv1.NodeSelectorTerm{ + { + MatchExpressions: 
[]k8sv1.NodeSelectorRequirement{ + { + Key: k8sv1.LabelHostname, + Operator: k8sv1.NodeSelectorOpIn, + Values: []string{"somenode"}, + }, + }, + }, + { + MatchExpressions: []k8sv1.NodeSelectorRequirement{ + { + Key: k8sv1.LabelHostname, + Operator: k8sv1.NodeSelectorOpIn, + Values: []string{"anothernode-ORed"}, + }, + }, + }, + }, + }, + } + + vmi.Spec.NodeSelector = vmiNodeSelector + vmi.Spec.Affinity = &k8sv1.Affinity{ + NodeAffinity: nodeAffinityRule, + } + + addedNodeSelector := map[string]string{ + "topology.kubernetes.io/region": "us-west-1", + "additionaLabel1": "additionalValue1", + "additionaLabel2": "additionalValue2", + } + + Expect(vmiNodeSelector).To(HaveKey("topology.kubernetes.io/region")) + Expect(addedNodeSelector).To(HaveKey("topology.kubernetes.io/region")) + + migration := newMigrationWithAddedNodeSelector("testmigration", vmi.Name, virtv1.MigrationPending, addedNodeSelector) + + addMigration(migration) + addVirtualMachineInstance(vmi) + addPod(newSourcePodForVirtualMachine(vmi)) + + controller.Execute() + + testutils.ExpectEvent(recorder, virtcontroller.SuccessfulCreatePodReason) + expectPodCreation(vmi.Namespace, vmi.UID, migration.UID, 1, 0, 2) + targetPod, err := getTargetPod(vmi.Namespace, vmi.UID, migration.UID) + Expect(err).ToNot(HaveOccurred()) + Expect(targetPod).ToNot(BeNil()) + Expect(targetPod.Spec.Affinity).ToNot(BeNil()) + Expect(targetPod.Spec.Affinity.PodAntiAffinity).ToNot(BeNil()) + Expect(targetPod.Spec.Affinity.NodeAffinity).ToNot(BeNil()) + Expect(targetPod.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution).ToNot(BeNil()) + + By("Expecting migration target pod to contain all the NodeSelector labels defined on the VM") + for k, v := range vmiNodeSelector { + Expect(targetPod.Spec.NodeSelector).To(HaveKeyWithValue(k, v)) + } + for k, v := range addedNodeSelector { + vmiVal, ok := vmiNodeSelector[k] + if ok { + Expect(targetPod.Spec.NodeSelector).To(HaveKeyWithValue(k, vmiVal)) + } else { + Expect(targetPod.Spec.NodeSelector).To(HaveKeyWithValue(k, v)) + } + } + + }) + It("should place migration in scheduling state if pod exists", func() { vmi := newVirtualMachine("testvmi", virtv1.Running) migration := newMigration("testmigration", vmi.Name, virtv1.MigrationPending) @@ -2276,6 +2370,12 @@ func newMigration(name string, vmiName string, phase virtv1.VirtualMachineInstan return migration } +func newMigrationWithAddedNodeSelector(name string, vmiName string, phase virtv1.VirtualMachineInstanceMigrationPhase, addedNodeSelector map[string]string) *virtv1.VirtualMachineInstanceMigration { + migration := newMigration(name, vmiName, phase) + migration.Spec.AddedNodeSelector = addedNodeSelector + return migration +} + func newVirtualMachine(name string, phase virtv1.VirtualMachineInstancePhase) *virtv1.VirtualMachineInstance { vmi := api.NewMinimalVMI(name) vmi.UID = types.UID(name) diff --git a/pkg/virt-operator/resource/generate/components/validations_generated.go b/pkg/virt-operator/resource/generate/components/validations_generated.go index bc9953a0cf0b..f5cfa26cf9b2 100644 --- a/pkg/virt-operator/resource/generate/components/validations_generated.go +++ b/pkg/virt-operator/resource/generate/components/validations_generated.go @@ -13908,6 +13908,17 @@ var CRDsValidation map[string]string = map[string]string{ type: object spec: properties: + addedNodeSelector: + additionalProperties: + type: string + description: |- + AddedNodeSelector is an additional selector that can be used to + complement a NodeSelector or NodeAffinity as set on 
the VM + to restrict the set of allowed target nodes for a migration. + In case of key collisions, values set on the VM objects + are going to be preserved to ensure that addedNodeSelector + can only restrict but not bypass constraints already set on the VM object. + type: object vmiName: description: The name of the VMI to perform the migration on. VMI must exist in the migration objects namespace diff --git a/pkg/virtctl/vm/migrate.go b/pkg/virtctl/vm/migrate.go index b72ddec735bd..ec476aaab915 100644 --- a/pkg/virtctl/vm/migrate.go +++ b/pkg/virtctl/vm/migrate.go @@ -33,6 +33,8 @@ import ( const COMMAND_MIGRATE = "migrate" +var nodeName string + func NewMigrateCommand() *cobra.Command { c := Command{command: COMMAND_MIGRATE} cmd := &cobra.Command{ @@ -42,6 +44,8 @@ func NewMigrateCommand() *cobra.Command { Args: cobra.ExactArgs(1), RunE: c.migrateRun, } + + cmd.Flags().StringVar(&nodeName, "nodeName", nodeName, "--nodeName=: Flag to migrate this VM to a specific node (according to label \"kubernetes.io/hostname\"). If omitted (recommended!) the scheduler becomes responsible for finding the best Node to migrate the VM to.") cmd.Flags().BoolVar(&dryRun, dryRunArg, false, dryRunCommandUsage) cmd.SetUsageTemplate(templates.UsageTemplate()) return cmd @@ -57,7 +61,13 @@ func (o *Command) migrateRun(cmd *cobra.Command, args []string) error { dryRunOption := setDryRunOption(dryRun) - err = virtClient.VirtualMachine(namespace).Migrate(context.Background(), vmiName, &v1.MigrateOptions{DryRun: dryRunOption}) + options := &v1.MigrateOptions{DryRun: dryRunOption} + + if nodeName != "" { + options.AddedNodeSelector = map[string]string{"kubernetes.io/hostname": nodeName} + } + + err = virtClient.VirtualMachine(namespace).Migrate(context.Background(), vmiName, options) if err != nil { return fmt.Errorf("Error migrating VirtualMachine %v", err) } diff --git a/pkg/virtctl/vm/migrate_test.go b/pkg/virtctl/vm/migrate_test.go index 2f9a75af1da9..ab7b9714dae7 100644 --- a/pkg/virtctl/vm/migrate_test.go +++ b/pkg/virtctl/vm/migrate_test.go @@ -53,19 +53,35 @@ var _ = Describe("Migrate command", func() { Expect(err).Should(MatchError("accepts 1 arg(s), received 0")) }) - DescribeTable("should migrate a vm according to options", func(migrateOptions *v1.MigrateOptions) { + DescribeTable("should migrate a vm according to options", func(expectedMigrateOptions *v1.MigrateOptions, extraArgs ...string) { vm := kubecli.NewMinimalVM(vmName) kubecli.MockKubevirtClientInstance.EXPECT().VirtualMachine(k8smetav1.NamespaceDefault).Return(vmInterface).Times(1) - vmInterface.EXPECT().Migrate(context.Background(), vm.Name, migrateOptions).Return(nil).Times(1) + vmInterface.EXPECT().Migrate(context.Background(), vm.Name, expectedMigrateOptions).Return(nil).Times(1) args := []string{"migrate", vmName} - if len(migrateOptions.DryRun) > 0 { - args = append(args, "--dry-run") - } + args = append(args, extraArgs...) 
Expect(testing.NewRepeatableVirtctlCommand(args...)()).To(Succeed()) }, - Entry("with default", &v1.MigrateOptions{}), - Entry("with dry-run option", &v1.MigrateOptions{DryRun: []string{k8smetav1.DryRunAll}}), + Entry( + "with default", + &v1.MigrateOptions{}), + Entry( + "with dry-run option", + &v1.MigrateOptions{ + DryRun: []string{k8smetav1.DryRunAll}}, + "--dry-run"), + Entry( + "with nodeName option", + &v1.MigrateOptions{ + AddedNodeSelector: map[string]string{"kubernetes.io/hostname": "test.example.com"}}, + "--nodeName", "test.example.com"), + Entry( + "with dry-run and nodeName options", + &v1.MigrateOptions{ + AddedNodeSelector: map[string]string{"kubernetes.io/hostname": "test.example.com"}, + DryRun: []string{k8smetav1.DryRunAll}}, + "--dry-run", "--nodeName", "test.example.com"), ) + }) diff --git a/staging/src/kubevirt.io/api/core/v1/deepcopy_generated.go b/staging/src/kubevirt.io/api/core/v1/deepcopy_generated.go index 1e45d6b30a7b..1f7bfd8ff074 100644 --- a/staging/src/kubevirt.io/api/core/v1/deepcopy_generated.go +++ b/staging/src/kubevirt.io/api/core/v1/deepcopy_generated.go @@ -3114,6 +3114,13 @@ func (in *MigrateOptions) DeepCopyInto(out *MigrateOptions) { *out = make([]string, len(*in)) copy(*out, *in) } + if in.AddedNodeSelector != nil { + in, out := &in.AddedNodeSelector, &out.AddedNodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } return } @@ -5064,7 +5071,7 @@ func (in *VirtualMachineInstanceMigration) DeepCopyInto(out *VirtualMachineInsta *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec + in.Spec.DeepCopyInto(&out.Spec) in.Status.DeepCopyInto(&out.Status) return } @@ -5158,6 +5165,13 @@ func (in *VirtualMachineInstanceMigrationPhaseTransitionTimestamp) DeepCopy() *V // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *VirtualMachineInstanceMigrationSpec) DeepCopyInto(out *VirtualMachineInstanceMigrationSpec) { *out = *in + if in.AddedNodeSelector != nil { + in, out := &in.AddedNodeSelector, &out.AddedNodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } return } diff --git a/staging/src/kubevirt.io/api/core/v1/types.go b/staging/src/kubevirt.io/api/core/v1/types.go index 3e17c95fe85c..9c3dc7cca8d1 100644 --- a/staging/src/kubevirt.io/api/core/v1/types.go +++ b/staging/src/kubevirt.io/api/core/v1/types.go @@ -1381,6 +1381,15 @@ type VirtualMachineInstanceMigrationList struct { type VirtualMachineInstanceMigrationSpec struct { // The name of the VMI to perform the migration on. VMI must exist in the migration objects namespace VMIName string `json:"vmiName,omitempty" valid:"required"` + + // AddedNodeSelector is an additional selector that can be used to + // complement a NodeSelector or NodeAffinity as set on the VM + // to restrict the set of allowed target nodes for a migration. + // In case of key collisions, values set on the VM objects + // are going to be preserved to ensure that addedNodeSelector + // can only restrict but not bypass constraints already set on the VM object. 
+ // +optional + AddedNodeSelector map[string]string `json:"addedNodeSelector,omitempty"` } // VirtualMachineInstanceMigrationPhaseTransitionTimestamp gives a timestamp in relation to when a phase is set on a vmi @@ -2273,6 +2282,15 @@ type MigrateOptions struct { // +optional // +listType=atomic DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` + + // AddedNodeSelector is an additional selector that can be used to + // complement a NodeSelector or NodeAffinity as set on the VM + // to restrict the set of allowed target nodes for a migration. + // In case of key collisions, values set on the VM objects + // are going to be preserved to ensure that addedNodeSelector + // can only restrict but not bypass constraints already set on the VM object. + // +optional + AddedNodeSelector map[string]string `json:"addedNodeSelector,omitempty"` } // VirtualMachineInstanceGuestAgentInfo represents information from the installed guest agent diff --git a/staging/src/kubevirt.io/api/core/v1/types_swagger_generated.go b/staging/src/kubevirt.io/api/core/v1/types_swagger_generated.go index 5b48775a7353..48b86805a159 100644 --- a/staging/src/kubevirt.io/api/core/v1/types_swagger_generated.go +++ b/staging/src/kubevirt.io/api/core/v1/types_swagger_generated.go @@ -316,7 +316,8 @@ func (VirtualMachineInstanceMigrationList) SwaggerDoc() map[string]string { func (VirtualMachineInstanceMigrationSpec) SwaggerDoc() map[string]string { return map[string]string{ - "vmiName": "The name of the VMI to perform the migration on. VMI must exist in the migration objects namespace", + "vmiName": "The name of the VMI to perform the migration on. VMI must exist in the migration objects namespace", + "addedNodeSelector": "AddedNodeSelector is an additional selector that can be used to\ncomplement a NodeSelector or NodeAffinity as set on the VM\nto restrict the set of allowed target nodes for a migration.\nIn case of key collisions, values set on the VM objects\nare going to be preserved to ensure that addedNodeSelector\ncan only restrict but not bypass constraints already set on the VM object.\n+optional", } } @@ -624,8 +625,9 @@ func (StopOptions) SwaggerDoc() map[string]string { func (MigrateOptions) SwaggerDoc() map[string]string { return map[string]string{ - "": "MigrateOptions may be provided on migrate request.", - "dryRun": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. Valid values are:\n- All: all dry run stages will be processed\n+optional\n+listType=atomic", + "": "MigrateOptions may be provided on migrate request.", + "dryRun": "When present, indicates that modifications should not be\npersisted. An invalid or unrecognized dryRun directive will\nresult in an error response and no further processing of the\nrequest. 
Valid values are:\n- All: all dry run stages will be processed\n+optional\n+listType=atomic", + "addedNodeSelector": "AddedNodeSelector is an additional selector that can be used to\ncomplement a NodeSelector or NodeAffinity as set on the VM\nto restrict the set of allowed target nodes for a migration.\nIn case of key collisions, values set on the VM objects\nare going to be preserved to ensure that addedNodeSelector\ncan only restrict but not bypass constraints already set on the VM object.\n+optional", } } diff --git a/staging/src/kubevirt.io/client-go/api/openapi_generated.go b/staging/src/kubevirt.io/client-go/api/openapi_generated.go index a5b51b9936c4..2d5ca506e115 100644 --- a/staging/src/kubevirt.io/client-go/api/openapi_generated.go +++ b/staging/src/kubevirt.io/client-go/api/openapi_generated.go @@ -22254,6 +22254,22 @@ func schema_kubevirtio_api_core_v1_MigrateOptions(ref common.ReferenceCallback) }, }, }, + "addedNodeSelector": { + SchemaProps: spec.SchemaProps{ + Description: "AddedNodeSelector is an additional selector that can be used to complement a NodeSelector or NodeAffinity as set on the VM to restrict the set of allowed target nodes for a migration. In case of key collisions, values set on the VM objects are going to be preserved to ensure that addedNodeSelector can only restrict but not bypass constraints already set on the VM object.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, }, }, @@ -25567,6 +25583,22 @@ func schema_kubevirtio_api_core_v1_VirtualMachineInstanceMigrationSpec(ref commo Format: "", }, }, + "addedNodeSelector": { + SchemaProps: spec.SchemaProps{ + Description: "AddedNodeSelector is an additional selector that can be used to complement a NodeSelector or NodeAffinity as set on the VM to restrict the set of allowed target nodes for a migration. 
In case of key collisions, values set on the VM objects are going to be preserved to ensure that addedNodeSelector can only restrict but not bypass constraints already set on the VM object.", + Type: []string{"object"}, + AdditionalProperties: &spec.SchemaOrBool{ + Allows: true, + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Default: "", + Type: []string{"string"}, + Format: "", + }, + }, + }, + }, + }, }, }, }, diff --git a/tests/decorators/decorators.go b/tests/decorators/decorators.go index 274820478595..77a39ac8331e 100644 --- a/tests/decorators/decorators.go +++ b/tests/decorators/decorators.go @@ -56,6 +56,7 @@ var ( MigrationBasedHotplugNICs = Label("migration-based-hotplug-NICs") NetCustomBindingPlugins = Label("netCustomBindingPlugins") RequiresTwoSchedulableNodes = Label("requires-two-schedulable-nodes") + RequiresThreeSchedulableNodes = Label("requires-three-schedulable-nodes") VMLiveUpdateRolloutStrategy = Label("VMLiveUpdateRolloutStrategy") USB = Label("USB") AutoResourceLimitsGate = Label("AutoResourceLimitsGate") diff --git a/tests/migration/BUILD.bazel b/tests/migration/BUILD.bazel index c3735e3c5106..80f284807257 100644 --- a/tests/migration/BUILD.bazel +++ b/tests/migration/BUILD.bazel @@ -5,6 +5,7 @@ go_library( srcs = [ "eviction_strategy.go", "framework.go", + "nodeselector.go", "migration.go", "migration_policy.go", "paused.go", diff --git a/tests/migration/nodeselector.go b/tests/migration/nodeselector.go new file mode 100644 index 000000000000..b1a8990bbe4c --- /dev/null +++ b/tests/migration/nodeselector.go @@ -0,0 +1,241 @@ +/* + * This file is part of the KubeVirt project + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * Copyright 2023 Red Hat, Inc. + * + */ + +package migration + +import ( + "context" + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . 
"github.com/onsi/gomega" + + k8sv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + "kubevirt.io/client-go/kubecli" + "kubevirt.io/kubevirt/pkg/libvmi" + "kubevirt.io/kubevirt/tests/decorators" + "kubevirt.io/kubevirt/tests/framework/cleanup" + "kubevirt.io/kubevirt/tests/framework/kubevirt" + "kubevirt.io/kubevirt/tests/libmigration" + "kubevirt.io/kubevirt/tests/libnet" + "kubevirt.io/kubevirt/tests/libnode" + "kubevirt.io/kubevirt/tests/libpod" + "kubevirt.io/kubevirt/tests/libvmifact" + "kubevirt.io/kubevirt/tests/libwait" + "kubevirt.io/kubevirt/tests/testsuite" +) + +var _ = SIGMigrationDescribe("Live Migration", decorators.RequiresThreeSchedulableNodes, func() { + var virtClient kubecli.KubevirtClient + + BeforeEach(func() { + virtClient = kubevirt.Client() + }) + + FContext("with a live-migrate eviction strategy set", Serial, func() { + var nodes *k8sv1.NodeList + + BeforeEach(func() { + Eventually(func() int { + nodes = libnode.GetAllSchedulableNodes(virtClient) + return len(nodes.Items) + }, 60*time.Second, 1*time.Second).Should(BeNumerically(">=", 3), "There should be at lest three compute nodes") + }) + + It("Should successfully migrate a VM to a labelled node", func() { + By("starting a VM on the source node") + vmi := libvmifact.NewFedora( + libnet.WithMasqueradeNetworking(), + libvmi.WithResourceMemory(fedoraVMSize), + ) + vmi, err := virtClient.VirtualMachineInstance(testsuite.GetTestNamespace(vmi)).Create(context.Background(), vmi, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) + libwait.WaitForSuccessfulVMIStart(vmi, libwait.WithTimeout(180)) + + sourceNodeName := vmi.Status.NodeName + var targetNodeName string + + By("labeling a target node") + for _, node := range nodes.Items { + if node.Name != sourceNodeName { + targetNodeName = node.Name + libnode.AddLabelToNode(node.Name, cleanup.TestLabelForNamespace(vmi.Namespace), "target") + break + } + Expect(targetNodeName).ToNot(BeEmpty(), "There should be a labeled target node") + } + + By("Checking nodeSelector on the VMI") + Expect(vmi.Spec.NodeSelector).ToNot(HaveKeyWithValue(cleanup.TestLabelForNamespace(vmi.Namespace), "target")) + + By("Checking nodeSelector on virt-launcher pod") + virtLauncherPod, err := libpod.GetPodByVirtualMachineInstance(vmi, vmi.Namespace) + Expect(err).NotTo(HaveOccurred()) + Expect(virtLauncherPod.Spec.NodeSelector).ToNot(HaveKeyWithValue(cleanup.TestLabelForNamespace(vmi.Namespace), "target")) + + By("Starting the migration to the labeled node") + migration := libmigration.New(vmi.Name, vmi.Namespace) + migration.Spec.AddedNodeSelector = map[string]string{cleanup.TestLabelForNamespace(vmi.Namespace): "target"} + libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration) + + By("Checking that the VMI landed on the target node") + Expect(vmi.Status.NodeName).To(Equal(targetNodeName)) + + By("Checking nodeSelector on the VMI") + Expect(vmi.Spec.NodeSelector).ToNot(HaveKeyWithValue(cleanup.TestLabelForNamespace(vmi.Namespace), "target")) + + By("Checking nodeSelector on virt-launcher pod") + virtLauncherPod, err = libpod.GetPodByVirtualMachineInstance(vmi, vmi.Namespace) + Expect(err).NotTo(HaveOccurred()) + Expect(virtLauncherPod.Spec.NodeSelector).To(HaveKeyWithValue(cleanup.TestLabelForNamespace(vmi.Namespace), "target")) + + By("Migrating again the VM without configuring a nodeselector") + migration = libmigration.New(vmi.Name, vmi.Namespace) + libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration) 
+ + By("Checking nodeSelector on the VMI") + Expect(vmi.Spec.NodeSelector).ToNot(HaveKeyWithValue(cleanup.TestLabelForNamespace(vmi.Namespace), "target")) + + By("Checking nodeSelector on virt-launcher pod") + virtLauncherPod, err = libpod.GetPodByVirtualMachineInstance(vmi, vmi.Namespace) + Expect(err).NotTo(HaveOccurred()) + Expect(virtLauncherPod.Spec.NodeSelector).ToNot(HaveKeyWithValue(cleanup.TestLabelForNamespace(vmi.Namespace), "target")) + + }) + + It("Should fail the migration when the nodeSelector could not be satisfied", func() { + By("starting a VM on the source node") + vmi := libvmifact.NewFedora( + libnet.WithMasqueradeNetworking(), + libvmi.WithResourceMemory(fedoraVMSize), + ) + vmi, err := virtClient.VirtualMachineInstance(testsuite.GetTestNamespace(vmi)).Create(context.Background(), vmi, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) + libwait.WaitForSuccessfulVMIStart(vmi, libwait.WithTimeout(180)) + + sourceNodeName := vmi.Status.NodeName + var targetNodeName string + + By("labeling a target node") + for _, node := range nodes.Items { + if node.Name != sourceNodeName { + targetNodeName = node.Name + libnode.AddLabelToNode(node.Name, cleanup.TestLabelForNamespace(vmi.Namespace), "validTarget") + break + } + Expect(targetNodeName).ToNot(BeEmpty(), "There should be a labeled target node") + } + + By("Starting the migration with an unsatisfiable nodeSelector") + migration := libmigration.New(vmi.Name, vmi.Namespace) + migration.Spec.AddedNodeSelector = map[string]string{cleanup.TestLabelForNamespace(vmi.Namespace): "brokenTarget"} + libmigration.RunMigrationAndExpectFailure(migration, libmigration.MigrationWaitTime) + + By("Checking that the VMI is still on the source node") + Expect(vmi.Status.NodeName).To(Equal(sourceNodeName)) + + By("Migrating again the VM without configuring a nodeSelector") + migration = libmigration.New(vmi.Name, vmi.Namespace) + libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration) + + By("Checking that the VMI is now on a different node") + Expect(vmi.Status.NodeName).ToNot(Equal(sourceNodeName)) + + }) + + Context("with a node selector on the VMI", func() { + zoneLabelKey := fmt.Sprintf("%s/%s", cleanup.KubeVirtTestLabelPrefix, "zone") + vmiLabelValue := "vmi" + migrationLabelKey := fmt.Sprintf("%s/%s", cleanup.KubeVirtTestLabelPrefix, "migration") + migrationLabelValue := fmt.Sprintf("%s/%s", cleanup.KubeVirtTestLabelPrefix, "migration") + + BeforeEach(func() { + libnode.AddLabelToNode(nodes.Items[0].Name, zoneLabelKey, vmiLabelValue) + libnode.AddLabelToNode(nodes.Items[1].Name, zoneLabelKey, vmiLabelValue) + }) + + AfterEach(func() { + for _, node := range nodes.Items { + libnode.RemoveLabelFromNode(node.Name, zoneLabelKey) + libnode.RemoveLabelFromNode(node.Name, migrationLabelKey) + } + }) + + It("should only restrict VMI node selector", func() { + By("starting a VM with a nodeSelector") + vmi := libvmifact.NewFedora( + libnet.WithMasqueradeNetworking(), + libvmi.WithResourceMemory(fedoraVMSize), + ) + vmi.Spec.NodeSelector = map[string]string{zoneLabelKey: vmiLabelValue} + vmi, err := virtClient.VirtualMachineInstance(testsuite.GetTestNamespace(vmi)).Create(context.Background(), vmi, metav1.CreateOptions{}) + Expect(err).ToNot(HaveOccurred()) + libwait.WaitForSuccessfulVMIStart(vmi, libwait.WithTimeout(180)) + + By("labelling all the nodes but the source one") + sourceNodeName := vmi.Status.NodeName + for _, node := range nodes.Items { + if node.Name != sourceNodeName { + 
+						libnode.AddLabelToNode(node.Name, migrationLabelKey, migrationLabelValue)
+					}
+				}
+
+				By("Starting the migration restricting the VMI nodeSelector")
+				migration := libmigration.New(vmi.Name, vmi.Namespace)
+				migration.Spec.AddedNodeSelector = map[string]string{
+					zoneLabelKey: migrationLabelValue,
+				}
+				By("Trying to override a selector already set on the VMI")
+				migration.Spec.AddedNodeSelector[migrationLabelKey] = migrationLabelValue
+				libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)
+
+				By("Checking that the VMI is on a different node")
+				vmi, err = virtClient.VirtualMachineInstance(vmi.Namespace).Get(context.Background(), vmi.Name, metav1.GetOptions{})
+				Expect(err).ToNot(HaveOccurred())
+				Expect(vmi.Status.NodeName).ToNot(Equal(sourceNodeName))
+
+				By("Checking nodeSelector on the VMI")
+				Expect(vmi.Spec.NodeSelector).To(HaveKeyWithValue(zoneLabelKey, vmiLabelValue))
+				Expect(vmi.Spec.NodeSelector).ToNot(HaveKey(migrationLabelKey))
+
+				By("Checking nodeSelector on virt-launcher pod")
+				virtLauncherPod, err := libpod.GetPodByVirtualMachineInstance(vmi, vmi.Namespace)
+				Expect(err).NotTo(HaveOccurred())
+				By("Checking that the selectors from the VMI are correctly there")
+				Expect(virtLauncherPod.Spec.NodeSelector).To(HaveKeyWithValue(zoneLabelKey, vmiLabelValue))
+				Expect(virtLauncherPod.Spec.NodeSelector).ToNot(HaveKeyWithValue(zoneLabelKey, migrationLabelValue))
+
+				By("Checking that the additional selectors from the migration are there")
+				Expect(virtLauncherPod.Spec.NodeSelector).To(HaveKeyWithValue(migrationLabelKey, migrationLabelValue))
+
+				By("Migrating the VM again without configuring a nodeSelector")
+				migration = libmigration.New(vmi.Name, vmi.Namespace)
+				libmigration.RunMigrationAndExpectToCompleteWithDefaultTimeout(virtClient, migration)
+
+				By("Checking nodeSelector on virt-launcher pod")
+				virtLauncherPod, err = libpod.GetPodByVirtualMachineInstance(vmi, vmi.Namespace)
+				Expect(err).NotTo(HaveOccurred())
+				Expect(virtLauncherPod.Spec.NodeSelector).To(HaveKeyWithValue(zoneLabelKey, vmiLabelValue))
+				Expect(virtLauncherPod.Spec.NodeSelector).ToNot(HaveKey(migrationLabelKey))
+			})
+		})
+	})
+})
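Usage reference: a minimal client-side sketch of how the new field is consumed. It relies only on the Migrate(ctx, name, *v1.MigrateOptions) call shown in pkg/virtctl/vm/migrate.go above plus the standard kubecli.GetKubevirtClient() helper; the namespace ("default"), VM name ("my-vm"), and the "example.com/pool" label are illustrative placeholders, not values taken from this change.

package main

import (
	"context"
	"log"

	v1 "kubevirt.io/api/core/v1"
	"kubevirt.io/client-go/kubecli"
)

func main() {
	// Resolve a KubeVirt client from the usual kubeconfig / in-cluster config.
	virtClient, err := kubecli.GetKubevirtClient()
	if err != nil {
		log.Fatalf("failed to obtain KubeVirt client: %v", err)
	}

	// Request a live migration of VM "my-vm" (placeholder) in the "default" namespace,
	// restricted to nodes that additionally match these labels. "example.com/pool" is a
	// made-up label key used for illustration only. Keys that collide with selectors
	// already set on the VM keep the VM's values, so this can only narrow the target set.
	opts := &v1.MigrateOptions{
		AddedNodeSelector: map[string]string{
			"topology.kubernetes.io/region": "us-west-1",
			"example.com/pool":              "maintenance",
		},
	}

	if err := virtClient.VirtualMachine("default").Migrate(context.Background(), "my-vm", opts); err != nil {
		log.Fatalf("failed to request migration: %v", err)
	}
}

The same request can be issued through the new CLI flag, for example virtctl migrate my-vm --nodeName worker-2 (node name is a placeholder), which populates AddedNodeSelector with the kubernetes.io/hostname key; per the maps.Copy ordering in createTargetPod, selector keys already derived from the VM keep the VM's values, so the field restricts but never bypasses existing constraints.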