From 65263c2c6e19452b346ee7ab3710cc04dacf4e06 Mon Sep 17 00:00:00 2001
From: Ram Lavi
Date: Tue, 24 Sep 2024 13:43:12 +0300
Subject: [PATCH 1/4] cluster-up: Add nse flag to OVNK kind script

This enables the network-segmentation feature on OVNK, which is needed
to run VM workloads with a primary UDN.

Signed-off-by: Ram Lavi
---
 hack/cluster.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hack/cluster.sh b/hack/cluster.sh
index 3a7fa27a..d7b7a698 100755
--- a/hack/cluster.sh
+++ b/hack/cluster.sh
@@ -3,7 +3,7 @@ set -xe
 
 SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 
-KIND_ARGS="${KIND_ARGS:--ic -ikv -i6 -mne}"
+KIND_ARGS="${KIND_ARGS:--ic -ikv -i6 -mne -nse}"
 
 OUTPUT_DIR=${OUTPUT_DIR:-${SCRIPT_DIR}/../.output}
 

From f10baf758871885352a71187879238ed94cc247a Mon Sep 17 00:00:00 2001
From: Ram Lavi
Date: Tue, 24 Sep 2024 13:15:53 +0300
Subject: [PATCH 2/4] e2e/persistentips: Add role to GenerateLayer2WithSubnetNAD

GenerateLayer2WithSubnetNAD currently creates NADs with the secondary
role (the default). Add a role input parameter to the function to
support the primary role, which will be used in future commits.

Signed-off-by: Ram Lavi
---
 test/e2e/persistentips_test.go | 2 +-
 test/env/generate.go           | 5 +++--
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/test/e2e/persistentips_test.go b/test/e2e/persistentips_test.go
index 081f31c0..6a79c88a 100644
--- a/test/e2e/persistentips_test.go
+++ b/test/e2e/persistentips_test.go
@@ -70,7 +70,7 @@ var _ = Describe("Persistent IPs", func() {
 				td.TearDown()
 			})
 
-			nad = testenv.GenerateLayer2WithSubnetNAD(td.Namespace)
+			nad = testenv.GenerateLayer2WithSubnetNAD(td.Namespace, "secondary")
 			vmi = testenv.GenerateAlpineWithMultusVMI(td.Namespace, networkInterfaceName, nad.Name)
 			vm = testenv.NewVirtualMachine(vmi, testenv.WithRunning())
 
diff --git a/test/env/generate.go b/test/env/generate.go
index 66813054..596e72e7 100644
--- a/test/env/generate.go
+++ b/test/env/generate.go
@@ -13,7 +13,7 @@ import (
 	kubevirtv1 "kubevirt.io/api/core/v1"
 )
 
-func GenerateLayer2WithSubnetNAD(namespace string) *nadv1.NetworkAttachmentDefinition {
+func GenerateLayer2WithSubnetNAD(namespace, role string) *nadv1.NetworkAttachmentDefinition {
 	networkName := "l2"
 	nadName := RandomName(networkName, 16)
 	return &nadv1.NetworkAttachmentDefinition{
@@ -30,9 +30,10 @@ func GenerateLayer2WithSubnetNAD(namespace string) *nadv1.NetworkAttachmentDefin
 				"topology": "layer2",
 				"subnets": "10.100.200.0/24",
 				"netAttachDefName": "%[1]s/%[2]s",
+				"role": "%[4]s",
 				"allowPersistentIPs": true
 			}
-`, namespace, nadName, networkName),
+`, namespace, nadName, networkName, role),
 		},
 	}
 }

From dc5e8beefb5e3487dbf3b791219ccd541384145c Mon Sep 17 00:00:00 2001
From: Ram Lavi
Date: Tue, 24 Sep 2024 13:48:17 +0300
Subject: [PATCH 3/4] e2e/persistentips: Rename file to focus on secondary
 interfaces

This is done to differentiate this test from the primary UDN tests that
will be added in future commits.
Signed-off-by: Ram Lavi
---
 .../{persistentips_test.go => persistentips-secondary_test.go} | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
 rename test/e2e/{persistentips_test.go => persistentips-secondary_test.go} (99%)

diff --git a/test/e2e/persistentips_test.go b/test/e2e/persistentips-secondary_test.go
similarity index 99%
rename from test/e2e/persistentips_test.go
rename to test/e2e/persistentips-secondary_test.go
index 6a79c88a..4cdff572 100644
--- a/test/e2e/persistentips_test.go
+++ b/test/e2e/persistentips-secondary_test.go
@@ -40,7 +40,7 @@ import (
 	"sigs.k8s.io/controller-runtime/pkg/client"
 )
 
-var _ = Describe("Persistent IPs", func() {
+var _ = Describe("Persistent IPs on Secondary interfaces", func() {
 	var failureCount int = 0
 	JustAfterEach(func() {
 		if CurrentSpecReport().Failed() {

From 6ce1cb241de8284f6eb56f170dfc0924dbc0d6ea Mon Sep 17 00:00:00 2001
From: Ram Lavi
Date: Wed, 25 Sep 2024 15:14:12 +0300
Subject: [PATCH 4/4] e2e/persistentips: Add tests for primary UDN

These tests are practically the same as the secondary-interface tests,
with a few semantic changes:
- networkInterfaceName is 'ovn-udn1'
- VMI is created with primary UDN
- NAD is created with role: primary
- interface IPs are extracted using the network-status annotation, not
  vmi.status, as the latter is not yet supported for primary UDN.

Signed-off-by: Ram Lavi
---
 test/e2e/persistentips-primary_test.go | 396 +++++++++++++++++++++++++
 test/env/generate.go                   |  74 +++++
 2 files changed, 470 insertions(+)
 create mode 100644 test/e2e/persistentips-primary_test.go

diff --git a/test/e2e/persistentips-primary_test.go b/test/e2e/persistentips-primary_test.go
new file mode 100644
index 00000000..06e23c5c
--- /dev/null
+++ b/test/e2e/persistentips-primary_test.go
@@ -0,0 +1,396 @@
+/*
+ * This file is part of the KubeVirt project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * Copyright 2024 Red Hat, Inc.
+ *
+ */
+
+package e2e
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"os/exec"
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
"github.com/onsi/gomega" + + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" + + nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1" + + kubevirtv1 "kubevirt.io/api/core/v1" + + testenv "github.com/kubevirt/ipam-extensions/test/env" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +var _ = Describe("Persistent IPs on Primary UDN interface", func() { + var failureCount int = 0 + JustAfterEach(func() { + if CurrentSpecReport().Failed() { + failureCount++ + By(fmt.Sprintf("Test failed, collecting logs and artifacts, failure count %d", failureCount)) + + logCommand([]string{"get", "pods", "-A"}, "pods", failureCount) + logCommand([]string{"get", "vm", "-A", "-oyaml"}, "vms", failureCount) + logCommand([]string{"get", "vmi", "-A", "-oyaml"}, "vmis", failureCount) + logCommand([]string{"get", "ipamclaims", "-A", "-oyaml"}, "ipamclaims", failureCount) + logOvnPods(failureCount) + } + }) + + When("network attachment definition created with allowPersistentIPs=true", func() { + var ( + td testenv.TestData + networkInterfaceName = "ovn-udn1" + vm *kubevirtv1.VirtualMachine + vmi *kubevirtv1.VirtualMachineInstance + nad *nadv1.NetworkAttachmentDefinition + ) + BeforeEach(func() { + td = testenv.GenerateTestData() + td.SetUp() + DeferCleanup(func() { + td.TearDown() + }) + + nad = testenv.GenerateLayer2WithSubnetNAD(td.Namespace, "primary") + vmi = testenv.GenerateAlpineWithPrimaryUDNVMI(td.Namespace) + vm = testenv.NewVirtualMachine(vmi, testenv.WithRunning()) + + By("Create NetworkAttachmentDefinition") + Expect(testenv.Client.Create(context.Background(), nad)).To(Succeed()) + }) + Context("and a virtual machine using it is also created", func() { + var originalVMIDefaultNetworkStatus *nadv1.NetworkStatus + var err error + BeforeEach(func() { + By("Creating VM with primary UDN") + Expect(testenv.Client.Create(context.Background(), vm)).To(Succeed()) + + By(fmt.Sprintf("Waiting for readiness at virtual machine %s", vm.Name)) + Eventually(testenv.ThisVMReadiness(vm)). + WithPolling(time.Second). + WithTimeout(5 * time.Minute). + Should(BeTrue()) + + By("Wait for IPAMClaim to get created") + Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)). + WithTimeout(time.Minute). + WithPolling(time.Second). + ShouldNot(BeEmpty()) + + Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed()) + + originalVMIDefaultNetworkStatus, err = getDefaultNetworkStatus(vmi) + Expect(err).ToNot(HaveOccurred()) + Expect(originalVMIDefaultNetworkStatus.Interface).To(Equal(networkInterfaceName)) + Expect(originalVMIDefaultNetworkStatus.IPs).ToNot(BeEmpty()) + }) + + It("should keep ips after live migration", func() { + vmiIPsBeforeMigration := originalVMIDefaultNetworkStatus.IPs + + testenv.LiveMigrateVirtualMachine(td.Namespace, vm.Name) + testenv.CheckLiveMigrationSucceeded(td.Namespace, vm.Name) + + By("Wait for VMI to be ready after live migration") + Eventually(testenv.ThisVMI(vmi)). + WithPolling(time.Second). + WithTimeout(5 * time.Minute). 
+					Should(testenv.ContainConditionVMIReady())
+
+				Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())
+
+				targetVMIDefaultNetworkStatus, err := getDefaultNetworkStatus(vmi)
+				Expect(err).ToNot(HaveOccurred())
+				Expect(targetVMIDefaultNetworkStatus.Interface).To(Equal(originalVMIDefaultNetworkStatus.Interface))
+				Expect(targetVMIDefaultNetworkStatus.IPs).To(ConsistOf(vmiIPsBeforeMigration))
+			})
+
+			It("should garbage collect IPAMClaims after VM deletion", func() {
+				Expect(testenv.Client.Delete(context.Background(), vm)).To(Succeed())
+				Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
+					WithTimeout(time.Minute).
+					WithPolling(time.Second).
+					Should(BeEmpty())
+			})
+
+			It("should garbage collect IPAMClaims after VM foreground deletion", func() {
+				Expect(testenv.Client.Delete(context.Background(), vm, foregroundDeleteOptions())).To(Succeed())
+				Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
+					WithTimeout(time.Minute).
+					WithPolling(time.Second).
+					Should(BeEmpty())
+			})
+
+			When("the VM is stopped", func() {
+				BeforeEach(func() {
+					By("Invoking virtctl stop")
+					output, err := exec.Command("virtctl", "stop", "-n", td.Namespace, vmi.Name).CombinedOutput()
+					Expect(err).NotTo(HaveOccurred(), output)
+
+					By("Ensuring VM is not running")
+					Eventually(testenv.ThisVMI(vmi), 360*time.Second, 1*time.Second).Should(
+						SatisfyAll(
+							Not(testenv.BeCreated()),
+							Not(testenv.BeReady()),
+						))
+
+					Consistently(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
+						WithTimeout(time.Minute).
+						WithPolling(time.Second).
+						ShouldNot(BeEmpty())
+				})
+
+				It("should garbage collect IPAMClaims after VM is deleted", func() {
+					By("Delete VM and check ipam claims are gone")
+					Expect(testenv.Client.Delete(context.Background(), vm)).To(Succeed())
+					Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
+						WithTimeout(time.Minute).
+						WithPolling(time.Second).
+						Should(BeEmpty())
+				})
+
+				It("should garbage collect IPAMClaims after VM is foreground deleted", func() {
+					By("Foreground delete VM and check ipam claims are gone")
+					Expect(testenv.Client.Delete(context.Background(), vm, foregroundDeleteOptions())).To(Succeed())
+					Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
+						WithTimeout(time.Minute).
+						WithPolling(time.Second).
+						Should(BeEmpty())
+				})
+			})
+
+			It("should keep ips after restart", func() {
+				defaultNetworkStatusBeforeRestart, err := getDefaultNetworkStatus(vmi)
+				Expect(err).ToNot(HaveOccurred())
+				vmiIPsBeforeRestart := defaultNetworkStatusBeforeRestart.IPs
+				vmiUUIDBeforeRestart := vmi.UID
+
+				By("Re-starting the VM")
+				output, err := exec.Command("virtctl", "restart", "-n", td.Namespace, vmi.Name).CombinedOutput()
+				Expect(err).NotTo(HaveOccurred(), output)
+
+				By("Wait for a new VMI to be re-started")
+				Eventually(testenv.ThisVMI(vmi)).
+					WithPolling(time.Second).
+					WithTimeout(90 * time.Second).
+					Should(testenv.BeRestarted(vmiUUIDBeforeRestart))
+
+				By("Wait for VMI to be ready after restart")
+				Eventually(testenv.ThisVMI(vmi)).
+					WithPolling(time.Second).
+					WithTimeout(5 * time.Minute).
+					Should(testenv.ContainConditionVMIReady())
+
+				Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())
+
+				defaultNetworkStatusAfterRestart, err := getDefaultNetworkStatus(vmi)
+				Expect(err).ToNot(HaveOccurred())
+				Expect(defaultNetworkStatusAfterRestart.Interface).To(Equal(defaultNetworkStatusBeforeRestart.Interface))
+				Expect(defaultNetworkStatusAfterRestart.IPs).To(ConsistOf(vmiIPsBeforeRestart))
+			})
+		})
+
+		When("requested for a VM whose VMI has extra finalizers", func() {
+			const testFinalizer = "testFinalizer"
+
+			BeforeEach(func() {
+				By("Adding VMI custom finalizer to control VMI deletion")
+				vm.Spec.Template.ObjectMeta.Finalizers = []string{testFinalizer}
+
+				By("Creating VM with primary UDN")
+				Expect(testenv.Client.Create(context.Background(), vm)).To(Succeed())
+
+				By(fmt.Sprintf("Waiting for readiness at virtual machine %s", vm.Name))
+				Eventually(testenv.ThisVMReadiness(vm)).
+					WithPolling(time.Second).
+					WithTimeout(5 * time.Minute).
+					Should(BeTrue())
+
+				By("Wait for IPAMClaim to get created")
+				Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
+					WithTimeout(time.Minute).
+					WithPolling(time.Second).
+					ShouldNot(BeEmpty())
+
+				Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())
+
+				vmiDefaultNetworkStatus, err := getDefaultNetworkStatus(vmi)
+				Expect(err).ToNot(HaveOccurred())
+				Expect(vmiDefaultNetworkStatus.Interface).To(Equal(networkInterfaceName))
+				Expect(vmiDefaultNetworkStatus.IPs).ToNot(BeEmpty())
+			})
+
+			It("should garbage collect IPAMClaims after VM foreground deletion, only after VMI is gone", func() {
+				By("Foreground delete the VM, and validate the IPAMClaim isn't deleted since the VMI exists")
+				Expect(testenv.Client.Delete(context.Background(), vm, foregroundDeleteOptions())).To(Succeed())
+				Consistently(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
+					WithTimeout(time.Minute).
+					WithPolling(time.Second).
+					ShouldNot(BeEmpty())
+
+				By("Remove the finalizer (all the others have already been removed at this stage)")
+				patchData, err := removeFinalizersPatch()
+				Expect(err).NotTo(HaveOccurred())
+				Expect(testenv.Client.Patch(context.TODO(), vmi, client.RawPatch(types.MergePatchType, patchData))).To(Succeed())
+
+				By("Check IPAMClaims are now deleted")
+				Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
+					WithTimeout(time.Minute).
+					WithPolling(time.Second).
+					Should(BeEmpty())
+			})
+		})
+
+		Context("and a virtual machine instance using it is also created", func() {
+			var originalVMIDefaultNetworkStatus *nadv1.NetworkStatus
+			var err error
+			BeforeEach(func() {
+				By("Creating VMI using the nad")
+				Expect(testenv.Client.Create(context.Background(), vmi)).To(Succeed())
+
+				By(fmt.Sprintf("Waiting for readiness at virtual machine instance %s", vmi.Name))
+				Eventually(testenv.ThisVMI(vmi)).
+					WithPolling(time.Second).
+					WithTimeout(5 * time.Minute).
+					Should(testenv.ContainConditionVMIReady())
+
+				By("Wait for IPAMClaim to get created")
+				Eventually(testenv.IPAMClaimsFromNamespace(vm.Namespace)).
+					WithTimeout(time.Minute).
+					WithPolling(time.Second).
+					ShouldNot(BeEmpty())
+
+				Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())
+
+				originalVMIDefaultNetworkStatus, err = getDefaultNetworkStatus(vmi)
+				Expect(err).ToNot(HaveOccurred())
+				Expect(originalVMIDefaultNetworkStatus.Interface).To(Equal(networkInterfaceName))
+				Expect(originalVMIDefaultNetworkStatus.IPs).ToNot(BeEmpty())
+			})
+
+			It("should keep ips after live migration", func() {
+				vmiIPsBeforeMigration := originalVMIDefaultNetworkStatus.IPs
+
+				testenv.LiveMigrateVirtualMachine(td.Namespace, vmi.Name)
+				testenv.CheckLiveMigrationSucceeded(td.Namespace, vmi.Name)
+
+				Expect(testenv.Client.Get(context.Background(), client.ObjectKeyFromObject(vmi), vmi)).To(Succeed())
+
+				targetVMIDefaultNetworkStatus, err := getDefaultNetworkStatus(vmi)
+				Expect(err).ToNot(HaveOccurred())
+				Expect(targetVMIDefaultNetworkStatus.Interface).To(Equal(originalVMIDefaultNetworkStatus.Interface))
+				Expect(targetVMIDefaultNetworkStatus.IPs).To(ConsistOf(vmiIPsBeforeMigration))
+			})
+
+			It("should garbage collect IPAMClaims after VMI deletion", func() {
+				Expect(testenv.Client.Delete(context.Background(), vmi)).To(Succeed())
+				Eventually(testenv.IPAMClaimsFromNamespace(vmi.Namespace)).
+					WithTimeout(time.Minute).
+					WithPolling(time.Second).
+					Should(BeEmpty())
+			})
+
+			It("should garbage collect IPAMClaims after VMI foreground deletion", func() {
+				Expect(testenv.Client.Delete(context.Background(), vmi, foregroundDeleteOptions())).To(Succeed())
+				Eventually(testenv.IPAMClaimsFromNamespace(vmi.Namespace)).
+					WithTimeout(time.Minute).
+					WithPolling(time.Second).
+					Should(BeEmpty())
+			})
+		})
+
+	})
+})
+
+func getPodByVirtualMachineInstance(vmi *kubevirtv1.VirtualMachineInstance) (*corev1.Pod, error) {
+	pod, err := lookupPodBySelector(vmi.Namespace, vmiLabelSelector(vmi), vmiFieldSelector(vmi))
+	if err != nil {
+		return nil, fmt.Errorf("failed to find pod for VMI %s (%s): %w", vmi.Name, string(vmi.GetUID()), err)
+	}
+	return pod, nil
+}
+
+func lookupPodBySelector(namespace string, labelSelector, fieldSelector map[string]string) (*corev1.Pod, error) {
+	pods := &corev1.PodList{}
+	err := testenv.Client.List(context.Background(), pods,
+		client.InNamespace(namespace),
+		client.MatchingLabels(labelSelector),
+		client.MatchingFields(fieldSelector))
+	if err != nil {
+		return nil, err
+	}
+
+	if len(pods.Items) == 0 {
+		return nil, fmt.Errorf("failed to look up pod")
+	}
+
+	return &pods.Items[0], nil
+}
+
+func vmiLabelSelector(vmi *kubevirtv1.VirtualMachineInstance) map[string]string {
+	return map[string]string{kubevirtv1.CreatedByLabel: string(vmi.GetUID())}
+}
+
+func vmiFieldSelector(vmi *kubevirtv1.VirtualMachineInstance) map[string]string {
+	fieldSelectors := map[string]string{}
+	if vmi.Status.Phase == kubevirtv1.Running {
+		const podPhase = "status.phase"
+		fieldSelectors[podPhase] = string(corev1.PodRunning)
+	}
+	if node := vmi.Status.NodeName; node != "" {
+		const nodeName = "spec.nodeName"
+		fieldSelectors[nodeName] = node
+	}
+	return fieldSelectors
+}
+
+func parsePodNetworkStatusAnnotation(podNetStatus string) ([]nadv1.NetworkStatus, error) {
+	if len(podNetStatus) == 0 {
+		return nil, fmt.Errorf("network status annotation not found")
+	}
+
+	var netStatus []nadv1.NetworkStatus
+	if err := json.Unmarshal([]byte(podNetStatus), &netStatus); err != nil {
+		return nil, err
+	}
+
+	return netStatus, nil
+}
+
+func getDefaultNetworkStatus(vmi *kubevirtv1.VirtualMachineInstance) (*nadv1.NetworkStatus, error) {
+	virtLauncherPod, err := getPodByVirtualMachineInstance(vmi)
+	if err != nil {
+		return nil, err
+	}
+
+	netStatuses, err := parsePodNetworkStatusAnnotation(virtLauncherPod.Annotations[nadv1.NetworkStatusAnnot])
+	if err != nil {
+		return nil, err
+	}
+
+	for _, netStatus := range netStatuses {
+		if netStatus.Default {
+			return &netStatus, nil
+		}
+	}
+	return nil, fmt.Errorf("primary IPs not found")
+}
diff --git a/test/env/generate.go b/test/env/generate.go
index 596e72e7..9373472f 100644
--- a/test/env/generate.go
+++ b/test/env/generate.go
@@ -97,6 +97,80 @@ func GenerateAlpineWithMultusVMI(namespace, interfaceName, networkName string) *
 	}
 }
 
+func GenerateAlpineWithPrimaryUDNVMI(namespace string) *kubevirtv1.VirtualMachineInstance {
+	const interfaceName = "passtnet"
+	return &kubevirtv1.VirtualMachineInstance{
+		ObjectMeta: metav1.ObjectMeta{
+			Namespace: namespace,
+			Name:      RandomName("alpine", 16),
+		},
+		Spec: kubevirtv1.VirtualMachineInstanceSpec{
+			Domain: kubevirtv1.DomainSpec{
+				Resources: kubevirtv1.ResourceRequirements{
+					Requests: corev1.ResourceList{
+						corev1.ResourceMemory: resource.MustParse("2048Mi"),
+					},
+				},
+				Devices: kubevirtv1.Devices{
+					Disks: []kubevirtv1.Disk{
+						{
+							DiskDevice: kubevirtv1.DiskDevice{
+								Disk: &kubevirtv1.DiskTarget{
+									Bus: kubevirtv1.DiskBusVirtio,
+								},
+							},
+							Name: "containerdisk",
+						},
+					},
+					Interfaces: []kubevirtv1.Interface{
+						{
+							Name: interfaceName,
+							Binding: &kubevirtv1.PluginBinding{
+								Name: "passt",
+							},
+						},
+					},
+				},
+			},
+			Networks: []kubevirtv1.Network{
+				{
+					Name: interfaceName,
+					NetworkSource: kubevirtv1.NetworkSource{
+						Pod: &kubevirtv1.PodNetwork{},
+					},
+				},
+			},
+			TerminationGracePeriodSeconds: pointer.Int64(5),
+			Volumes: []kubevirtv1.Volume{
+				{
+					Name: "containerdisk",
+					VolumeSource: kubevirtv1.VolumeSource{
+						ContainerDisk: &kubevirtv1.ContainerDiskSource{
+							Image: "quay.io/kubevirtci/alpine-container-disk-demo:devel_alt",
+						},
+					},
+				},
+				{
+					Name: "cloudinitdisk",
+					VolumeSource: kubevirtv1.VolumeSource{
+						CloudInitNoCloud: &kubevirtv1.CloudInitNoCloudSource{
+							NetworkData: cloudInitNetworkData(),
+						},
+					},
+				},
+			},
+		},
+	}
+}
+
+func cloudInitNetworkData() string {
+	return `
+version: 2
+ethernets:
+  eth0:
+    dhcp4: true`
+}
+
 type VMOption func(vm *kubevirtv1.VirtualMachine)
 
 func NewVirtualMachine(vmi *kubevirtv1.VirtualMachineInstance, opts ...VMOption) *kubevirtv1.VirtualMachine {
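
For context on the last bullet of the PATCH 4/4 commit message: the primary UDN IPs are read from the k8s.v1.cni.cncf.io/network-status annotation that Multus sets on the virt-launcher pod, because vmi.status does not yet report them. Below is a minimal, self-contained sketch of that extraction; the annotation value is hypothetical (network names, interface, and IPs are invented for illustration), but the parsing and default-entry selection mirror parsePodNetworkStatusAnnotation and getDefaultNetworkStatus above.

package main

import (
	"encoding/json"
	"fmt"

	nadv1 "github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1"
)

func main() {
	// Hypothetical value of the nadv1.NetworkStatusAnnot
	// ("k8s.v1.cni.cncf.io/network-status") annotation on a virt-launcher pod
	// attached to a primary UDN.
	const annotation = `[
  {"name": "ovn-kubernetes", "interface": "eth0", "ips": ["10.244.1.5"]},
  {"name": "default/l2-net", "interface": "ovn-udn1", "ips": ["10.100.200.5"], "default": true}
]`

	var statuses []nadv1.NetworkStatus
	if err := json.Unmarshal([]byte(annotation), &statuses); err != nil {
		panic(err)
	}

	// The entry flagged "default": true carries the primary UDN interface and
	// IPs; it is the entry the tests assert on before and after migration.
	for _, status := range statuses {
		if status.Default {
			fmt.Printf("primary interface %s has IPs %v\n", status.Interface, status.IPs)
		}
	}
}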