diff --git a/apiclient/harvester_api/managers/templates.py b/apiclient/harvester_api/managers/templates.py
index d0ddc7374..a425e71ed 100644
--- a/apiclient/harvester_api/managers/templates.py
+++ b/apiclient/harvester_api/managers/templates.py
@@ -1,5 +1,4 @@
-import json
-
+from harvester_api.models.templates import TemplateSpec
 from .base import DEFAULT_NAMESPACE, BaseManager
 
 
@@ -10,6 +9,8 @@ class TemplateManager(BaseManager):
     _KIND = "VirtualMachineTemplate"
     _VER_KIND = "VirtualMachineTemplateVersion"
 
+    Spec = TemplateSpec
+
     def create_data(self, name, namespace, description):
         data = {
             "apiVersion": "{API_VERSION}",
@@ -24,101 +25,6 @@ def create_data(self, name, namespace, description):
         }
         return self._inject_data(data)
 
-    def create_version_data(self, name, namespace, cpu, mem, disk_name):
-        data = {
-            "apiVersion": "{API_VERSION}",
-            "kind": self._VER_KIND,
-            "metadata": {
-                "generateName": f"{name}-",
-                "labels": {
-                    "template.harvesterhci.io/templateID": name
-                },
-                "namespace": namespace
-            },
-            "spec": {
-                "templateId": f"{namespace}/{name}",
-                "vm": {
-                    "metadata": {
-                        "annotations": {
-                            "harvesterhci.io/volumeClaimTemplates": json.dumps([{
-                                "metadata": {
-                                    "annotations": {"harvesterhci.io/imageId": ""},
-                                    "name": f"-{disk_name}"
-                                },
-                                "spec": {
-                                    "accessModes": ["ReadWriteMany"],
-                                    "resources": {"requests": {"storage": "10Gi"}},
-                                    "volumeMode": "Block"
-                                }
-                            }])
-                        }
-                    },
-                    "spec": {
-                        "runStrategy": "RerunOnFailure",
-                        "template": {
-                            "metadata": {
-                                "annotations": {
-                                    "harvesterhci.io/sshNames": "[]"
-                                }
-                            },
-                            "spec": {
-                                "domain": {
-                                    "cpu": {
-                                        "cores": cpu,
-                                        "sockets": 1,
-                                        "threads": 1
-                                    },
-                                    "devices": {
-                                        "disks": [
-                                            {
-                                                "disk": {"bus": "virtio"},
-                                                "name": "disk-0"
-                                            }
-                                        ],
-                                        "inputs": [
-                                            {
-                                                "bus": "usb",
-                                                "name": "tablet",
-                                                "type": "tablet"
-                                            }
-                                        ],
-                                        "interfaces": [
-                                            {
-                                                "masquerade": {},
-                                                "model": "virtio",
-                                                "name": "default"
-                                            }
-                                        ]
-                                    },
-                                    "features": {"acpi": {"enabled": True}},
-                                    "machine": {
-                                        "type": ""
-                                    },
-                                    "resources": {
-                                        "limits": dict(cpu=cpu, memory=mem)
-                                    }
-                                },
-                                "evictionStrategy": "LiveMigrate",
-                                "networks": [
-                                    {
-                                        "name": "default",
-                                        "pod": {}
-                                    }
-                                ],
-                                "volumes": [
-                                    {
-                                        "dataVolume": {"name": f"-{disk_name}"},
-                                        "name": "disk-0"
-                                    }
-                                ]
-                            }
-                        }
-                    }
-                }
-            }
-        }
-        return self._inject_data(data)
-
     def get(self, name="", namespace=DEFAULT_NAMESPACE, *, raw=False):
         return self._get(self.PATH_fmt.format(uid=name, ns=namespace), raw=raw)
 
@@ -130,12 +36,16 @@ def create(self, name, namespace=DEFAULT_NAMESPACE, description="", *, raw=False
         path = self.PATH_fmt.format(ns=namespace, uid="")
         return self._create(path, json=data, raw=raw)
 
-    def update(self, name, namespace=DEFAULT_NAMESPACE, *, raw=False, **options):
-        cpu, memory = options.get('cpu', 1), options.get('memory', "1Gi")
-        disk_name = options.get("disk_name", "default")
-        data = self.create_version_data(name, namespace, cpu, memory, disk_name)
+    def create_version(self, name, template_spec, namespace=DEFAULT_NAMESPACE, *, raw=False):
+        if isinstance(template_spec, self.Spec):
+            template_spec = template_spec.to_dict(name, namespace)
+        template_spec.update(apiVersion="{API_VERSION}", kind=self._VER_KIND)
+        data = self._inject_data(template_spec)
         path = self.VER_PATH_fmt.format(uid="", ns=namespace)
         return self._create(path, json=data, raw=raw)
 
     def delete(self, name, namespace=DEFAULT_NAMESPACE, *, raw=False):
         return self._delete(self.PATH_fmt.format(uid=name, ns=namespace), raw=raw)
+
+    def delete_version(self, name, namespace=DEFAULT_NAMESPACE, *, raw=False):
+        return self._delete(self.VER_PATH_fmt.format(uid=name, ns=namespace), raw=raw)
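The manager now delegates version payloads to `TemplateSpec` instead of the removed hard-coded `create_version_data`. A minimal usage sketch of the reworked API, assuming an authenticated `api_client` as used in the e2e tests; the template name `tmpl-demo` is illustrative:

    # Sketch only: create a template, publish a version from a Spec, clean up.
    spec = api_client.templates.Spec(1, 2)  # 1 CPU core, 2 (Gi) memory, per the tests' usage
    code, data = api_client.templates.create("tmpl-demo")
    assert 201 == code, (code, data)

    code, data = api_client.templates.create_version("tmpl-demo", spec)
    assert 201 == code, (code, data)
    version_name = data['metadata']['name']  # generated from "tmpl-demo-"

    api_client.templates.delete_version(version_name)
    api_client.templates.delete("tmpl-demo")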
diff --git a/apiclient/harvester_api/managers/virtualmachines.py b/apiclient/harvester_api/managers/virtualmachines.py
index bb46bd8fe..76070c3d7 100644
--- a/apiclient/harvester_api/managers/virtualmachines.py
+++ b/apiclient/harvester_api/managers/virtualmachines.py
@@ -34,14 +34,14 @@ def get_status(self, name="", namespace=DEFAULT_NAMESPACE, *, raw=False, **kwarg
 
     def create(self, name, vm_spec, namespace=DEFAULT_NAMESPACE, *, raw=False):
         if isinstance(vm_spec, self.Spec):
-            vm_spec = vm_spec.to_dict(name, namespace)
+            # unbound call, so subclasses like TemplateSpec are converted
+            # with the base VMSpec layout
+            vm_spec = self.Spec.to_dict(vm_spec, name, namespace)
         path = self.PATH_fmt.format(uid="", ns=namespace)
         return self._create(path, json=vm_spec, raw=raw)
 
     def update(self, name, vm_spec, namespace=DEFAULT_NAMESPACE, *, raw=False, as_json=True,
                **kwargs):
         if isinstance(vm_spec, self.Spec):
-            vm_spec = vm_spec.to_dict(name, namespace)
+            vm_spec = self.Spec.to_dict(vm_spec, name, namespace)
         path = self.PATH_fmt.format(uid=f"/{name}", ns=namespace)
         return self._update(path, vm_spec, raw=raw, as_json=as_json, **kwargs)
 
@@ -114,3 +114,12 @@ def remove_volume(self, name, disk_name, namespace=DEFAULT_NAMESPACE, *, raw=Fal
         json = dict(diskName=disk_name)
         params = dict(action="removeVolume")
         return self._create(path, params=params, json=json, raw=raw)
+
+    def create_template(
+        self, name, template_name, keep_data=False, description="", namespace=DEFAULT_NAMESPACE,
+        *, raw=False
+    ):
+        path = self.PATH_fmt.format(uid=f"/{name}", ns=namespace)
+        json = dict(description=description, name=template_name, withData=keep_data)
+        params = dict(action="createTemplate")
+        return self._create(path, params=params, json=json, raw=raw)
diff --git a/apiclient/harvester_api/models/templates.py b/apiclient/harvester_api/models/templates.py
new file mode 100644
index 000000000..917dcd5b4
--- /dev/null
+++ b/apiclient/harvester_api/models/templates.py
@@ -0,0 +1,39 @@
+from .virtualmachines import VMSpec
+
+
+class TemplateSpec(VMSpec):
+    def to_dict(self, name, namespace, hostname=""):
+        vd = super().to_dict(name, namespace, "")
+        metadata = vd.pop('metadata')
+        spec = vd.pop('spec')
+
+        # name/namespace/description can't be modified in a template
+        metadata.pop('namespace'), metadata.pop('name')
+        metadata['annotations'].pop('field.cattle.io/description')
+        # hostname is not injected at this point
+        spec['template']['spec'].pop('hostname')
+
+        return {
+            "metadata": {
+                "generateName": f"{name}-",
+                "labels": {
+                    "template.harvesterhci.io/templateID": name
+                },
+                "namespace": namespace
+            },
+            "spec": {
+                "templateId": f"{namespace}/{name}",
+                "vm": dict(metadata=metadata, spec=spec)
+            }
+        }
+
+    @classmethod
+    def from_dict(cls, data):
+        if "VirtualMachineTemplateVersion" != data.get('kind'):
+            raise ValueError("Only data from 'VirtualMachineTemplateVersion' is supported")
+
+        vd = data['spec']['vm']
+
+        vd['type'] = "kubevirt.io.virtualmachine"
+        vd['spec']['template']['spec']['hostname'] = ""
+        return super().from_dict(vd)
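`TemplateSpec` ties the two manager changes together: `vms.create_template` asks the server to generate a template from a VM, and `TemplateSpec.from_dict` turns the resulting version payload back into a spec that `vms.create` accepts. A condensed sketch of that round trip (the integration test below does the same with polling and error handling); `vm-demo` and `vm-from-template` are illustrative names:

    code, data = api_client.vms.create_template("vm-demo", "vm-demo", keep_data=True)
    assert 204 == code, (code, data)

    # The test below polls until defaultVersionId is set and all of the
    # version's conditions are True; that waiting is omitted here.
    code, data = api_client.templates.get("vm-demo")
    ns, name = data['spec']['defaultVersionId'].split('/')
    code, data = api_client.templates.get_version(name, ns)

    # from_dict rejects payloads whose kind is not 'VirtualMachineTemplateVersion'
    tmpl_spec = api_client.templates.Spec.from_dict(data)

    # TemplateSpec subclasses VMSpec, so it feeds vms.create directly
    code, data = api_client.vms.create("vm-from-template", tmpl_spec)
    assert 201 == code, (code, data)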
diff --git a/apiclient/harvester_api/models/virtualmachines.py b/apiclient/harvester_api/models/virtualmachines.py
index 9666f5f08..03dfe3258 100644
--- a/apiclient/harvester_api/models/virtualmachines.py
+++ b/apiclient/harvester_api/models/virtualmachines.py
@@ -1,5 +1,5 @@
 from copy import deepcopy
-from json import dumps
+from json import dumps, loads
 
 import yaml
 
@@ -329,6 +329,7 @@ def to_dict(self, name, namespace, hostname=""):
         if self._data:
             self._data['metadata'].update(data['metadata'])
             self._data['spec'].update(data['spec'])
+            self._data['metadata'].pop('resourceVersion')  # drop it so the dict can be reused for creation
             return deepcopy(self._data)
 
         return deepcopy(data)
@@ -342,11 +343,12 @@ def from_dict(cls, data):
         spec, metadata = data.get('spec', {}), data.get('metadata', {})
 
         vm_spec = spec['template']['spec']
+        run_strategy = spec['runStrategy']
 
         os_type = metadata.get('labels', {}).get("harvesterhci.io/os", "")
         desc = metadata['annotations'].get("field.cattle.io/description", "")
         reserved_mem = metadata['annotations'].get("harvesterhci.io/reservedMemory", "")
-        run_strategy = spec['runStrategy']
-        # ???: volume template claims not load
+        vol_claims = {v['metadata']['name']: VolumeSpec.from_dict(v) for v in loads(
+            metadata['annotations'].get("harvesterhci.io/volumeClaimTemplates", "[]"))}
 
         hostname = vm_spec['hostname']
         eviction_strategy = vm_spec['evictionStrategy']
@@ -369,8 +371,12 @@ def from_dict(cls, data):
         obj._features = features
         obj._firmwares = firmware
+        obj._cloudinit_vol = dict(disk=devices['disks'][-1], volume=volumes[-1])
         obj.networks = [dict(iface=i, network=n) for i, n in zip(devices['interfaces'], networks)]
         obj.volumes = [dict(disk=d, volume=v) for d, v in zip(devices['disks'][:-1], volumes[:-1])]
-        obj._cloudinit_vol = dict(disk=devices['disks'][-1], volume=volumes[-1])
+        for v in obj.volumes:
+            if "persistentVolumeClaim" in v['volume']:
+                v['claim'] = vol_claims[v['volume']['persistentVolumeClaim']['claimName']]
+        obj._data = data
 
         return obj
diff --git a/harvester_e2e_tests/apis/test_vm_templates.py b/harvester_e2e_tests/apis/test_vm_templates.py
index bda7f0135..97c9b33e9 100644
--- a/harvester_e2e_tests/apis/test_vm_templates.py
+++ b/harvester_e2e_tests/apis/test_vm_templates.py
@@ -75,11 +75,9 @@ def test_get(self, api_client, unique_name):
         assert unique_name == data['metadata']['name']
 
     def test_update(self, api_client, unique_name):
-        config = {
-            "cpu": 1,
-            "memory": "2Gi",
-        }
-        code, data = api_client.templates.update(unique_name, **config)
+        spec = api_client.templates.Spec(1, 2)
+
+        code, data = api_client.templates.create_version(unique_name, spec)
         assert 201 == code, (code, data)
         assert data['metadata']['name'].startswith(unique_name), (code, data)
 
diff --git a/harvester_e2e_tests/integrations/test_3_vm_functions.py b/harvester_e2e_tests/integrations/test_3_vm_functions.py
index ef6774759..24deb4559 100644
--- a/harvester_e2e_tests/integrations/test_3_vm_functions.py
+++ b/harvester_e2e_tests/integrations/test_3_vm_functions.py
@@ -1,9 +1,9 @@
 from time import sleep
 from datetime import datetime, timedelta
 from contextlib import contextmanager
-
 import json
 import re
+
 import pytest
 import yaml
 from paramiko.ssh_exception import ChannelException
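The `vol_claims` comprehension added to `VMSpec.from_dict` above resolves the old `# ???: volume template claims not load` note: `harvesterhci.io/volumeClaimTemplates` is a JSON-encoded list of PVC templates, and each parsed claim is attached to its matching volume under a new `claim` key. A standalone illustration of the parsing, with made-up annotation data and a passthrough in place of `VolumeSpec.from_dict`:

    from json import loads

    # Illustrative annotation value as stored on a VM resource
    annotation = (
        '[{"metadata": {"name": "disk-0-abc"},'
        ' "spec": {"resources": {"requests": {"storage": "10Gi"}}}}]'
    )

    # Same shape as the added comprehension; VolumeSpec.from_dict is replaced
    # by the identity to keep this snippet self-contained
    vol_claims = {v['metadata']['name']: v for v in loads(annotation)}
    assert "10Gi" == vol_claims["disk-0-abc"]['spec']['resources']['requests']['storage']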
diff --git a/harvester_e2e_tests/integrations/test_4_vm_template.py b/harvester_e2e_tests/integrations/test_4_vm_template.py
new file mode 100644
index 000000000..804807e96
--- /dev/null
+++ b/harvester_e2e_tests/integrations/test_4_vm_template.py
@@ -0,0 +1,195 @@
+from time import sleep
+from datetime import datetime, timedelta
+
+import pytest
+import yaml
+
+
+@pytest.fixture(scope="module")
+def image(api_client, image_opensuse, unique_name, wait_timeout):
+    unique_image_id = f'image-{unique_name}'
+    code, data = api_client.images.create_by_url(
+        unique_image_id, image_opensuse.url, display_name=f"{unique_name}-{image_opensuse.name}"
+    )
+
+    assert 201 == code, (code, data)
+
+    endtime = datetime.now() + timedelta(seconds=wait_timeout)
+    while endtime > datetime.now():
+        code, data = api_client.images.get(unique_image_id)
+        if 100 == data.get('status', {}).get('progress', 0):
+            break
+        sleep(3)
+    else:
+        raise AssertionError(
+            "Failed to create Image with error:\n"
+            f"Status({code}): {data}"
+        )
+
+    yield dict(id=f"{data['metadata']['namespace']}/{unique_image_id}",
+               user=image_opensuse.ssh_user)
+
+    code, data = api_client.images.delete(unique_image_id)
+
+
+@pytest.fixture(scope="class")
+def stopped_vm(api_client, ssh_keypair, wait_timeout, image, unique_name):
+    unique_name = f"stopped-{datetime.now().strftime('%m%S%f')}-{unique_name}"
+    cpu, mem = 1, 2
+    pub_key, pri_key = ssh_keypair
+    vm_spec = api_client.vms.Spec(cpu, mem)
+    vm_spec.add_image("disk-0", image['id'])
+    vm_spec.run_strategy = "Halted"
+
+    userdata = yaml.safe_load(vm_spec.user_data)
+    userdata['ssh_authorized_keys'] = [pub_key]
+    vm_spec.user_data = yaml.dump(userdata)
+
+    code, data = api_client.vms.create(unique_name, vm_spec)
+    endtime = datetime.now() + timedelta(seconds=wait_timeout)
+    while endtime > datetime.now():
+        code, data = api_client.vms.get(unique_name)
+        if "Stopped" == data.get('status', {}).get('printableStatus'):
+            break
+        sleep(1)
+
+    yield unique_name, image['user']
+
+    code, data = api_client.vms.get(unique_name)
+    vm_spec = api_client.vms.Spec.from_dict(data)
+
+    api_client.vms.delete(unique_name)
+    endtime = datetime.now() + timedelta(seconds=wait_timeout)
+    while endtime > datetime.now():
+        code, data = api_client.vms.get_status(unique_name)
+        if 404 == code:
+            break
+        sleep(3)
+
+    for vol in vm_spec.volumes:
+        vol_name = vol['volume']['persistentVolumeClaim']['claimName']
+        api_client.volumes.delete(vol_name)
+
+
+class TestVMTemplate:
+    def test_create_template_with_data(
+        self, api_client, vm_shell_from_host, vm_checker, ssh_keypair, wait_timeout, stopped_vm
+    ):
+        """ ref: https://github.com/harvester/tests/issues/1194
+        Steps:
+            1. Create a VM and write some data
+            2. Create a new template, keeping the VM's data
+            3. Create a new VM from the template
+            4. Check data consistency
+        Expected result:
+            - The VM should be created and operate normally
+            - The template should be created successfully
+            - The new VM should be created and operate normally
+            - Data in the new VM should be consistent with the old one
+        """
+
+        unique_name, ssh_user = stopped_vm
+        pub_key, pri_key = ssh_keypair
+
+        code, data = api_client.vms.start(unique_name)
+        assert 204 == code, (code, data)
+        vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_name, ["default"])
+        assert vm_got_ips, (
+            f"Failed to Start VM({unique_name}) with errors:\n"
+            f"Status: {data.get('status')}\n"
+            f"API Status({code}): {data}"
+        )
+
+        # Log in to the VM and write some data
+        vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
+                     if iface['name'] == 'default')
+        code, data = api_client.hosts.get(data['status']['nodeName'])
+        host_ip = next(addr['address'] for addr in data['status']['addresses']
+                       if addr['type'] == 'InternalIP')
+        with vm_shell_from_host(host_ip, vm_ip, ssh_user, pkey=pri_key) as sh:
+            cloud_inited, (out, err) = vm_checker.wait_cloudinit_done(sh)
+            assert cloud_inited, (
+                f"VM {unique_name} started {vm_checker.wait_timeout} seconds ago"
+                f", but cloud-init is still in {out}"
+            )
+            out, err = sh.exec_command(
+                "dd if=/dev/urandom of=./generate_file bs=1M count=512; sync"
+            )
+            assert not out, (out, err)
+            vm1_md5, err = sh.exec_command(
+                "md5sum ./generate_file > ./generate_file.md5; cat ./generate_file.md5; sync"
+            )
+            assert not err, (vm1_md5, err)
+
+        # Generate a VM template with the VM's data
+        code, data = api_client.vms.create_template(unique_name, unique_name, keep_data=True)
+        assert 204 == code, (code, data)
+
+        endtime = datetime.now() + timedelta(seconds=wait_timeout)
+        while endtime > datetime.now():
+            try:
+                code, data = api_client.templates.get(unique_name)
+                assert 200 == code, (code, data)
+                ns, name = data['spec']['defaultVersionId'].split('/')
+            except (AssertionError, ValueError):
+                # ValueError: the version is not created yet, so `defaultVersionId` is still empty
+                pass
+            else:
+                code, data = api_client.templates.get_version(name, ns)
+                conds = data.get('status', {}).get('conditions', [])
+                if conds and all('True' == c['status'] for c in conds):
+                    tmpl_spec = api_client.templates.Spec.from_dict(data)
+                    break
+            sleep(5)
+        else:
+            raise AssertionError(
+                "Failed to create template with status:\n"
+                f"{data.get('status')}\n"
+                f"API Status({code}): {data}"
+            )
+
+        # Create a new VM from the template
+        tmpl_vm_name = f"tmpl-{unique_name}"
+        code, data = api_client.vms.create(tmpl_vm_name, tmpl_spec)
+        assert 201 == code, (code, data)
+
+        vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(tmpl_vm_name, ["default"])
+        assert vm_got_ips, (
+            f"Failed to Start VM({tmpl_vm_name}) with errors:\n"
+            f"Status: {data.get('status')}\n"
+            f"API Status({code}): {data}"
+        )
+
+        # Log in to the new VM and check the data is consistent
+        vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
+                     if iface['name'] == 'default')
+        code, data = api_client.hosts.get(data['status']['nodeName'])
+        host_ip = next(addr['address'] for addr in data['status']['addresses']
+                       if addr['type'] == 'InternalIP')
+        with vm_shell_from_host(host_ip, vm_ip, ssh_user, pkey=pri_key) as sh:
+            cloud_inited, (out, err) = vm_checker.wait_cloudinit_done(sh)
+            assert cloud_inited, (
+                f"VM {tmpl_vm_name} started {vm_checker.wait_timeout} seconds ago"
+                f", but cloud-init is still in {out}"
+            )
+            out, err = sh.exec_command("md5sum -c ./generate_file.md5")
+            assert not err, (out, err)
+            vm2_md5, err = sh.exec_command("cat ./generate_file.md5")
+            assert not err, (vm2_md5, err)
+            assert vm1_md5 == vm2_md5
sh.exec_command("cat ./generate_file.md5") + assert not err, (vm2_md5, err) + assert vm1_md5 == vm2_md5 + + # teardown + api_client.vms.delete(tmpl_vm_name) + endtime = datetime.now() + timedelta(seconds=wait_timeout) + while endtime > datetime.now(): + code, data = api_client.vms.get_status(tmpl_vm_name) + if 404 == code: + break + sleep(3) + + for vol in tmpl_spec.volumes: + vol_name = vol['volume']['persistentVolumeClaim']['claimName'] + api_client.volumes.delete(vol_name) + + code, data = api_client.templates.delete(unique_name) + assert 200 == code, (code, data)