diff --git a/harvester_e2e_tests/apis/test_vms.py b/harvester_e2e_tests/apis/test_vms.py
new file mode 100644
index 000000000..d4e9b3d52
--- /dev/null
+++ b/harvester_e2e_tests/apis/test_vms.py
@@ -0,0 +1,33 @@
+import pytest
+
+pytest_plugins = [
+    "harvester_e2e_tests.fixtures.api_client"
+]
+
+
+@pytest.mark.p0
+@pytest.mark.negative
+@pytest.mark.virtualmachines
+class TestVMNegative:
+    def test_get_not_exist(self, api_client, unique_name):
+        """
+        1. Tries to get a VM that doesn't exist
+        2. Checks that the get request returns a 404
+        """
+        code, data = api_client.vms.get(unique_name)
+
+        assert 404 == code, (code, data)
+        assert "NotFound" == data.get('code'), (code, data)
+
+    def test_delete_not_exist(self, api_client, unique_name):
+        """ ref: https://github.com/harvester/tests/issues/1215
+        1. Tries to delete a VM that doesn't exist
+        2. Checks that the delete request returns a 404
+        """
+        try:
+            api_client.set_retries(1)
+            code, data = api_client.vms.delete(unique_name)
+            assert 404 == code, (code, data)
+            assert "NotFound" in data.get('code'), (code, data)
+        finally:
+            api_client.set_retries()
diff --git a/harvester_e2e_tests/integrations/test_4_vm_backup_restore.py b/harvester_e2e_tests/integrations/test_4_vm_backup_restore.py
index 9a4b5726c..1aa7b114c 100644
--- a/harvester_e2e_tests/integrations/test_4_vm_backup_restore.py
+++ b/harvester_e2e_tests/integrations/test_4_vm_backup_restore.py
@@ -527,7 +527,7 @@ def test_restore_replace_vm_not_stop(self, api_client, backup_config, base_vm_wi
         assert 422 == code, (code, data)
 
     @pytest.mark.negative
-    @pytest.mark.skip_version_before('v1.1.2', 'v1.2.1')
+    @pytest.mark.skip_version_if('< v1.1.2', '< v1.2.1')
     @pytest.mark.dependency(depends=["TestBackupRestore::tests_backup_vm"], param=True)
     def test_restore_with_invalid_name(self, api_client, backup_config, base_vm_with_data):
         # RFC1123 DNS Subdomain name rules:
@@ -562,6 +562,80 @@ def test_restore_with_invalid_name(self, api_client, backup_config, base_vm_with
         code, data = api_client.backups.restore(unique_vm_name, spec)
         assert 422 == code, (code, data)
 
+    @pytest.mark.skip_version_if('< v1.2.2')
+    @pytest.mark.dependency(depends=["TestBackupRestore::tests_backup_vm"], param=True)
+    def test_restore_replace_with_vm_shutdown_command(
+        self, api_client, vm_shell_from_host, ssh_keypair, wait_timeout, vm_checker,
+        backup_config, base_vm_with_data
+    ):
+        ''' ref: https://github.com/harvester/tests/issues/943
+        1. Create a VM and write some data
+        2. Take a backup of the VM
+        3. Mess up the existing data
+        4. Shut down the VM by executing the `shutdown` command in the guest OS
+        5. Restore the backup to replace the existing VM
+        6. VM should be restored successfully
+        7. Data in the VM should be the same as backed up
+        '''
+
+        unique_vm_name, backup_data = base_vm_with_data['name'], base_vm_with_data['data']
+        pub_key, pri_key = ssh_keypair
+
+        # mess up the existing data, then shut the VM down from inside the guest
+        with vm_shell_from_host(
+            base_vm_with_data['host_ip'], base_vm_with_data['vm_ip'],
+            base_vm_with_data['ssh_user'], pkey=pri_key
+        ) as sh:
+            out, err = sh.exec_command(f"echo {pub_key!r} > {base_vm_with_data['data']['path']}")
+            assert not err, (out, err)
+            sh.exec_command('sync')
+            sh.exec_command('sudo shutdown now')
+
+        endtime = datetime.now() + timedelta(seconds=wait_timeout)
+        while endtime > datetime.now():
+            code, data = api_client.vms.get(unique_vm_name)
+            if 200 == code and "Stopped" == data.get('status', {}).get('printableStatus'):
+                break
+            sleep(5)
+        else:
+            raise AssertionError(
+                f"Failed to shut down VM({unique_vm_name}) with errors:\n"
+                f"Status({code}): {data}"
+            )
+
+        # restore the backup to replace the existing (stopped) VM
+        spec = api_client.backups.RestoreSpec.for_existing(delete_volumes=True)
+        code, data = api_client.backups.restore(unique_vm_name, spec)
+        assert 201 == code, f'Failed to restore backup with current VM replaced, {data}'
+
+        # Check the VM started, then get its IPs (VM and host)
+        vm_got_ips, (code, data) = vm_checker.wait_interfaces(unique_vm_name)
+        assert vm_got_ips, (
+            f"Failed to Start VM({unique_vm_name}) with errors:\n"
+            f"Status: {data.get('status')}\n"
+            f"API Status({code}): {data}"
+        )
+        vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
+                     if iface['name'] == 'default')
+        code, data = api_client.hosts.get(data['status']['nodeName'])
+        host_ip = next(addr['address'] for addr in data['status']['addresses']
+                       if addr['type'] == 'InternalIP')
+
+        # Log in to the restored VM and check the data still exists
+        with vm_shell_from_host(host_ip, vm_ip, base_vm_with_data['ssh_user'], pkey=pri_key) as sh:
+            cloud_inited, (out, err) = vm_checker.wait_cloudinit_done(sh)
+            assert cloud_inited, (
+                f"VM {unique_vm_name} started for {wait_timeout} seconds"
+                f", but cloud-init is still in state {out}"
+            )
+            out, err = sh.exec_command(f"cat {backup_data['path']}")
+
+        assert backup_data['content'] in out, (
+            f"cloud-init write_files failed\n"
+            f"Executed stdout: {out}\n"
+            f"Executed stderr: {err}"
+        )
+
     @pytest.mark.skip("https://github.com/harvester/harvester/issues/1473")
     @pytest.mark.p0