[e2e] Add new test cases for VM #1238

Merged
merged 3 commits on May 9, 2024
33 changes: 33 additions & 0 deletions harvester_e2e_tests/apis/test_vms.py
@@ -0,0 +1,33 @@
import pytest

pytest_plugins = [
"harvester_e2e_tests.fixtures.api_client"
]


@pytest.mark.p0
@pytest.mark.negative
@pytest.mark.virtualmachines
class TestVMNegative:
    def test_get_not_exist(self, api_client, unique_name):
        """
        1. Try to get a VM that doesn't exist
        2. Check that the request returns 404
        """
        code, data = api_client.vms.get(unique_name)

        assert 404 == code, (code, data)
        assert "NotFound" == data.get('code'), (code, data)

    def test_delete_not_exist(self, api_client, unique_name):
        """ ref: https://github.com/harvester/tests/issues/1215
        1. Try to delete a VM that doesn't exist
        2. Check that the request returns 404
        """
        try:
            api_client.set_retries(1)
            code, data = api_client.vms.delete(unique_name)
            assert 404 == code, (code, data)
            assert "NotFound" in data.get('code'), (code, data)
        finally:
            api_client.set_retries()
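
The two negative tests above rely on the `api_client` and `unique_name` fixtures pulled in via `harvester_e2e_tests.fixtures.api_client`. As a rough sketch of the assumption they make (hypothetical; the fixture that actually ships with the repository may differ), `unique_name` only needs to yield a name that no VM should have:

import uuid

import pytest


@pytest.fixture
def unique_name():
    # Hypothetical sketch: a random, RFC1123-friendly name that should not
    # collide with any existing VM, so GET/DELETE on it must return 404.
    return f"test-{uuid.uuid4().hex[:12]}"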
76 changes: 75 additions & 1 deletion harvester_e2e_tests/integrations/test_4_vm_backup_restore.py
@@ -527,7 +527,7 @@ def test_restore_replace_vm_not_stop(self, api_client, backup_config, base_vm_wi
        assert 422 == code, (code, data)

    @pytest.mark.negative
    @pytest.mark.skip_version_before('v1.1.2', 'v1.2.1')
    @pytest.mark.skip_version_if('< v1.1.2', '< v1.2.1')
    @pytest.mark.dependency(depends=["TestBackupRestore::tests_backup_vm"], param=True)
    def test_restore_with_invalid_name(self, api_client, backup_config, base_vm_with_data):
        # RFC1123 DNS Subdomain name rules:
@@ -562,6 +562,80 @@ def test_restore_with_invalid_name(self, api_client, backup_config, base_vm_with
        code, data = api_client.backups.restore(unique_vm_name, spec)
        assert 422 == code, (code, data)

    @pytest.mark.skip_version_if('< v1.2.2')
    @pytest.mark.dependency(depends=["TestBackupRestore::tests_backup_vm"], param=True)
    def test_restore_replace_with_vm_shutdown_command(
        self, api_client, vm_shell_from_host, ssh_keypair, wait_timeout, vm_checker,
        backup_config, base_vm_with_data
    ):
        ''' ref: https://github.com/harvester/tests/issues/943
        1. Create VM and write some data
        2. Take backup for the VM
        3. Mess up the existing data
        4. Shut down the VM by executing the `shutdown` command in the guest OS
        5. Restore backup to replace the existing VM
        6. VM should be restored successfully
        7. Data in VM should be the same as backed up
        '''

        unique_vm_name, backup_data = base_vm_with_data['name'], base_vm_with_data['data']
        pub_key, pri_key = ssh_keypair

        # mess up the existing data, then shut the VM down
        with vm_shell_from_host(
            base_vm_with_data['host_ip'], base_vm_with_data['vm_ip'],
            base_vm_with_data['ssh_user'], pkey=pri_key
        ) as sh:
            out, err = sh.exec_command(f"echo {pub_key!r} > {base_vm_with_data['data']['path']}")
            assert not err, (out, err)
            sh.exec_command('sync')
            sh.exec_command('sudo shutdown now')

        endtime = datetime.now() + timedelta(seconds=wait_timeout)
        while endtime > datetime.now():
            code, data = api_client.vms.get(unique_vm_name)
            if 200 == code and "Stopped" == data.get('status', {}).get('printableStatus'):
                break
            sleep(5)
        else:
            raise AssertionError(
                f"Failed to shut down VM({unique_vm_name}) with errors:\n"
                f"Status({code}): {data}"
            )

        # restore the backup onto the existing VM
        spec = api_client.backups.RestoreSpec.for_existing(delete_volumes=True)
        code, data = api_client.backups.restore(unique_vm_name, spec)
        assert 201 == code, f'Failed to restore backup with current VM replaced, {data}'

        # Check the VM started, then get its IPs (VM and host)
        vm_got_ips, (code, data) = vm_checker.wait_interfaces(unique_vm_name)
        assert vm_got_ips, (
            f"Failed to Start VM({unique_vm_name}) with errors:\n"
            f"Status: {data.get('status')}\n"
            f"API Status({code}): {data}"
        )
        vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                     if iface['name'] == 'default')
        code, data = api_client.hosts.get(data['status']['nodeName'])
        host_ip = next(addr['address'] for addr in data['status']['addresses']
                       if addr['type'] == 'InternalIP')

        # Log in to the restored VM and check the data still exists
        with vm_shell_from_host(host_ip, vm_ip, base_vm_with_data['ssh_user'], pkey=pri_key) as sh:
            cloud_inited, (out, err) = vm_checker.wait_cloudinit_done(sh)
            assert cloud_inited, (
                f"VM {unique_vm_name} started, but cloud-init is still in {out}"
                f" after {wait_timeout} seconds"
            )
            out, err = sh.exec_command(f"cat {backup_data['path']}")

        assert backup_data['content'] in out, (
            f"cloud-init write_files failed\n"
            f"Executed stdout: {out}\n"
            f"Executed stderr: {err}"
        )


@pytest.mark.skip("https://github.com/harvester/harvester/issues/1473")
@pytest.mark.p0
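
For context on the `skip_version_if` markers used above (e.g. `skip_version_if('< v1.2.2')`), which take comparison expressions instead of the bare versions that `skip_version_before` took: a minimal, purely illustrative conftest.py hook for the single-expression form might look like the sketch below. The `--harvester-version` option name is an assumption for this example, and the real suite's handling of multiple expressions (likely scoped per release line) is not reproduced here.

# Illustrative sketch only; not the repository's actual skip_version_if hook.
import operator

import pytest
from packaging.version import Version

_OPS = {"<": operator.lt, "<=": operator.le, ">": operator.gt,
        ">=": operator.ge, "==": operator.eq, "!=": operator.ne}


def pytest_collection_modifyitems(config, items):
    raw = config.getoption("--harvester-version", default=None)  # assumed option name
    if not raw:
        return
    current = Version(raw.lstrip("v"))
    for item in items:
        marker = item.get_closest_marker("skip_version_if")
        if marker is None:
            continue
        for expr in marker.args:               # e.g. "< v1.2.2"
            op, _, ver = expr.partition(" ")
            if _OPS[op](current, Version(ver.lstrip("v"))):
                item.add_marker(pytest.mark.skip(
                    reason=f"cluster version {current} matches '{expr}'"
                ))
                break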