diff --git a/harvester_e2e_tests/fixtures/virtualmachines.py b/harvester_e2e_tests/fixtures/virtualmachines.py
index d97a618f0..306587ff9 100644
--- a/harvester_e2e_tests/fixtures/virtualmachines.py
+++ b/harvester_e2e_tests/fixtures/virtualmachines.py
@@ -280,6 +280,19 @@ def cb(ctx):
             )
         return self.wait_agent_connected(vm_name, endtime, cb, **kws)

+    def wait_ip_addresses(self, vm_name, ifnames, endtime=None, callback=default_cb, **kws):
+        def cb(ctx):
+            if ctx.callee == 'vm.start':
+                return callback(ctx)
+            ifaces = {d['name']: d for d in ctx.data.get('status', {}).get('interfaces', [])}
+            return (
+                all(ifaces.get(name, {}).get('ipAddress') for name in ifnames)
+                and callback(ctx)
+            )
+
+        ifnames = list(ifnames)
+        return self.wait_interfaces(vm_name, endtime, cb, **kws)
+
     def wait_cloudinit_done(self, shell, endtime=None, callback=default_cb, **kws):
         cmd = 'cloud-init status'
         endtime = endtime or self._endtime()
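The new helper centralizes the start-and-poll loop that each test below previously hand-rolled: it delegates to wait_interfaces and succeeds only once every interface named in ifnames reports an ipAddress. A minimal usage sketch, mirroring the call sites in the tests (the VM name is a placeholder; the (bool, (code, data)) return shape is taken from how the tests unpack it):

    # Wait until the 'default' interface of the VM has an IP address.
    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses('demo-vm', ['default'])
    assert vm_got_ips, f"VM did not get an IP, API Status({code}): {data}"
    # Every name passed in ifnames now carries an ipAddress, so next() is safe.
    vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                 if iface['name'] == 'default')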
diff --git a/harvester_e2e_tests/integrations/test_3_vm_functions.py b/harvester_e2e_tests/integrations/test_3_vm_functions.py
index 180c08698..0423cbdf4 100644
--- a/harvester_e2e_tests/integrations/test_3_vm_functions.py
+++ b/harvester_e2e_tests/integrations/test_3_vm_functions.py
@@ -929,8 +929,9 @@ def test_update_enable_user_data(self, api_client, unique_vm_name, vm_checker, i
 @pytest.mark.p0
 @pytest.mark.virtualmachines
 class TestVMClone:
-    def test_clone_running_vm(self, api_client, ssh_keypair, wait_timeout,
-                              host_shell, vm_shell, stopped_vm):
+    def test_clone_running_vm(
+        self, api_client, ssh_keypair, vm_checker, wait_timeout, host_shell, vm_shell, stopped_vm
+    ):
         """
         To cover test:
         - (legacy) https://harvester.github.io/tests/manual/virtual-machines/clone-vm-that-is-turned-on/ # noqa
@@ -951,23 +952,13 @@ def test_clone_running_vm(self, api_client, ssh_keypair, wait_timeout,
         pub_key, pri_key = ssh_keypair

         code, data = api_client.vms.start(unique_vm_name)
-        endtime = datetime.now() + timedelta(seconds=wait_timeout)
-        while endtime > datetime.now():
-            code, data = api_client.vms.get_status(unique_vm_name)
-            if 200 == code:
-                phase = data.get('status', {}).get('phase')
-                conds = data.get('status', {}).get('conditions', [{}])
-                if ("Running" == phase
-                        and "AgentConnected" == conds[-1].get('type')
-                        and data['status'].get('interfaces')):
-                    break
-            sleep(3)
-        else:
-            raise AssertionError(
-                f"Failed to Start VM({unique_vm_name}) with errors:\n"
-                f"Status: {data.get('status')}\n"
-                f"API Status({code}): {data}"
-            )
+        vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
+        assert vm_got_ips, (
+            f"Failed to Start VM({unique_vm_name}) with errors:\n"
+            f"Status: {data.get('status')}\n"
+            f"API Status({code}): {data}"
+        )
+
         vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                      if iface['name'] == 'default')
         code, data = api_client.hosts.get(data['status']['nodeName'])
@@ -1010,24 +1001,24 @@ def test_clone_running_vm(self, api_client, ssh_keypair, wait_timeout,
         code, _ = api_client.vms.clone(unique_vm_name, cloned_name)
         assert 204 == code, f"Failed to clone VM {unique_vm_name} into new VM {cloned_name}"

-        # Check VM started
+        # Check cloned VM is created
         endtime = datetime.now() + timedelta(seconds=wait_timeout)
         while endtime > datetime.now():
-            code, data = api_client.vms.get_status(cloned_name)
+            code, data = api_client.vms.get(cloned_name)
             if 200 == code:
-                phase = data.get('status', {}).get('phase')
-                conds = data.get('status', {}).get('conditions', [{}])
-                if ("Running" == phase
-                        and "AgentConnected" == conds[-1].get('type')
-                        and data['status'].get('interfaces')):
-                    break
+                break
             sleep(3)
         else:
             raise AssertionError(
-                f"Failed to Start VM({cloned_name}) with errors:\n"
-                f"Status: {data.get('status')}\n"
-                f"API Status({code}): {data}"
+                f"cloned VM {cloned_name} is not created"
             )
+        vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(cloned_name, ['default'])
+        assert vm_got_ips, (
+            f"Failed to Start VM({cloned_name}) with errors:\n"
+            f"Status: {data.get('status')}\n"
+            f"API Status({code}): {data}"
+        )
+
         vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                      if iface['name'] == 'default')
         code, data = api_client.hosts.get(data['status']['nodeName'])
@@ -1088,8 +1079,9 @@ def test_clone_running_vm(self, api_client, ssh_keypair, wait_timeout,
             vol_name = vol['volume']['persistentVolumeClaim']['claimName']
             api_client.volumes.delete(vol_name)

-    def test_clone_stopped_vm(self, api_client, ssh_keypair, wait_timeout,
-                              host_shell, vm_shell, stopped_vm):
+    def test_clone_stopped_vm(
+        self, api_client, ssh_keypair, vm_checker, wait_timeout, host_shell, vm_shell, stopped_vm
+    ):
         """
         To cover test:
         - (legacy) https://harvester.github.io/tests/manual/virtual-machines/clone-vm-that-is-turned-off/ # noqa
@@ -1110,24 +1102,13 @@ def test_clone_stopped_vm(self, api_client, ssh_keypair, wait_timeout,
         unique_vm_name, ssh_user = stopped_vm
         pub_key, pri_key = ssh_keypair
         code, data = api_client.vms.start(unique_vm_name)
+        vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
+        assert vm_got_ips, (
+            f"Failed to Start VM({unique_vm_name}) with errors:\n"
+            f"Status: {data.get('status')}\n"
+            f"API Status({code}): {data}"
+        )

-        endtime = datetime.now() + timedelta(seconds=wait_timeout)
-        while endtime > datetime.now():
-            code, data = api_client.vms.get_status(unique_vm_name)
-            if 200 == code:
-                phase = data.get('status', {}).get('phase')
-                conds = data.get('status', {}).get('conditions', [{}])
-                if ("Running" == phase
-                        and "AgentConnected" == conds[-1].get('type')
-                        and data['status'].get('interfaces')):
-                    break
-            sleep(3)
-        else:
-            raise AssertionError(
-                f"Failed to Start VM({unique_vm_name}) with errors:\n"
-                f"Status: {data.get('status')}\n"
-                f"API Status({code}): {data}"
-            )
         vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                      if iface['name'] == 'default')
         code, data = api_client.hosts.get(data['status']['nodeName'])
@@ -1202,23 +1183,12 @@ def test_clone_stopped_vm(self, api_client, ssh_keypair, wait_timeout,

         # Check cloned VM started
         api_client.vms.start(cloned_name)
-        endtime = datetime.now() + timedelta(seconds=wait_timeout)
-        while endtime > datetime.now():
-            code, data = api_client.vms.get_status(cloned_name)
-            if 200 == code:
-                phase = data.get('status', {}).get('phase')
-                conds = data.get('status', {}).get('conditions', [{}])
-                if ("Running" == phase
-                        and "AgentConnected" == conds[-1].get('type')
-                        and data['status'].get('interfaces')):
-                    break
-            sleep(3)
-        else:
-            raise AssertionError(
-                f"Failed to Start VM({cloned_name}) with errors:\n"
-                f"Status: {data.get('status')}\n"
-                f"API Status({code}): {data}"
-            )
+        vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(cloned_name, ['default'])
+        assert vm_got_ips, (
+            f"Failed to Start VM({cloned_name}) with errors:\n"
+            f"Status: {data.get('status')}\n"
+            f"API Status({code}): {data}"
+        )
         vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                      if iface['name'] == 'default')
         code, data = api_client.hosts.get(data['status']['nodeName'])
@@ -1283,8 +1253,9 @@ def test_clone_stopped_vm(self, api_client, ssh_keypair, wait_timeout,
 @pytest.mark.p0
 @pytest.mark.virtualmachines
 class TestVMWithVolumes:
-    def test_create_with_two_volumes(self, api_client, ssh_keypair, wait_timeout,
-                                     host_shell, vm_shell, stopped_vm):
+    def test_create_with_two_volumes(
+        self, api_client, ssh_keypair, vm_checker, wait_timeout, host_shell, vm_shell, stopped_vm
+    ):
         """
         To cover test:
         - https://harvester.github.io/tests/manual/virtual-machines/create-vm-with-two-disk-volumes/ # noqa
@@ -1311,23 +1282,12 @@ def test_create_with_two_volumes(self, api_client, ssh_keypair, wait_timeout,
         # Start VM with 2 additional volumes
         code, data = api_client.vms.update(unique_vm_name, vm_spec)
         assert 200 == code, (code, data)
-        endtime = datetime.now() + timedelta(seconds=wait_timeout)
-        while endtime > datetime.now():
-            code, data = api_client.vms.get_status(unique_vm_name)
-            if 200 == code:
-                phase = data.get('status', {}).get('phase')
-                conds = data.get('status', {}).get('conditions', [{}])
-                if ("Running" == phase
-                        and "AgentConnected" == conds[-1].get('type')
-                        and data['status'].get('interfaces')):
-                    break
-            sleep(3)
-        else:
-            raise AssertionError(
-                f"Failed to Start VM({unique_vm_name}) with errors:\n"
-                f"Status: {data.get('status')}\n"
-                f"API Status({code}): {data}"
-            )
+        vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
+        assert vm_got_ips, (
+            f"Failed to Start VM({unique_vm_name}) with errors:\n"
+            f"Status: {data.get('status')}\n"
+            f"API Status({code}): {data}"
+        )

         # Log into VM to verify added volumes
         vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
@@ -1405,8 +1365,9 @@ def test_create_with_two_volumes(self, api_client, ssh_keypair, wait_timeout,
         for vol_name in claims:
             api_client.volumes.delete(vol_name)

-    def test_create_with_existing_volume(self, api_client, ssh_keypair, wait_timeout,
-                                         host_shell, vm_shell, stopped_vm):
+    def test_create_with_existing_volume(
+        self, api_client, ssh_keypair, vm_checker, wait_timeout, host_shell, vm_shell, stopped_vm
+    ):
         """
         To cover test:
         - https://harvester.github.io/tests/manual/virtual-machines/create-vm-with-existing-volume/ # noqa
@@ -1438,23 +1399,12 @@ def test_create_with_existing_volume(self, api_client, ssh_keypair, wait_timeout

         # Start VM with added existing volume
         code, data = api_client.vms.update(unique_vm_name, vm_spec)
-        endtime = datetime.now() + timedelta(seconds=wait_timeout)
-        while endtime > datetime.now():
-            code, data = api_client.vms.get_status(unique_vm_name)
-            if 200 == code:
-                phase = data.get('status', {}).get('phase')
-                conds = data.get('status', {}).get('conditions', [{}])
-                if ("Running" == phase
-                        and "AgentConnected" == conds[-1].get('type')
-                        and data['status'].get('interfaces')):
-                    break
-            sleep(3)
-        else:
-            raise AssertionError(
-                f"Failed to Start VM({unique_vm_name}) with errors:\n"
-                f"Status: {data.get('status')}\n"
-                f"API Status({code}): {data}"
-            )
+        vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
+        assert vm_got_ips, (
+            f"Failed to Start VM({unique_vm_name}) with errors:\n"
+            f"Status: {data.get('status')}\n"
+            f"API Status({code}): {data}"
+        )

         # Log into VM to verify added volumes
         vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
@@ -1964,30 +1914,20 @@ def login_to_vm_from_host(
     @pytest.mark.dependency(name="hot_plug_volume")
     def test_add(
-        self, api_client, ssh_keypair, wait_timeout, host_shell, vm_shell, small_volume, stopped_vm
+        self, api_client, ssh_keypair, wait_timeout, vm_checker,
+        host_shell, vm_shell, small_volume, stopped_vm
     ):
         unique_vm_name, ssh_user = stopped_vm
         pub_key, pri_key = ssh_keypair

         # Start VM
         code, data = api_client.vms.start(unique_vm_name)
-        endtime = datetime.now() + timedelta(seconds=wait_timeout)
-        while endtime > datetime.now():
-            code, data = api_client.vms.get_status(unique_vm_name)
-            if 200 == code:
-                phase = data.get('status', {}).get('phase')
-                conds = data.get('status', {}).get('conditions', [{}])
-                if ("Running" == phase
-                        and "AgentConnected" == conds[-1].get('type')
-                        and data['status'].get('interfaces')):
-                    break
-            sleep(3)
-        else:
-            raise AssertionError(
-                f"Failed to Start VM({unique_vm_name}) with errors:\n"
-                f"Status: {data.get('status')}\n"
-                f"API Status({code}): {data}"
-            )
+        vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
+        assert vm_got_ips, (
+            f"Failed to Start VM({unique_vm_name}) with errors:\n"
+            f"Status: {data.get('status')}\n"
+            f"API Status({code}): {data}"
+        )

         # Log into VM to verify OS is ready
         vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                      if iface['name'] == 'default')
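Because wait_ip_addresses chains the caller-supplied callback (defaulting to default_cb) after its own interface check, a test can layer extra readiness conditions without reintroducing the polling loop. A hedged sketch, assuming the ctx object exposes the same callee/data attributes the fixture's inner cb reads, and that a truthy callback return means the wait is satisfied:

    def agent_connected(ctx):
        # Hypothetical extra condition: also require KubeVirt's
        # AgentConnected condition before declaring the VM ready.
        conds = ctx.data.get('status', {}).get('conditions', [{}])
        return "AgentConnected" == conds[-1].get('type')

    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(
        unique_vm_name, ['default'], callback=agent_connected
    )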
diff --git a/harvester_e2e_tests/integrations/test_4_vm_backup_restore.py b/harvester_e2e_tests/integrations/test_4_vm_backup_restore.py
index 1aa7b114c..9d56540fe 100644
--- a/harvester_e2e_tests/integrations/test_4_vm_backup_restore.py
+++ b/harvester_e2e_tests/integrations/test_4_vm_backup_restore.py
@@ -196,7 +196,7 @@ def base_vm(api_client, ssh_keypair, unique_name, vm_checker, image, backup_conf
     code, data = api_client.vms.create(unique_vm_name, vm_spec)

     # Check VM started and get IPs (vm and host)
-    vm_got_ips, (code, data) = vm_checker.wait_interfaces(unique_vm_name)
+    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
     assert vm_got_ips, (
         f"Failed to Start VM({unique_vm_name}) with errors:\n"
         f"Status: {data.get('status')}\n"
@@ -376,7 +376,7 @@ def test_update_backup_by_yaml(

     @pytest.mark.dependency(depends=["TestBackupRestore::tests_backup_vm"], param=True)
     def test_restore_with_new_vm(
-        self, api_client, vm_shell_from_host, ssh_keypair, wait_timeout,
+        self, api_client, vm_shell_from_host, vm_checker, ssh_keypair, wait_timeout,
         backup_config, base_vm_with_data
     ):
         unique_vm_name, backup_data = base_vm_with_data['name'], base_vm_with_data['data']
@@ -398,23 +398,12 @@ def test_restore_with_new_vm(
         assert 201 == code, (code, data)

         # Check VM Started then get IPs (vm and host)
-        endtime = datetime.now() + timedelta(seconds=wait_timeout)
-        while endtime > datetime.now():
-            code, data = api_client.vms.get_status(restored_vm_name)
-            if 200 == code:
-                phase = data.get('status', {}).get('phase')
-                conds = data.get('status', {}).get('conditions', [{}])
-                if ("Running" == phase
-                        and "AgentConnected" == conds[-1].get('type')
-                        and data['status'].get('interfaces')):
-                    break
-            sleep(3)
-        else:
-            raise AssertionError(
-                f"Failed to Start VM({restored_vm_name}) with errors:\n"
-                f"Status: {data.get('status')}\n"
-                f"API Status({code}): {data}"
-            )
+        vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(restored_vm_name, ['default'])
+        assert vm_got_ips, (
+            f"Failed to Start VM({restored_vm_name}) with errors:\n"
+            f"Status: {data.get('status')}\n"
+            f"API Status({code}): {data}"
+        )
         vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                      if iface['name'] == 'default')
         code, data = api_client.hosts.get(data['status']['nodeName'])
@@ -752,7 +741,7 @@ def test_restore_replace_migrated_vm(
 class TestMultipleBackupRestore:
     @pytest.mark.dependency()
     def test_backup_multiple(
-        self, api_client, wait_timeout, host_shell, vm_shell, ssh_keypair,
+        self, api_client, wait_timeout, host_shell, vm_shell, vm_checker, ssh_keypair,
         backup_config, config_backup_target, base_vm_with_data
     ):
         def write_data(content):
@@ -805,23 +794,12 @@ def create_backup(vm_name, backup_name):
         unique_vm_name = base_vm_with_data['name']

         # Check VM started and get IPs (vm and host)
-        endtime = datetime.now() + timedelta(seconds=wait_timeout)
-        while endtime > datetime.now():
-            code, data = api_client.vms.get_status(unique_vm_name)
-            if 200 == code:
-                phase = data.get('status', {}).get('phase')
-                conds = data.get('status', {}).get('conditions', [{}])
-                if ("Running" == phase
-                        and "AgentConnected" == conds[-1].get('type')
-                        and data['status'].get('interfaces')):
-                    break
-            sleep(3)
-        else:
-            raise AssertionError(
-                f"Failed to Start VM({unique_vm_name}) with errors:\n"
-                f"Status: {data.get('status')}\n"
-                f"API Status({code}): {data}"
-            )
+        vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
+        assert vm_got_ips, (
+            f"Failed to Start VM({unique_vm_name}) with errors:\n"
+            f"Status: {data.get('status')}\n"
+            f"API Status({code}): {data}"
+        )
         vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                      if iface['name'] == 'default')
         code, data = api_client.hosts.get(data['status']['nodeName'])
@@ -841,7 +819,7 @@ def create_backup(vm_name, backup_name):
         depends=["TestMultipleBackupRestore::test_backup_multiple"], param=True
     )
     def test_delete_first_backup(
-        self, api_client, host_shell, vm_shell, ssh_keypair, wait_timeout,
+        self, api_client, host_shell, vm_shell, vm_checker, ssh_keypair, wait_timeout,
         backup_config, config_backup_target, base_vm_with_data
     ):
         unique_vm_name, backup_data = base_vm_with_data['name'], base_vm_with_data['data']
@@ -885,23 +863,12 @@ def test_delete_first_backup(
         assert 201 == code, f'Failed to restore backup with current VM replaced, {data}'

         # Check VM Started then get IPs (vm and host)
-        endtime = datetime.now() + timedelta(seconds=wait_timeout)
-        while endtime > datetime.now():
-            code, data = api_client.vms.get_status(unique_vm_name)
-            if 200 == code:
-                phase = data.get('status', {}).get('phase')
-                conds = data.get('status', {}).get('conditions', [{}])
-                if ("Running" == phase
-                        and "AgentConnected" == conds[-1].get('type')
-                        and data['status'].get('interfaces')):
-                    break
-            sleep(3)
-        else:
-            raise AssertionError(
-                f"Failed to Start VM({unique_vm_name}) with errors:\n"
-                f"Status: {data.get('status')}\n"
-                f"API Status({code}): {data}"
-            )
+        vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
+        assert vm_got_ips, (
+            f"Failed to Start VM({unique_vm_name}) with errors:\n"
+            f"Status: {data.get('status')}\n"
+            f"API Status({code}): {data}"
+        )
         vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                      if iface['name'] == 'default')
         code, data = api_client.hosts.get(data['status']['nodeName'])
@@ -947,7 +914,7 @@ def test_delete_first_backup(
         depends=["TestMultipleBackupRestore::test_backup_multiple"], param=True
     )
     def test_delete_last_backup(
-        self, api_client, host_shell, vm_shell, ssh_keypair, wait_timeout,
+        self, api_client, host_shell, vm_shell, vm_checker, ssh_keypair, wait_timeout,
         backup_config, config_backup_target, base_vm_with_data
     ):
         unique_vm_name, backup_data = base_vm_with_data['name'], base_vm_with_data['data']
@@ -990,23 +957,12 @@ def test_delete_last_backup(
         assert 201 == code, f'Failed to restore backup with current VM replaced, {data}'

         # Check VM Started then get IPs (vm and host)
-        endtime = datetime.now() + timedelta(seconds=wait_timeout)
-        while endtime > datetime.now():
-            code, data = api_client.vms.get_status(unique_vm_name)
-            if 200 == code:
-                phase = data.get('status', {}).get('phase')
-                conds = data.get('status', {}).get('conditions', [{}])
-                if ("Running" == phase
-                        and "AgentConnected" == conds[-1].get('type')
-                        and data['status'].get('interfaces')):
-                    break
-            sleep(3)
-        else:
-            raise AssertionError(
-                f"Failed to Start VM({unique_vm_name}) with errors:\n"
-                f"Status: {data.get('status')}\n"
-                f"API Status({code}): {data}"
-            )
+        vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
+        assert vm_got_ips, (
+            f"Failed to Start VM({unique_vm_name}) with errors:\n"
+            f"Status: {data.get('status')}\n"
+            f"API Status({code}): {data}"
+        )
         vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                      if iface['name'] == 'default')
         code, data = api_client.hosts.get(data['status']['nodeName'])
@@ -1052,7 +1008,7 @@ def test_delete_last_backup(
         depends=["TestMultipleBackupRestore::test_backup_multiple"], param=True
     )
     def test_delete_middle_backup(
-        self, api_client, host_shell, vm_shell, ssh_keypair, wait_timeout,
+        self, api_client, host_shell, vm_shell, vm_checker, ssh_keypair, wait_timeout,
         backup_config, config_backup_target, base_vm_with_data
     ):
         unique_vm_name, backup_data = base_vm_with_data['name'], base_vm_with_data['data']
@@ -1095,23 +1051,12 @@ def test_delete_middle_backup(
         assert 201 == code, f'Failed to restore backup with current VM replaced, {data}'

         # Check VM Started then get IPs (vm and host)
-        endtime = datetime.now() + timedelta(seconds=wait_timeout)
-        while endtime > datetime.now():
-            code, data = api_client.vms.get_status(unique_vm_name)
-            if 200 == code:
-                phase = data.get('status', {}).get('phase')
-                conds = data.get('status', {}).get('conditions', [{}])
-                if ("Running" == phase
-                        and "AgentConnected" == conds[-1].get('type')
-                        and data['status'].get('interfaces')):
-                    break
-            sleep(3)
-        else:
-            raise AssertionError(
-                f"Failed to Start VM({unique_vm_name}) with errors:\n"
-                f"Status: {data.get('status')}\n"
-                f"API Status({code}): {data}"
-            )
+        vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
+        assert vm_got_ips, (
+            f"Failed to Start VM({unique_vm_name}) with errors:\n"
+            f"Status: {data.get('status')}\n"
+            f"API Status({code}): {data}"
+        )
         vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
                      if iface['name'] == 'default')
         code, data = api_client.hosts.get(data['status']['nodeName'])
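The deleted loops each computed their own deadline from wait_timeout; the helper keeps that behaviour reachable through its endtime parameter and otherwise falls back to the checker's internal deadline (wait_cloudinit_done above shows the endtime or self._endtime() pattern). A sketch, assuming endtime accepts the same datetime value the old loops computed:

    from datetime import datetime, timedelta

    # Hypothetical tighter bound for a single wait; without endtime the
    # checker falls back to its own default deadline.
    deadline = datetime.now() + timedelta(seconds=wait_timeout)
    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(
        unique_vm_name, ['default'], endtime=deadline
    )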
diff --git a/harvester_e2e_tests/integrations/test_5_vm_networks.py b/harvester_e2e_tests/integrations/test_5_vm_networks.py
index 44e7da793..e63c58a72 100644
--- a/harvester_e2e_tests/integrations/test_5_vm_networks.py
+++ b/harvester_e2e_tests/integrations/test_5_vm_networks.py
@@ -222,8 +222,8 @@ def test_add_vlan(
     ):
         # clean cloud-init for rerun, and get the correct ifname
         (unique_vm_name, ssh_user), (_, pri_key) = minimal_vm, ssh_keypair
-        vm_started, (code, data) = vm_checker.wait_interfaces(unique_vm_name)
-        assert vm_started, (
+        vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
+        assert vm_got_ips, (
             f"Failed to Start VM({unique_vm_name}) with errors:\n"
             f"Status: {data.get('status')}\n"
             f"API Status({code}): {data}"
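Since ifnames is a list, the same helper extends to the multi-NIC scenarios exercised in test_5_vm_networks.py: the wait succeeds only once every named interface holds an address. A sketch with a hypothetical second interface name ('nic-1' is illustrative, not taken from the diff):

    # Block until both the default NIC and the added VLAN NIC have IPs.
    vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(
        unique_vm_name, ['default', 'nic-1']
    )
    assert vm_got_ips, f"interfaces still missing IPs: {data.get('status')}"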