Skip to content

Commit

Permalink
* [e2e] fix bug when getting VM IP
Browse files Browse the repository at this point in the history
  • Loading branch information
lanfon72 committed May 16, 2024
1 parent a69a9a9 commit 31e6cd2
Show file tree
Hide file tree
Showing 4 changed files with 113 additions and 215 deletions.
13 changes: 13 additions & 0 deletions harvester_e2e_tests/fixtures/virtualmachines.py
Original file line number Diff line number Diff line change
Expand Up @@ -280,6 +280,19 @@ def cb(ctx):
)
return self.wait_agent_connected(vm_name, endtime, cb, **kws)

def wait_ip_addresses(self, vm_name, ifnames, endtime=None, callback=default_cb, **kws):
    """Wait until every named interface of the VM reports an IP address.

    :param vm_name: name of the virtual machine to poll.
    :param ifnames: iterable of interface names (e.g. ``['default']``) that
        must each expose a non-empty ``ipAddress`` in ``status.interfaces``.
    :param endtime: absolute deadline; forwarded to ``wait_interfaces``.
    :param callback: extra predicate chained after the IP check; also invoked
        for the intermediate ``vm.start`` step.
    :returns: whatever ``self.wait_interfaces`` returns, i.e.
        ``(success_flag, (status_code, data))``.
    """
    def cb(ctx):
        # During the start phase there is no interface data yet; defer
        # entirely to the caller-supplied callback.
        if ctx.callee == 'vm.start':
            return callback(ctx)
        # NOTE: status.interfaces is a *list* of dicts, so the fallback
        # default must be a list, not a dict.
        ifaces = {d['name']: d for d in ctx.data.get('status', {}).get('interfaces', [])}
        return (
            all(ifaces.get(name, {}).get('ipAddress') for name in ifnames)
            and callback(ctx)
        )

    # Materialize before polling: `cb` closes over `ifnames` and may consume
    # it many times, which would exhaust a one-shot generator.
    ifnames = list(ifnames)
    return self.wait_interfaces(vm_name, endtime, cb, **kws)

def wait_cloudinit_done(self, shell, endtime=None, callback=default_cb, **kws):
cmd = 'cloud-init status'
endtime = endtime or self._endtime()
Expand Down
184 changes: 62 additions & 122 deletions harvester_e2e_tests/integrations/test_3_vm_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -929,8 +929,9 @@ def test_update_enable_user_data(self, api_client, unique_vm_name, vm_checker, i
@pytest.mark.p0
@pytest.mark.virtualmachines
class TestVMClone:
def test_clone_running_vm(self, api_client, ssh_keypair, wait_timeout,
host_shell, vm_shell, stopped_vm):
def test_clone_running_vm(
self, api_client, ssh_keypair, vm_checker, wait_timeout, host_shell, vm_shell, stopped_vm
):
"""
To cover test:
- (legacy) https://harvester.github.io/tests/manual/virtual-machines/clone-vm-that-is-turned-on/ # noqa
Expand All @@ -951,23 +952,13 @@ def test_clone_running_vm(self, api_client, ssh_keypair, wait_timeout,
pub_key, pri_key = ssh_keypair
code, data = api_client.vms.start(unique_vm_name)

endtime = datetime.now() + timedelta(seconds=wait_timeout)
while endtime > datetime.now():
code, data = api_client.vms.get_status(unique_vm_name)
if 200 == code:
phase = data.get('status', {}).get('phase')
conds = data.get('status', {}).get('conditions', [{}])
if ("Running" == phase
and "AgentConnected" == conds[-1].get('type')
and data['status'].get('interfaces')):
break
sleep(3)
else:
raise AssertionError(
f"Failed to Start VM({unique_vm_name}) with errors:\n"
f"Status: {data.get('status')}\n"
f"API Status({code}): {data}"
)
vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
assert vm_got_ips, (
f"Failed to Start VM({unique_vm_name}) with errors:\n"
f"Status: {data.get('status')}\n"
f"API Status({code}): {data}"
)

vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
if iface['name'] == 'default')
code, data = api_client.hosts.get(data['status']['nodeName'])
Expand Down Expand Up @@ -1010,24 +1001,24 @@ def test_clone_running_vm(self, api_client, ssh_keypair, wait_timeout,
code, _ = api_client.vms.clone(unique_vm_name, cloned_name)
assert 204 == code, f"Failed to clone VM {unique_vm_name} into new VM {cloned_name}"

# Check VM started
# Check cloned VM is created
endtime = datetime.now() + timedelta(seconds=wait_timeout)
while endtime > datetime.now():
code, data = api_client.vms.get_status(cloned_name)
code, data = api_client.vms.get(cloned_name)
if 200 == code:
phase = data.get('status', {}).get('phase')
conds = data.get('status', {}).get('conditions', [{}])
if ("Running" == phase
and "AgentConnected" == conds[-1].get('type')
and data['status'].get('interfaces')):
break
break
sleep(3)
else:
raise AssertionError(
f"Failed to Start VM({cloned_name}) with errors:\n"
f"Status: {data.get('status')}\n"
f"API Status({code}): {data}"
f"restored VM {cloned_name} is not created"
)
vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(cloned_name, ['default'])
assert vm_got_ips, (
f"Failed to Start VM({cloned_name}) with errors:\n"
f"Status: {data.get('status')}\n"
f"API Status({code}): {data}"
)

vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
if iface['name'] == 'default')
code, data = api_client.hosts.get(data['status']['nodeName'])
Expand Down Expand Up @@ -1088,8 +1079,9 @@ def test_clone_running_vm(self, api_client, ssh_keypair, wait_timeout,
vol_name = vol['volume']['persistentVolumeClaim']['claimName']
api_client.volumes.delete(vol_name)

def test_clone_stopped_vm(self, api_client, ssh_keypair, wait_timeout,
host_shell, vm_shell, stopped_vm):
def test_clone_stopped_vm(
self, api_client, ssh_keypair, vm_checker, wait_timeout, host_shell, vm_shell, stopped_vm
):
"""
To cover test:
- (legacy) https://harvester.github.io/tests/manual/virtual-machines/clone-vm-that-is-turned-off/ # noqa
Expand All @@ -1110,24 +1102,13 @@ def test_clone_stopped_vm(self, api_client, ssh_keypair, wait_timeout,
unique_vm_name, ssh_user = stopped_vm
pub_key, pri_key = ssh_keypair
code, data = api_client.vms.start(unique_vm_name)
vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
assert vm_got_ips, (
f"Failed to Start VM({unique_vm_name}) with errors:\n"
f"Status: {data.get('status')}\n"
f"API Status({code}): {data}"
)

endtime = datetime.now() + timedelta(seconds=wait_timeout)
while endtime > datetime.now():
code, data = api_client.vms.get_status(unique_vm_name)
if 200 == code:
phase = data.get('status', {}).get('phase')
conds = data.get('status', {}).get('conditions', [{}])
if ("Running" == phase
and "AgentConnected" == conds[-1].get('type')
and data['status'].get('interfaces')):
break
sleep(3)
else:
raise AssertionError(
f"Failed to Start VM({unique_vm_name}) with errors:\n"
f"Status: {data.get('status')}\n"
f"API Status({code}): {data}"
)
vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
if iface['name'] == 'default')
code, data = api_client.hosts.get(data['status']['nodeName'])
Expand Down Expand Up @@ -1202,23 +1183,12 @@ def test_clone_stopped_vm(self, api_client, ssh_keypair, wait_timeout,

# Check cloned VM started
api_client.vms.start(cloned_name)
endtime = datetime.now() + timedelta(seconds=wait_timeout)
while endtime > datetime.now():
code, data = api_client.vms.get_status(cloned_name)
if 200 == code:
phase = data.get('status', {}).get('phase')
conds = data.get('status', {}).get('conditions', [{}])
if ("Running" == phase
and "AgentConnected" == conds[-1].get('type')
and data['status'].get('interfaces')):
break
sleep(3)
else:
raise AssertionError(
f"Failed to Start VM({cloned_name}) with errors:\n"
f"Status: {data.get('status')}\n"
f"API Status({code}): {data}"
)
vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(cloned_name, ['default'])
assert vm_got_ips, (
f"Failed to Start VM({cloned_name}) with errors:\n"
f"Status: {data.get('status')}\n"
f"API Status({code}): {data}"
)
vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
if iface['name'] == 'default')
code, data = api_client.hosts.get(data['status']['nodeName'])
Expand Down Expand Up @@ -1283,8 +1253,9 @@ def test_clone_stopped_vm(self, api_client, ssh_keypair, wait_timeout,
@pytest.mark.p0
@pytest.mark.virtualmachines
class TestVMWithVolumes:
def test_create_with_two_volumes(self, api_client, ssh_keypair, wait_timeout,
host_shell, vm_shell, stopped_vm):
def test_create_with_two_volumes(
self, api_client, ssh_keypair, vm_checker, wait_timeout, host_shell, vm_shell, stopped_vm
):
"""
To cover test:
- https://harvester.github.io/tests/manual/virtual-machines/create-vm-with-two-disk-volumes/ # noqa
Expand All @@ -1311,23 +1282,12 @@ def test_create_with_two_volumes(self, api_client, ssh_keypair, wait_timeout,
# Start VM with 2 additional volumes
code, data = api_client.vms.update(unique_vm_name, vm_spec)
assert 200 == code, (code, data)
endtime = datetime.now() + timedelta(seconds=wait_timeout)
while endtime > datetime.now():
code, data = api_client.vms.get_status(unique_vm_name)
if 200 == code:
phase = data.get('status', {}).get('phase')
conds = data.get('status', {}).get('conditions', [{}])
if ("Running" == phase
and "AgentConnected" == conds[-1].get('type')
and data['status'].get('interfaces')):
break
sleep(3)
else:
raise AssertionError(
f"Failed to Start VM({unique_vm_name}) with errors:\n"
f"Status: {data.get('status')}\n"
f"API Status({code}): {data}"
)
vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
assert vm_got_ips, (
f"Failed to Start VM({unique_vm_name}) with errors:\n"
f"Status: {data.get('status')}\n"
f"API Status({code}): {data}"
)

# Log into VM to verify added volumes
vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
Expand Down Expand Up @@ -1405,8 +1365,9 @@ def test_create_with_two_volumes(self, api_client, ssh_keypair, wait_timeout,
for vol_name in claims:
api_client.volumes.delete(vol_name)

def test_create_with_existing_volume(self, api_client, ssh_keypair, wait_timeout,
host_shell, vm_shell, stopped_vm):
def test_create_with_existing_volume(
self, api_client, ssh_keypair, vm_checker, wait_timeout, host_shell, vm_shell, stopped_vm
):
"""
To cover test:
- https://harvester.github.io/tests/manual/virtual-machines/create-vm-with-existing-volume/ # noqa
Expand Down Expand Up @@ -1438,23 +1399,12 @@ def test_create_with_existing_volume(self, api_client, ssh_keypair, wait_timeout

# Start VM with added existing volume
code, data = api_client.vms.update(unique_vm_name, vm_spec)
endtime = datetime.now() + timedelta(seconds=wait_timeout)
while endtime > datetime.now():
code, data = api_client.vms.get_status(unique_vm_name)
if 200 == code:
phase = data.get('status', {}).get('phase')
conds = data.get('status', {}).get('conditions', [{}])
if ("Running" == phase
and "AgentConnected" == conds[-1].get('type')
and data['status'].get('interfaces')):
break
sleep(3)
else:
raise AssertionError(
f"Failed to Start VM({unique_vm_name}) with errors:\n"
f"Status: {data.get('status')}\n"
f"API Status({code}): {data}"
)
vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
assert vm_got_ips, (
f"Failed to Start VM({unique_vm_name}) with errors:\n"
f"Status: {data.get('status')}\n"
f"API Status({code}): {data}"
)

# Log into VM to verify added volumes
vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
Expand Down Expand Up @@ -1964,30 +1914,20 @@ def login_to_vm_from_host(

@pytest.mark.dependency(name="hot_plug_volume")
def test_add(
self, api_client, ssh_keypair, wait_timeout, host_shell, vm_shell, small_volume, stopped_vm
self, api_client, ssh_keypair, wait_timeout, vm_checker,
host_shell, vm_shell, small_volume, stopped_vm
):
unique_vm_name, ssh_user = stopped_vm
pub_key, pri_key = ssh_keypair

# Start VM
code, data = api_client.vms.start(unique_vm_name)
endtime = datetime.now() + timedelta(seconds=wait_timeout)
while endtime > datetime.now():
code, data = api_client.vms.get_status(unique_vm_name)
if 200 == code:
phase = data.get('status', {}).get('phase')
conds = data.get('status', {}).get('conditions', [{}])
if ("Running" == phase
and "AgentConnected" == conds[-1].get('type')
and data['status'].get('interfaces')):
break
sleep(3)
else:
raise AssertionError(
f"Failed to Start VM({unique_vm_name}) with errors:\n"
f"Status: {data.get('status')}\n"
f"API Status({code}): {data}"
)
vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
assert vm_got_ips, (
f"Failed to Start VM({unique_vm_name}) with errors:\n"
f"Status: {data.get('status')}\n"
f"API Status({code}): {data}"
)
# Log into VM to verify OS is ready
vm_ip = next(iface['ipAddress'] for iface in data['status']['interfaces']
if iface['name'] == 'default')
Expand Down
Loading

0 comments on commit 31e6cd2

Please sign in to comment.