diff --git a/harvester_e2e_tests/fixtures/virtualmachines.py b/harvester_e2e_tests/fixtures/virtualmachines.py
index 3332f7cda..438d56247 100644
--- a/harvester_e2e_tests/fixtures/virtualmachines.py
+++ b/harvester_e2e_tests/fixtures/virtualmachines.py
@@ -209,6 +209,21 @@ def cb(ctx):
         )
         return self.wait_stopped(vm_name, endtime, cb, **kws)
 
+    def wait_status_running(self, vm_name, endtime=None, callback=default_cb, **kws):
+        """Poll the VM until `status.printableStatus` is "Running" or *endtime* passes.
+
+        Returns (True, ctx) once running and `callback(ctx)` approves,
+        otherwise (False, ctx) with the last response context on timeout.
+        """
+        endtime = endtime or self._endtime()
+        # Fetch once up front so `ctx` is always bound: if `endtime` has already
+        # passed, the loop body never runs and the `else` branch still needs it.
+        ctx = ResponseContext('vm.get', *self.vms.get(vm_name, **kws))
+        while endtime > datetime.now():
+            status = ctx.data.get('status', {}).get('printableStatus')
+            if 200 == ctx.code and "Running" == status and callback(ctx):
+                break
+            sleep(self.snooze)
+            ctx = ResponseContext('vm.get', *self.vms.get(vm_name, **kws))
+        else:
+            return False, ctx
+        return True, ctx
+
     def wait_deleted(self, vm_name, endtime=None, callback=default_cb, **kws):
         ctx = ResponseContext('vm.delete', *self.vms.delete(vm_name, **kws))
         if 404 == ctx.code and callback(ctx):
diff --git a/harvester_e2e_tests/integrations/test_4_vm_backup_restore.py b/harvester_e2e_tests/integrations/test_4_vm_backup_restore.py
index 72ee2d882..784010c1e 100644
--- a/harvester_e2e_tests/integrations/test_4_vm_backup_restore.py
+++ b/harvester_e2e_tests/integrations/test_4_vm_backup_restore.py
@@ -480,8 +480,12 @@ def test_restore_replace_with_delete_vols(
     spec = api_client.backups.RestoreSpec.for_existing(delete_volumes=True)
     code, data = api_client.backups.restore(unique_vm_name, spec)
     assert 201 == code, f'Failed to restore backup with current VM replaced, {data}'
-    vm_getable, (code, data) = vm_checker.wait_getable(unique_vm_name)
-    assert vm_getable, (code, data)
+
+    vm_running, (code, data) = vm_checker.wait_status_running(unique_vm_name)
+    assert vm_running, (
+        f"Failed to restore VM({unique_vm_name}) with errors:\n"
+        f"Status({code}): {data}"
+    )
 
     # Check VM Started then get IPs (vm and host)
     vm_got_ips, (code, data) = vm_checker.wait_ip_addresses(unique_vm_name, ['default'])
@@ -495,6 +499,7 @@ def test_restore_replace_with_delete_vols(
     code, data = api_client.hosts.get(data['status']['nodeName'])
     host_ip = next(addr['address'] for addr in data['status']['addresses']
                    if addr['type'] == 'InternalIP')
+    base_vm_with_data['host_ip'], base_vm_with_data['vm_ip'] = host_ip, vm_ip
 
     # Login to the new VM and check data is existing
     with vm_shell_from_host(host_ip, vm_ip, base_vm_with_data['ssh_user'], pkey=pri_key) as sh: