From 970e56e2e7e88e0913516c32369a18e700399c40 Mon Sep 17 00:00:00 2001 From: Kamil Varga Date: Wed, 3 Nov 2021 17:33:17 +0100 Subject: [PATCH 01/58] This commit is a fix for remote_with_unix.negative_testing.socket_with_polkit_ro test case, where response from the libvirt has changed with the recent version and therefore password prompt is showing up with error message and requiring root account instead of polkit account which is denied. This fix add root password to pass through the login prompt and code to check for the new error message. Signed-off-by: Kamil Varga --- libvirt/tests/cfg/remote_access/remote_with_unix.cfg | 2 ++ libvirt/tests/src/remote_access/remote_access.py | 11 +++++++++-- 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/libvirt/tests/cfg/remote_access/remote_with_unix.cfg b/libvirt/tests/cfg/remote_access/remote_with_unix.cfg index 44dc01b51e..b189fdb110 100644 --- a/libvirt/tests/cfg/remote_access/remote_with_unix.cfg +++ b/libvirt/tests/cfg/remote_access/remote_with_unix.cfg @@ -149,5 +149,7 @@ main_vm = "avocado-vt-vm1" status_error = "no" patterns_virsh_cmd = ".*authentication unavailable.*" + auth_pwd = "${local_pwd}" + error_pattern = 'System policy prevents management of local virtualized systems' polkit_pkla = "/etc/polkit-1/localauthority/50-local.d/polkit.pkla" polkit_pkla_cxt = "[Allow ${su_user} libvirt monitor permissions]\nIdentity=unix-user:${su_user}\nAction=org.libvirt.unix.monitor\nResultAny=yes\nResultInactive=yes\nResultActive=yes" diff --git a/libvirt/tests/src/remote_access/remote_access.py b/libvirt/tests/src/remote_access/remote_access.py index 01f7449c05..73608d9225 100644 --- a/libvirt/tests/src/remote_access/remote_access.py +++ b/libvirt/tests/src/remote_access/remote_access.py @@ -60,8 +60,15 @@ def remote_access(params, test): fp.close() logging.info("Succeed to connect libvirt daemon.") else: - test.fail("Failed to connect libvirt daemon!!output: {}" - .format(output)) + if error_pattern: + if 
error_pattern in output: + logging.info("Expected libvirt output!!") + else: + test.fail("Unexpected output: {}, when looking for: {} " + "pattern".format(output, error_pattern)) + else: + test.fail("Failed to connect libvirt daemon!!output: {}" + .format(output)) else: if not ret: if error_pattern: From 021aea7a97ee75bf5cd7dde3b044a34058f681ce Mon Sep 17 00:00:00 2001 From: Sebastian Mitterle Date: Fri, 8 Oct 2021 06:12:59 -0400 Subject: [PATCH 02/58] virttools: add type and first test Add new test type for virt tools on virt-manager repo: - virt-install - virt-clone - virt-xml - virt-manager Also, add first test for hostdev support of virt-install on s390x Signed-off-by: Sebastian Mitterle --- .../tests/cfg/virt_install/s390x_hostdev.cfg | 5 +++ .../tests/src/virt_install/s390x_hostdev.py | 45 +++++++++++++++++++ 2 files changed, 50 insertions(+) create mode 100644 virttools/tests/cfg/virt_install/s390x_hostdev.cfg create mode 100644 virttools/tests/src/virt_install/s390x_hostdev.py diff --git a/virttools/tests/cfg/virt_install/s390x_hostdev.cfg b/virttools/tests/cfg/virt_install/s390x_hostdev.cfg new file mode 100644 index 0000000000..51be7040cf --- /dev/null +++ b/virttools/tests/cfg/virt_install/s390x_hostdev.cfg @@ -0,0 +1,5 @@ +- s390x_hostdev: + type = s390x_hostdev + only s390-virtio + variants: + - check_present_inside_guest: diff --git a/virttools/tests/src/virt_install/s390x_hostdev.py b/virttools/tests/src/virt_install/s390x_hostdev.py new file mode 100644 index 0000000000..38ecd0bfba --- /dev/null +++ b/virttools/tests/src/virt_install/s390x_hostdev.py @@ -0,0 +1,45 @@ +import logging +from virttest.utils_zcrypt import CryptoDeviceInfoBuilder, \ + APMaskHelper, load_vfio_ap, unload_vfio_ap +from provider.vfio import ccw +from uuid import uuid4 + +def run(test, env, params): + """ + Import machine with supported --hostdevs on s390x. + Start the machine and confirm the passthrough. 
+ """ + + ccw.assure_preconditions() + schid, chpids = ccw.get_device_info() + uuid = str(uuid4()) + ccw.set_override(schid) + ccw.start_device(uuid, schid) + + + load_vfio_ap() + info = CryptoDeviceInfoBuilder.get() + + devices = [info.domains[0]] + mask_helper = APMaskHelper.from_infos(devices) + matrix_dev = MatrixDevice.from_infos(devices) + + result = vires.nodedev_list(cap="mdev", debug=True) + logging.debug(result, str(result)) + + """ + vm.start() + + if not ccw.device_is_listed(session, chpids): + test.fail("CCW device not listed") + """ + finally: + if matrix_dev: + matrix_dev.unassign_all() + if mask_helper: + mask_helper.unassign_all() + unload_vfio_ap() + if uuid: + ccw.stop_device(uuid) + if schid: + ccw.unset_override(schid) From e7050ecb20a3541f0fc67244217751da8489e70b Mon Sep 17 00:00:00 2001 From: Yingshun Cui Date: Tue, 26 Oct 2021 15:36:18 +0800 Subject: [PATCH 03/58] vDPA: Add a test to check hotplug/unplug vdpa type interface This PR adds: RHEL-196261: hotplug/unplug vdpa type interface Signed-off-by: Yingshun Cui --- .../virtual_interface/interface_hotplug.cfg | 14 +++ .../virtual_interface/interface_hotplug.py | 104 ++++++++++++++++++ provider/interface/interface_base.py | 74 +++++++++++++ provider/interface/vdpa_base.py | 90 +++++++++++++++ 4 files changed, 282 insertions(+) create mode 100644 libvirt/tests/cfg/virtual_interface/interface_hotplug.cfg create mode 100644 libvirt/tests/src/virtual_interface/interface_hotplug.py create mode 100644 provider/interface/interface_base.py create mode 100644 provider/interface/vdpa_base.py diff --git a/libvirt/tests/cfg/virtual_interface/interface_hotplug.cfg b/libvirt/tests/cfg/virtual_interface/interface_hotplug.cfg new file mode 100644 index 0000000000..9737882b57 --- /dev/null +++ b/libvirt/tests/cfg/virtual_interface/interface_hotplug.cfg @@ -0,0 +1,14 @@ +- interface_hotplug: + type = interface_hotplug + start_vm = no + repeat_times = 1 + + variants dev_type: + - vdpa: + only x86_64 + 
func_supported_since_libvirt_ver = (7, 3, 0) + func_supported_since_qemu_kvm_ver = (6, 0, 0) + iface_dict = {"source": {'dev':'/dev/vhost-vdpa-0'}} + variants test_target: + - simulator: + - mellanox: diff --git a/libvirt/tests/src/virtual_interface/interface_hotplug.py b/libvirt/tests/src/virtual_interface/interface_hotplug.py new file mode 100644 index 0000000000..4dc6f27731 --- /dev/null +++ b/libvirt/tests/src/virtual_interface/interface_hotplug.py @@ -0,0 +1,104 @@ +import logging + +from virttest import libvirt_version +from virttest import utils_misc +from virttest import utils_vdpa +from virttest.libvirt_xml import vm_xml +from virttest.staging import service +from virttest.utils_libvirt import libvirt_vmxml + +from provider.interface import interface_base +from provider.interface import vdpa_base + + +def run(test, params, env): + """ + Test Hotplug/unplug interface device(s) + """ + + def setup_default(): + """ + Default setup + """ + logging.debug("Remove VM's interface devices.") + libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface') + + def teardown_default(): + """ + Default cleanup + """ + pass + + def setup_vdpa(): + """ + Setup vDPA environment + """ + setup_default() + test_env_obj = None + if test_target == "simulator": + test_env_obj = utils_vdpa.VDPASimulatorTest() + else: + pf_pci = utils_vdpa.get_vdpa_pci() + test_env_obj = utils_vdpa.VDPAOvsTest(pf_pci) + test_env_obj.setup() + return test_env_obj + + def teardown_vdpa(): + """ + Cleanup vDPA environment + """ + if test_target != "simulator": + service.Factory.create_service("NetworkManager").restart() + if test_obj: + test_obj.cleanup() + + def test_vdpa(): + """ + Hotplug/unplug vDPA type interface + + 1) Start the vm, hotplug the interface + 2) Login to the vm and check the network function + 3) Hot-unplug the interface + """ + vm.start() + vm_session = vm.wait_for_serial_login(timeout=240) + + br_name = None + if test_target == "mellanox": + br_name = test_obj.br_name + for _i in 
range(eval(params.get('repeat_times', '1'))): + interface_base.attach_iface_device(vm_name, dev_type, params) + vdpa_base.check_vdpa_conn(vm_session, test_target, br_name) + interface_base.detach_iface_device(vm_name, dev_type) + + libvirt_version.is_libvirt_feature_supported(params) + supported_qemu_ver = eval(params.get('func_supported_since_qemu_kvm_ver', '()')) + if supported_qemu_ver: + if not utils_misc.compare_qemu_version(*supported_qemu_ver, False): + test.cancel("Current qemu version doesn't support this test!") + + # Variable assignment + test_target = params.get('test_target', '') + dev_type = params.get('dev_type', '') + + vm_name = params.get('main_vm') + vm = env.get_vm(vm_name) + + vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) + backup_vmxml = vmxml.copy() + + run_test = eval("test_%s" % dev_type) + setup_test = eval("setup_%s" % dev_type) if "setup_%s" % dev_type in \ + locals() else setup_default + teardown_test = eval("teardown_%s" % dev_type) if "teardown_%s" % \ + dev_type in locals() else teardown_default + + test_obj = None + try: + # Execute test + test_obj = setup_test() + run_test() + + finally: + backup_vmxml.sync() + teardown_test() diff --git a/provider/interface/interface_base.py b/provider/interface/interface_base.py new file mode 100644 index 0000000000..bc58245d59 --- /dev/null +++ b/provider/interface/interface_base.py @@ -0,0 +1,74 @@ +import logging +import time + +from avocado.core import exceptions + +from virttest import utils_net +from virttest import virsh +from virttest.libvirt_xml import vm_xml +from virttest.libvirt_xml.devices import interface +from virttest.utils_libvirt import libvirt_vmxml +from virttest.utils_test import libvirt + + +def create_iface(iface_type, iface_dict): + """ + Create Interface device + + :param iface_type: String, interface type + :param iface_dict: Dict, attrs of Interface + :return: xml object of interface + """ + iface = interface.Interface(iface_type) + iface.setup_attrs(**iface_dict) + + 
logging.debug("Interface XML: %s", iface) + return iface + + +def get_vm_iface(vm_session): + """ + Get VM's 1st interface + + :param vm_session: An session to VM + :return: VM's first interface + """ + p_iface, _v_ifc = utils_net.get_remote_host_net_ifs(vm_session) + vm_iface = p_iface[:1:] + if not vm_iface: + raise exceptions.TestFail("Failed to get vm's iface!") + return vm_iface[0] + + +def attach_iface_device(vm_name, dev_type, params): + """ + Attach an interface to VM + + :param vm_name: VM's name + :param dev_type: Interface device type + :param params: Dictionary with the test parameters + """ + iface_dict = eval(params.get('iface_dict', '{}')) + status_error = "yes" == params.get('status_error', 'no') + + iface = create_iface(dev_type, iface_dict) + res = virsh.attach_device(vm_name, iface.xml, debug=True) + libvirt.check_exit_status(res, status_error) + libvirt_vmxml.check_guest_xml(vm_name, dev_type) + # FIXME: Sleep for 20 secs to make iface work properly + time.sleep(20) + + +def detach_iface_device(vm_name, dev_type): + """ + Detach an interface from VM + + :param vm_name: VM's name + :param dev_type: Interface device type + """ + iface = interface.Interface(dev_type) + iface = vm_xml.VMXML.new_from_dumpxml(vm_name).devices.by_device_tag( + "interface")[0] + virsh.detach_device(vm_name, iface.xml, wait_for_event=True, + debug=True, ignore_status=False) + libvirt_vmxml.check_guest_xml(vm_name, dev_type, status_error=True) diff --git a/provider/interface/vdpa_base.py b/provider/interface/vdpa_base.py new file mode 100644 index 0000000000..ca431b1f0f --- /dev/null +++ b/provider/interface/vdpa_base.py @@ -0,0 +1,90 @@ +import logging +import re + +from avocado.core import exceptions +from avocado.utils import process + +from provider.interface import interface_base + +from virttest import utils_test +from virttest import utils_misc +from virttest.staging import service + + +def config_vdpa_conn(vm_session, vm_iface, br_name, + 
br_ip_addr='100.100.100.100', cidr='24'): + """ + Config vdpa connection + + :param vm_session: An session to VM + :param vm_iface: VM's interface + :param br_name: Bridge name + :param br_ip_addr: IP address of the bridge + :param cidr: CIDR + """ + vm_ip = re.sub('\d+$', '60', br_ip_addr) + service.Factory.create_service("NetworkManager").stop() + + logging.debug("Config static ip %s for vm.", vm_ip) + cmd = ("nmcli con del {0}; nmcli con add type ethernet ifname {0} " + "con-name {0} ipv4.method manual ipv4.address {1}/{2}" + .format(vm_iface, vm_ip, cidr)) + vm_session.cmd(cmd) + logging.debug("Set ip address of the bridge.") + cmd = ("ip addr add {0}/{1} dev {2}; sleep 5;ip link set {2} up" + .format(br_ip_addr, cidr, br_name)) + process.run(cmd, shell=True) + + +def check_vdpa_network(vm_session, vm_iface, br_name, + ping_dest="100.100.100.100"): + """ + Check vdpa network connection + + :param vm_session: An session to VM + :param vm_iface: VM's interface + :param br_name: Bridge name + :param ping_dest: The ip address to ping + """ + config_vdpa_conn(vm_session, vm_iface, br_name) + + if not utils_misc.wait_for(lambda: not utils_test.ping( + ping_dest, count=3, timeout=5, output_func=logging.debug, + session=vm_session)[0], first=5, timeout=30): + raise exceptions.TestFail("Failed to ping %s." 
% ping_dest) + + +def check_rx_tx_packages(vm_session, vm_iface): + """ + Check rx and tx package + + :param vm_session: An session to VM + :param vm_iface: VM's interface + """ + cmd = "ip -s -json link show %s" % vm_iface + status, stdout = vm_session.cmd_status_output(cmd) + if status or not stdout: + raise exceptions.TestFail("Failed to run cmd - {}, status - {}, " + "output - {}.".format(cmd, status, stdout)) + ip_info = eval(stdout.strip()) + logging.debug("VM iface's info: %s.", ip_info) + + tx_info = ip_info[0]['stats64']['tx']['packets'] + rx_info = ip_info[0]['stats64']['rx']['packets'] + if rx_info != tx_info: + raise exceptions.TestFail("The value of rx and tx should be same.") + + +def check_vdpa_conn(vm_session, test_target, br_name=None): + """ + Check vDPA connection + + :param vm_session: An session to VM + :param test_target: Test target env, eg, "mellanox" or "simulator" + :param br_name: Bridge name + """ + vm_iface = interface_base.get_vm_iface(vm_session) + if test_target == "mellanox": + check_vdpa_network(vm_session, vm_iface, br_name) + elif test_target == "simulator": + check_rx_tx_packages(vm_session, vm_iface) From 0324a9b60a321b19a9b653b3355e56fe856d85db Mon Sep 17 00:00:00 2001 From: Liu Yiding Date: Tue, 9 Nov 2021 09:41:49 +0800 Subject: [PATCH 04/58] snapshot: set timeout for snapshot_create_as Signed-off-by: Liu Yiding --- .../tests/src/virsh_cmd/snapshot/virsh_snapshot_create_as.py | 2 ++ .../tests/src/virsh_cmd/snapshot/virsh_snapshot_dumpxml.py | 3 ++- libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_edit.py | 5 +++-- .../tests/src/virsh_cmd/snapshot/virsh_snapshot_par_cur.py | 5 +++-- 4 files changed, 10 insertions(+), 5 deletions(-) diff --git a/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_create_as.py b/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_create_as.py index 384f3fbae7..7cedf961a3 100644 --- a/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_create_as.py +++ 
b/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_create_as.py @@ -547,12 +547,14 @@ def run(test, params, env): vp = virsh.VirshPersistent() vp.create(vmxml_backup['xml'], '--autodestroy') cmd_result = vp.snapshot_create_as(vm_name, options, + timeout=300, ignore_status=True, debug=True) vp.close_session() vmxml_backup.define() else: cmd_result = virsh.snapshot_create_as(vm_name, options, + timeout=300, unprivileged_user=usr, uri=uri, ignore_status=True, diff --git a/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_dumpxml.py b/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_dumpxml.py index c19885f716..f0d23c6ee8 100644 --- a/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_dumpxml.py +++ b/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_dumpxml.py @@ -136,10 +136,11 @@ def run(test, params, env): dom_state = virsh.domstate(vm_name).stdout.strip() # Create disk snapshot before all to make the origin image clean - virsh.snapshot_create_as(vm_name, "--disk-only") + virsh.snapshot_create_as(vm_name, "--disk-only", timeout=300) # Create snapshot with options snapshot_result = virsh.snapshot_create_as(vm_name, snap_opt, + timeout=300, readonly=readonly) if snapshot_result.exit_status: if status_error == "no": diff --git a/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_edit.py b/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_edit.py index b8fb5fbb95..dab0df1f74 100644 --- a/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_edit.py +++ b/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_edit.py @@ -124,7 +124,8 @@ def log_first_lines(pre_xml, after_xml, count=15): try: # Create disk snapshot before all to make the origin image clean logging.debug("Create snap-temp --disk-only") - ret = virsh.snapshot_create_as(vm_name, "snap-temp --disk-only") + ret = virsh.snapshot_create_as(vm_name, "snap-temp --disk-only", + timeout=300) if ret.exit_status != 0: test.fail("Fail to create temp snap, Error: %s" % ret.stderr.strip()) @@ -132,7 +133,7 @@ 
def log_first_lines(pre_xml, after_xml, count=15): # Create snapshots for opt in [snap_create_opt1, snap_create_opt2]: logging.debug("...use option %s", opt) - result = virsh.snapshot_create_as(vm_name, opt) + result = virsh.snapshot_create_as(vm_name, opt, timeout=300) if result.exit_status: test.fail("Failed to create snapshot. Error:%s." % result.stderr.strip()) diff --git a/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_par_cur.py b/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_par_cur.py index 7e68034a54..12c1516745 100644 --- a/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_par_cur.py +++ b/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_par_cur.py @@ -143,14 +143,15 @@ def parent_snapshot_test(): try: if not without_snapshot: # Create disk snapshot before all to make the origin image clean - ret = virsh.snapshot_create_as(vm_name, "snap-temp --disk-only") + ret = virsh.snapshot_create_as(vm_name, "snap-temp --disk-only", + timeout=300) if ret.exit_status != 0: test.fail("Fail to create temp snap, Error: %s" % ret.stderr.strip()) # Create snapshots for opt in snap_opt: - result = virsh.snapshot_create_as(vm_name, opt) + result = virsh.snapshot_create_as(vm_name, opt, timeout=300) if result.exit_status: test.fail("Failed to create snapshot. Error:%s." % result.stderr.strip()) From f34b363ecee7ad54c2397b9662f0862c5b9d3ae3 Mon Sep 17 00:00:00 2001 From: Dan Zheng Date: Mon, 8 Nov 2021 19:47:07 +0800 Subject: [PATCH 05/58] virsh_cpu_baseline: new case --migratable Case ID: RHEL7-20178 Case Title: [cmd/cpu-baseline] Obtain cpu baseline with "--migratable" through XML contains invtsc from "virsh capabilities" This case requires the host has 'invtsc' feature. 
This case is to test no 'invtsc' feature outputed in virsh.cpu_baseline Signed-off-by: Dan Zheng --- .../virsh_cmd/domain/virsh_cpu_baseline.cfg | 5 +++ .../virsh_cmd/domain/virsh_cpu_baseline.py | 35 ++++++++++++++++--- 2 files changed, 35 insertions(+), 5 deletions(-) diff --git a/libvirt/tests/cfg/virsh_cmd/domain/virsh_cpu_baseline.cfg b/libvirt/tests/cfg/virsh_cmd/domain/virsh_cpu_baseline.cfg index 48803e0818..1eff5b236b 100644 --- a/libvirt/tests/cfg/virsh_cmd/domain/virsh_cpu_baseline.cfg +++ b/libvirt/tests/cfg/virsh_cmd/domain/virsh_cpu_baseline.cfg @@ -24,6 +24,11 @@ vms = "avocado-vt-vm1" main_vm = "avocado-vt-vm1" config_guest = "yes" + - migratable: + only q35 + cpu_baseline_extra = '--migratable' + cpu_baseline_test_feature = "invtsc" + feature_exist = "no" - negative_test: status_error = "yes" variants: diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_cpu_baseline.py b/libvirt/tests/src/virsh_cmd/domain/virsh_cpu_baseline.py index aac9437ba1..4c7c0be999 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_cpu_baseline.py +++ b/libvirt/tests/src/virsh_cmd/domain/virsh_cpu_baseline.py @@ -46,12 +46,30 @@ def create_attach_xml(cpu_xmlfile, test_feature): with open(cpu_xmlfile, 'w') as xmlfile: xmlfile.write(content) + def validate_host(to_file, test_feature): + """ + Validate the host meets the test requirement which includes a + certain feature + + :param to_file: the output to be written to + :param test_feature: feature name to be searched + :raises: test.cancel if the host does not include the tested feature + """ + + output = virsh.capabilities(to_file=to_file, + ignore_status=False, + debug=True) + if not check_xml(output, test_feature): + test.cancel("The capabilities do not include feature '%s'. " + "Skip the test" % test_feature) + def check_xml(xml_output, test_feature): """ Check if result output contains tested feature. :param xml_output: virsh cpu-baseline command's result. :param test_feature: Test feature element. 
+ :return: a match object if the feature exists, otherwise None """ feature_name = "" dom = parseString(xml_output) @@ -59,8 +77,7 @@ def check_xml(xml_output, test_feature): for names in feature: feature_name += names.getAttribute("name") dom.unlink() - if not re.search(test_feature, feature_name): - test.fail("Cannot see '%s' feature" % test_feature) + return re.search(test_feature, feature_name) # Get all parameters. file_name = params.get("cpu_baseline_cpu_file", "cpu.xml") @@ -69,9 +86,13 @@ def check_xml(xml_output, test_feature): test_feature = params.get("cpu_baseline_test_feature", "acpi") status_error = "yes" == params.get("status_error", "no") cpu_xmlfile = os.path.join(data_dir.get_tmp_dir(), file_name) + exp_feature_exist = "yes" == params.get("feature_exist", "yes") - # Prepare a xml file. - create_attach_xml(cpu_xmlfile, test_feature) + if '--migratable' not in extra: + # Prepare a xml file. + create_attach_xml(cpu_xmlfile, test_feature) + else: + validate_host(cpu_xmlfile, test_feature) if cpu_ref == "file": cpu_ref = cpu_xmlfile @@ -92,7 +113,11 @@ def check_xml(xml_output, test_feature): else: if status != 0: test.fail("Run failed with right command") - check_xml(output, test_feature) + feature_found = check_xml(output, test_feature) + if exp_feature_exist and not feature_found: + test.fail("Cannot see '%s' feature" % test_feature) + if not exp_feature_exist and feature_found: + test.fail("Can see '%s' feature, but not expected" % test_feature) # Use the output to config VM config_guest = "yes" == params.get("config_guest", "no") From 5903c1eda32a324e0f3168f071e10b8ea9ecedd9 Mon Sep 17 00:00:00 2001 From: Yanqiu Zhang Date: Tue, 9 Nov 2021 06:35:14 -0500 Subject: [PATCH 06/58] virsh_event: Stopped failed event if kill qemu Lifecycle event "Stopped Failed" if kill domain qemu process. 
Signed-off-by: Yanqiu Zhang --- libvirt/tests/cfg/virsh_cmd/domain/virsh_event.cfg | 4 ++++ libvirt/tests/src/virsh_cmd/domain/virsh_event.py | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/libvirt/tests/cfg/virsh_cmd/domain/virsh_event.cfg b/libvirt/tests/cfg/virsh_cmd/domain/virsh_event.cfg index a7c07e7d93..ae04540e50 100644 --- a/libvirt/tests/cfg/virsh_cmd/domain/virsh_event.cfg +++ b/libvirt/tests/cfg/virsh_cmd/domain/virsh_event.cfg @@ -46,6 +46,10 @@ panic_model = 'isa' addr_type = 'isa' addr_iobase = '0x505' + - kill_qemu: + only test_events + events_list = "kill_qemu" + signal = 'SIGKILL' - reboot_event: event_name = "reboot" events_list = "reset" diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_event.py b/libvirt/tests/src/virsh_cmd/domain/virsh_event.py index 4a2ea6adfc..63e44c0143 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_event.py +++ b/libvirt/tests/src/virsh_cmd/domain/virsh_event.py @@ -539,6 +539,10 @@ def trigger_events(dom, events_list=[]): ret = virsh.domstate(dom.name, "--reason", **virsh_dargs) if ret.stdout.strip() != "paused (I/O error)": test.fail("Domain state should still be paused due to I/O error!") + elif event == "kill_qemu": + os.kill(dom.get_pid(), getattr(signal, signal_name)) + expected_events_list.append("'lifecycle' for %s:" + " Stopped Failed") else: test.error("Unsupported event: %s" % event) # Event may not received immediately From dae2cf13471e696ce19870af8540add334874921 Mon Sep 17 00:00:00 2001 From: xiaodwan Date: Wed, 10 Nov 2021 10:21:57 +0800 Subject: [PATCH 07/58] Revert "v2v: fix the failed cases with error 'Not find cirrus device'" --- provider/v2v_vmcheck_helper.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/provider/v2v_vmcheck_helper.py b/provider/v2v_vmcheck_helper.py index bd753d81e3..9c00287bc5 100644 --- a/provider/v2v_vmcheck_helper.py +++ b/provider/v2v_vmcheck_helper.py @@ -316,8 +316,8 @@ def _when_target_ovirt(): # Video model will change to QXL for 
Windows2008r2 and windows7 if self.target == 'libvirt': video_model = _when_target_libvirt(has_qxldod) - if not has_virtio_win: - video_model = 'cirrus' + if not has_virtio_win: + video_model = 'cirrus' return video_model From 255e5710f1d9914eacb2216dd32d6249f55d3196 Mon Sep 17 00:00:00 2001 From: Yan Fu Date: Mon, 1 Nov 2021 11:03:24 +0800 Subject: [PATCH 08/58] Add testcase for all the sub-daemons starting in foreground 1.Will run all the testcases for sub-daemons in modular daemon mode and cancel testcases for legacy libvirtd; 2.Will run testcases for libvirtd in legacy libvirtd mode and cancel testcases for modular daemon; Signed-off-by: Yan Fu --- .../tests/cfg/daemon/daemon_functional.cfg | 28 ++++++++- libvirt/tests/src/daemon/daemon_functional.py | 60 +++++++++++-------- 2 files changed, 62 insertions(+), 26 deletions(-) diff --git a/libvirt/tests/cfg/daemon/daemon_functional.cfg b/libvirt/tests/cfg/daemon/daemon_functional.cfg index 104ee8cbf0..cbe4aee1a2 100644 --- a/libvirt/tests/cfg/daemon/daemon_functional.cfg +++ b/libvirt/tests/cfg/daemon/daemon_functional.cfg @@ -6,6 +6,29 @@ exit_time_tolerance = 1 pseries: exit_time_tolerance = 2 + variants: + - legacy_daemon: + require_modular_daemon = "no" + variants: + - libvirtd: + daemon_name = "libvirtd" + - modular_daemon: + require_modular_daemon = "yes" + variants: + - virtnetworkd: + daemon_name = "virtnetworkd" + - virtnodedevd: + daemon_name = "virtnodedevd" + - virtsecretd: + daemon_name = "virtsecretd" + - virtstoraged: + daemon_name = "virtstoraged" + - virtinterfaced: + daemon_name = "virtinterfaced" + - virtnwfilterd: + daemon_name = "virtnwfilterd" + - virtqemud: + daemon_name = "virtqemud" variants: - no_opt: - opt_help: @@ -20,11 +43,12 @@ libvirtd_arg = '--timeout 3' expected_exit_time = 3 - opt_timeout_3_live_vm: + only virtqemud, libvirtd libvirtd_arg = '--timeout 3' start_vm = yes - opt_config: - libvirtd_arg = '--config /tmp/virt-test.conf' - expected_config_path = '/tmp/virt-test.conf' + 
test_config = 'yes' + libvirtd_arg = '--config ' - opt_pid: libvirtd_arg = '--pid-file /tmp/virt-test.pid' expected_pid_path = '/tmp/virt-test.pid' diff --git a/libvirt/tests/src/daemon/daemon_functional.py b/libvirt/tests/src/daemon/daemon_functional.py index 314c6dceb4..b2be12a43b 100644 --- a/libvirt/tests/src/daemon/daemon_functional.py +++ b/libvirt/tests/src/daemon/daemon_functional.py @@ -6,9 +6,10 @@ from virttest import virsh from virttest import utils_config +from virttest import utils_split_daemons +from virttest import virt_admin from virttest.utils_libvirtd import LibvirtdSession from virttest.utils_libvirtd import Libvirtd -from virttest.libvirt_xml import capability_xml from virttest import libvirt_version @@ -42,7 +43,7 @@ def check_version(params): Check whether the output is libvirtd version. """ expected_version = params.get('expected_version', 'no') == 'yes' - is_version = log[0].startswith('{} (libvirt)'.format(Libvirtd().service_list[0])) + is_version = log[0].startswith('{} (libvirt)'.format(daemon_name)) if expected_version != is_version: test.fail( 'Expected output version is %s, but get output:\n%s' % @@ -76,7 +77,7 @@ def check_pid_file(): with open(pid_path) as pid_file: pid = int(pid_file.readline()) - result = process.run('pgrep %s' % Libvirtd().service_list[0], + result = process.run('pgrep %s' % daemon_name, ignore_status=True, shell=True) expected_pid = int(result.stdout_text.strip().split()[0]) @@ -86,12 +87,20 @@ def check_pid_file(): def check_config_file(): """ - Check whether the config file take effects by checking UUID. + Check whether the config file take effects by checking max_clients. 
""" - cur_uuid = capability_xml.CapabilityXML()['uuid'] - if cur_uuid != check_uuid: - test.fail('Expected host UUID is %s, but got %s' % - (check_uuid, cur_uuid)) + if daemon_name == "libvirtd": + connect_uri = "qemu:///system" + else: + connect_uri = daemon_name + ":///system" + vp = virt_admin.VirtadminPersistent(uri=connect_uri) + result = vp.srv_clients_info(daemon_name, uri=connect_uri, ignore_status=True, debug=True) + output = result.stdout.strip().splitlines() + out_split = [item.split(':') for item in output] + out_dict = dict([[item[0].strip(), item[1].strip()] for item in out_split]) + if int(out_dict["nclients_max"]) != check_max_clients: + test.fail('Expected max_clients is %s, but got %s' % + (check_max_clients, out_dict["nclients_max"])) MAX_TIMEOUT = 10 arg_str = params.get("libvirtd_arg", "") @@ -99,7 +108,11 @@ def check_config_file(): expected_exit_time = float(params.get("expected_exit_time", 'inf')) config_path = params.get('expected_config_path', "") pid_path = params.get('expected_pid_path', "") + daemon_name = params.get('daemon_name', "") + test_config = params.get('test_config', "no") == "yes" + require_modular_daemon = params.get('require_modular_daemon', "no") == "yes" + utils_split_daemons.daemon_mode_check(require_modular_daemon) if expected_exit_time == float('inf'): timeout = MAX_TIMEOUT else: @@ -109,21 +122,21 @@ def check_config_file(): 'there exists living domain') timeout = expected_exit_time + time_tolerance - libvirtd = LibvirtdSession( - logging_handler=_logger, - ) + libvirtd = LibvirtdSession(service_name=daemon_name, logging_handler=_logger) # Setup config file. 
- check_uuid = '13371337-1337-1337-1337-133713371337' - if config_path: - open(config_path, 'a').close() - config = utils_config.LibvirtdConfig(config_path) - config.host_uuid = check_uuid + check_max_clients = int(101) + if test_config: + config = utils_config.get_conf_obj(daemon_name) + logging.debug(config.conf_path) + config_path = config.conf_path + config.max_clients = check_max_clients + arg_str = arg_str + config_path try: check_unix_socket_files() - Libvirtd().stop() + Libvirtd(daemon_name).stop() libvirtd.start(arg_str=arg_str, wait_for_working=False) start = time.time() @@ -150,20 +163,19 @@ def check_config_file(): (expected_exit_time, time_tolerance, wait_time)) not libvirt_version.version_compare(5, 6, 0) and check_unix_socket_files() - if config_path: + if test_config: check_config_file() if pid_path: check_pid_file() finally: libvirtd.exit() - Libvirtd().stop() - Libvirtd("libvirtd.socket").restart() - Libvirtd().start() + Libvirtd(daemon_name).stop() + socket_name = daemon_name + ".socket" + Libvirtd(socket_name).restart() + Libvirtd(daemon_name).start() # Clean up config file - if config_path: + if test_config: config.restore() - if os.path.exists(config_path): - os.remove(config_path) if os.path.exists(pid_path): os.remove(pid_path) From 2722840ca542e8fdd0dce1ad73faad87bd2d78f2 Mon Sep 17 00:00:00 2001 From: Yanqiu Zhang Date: Fri, 12 Nov 2021 04:15:27 -0500 Subject: [PATCH 09/58] iface_network: direct br iface with vlan Vm with direct type bridge mode interface with vlan tag will start fail. 
Signed-off-by: Yanqiu Zhang --- libvirt/tests/cfg/virtual_network/iface_network.cfg | 7 +++++++ libvirt/tests/src/virtual_network/iface_network.py | 3 +++ 2 files changed, 10 insertions(+) diff --git a/libvirt/tests/cfg/virtual_network/iface_network.cfg b/libvirt/tests/cfg/virtual_network/iface_network.cfg index 25ab47aaf7..0adadbc8eb 100644 --- a/libvirt/tests/cfg/virtual_network/iface_network.cfg +++ b/libvirt/tests/cfg/virtual_network/iface_network.cfg @@ -267,6 +267,13 @@ forward_iface = "eno1 eno2 eno2" net_forward = "{'mode':'bridge'}" define_error = "yes" + - direct_br_vlan: + create_network = "no" + change_iface_option = "yes" + iface_type = "direct" + iface_source = "{'dev':'eno1','mode':'bridge'}" + iface_vlan = "{'trunk': 'no', 'tags': [{'id': '47'}]}" + start_error = "yes" - net_bridge: change_iface_option = "yes" iface_source = "{'network':'nettest'}" diff --git a/libvirt/tests/src/virtual_network/iface_network.py b/libvirt/tests/src/virtual_network/iface_network.py index c6a9f71560..f960533e66 100644 --- a/libvirt/tests/src/virtual_network/iface_network.py +++ b/libvirt/tests/src/virtual_network/iface_network.py @@ -129,6 +129,8 @@ def modify_iface_xml(sync=True): source['dev'] = net_ifs[0] del iface.source iface.source = source + if iface_vlan: + iface.vlan = iface.new_vlan(**iface_vlan) if iface_model: iface.model = get_iface_model(iface_model, host_arch) if iface_rom: @@ -617,6 +619,7 @@ def dig_test(session): iface_boot = params.get("iface_boot") iface_model = params.get("iface_model") iface_driver = params.get("iface_driver") + iface_vlan = eval(params.get("iface_vlan", "None")) multiple_guests = params.get("multiple_guests") create_network = "yes" == params.get("create_network", "no") attach_iface = "yes" == params.get("attach_iface", "no") From 19b84914d3221dbfd5a670126aab9246420e083d Mon Sep 17 00:00:00 2001 From: Yanqiu Zhang Date: Wed, 10 Nov 2021 05:45:33 -0500 Subject: [PATCH 10/58] virsh_domjobinfo: test --keep-completed option Test 
--keep-completed option for completed domjobinfo, take live dump action as a demo. Signed-off-by: Yanqiu Zhang --- .../cfg/virsh_cmd/domain/virsh_domjobinfo.cfg | 9 +++++++-- .../src/virsh_cmd/domain/virsh_domjobinfo.py | 19 +++++++++++++++++-- 2 files changed, 24 insertions(+), 4 deletions(-) diff --git a/libvirt/tests/cfg/virsh_cmd/domain/virsh_domjobinfo.cfg b/libvirt/tests/cfg/virsh_cmd/domain/virsh_domjobinfo.cfg index 5f1f491241..e1f178e13c 100644 --- a/libvirt/tests/cfg/virsh_cmd/domain/virsh_domjobinfo.cfg +++ b/libvirt/tests/cfg/virsh_cmd/domain/virsh_domjobinfo.cfg @@ -21,9 +21,14 @@ domjobinfo_action = "dump" variants: - live_dump: - dump_opt="--live" + dump_opt = "--live" - crash_dump: - dump_opt="--crash" + dump_opt = "--crash" + - keep_complete_test: + only running_state + only vm_name + dump_opt = "--live" + keep_complete = "yes" - save_action: domjobinfo_action = "save" - managedsave_action: diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_domjobinfo.py b/libvirt/tests/src/virsh_cmd/domain/virsh_domjobinfo.py index 1fb468b8fe..07e092f971 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_domjobinfo.py +++ b/libvirt/tests/src/virsh_cmd/domain/virsh_domjobinfo.py @@ -64,7 +64,7 @@ def cmp_jobinfo(result, info_list, job_type, actions): if out_dict["Job type"].strip() != job_type: test.fail("Expect %s Job type but got %s" % (job_type, out_dict["Job type"].strip())) - if out_dict["Operation"].strip() != actions.capitalize(): + if job_type != "None" and out_dict["Operation"].strip() != actions.capitalize(): test.fail("Expect %s Operation but got %s" % (actions.capitalize(), out_dict["Operation"].strip())) @@ -89,6 +89,7 @@ def cmp_jobinfo(result, info_list, job_type, actions): act_opt = params.get("dump_opt", "") vm_ref = params.get("domjobinfo_vm_ref") status_error = params.get("status_error", "no") + keep_complete = "yes" == params.get("keep_complete", "no") libvirtd = params.get("libvirtd", "on") # Use tmp_pipe to act as target file for job 
operation in subprocess, # such as vm.dump, vm.save, etc. @@ -183,7 +184,13 @@ def cmp_jobinfo(result, info_list, job_type, actions): except OSError: pass - # Get completed domjobinfo + # Get completed domjobinfo with --keep-completed option, next completed domjobinfo gathering will still get statistics. + if keep_complete: + time.sleep(5) + vm_ref_tmp = "%s --completed --keep-completed" % vm_ref + virsh.domjobinfo(vm_ref_tmp, ignore_status=False, debug=True) + + # Get completed domjobinfo.(Without -keep-completed option, later completed domjobinfo gathering will get None.) if status_error == "no": time.sleep(5) if act_opt != "--live" and vm_ref == domid: @@ -193,6 +200,10 @@ def cmp_jobinfo(result, info_list, job_type, actions): ret_cmplt = virsh.domjobinfo(vm_ref, ignore_status=True, debug=True) status_cmplt = ret_cmplt.exit_status + # Get completed domjobinfo again, get None. + if keep_complete: + ret_cmplt_later = virsh.domjobinfo(vm_ref, ignore_status=True, debug=True) + # Recover the environment. if actions == "managedsave": virsh.managedsave_remove(vm_name, ignore_status=True) @@ -225,3 +236,7 @@ def cmp_jobinfo(result, info_list, job_type, actions): info_list[info_list.index("Expected downtime")] = "Total downtime" logging.debug("The expected info_list for completed job is %s", info_list) cmp_jobinfo(ret_cmplt, info_list, "Completed", actions) + # Check output of later "virsh domjobinfo --completed" + if keep_complete: + info_list = ["Job type"] + cmp_jobinfo(ret_cmplt_later, info_list, "None", actions) From 387fbc09175d80ce98199550956b90bce3f5d0e0 Mon Sep 17 00:00:00 2001 From: Yanqiu Zhang Date: Fri, 12 Nov 2021 07:16:04 -0500 Subject: [PATCH 11/58] net_update: add ip-dhcp-range on nonexist index Expecting fail when add ip-dhcp-range on nonexist index. 
Signed-off-by: Yanqiu Zhang --- libvirt/tests/cfg/virsh_cmd/network/virsh_net_update.cfg | 5 +++++ libvirt/tests/src/virsh_cmd/network/virsh_net_update.py | 9 +++++++++ 2 files changed, 14 insertions(+) diff --git a/libvirt/tests/cfg/virsh_cmd/network/virsh_net_update.cfg b/libvirt/tests/cfg/virsh_cmd/network/virsh_net_update.cfg index 367c50dc13..63318918b0 100644 --- a/libvirt/tests/cfg/virsh_cmd/network/virsh_net_update.cfg +++ b/libvirt/tests/cfg/virsh_cmd/network/virsh_net_update.cfg @@ -369,6 +369,11 @@ - add: only ip_dhcp_range update_command = "add" + - index_nonexist: + error_type = "index-nonexist" + parent_index = 1 + only ip_dhcp_range + update_command = "add" - delete_host_mismatch: only ip_dhcp_host error_type = "host-mismatch" diff --git a/libvirt/tests/src/virsh_cmd/network/virsh_net_update.py b/libvirt/tests/src/virsh_cmd/network/virsh_net_update.py index a2db663a67..cd8b8ea188 100644 --- a/libvirt/tests/src/virsh_cmd/network/virsh_net_update.py +++ b/libvirt/tests/src/virsh_cmd/network/virsh_net_update.py @@ -447,6 +447,11 @@ def section_update(ori_pre, new_pre): without_ip_dhcp == "no"): test_xml.del_element(element="/ip/dhcp", index=section_index) + if error_type == "index-nonexist": + for idx in [3, 2, 1]: + test_xml.del_element(element="/ip", index=idx) + test_xml.del_element(element="/route") + if loop == 0: try: # Define and start network @@ -563,6 +568,10 @@ def section_update(ori_pre, new_pre): # range-mismatch error info err_dic["range-mismatch"] = "couldn't locate a matching dhcp " + \ "range entry in network " + # index-nonexist error info + err_dic["index-nonexist"] = "couldn't update dhcp host entry " + \ + "- no Date: Mon, 15 Nov 2021 06:47:02 -0500 Subject: [PATCH 12/58] crypto_nodedev_create_destroy: restore matrix device The test didn't assign the devices back to host correctly which caused test failure in libvirt_ap_passthrough if run afterwards. 
Restore the matrix device at tear down and add an additional log message to help determine host state for passthrough test if it fails again. Signed-off-by: Sebastian Mitterle --- libvirt/tests/src/passthrough/ap/libvirt_ap_passthrough.py | 1 + .../src/virsh_cmd/nodedev/crypto_nodedev_create_destroy.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/libvirt/tests/src/passthrough/ap/libvirt_ap_passthrough.py b/libvirt/tests/src/passthrough/ap/libvirt_ap_passthrough.py index 0efee5af77..78442f5a92 100644 --- a/libvirt/tests/src/passthrough/ap/libvirt_ap_passthrough.py +++ b/libvirt/tests/src/passthrough/ap/libvirt_ap_passthrough.py @@ -51,6 +51,7 @@ def run(test, params, env): load_vfio_ap() info = CryptoDeviceInfoBuilder.get() + logging.debug("Host lszcrypt got %s", info) if not info.entries or int(info.domains[0].hwtype) < MIN_HWTYPE: test.error("vfio-ap requires at least HWTYPE %s." % MIN_HWTYPE) diff --git a/libvirt/tests/src/virsh_cmd/nodedev/crypto_nodedev_create_destroy.py b/libvirt/tests/src/virsh_cmd/nodedev/crypto_nodedev_create_destroy.py index c3df5c63de..0e9bba6cd6 100644 --- a/libvirt/tests/src/virsh_cmd/nodedev/crypto_nodedev_create_destroy.py +++ b/libvirt/tests/src/virsh_cmd/nodedev/crypto_nodedev_create_destroy.py @@ -120,6 +120,7 @@ def run(test, params, env): libvirt_version.is_libvirt_feature_supported(params) matrix_cap = 'ap_matrix' device_file = None + mask_helper = None info = CryptoDeviceInfoBuilder.get() if int(info.entries[0].hwtype) < HWTYPE: @@ -132,7 +133,7 @@ def run(test, params, env): load_vfio_ap() if find_devices_by_cap(test, matrix_cap): devices = [info.domains[0]] - APMaskHelper.from_infos(devices) + mask_helper = APMaskHelper.from_infos(devices) device_file = create_nodedev_from_xml(uuid, adapter, domain) else: raise test.fail("Could not get %s correctly through nodedev-API" % @@ -143,6 +144,8 @@ def run(test, params, env): destroy_nodedev(dev_name) check_device_was_destroyed(test) finally: + if 
mask_helper: + mask_helper.return_to_host_all() unload_vfio_ap() if device_file: os.remove(device_file) From 29a9a683260a580555577668ccd257abc213b972 Mon Sep 17 00:00:00 2001 From: Yingshun Cui Date: Tue, 16 Nov 2021 10:31:08 +0800 Subject: [PATCH 13/58] cpu_stats: Correct the cpu time checking The value of the total time and the sum of user and system time are allowed to be the same. Signed-off-by: Yingshun Cui --- libvirt/tests/src/virsh_cmd/domain/virsh_cpu_stats.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_cpu_stats.py b/libvirt/tests/src/virsh_cmd/domain/virsh_cpu_stats.py index 7e7875debb..5b40196d34 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_cpu_stats.py +++ b/libvirt/tests/src/virsh_cmd/domain/virsh_cpu_stats.py @@ -123,7 +123,7 @@ def run(test, params, env): system_time = int(total_list[7]) # check Total cpu_time >= User + System cpu_time - if user_time + system_time >= total_time: + if user_time + system_time > total_time: test.fail("total cpu_time < user_time + " "system_time") logging.debug("Check total cpu_time %d >= user + system " From b3bc95d259624b6f28453c6d405c97ebf1d59a15 Mon Sep 17 00:00:00 2001 From: Xiaodai Wang Date: Tue, 16 Nov 2021 11:51:06 +0800 Subject: [PATCH 14/58] v2v: add a new case for datetime format settings in windows See RHEL-241704 and bz1895323. Signed-off-by: Xiaodai Wang --- v2v/tests/cfg/function_test_esx.cfg | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/v2v/tests/cfg/function_test_esx.cfg b/v2v/tests/cfg/function_test_esx.cfg index 5c31c0783f..0250e15d6a 100644 --- a/v2v/tests/cfg/function_test_esx.cfg +++ b/v2v/tests/cfg/function_test_esx.cfg @@ -112,6 +112,12 @@ checkpoint = 'fstrim_warning' msg_content = 'virt-v2v: warning: fstrim on guest filesystem /dev/.*? failed. 
Usually' expect_msg = yes + - schtask: + only esx_70 + checkpoint = "ogac" + virtio_win_path = "/usr/share/virtio-win/" + os_version = VM_VERSION_WIN_SCHTASKS_V2V_EXAMPLE + main_vm = VM_NAME_WIN_SCHTASKS_V2V_EXAMPLE - OGAC: only esx_67 checkpoint = "ogac" From 887bbf09b010f719aad0dafcc3646bf7fb9ab42a Mon Sep 17 00:00:00 2001 From: lcheng Date: Mon, 6 Sep 2021 11:19:34 +0800 Subject: [PATCH 15/58] live_migration: Add case about migration_port_min RHEL7-17384 - [Migration] Do live VM migration with the minimum available migration port occupied by other app Signed-off-by: lcheng --- .../tests/cfg/migration/live_migration.cfg | 7 +++++ libvirt/tests/src/migration/live_migration.py | 31 +++++++++++++++++-- 2 files changed, 35 insertions(+), 3 deletions(-) diff --git a/libvirt/tests/cfg/migration/live_migration.cfg b/libvirt/tests/cfg/migration/live_migration.cfg index 43d0e1e2a6..f94163685e 100644 --- a/libvirt/tests/cfg/migration/live_migration.cfg +++ b/libvirt/tests/cfg/migration/live_migration.cfg @@ -208,3 +208,10 @@ migrate_again = 'yes' migrate_again_status_error = 'no' action_during_mig = '[{"func": "virsh.destroy", "after_event": "iteration: '1'", "func_param": "'%s' % params.get('migrate_main_vm')"}]' + - migration_minport_occupied: + check_port = 'yes' + min_port = 49152 + qemu_conf_list = '["migration_port_min", "migration_port_max"]' + qemu_conf_path = '/etc/libvirt/qemu.conf' + migrate_speed = 10 + action_during_mig = '[{"func": "libvirt_network.check_established", "after_event": "iteration: '1'", "func_param": 'params'}]' diff --git a/libvirt/tests/src/migration/live_migration.py b/libvirt/tests/src/migration/live_migration.py index 2ee22dbb7d..3d8fe721f6 100644 --- a/libvirt/tests/src/migration/live_migration.py +++ b/libvirt/tests/src/migration/live_migration.py @@ -153,10 +153,17 @@ def run(test, params, env): setup_tls = "yes" == params.get("setup_tls", "no") qemu_conf_dest = params.get("qemu_conf_dest", "{}") migrate_tls_force_default = "yes" == 
params.get("migrate_tls_force_default", "no") - server_params = {'server_ip': params.get("migrate_dest_host"), - 'server_user': params.get("remote_user", "root"), - 'server_pwd': params.get("migrate_dest_pwd")} poweroff_src_vm = "yes" == params.get("poweroff_src_vm", "no") + check_port = "yes" == params.get("check_port", "no") + server_ip = params.get("migrate_dest_host") + server_user = params.get("remote_user", "root") + server_pwd = params.get("migrate_dest_pwd") + server_params = {'server_ip': server_ip, + 'server_user': server_user, + 'server_pwd': server_pwd} + qemu_conf_list = eval(params.get("qemu_conf_list", "[]")) + qemu_conf_path = params.get("qemu_conf_path") + min_port = params.get("min_port") vm_session = None qemu_conf_remote = None @@ -181,6 +188,13 @@ def run(test, params, env): # Setup migrate_tls_force default value on local remove_key_local = libvirt_config.remove_key_in_conf(value_list, "qemu") + + if check_port: + server_params['file_path'] = qemu_conf_path + remove_key_remote = libvirt_config.remove_key_in_conf(qemu_conf_list, + "qemu", + remote_params=server_params) + # Update only remote qemu conf if qemu_conf_dest: qemu_conf_remote = libvirt_remote.update_remote_file( @@ -198,6 +212,13 @@ def run(test, params, env): else: remote_file_list.append(libvirt_disk.create_remote_disk_by_same_metadata(vm, params)) + if check_port: + # Create a remote runner + runner_on_target = remote_old.RemoteRunner(host=server_ip, + username=server_user, + password=server_pwd) + cmd = "nc -l -p %s &" % min_port + remote_old.run_remote_cmd(cmd, params, runner_on_target, ignore_status=False) if not vm.is_alive(): vm.start() @@ -230,6 +251,10 @@ def run(test, params, env): logging.debug("Migration returns function results:%s", func_returns) if return_port: port_used = get_used_port(func_returns) + if check_port: + port_used = get_used_port(func_returns) + if int(port_used) != int(min_port) + 1: + test.fail("Wrong port for migration.") if vm_state_after_abort: 
check_vm_state_after_abort(vm_name, vm_state_after_abort, From 74523a7a989a77625b7d0ed919b2a40e336d92c3 Mon Sep 17 00:00:00 2001 From: Sebastian Mitterle Date: Tue, 16 Nov 2021 12:33:02 -0500 Subject: [PATCH 16/58] Determine image path Signed-off-by: Sebastian Mitterle --- virttools/tests/src/virt_install/s390x_hostdev.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/virttools/tests/src/virt_install/s390x_hostdev.py b/virttools/tests/src/virt_install/s390x_hostdev.py index 38ecd0bfba..4d304e2e86 100644 --- a/virttools/tests/src/virt_install/s390x_hostdev.py +++ b/virttools/tests/src/virt_install/s390x_hostdev.py @@ -1,10 +1,18 @@ +import os import logging +from avocado.core import data_dir from virttest.utils_zcrypt import CryptoDeviceInfoBuilder, \ APMaskHelper, load_vfio_ap, unload_vfio_ap from provider.vfio import ccw from uuid import uuid4 +from virttest import storage def run(test, env, params): + disk = storage.get_image_filename_filesytem(env, data_dir.get_data_dir())) + + pass + +def __fake_run(test, env, params): """ Import machine with supported --hostdevs on s390x. Start the machine and confirm the passthrough. 
@@ -33,6 +41,8 @@ def run(test, env, params): if not ccw.device_is_listed(session, chpids): test.fail("CCW device not listed") """ + try: + pass finally: if matrix_dev: matrix_dev.unassign_all() From add5a57b9507e89d7817cd7b97ad62f3730707b1 Mon Sep 17 00:00:00 2001 From: Yan Fu Date: Wed, 17 Nov 2021 15:50:43 +0800 Subject: [PATCH 17/58] Modify the expect ownership of '/var/cache/libvirt/qemu' after libvirt-7.8.0 1.This change is introduced by commit 4c0cf7c4dc5260ffd541f33458b842b1f6cb3865 Signed-off-by: Yan Fu --- libvirt/tests/src/svirt/default_dac_check.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/libvirt/tests/src/svirt/default_dac_check.py b/libvirt/tests/src/svirt/default_dac_check.py index 873d6995d6..a381299488 100644 --- a/libvirt/tests/src/svirt/default_dac_check.py +++ b/libvirt/tests/src/svirt/default_dac_check.py @@ -5,6 +5,7 @@ from virttest import utils_libvirtd from virttest import utils_misc +from virttest import libvirt_version from virttest.libvirt_xml import vm_xml from virttest.staging import utils_memory from virttest.staging.utils_memory import drop_caches @@ -86,7 +87,10 @@ def check_ownership(filename): result = process.run("ls -ld %s" % filename, shell=True).stdout_text.strip().split(' ') ownership = "%s:%s" % (result[2], result[3]) logging.debug(ownership) - expect_result = "qemu:qemu" + if libvirt_version.version_compare(7, 8, 0) and filename == "/var/cache/libvirt/qemu": + expect_result = "root:root" + else: + expect_result = "qemu:qemu" if ownership != expect_result: test.fail("The ownership of %s is %s" % (filename, ownership)) From f9e6ca972e721645eea43ad9f10749c6f346f8ee Mon Sep 17 00:00:00 2001 From: lcheng Date: Tue, 7 Sep 2021 15:08:13 +0800 Subject: [PATCH 18/58] migrate_service_control: Add 3 cases about kill service 1. RHEL-130259 - [Migration] Kill dst qemu during Perform Phase of live migration - p2p migration 2. 
RHEL7-31564 - [Migration] Kill DST libvirtd during Perform Phase of live migration - p2p migration 3. RHEL-130061 - [Migration] Kill SRC libvirtd during Perform Phase of live migration - p2p migration Signed-off-by: lcheng --- .../cfg/migration/migrate_service_control.cfg | 46 +++++ .../src/migration/migrate_service_control.py | 176 ++++++++++++++++++ provider/migration/migration_base.py | 1 + 3 files changed, 223 insertions(+) create mode 100644 libvirt/tests/cfg/migration/migrate_service_control.cfg create mode 100644 libvirt/tests/src/migration/migrate_service_control.py diff --git a/libvirt/tests/cfg/migration/migrate_service_control.cfg b/libvirt/tests/cfg/migration/migrate_service_control.cfg new file mode 100644 index 0000000000..b06cde1f1e --- /dev/null +++ b/libvirt/tests/cfg/migration/migrate_service_control.cfg @@ -0,0 +1,46 @@ +- virsh.migrate_service_control: + type = migrate_service_control + migration_setup = "yes" + storage_type = 'nfs' + setup_local_nfs = 'yes' + disk_type = "file" + disk_source_protocol = "netfs" + mnt_path_name = ${nfs_mount_dir} + # Console output can only be monitored via virsh console output + only_pty = True + take_regular_screendumps = no + # Extra options to pass after + virsh_migrate_extra = "" + # SSH connection time out + ssh_timeout = 60 + # Local URI + virsh_migrate_connect_uri = "qemu:///system" + image_convert = 'no' + variants: + - without_postcopy: + postcopy_options = "" + variants: + - p2p_live: + virsh_migrate_options = "--live --p2p --persistent --verbose" + variants: + - kill_service: + kill_service = 'yes' + vm_state_after_abort = "{'source': 'running', 'target': 'nonexist'}" + migrate_speed = 10 + status_error = 'yes' + action_during_mig = '[{"func": "utils_misc.kill_service", "after_event": "migration-iteration", "before_event": "Suspended Migrated", "func_param": "params"}]' + migrate_again = 'yes' + migrate_again_status_error = 'no' + expected_image_ownership = 'qemu:qemu' + variants: + - 
kill_libvirtd_on_dst: + service_name = "libvirtd" + service_on_dst = "yes" + err_msg = 'client socket is closed' + - kill_qemu_on_dst: + service_name = "qemu-kvm" + service_on_dst = "yes" + err_msg = 'qemu unexpectedly closed the monitor|domain is no longer running' + - kill_libvirtd_on_src: + service_name = "libvirtd" + err_msg = 'End of file while reading data: Input/output error' diff --git a/libvirt/tests/src/migration/migrate_service_control.py b/libvirt/tests/src/migration/migrate_service_control.py new file mode 100644 index 0000000000..ee18c4a32b --- /dev/null +++ b/libvirt/tests/src/migration/migrate_service_control.py @@ -0,0 +1,176 @@ +import logging +import os + +from pwd import getpwuid +from grp import getgrgid + +from virttest import libvirt_vm +from virttest import migration +from virttest import virsh +from virttest import libvirt_version +from virttest import remote +from virttest import utils_libvirtd + +from virttest.libvirt_xml import vm_xml +from virttest.utils_test import libvirt + +from provider.migration import migration_base + + +def check_image_ownership(vm_name, exp_ownership, test): + """ + Check ownership of image + + :param vm_name: vm name + :param exp_ownership: the expected ownership + :param test: test object + """ + sourcelist = vm_xml.VMXML.get_disk_source(vm_name) + disk_source = sourcelist[0].find('source').get('file') + logging.debug("image file: %s" % disk_source) + image_ownership = "%s:%s" % (getpwuid(os.stat(disk_source).st_uid).pw_name, + getgrgid(os.stat(disk_source).st_gid).gr_name) + logging.debug("image ownership: %s" % image_ownership) + if image_ownership != exp_ownership: + test.fail("The ownership {} is not expected, it should be {}." 
+ .format(image_ownership, exp_ownership)) + + +def check_vm_state_after_abort(vm_name, vm_state_after_abort, src_uri, dest_uri, test): + """ + Check the VM state after domjobabort the migration + + :param vm_name: str, vm name + :param vm_state_after_abort: str, like "{'source': 'running', 'target': 'nonexist'}" + source: local host, target: remote host + :param src_uri: uri for source host + :param dest_uri: uri for target host + :param test: test object + """ + state_dict = eval(vm_state_after_abort) + logging.debug("Check guest state should be {} on source host".format(state_dict['source'])) + libvirt.check_vm_state(vm_name, state=state_dict['source'], uri=src_uri) + logging.debug("Check guest persistent on source host") + cmd_res = virsh.domstats(vm_name, '--list-persistent', debug=True, ignore_status=False) + if not cmd_res.stdout_text.count(vm_name): + test.fail("The guest is expected to be persistent on source host, but it isn't") + logging.debug("Check guest state should be {} on target host".format(state_dict['target'])) + if state_dict['target'] == 'nonexist': + if virsh.domain_exists(vm_name, uri=dest_uri): + test.fail("The domain on target host is found, but expected not") + else: + libvirt.check_vm_state(vm_name, state=state_dict['target'], uri=dest_uri) + + +def run(test, params, env): + """ + Run the test + + :param test: test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. 
+ """ + libvirt_version.is_libvirt_feature_supported(params) + + vm_name = params.get("migrate_main_vm") + vm = env.get_vm(vm_name) + vm.verify_alive() + bk_uri = vm.connect_uri + + migration_test = migration.MigrationTest() + migration_test.check_parameters(params) + extra_args = migration_test.update_virsh_migrate_extra_args(params) + + extra = params.get("virsh_migrate_extra") + postcopy_options = params.get("postcopy_options") + if postcopy_options: + extra = "%s %s" % (extra, postcopy_options) + params["virsh_migrate_desturi"] = libvirt_vm.complete_uri( + params.get("migrate_dest_host")) + dest_uri = params.get("virsh_migrate_desturi") + options = params.get("virsh_migrate_options", + "--live --p2p --persistent --verbose") + virsh_options = params.get("virsh_options", "") + action_during_mig = params.get("action_during_mig") + migrate_speed = params.get("migrate_speed") + migrate_again = "yes" == params.get("migrate_again", "no") + vm_state_after_abort = params.get("vm_state_after_abort") + + kill_service = "yes" == params.get("kill_service", "no") + expected_image_ownership = params.get("expected_image_ownership") + service_name = params.get("service_name", "libvirtd") + service_on_dst = "yes" == params.get("service_on_dst", "no") + server_ip = params.get("remote_ip") + server_user = params.get("remote_user", "root") + server_pwd = params.get("remote_pwd") + + # For safety reasons, we'd better back up xmlfile. 
+ new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) + orig_config_xml = new_xml.copy() + + try: + # Update guest disk xml + libvirt.set_vm_disk(vm, params) + + logging.debug("Guest xml after starting:\n%s", + vm_xml.VMXML.new_from_dumpxml(vm_name)) + + vm.wait_for_login().close() + + if kill_service: + check_image_ownership(vm_name, expected_image_ownership, test) + if service_name == "libvirtd": + if service_on_dst: + remote_session = remote.wait_for_login('ssh', server_ip, '22', + server_user, server_pwd, + r"[\#\$]\s*$") + service_name = utils_libvirtd.Libvirtd(session=remote_session).service_name + remote_session.close() + else: + service_name = utils_libvirtd.Libvirtd().service_name + params.update({'service_name': service_name}) + + if migrate_speed: + mode = 'both' if '--postcopy' in postcopy_options else 'precopy' + migration_test.control_migrate_speed(vm_name, + int(migrate_speed), + mode) + + if action_during_mig: + action_during_mig = migration_base.parse_funcs(action_during_mig, + test, params) + + # Execute migration process + migration_base.do_migration(vm, migration_test, None, dest_uri, + options, virsh_options, extra, + action_during_mig, + extra_args) + + func_returns = dict(migration_test.func_ret) + migration_test.func_ret.clear() + logging.debug("Migration returns function results:%s", func_returns) + + if vm_state_after_abort: + check_vm_state_after_abort(vm_name, vm_state_after_abort, + bk_uri, dest_uri, test) + + if kill_service: + check_image_ownership(vm_name, expected_image_ownership, test) + + if migrate_again: + action_during_mig = migration_base.parse_funcs(params.get('action_during_mig_again'), + test, params) + extra_args['status_error'] = params.get("migrate_again_status_error", "no") + migration_base.do_migration(vm, migration_test, None, dest_uri, + options, virsh_options, + extra, action_during_mig, + extra_args) + if int(migration_test.ret.exit_status) == 0: + migration_test.post_migration_check([vm], params, 
uri=dest_uri) + finally: + logging.info("Recover test environment") + vm.connect_uri = bk_uri + # Clean VM on destination and source + migration_test.cleanup_vm(vm, dest_uri) + + orig_config_xml.sync() diff --git a/provider/migration/migration_base.py b/provider/migration/migration_base.py index 8e873379f5..8d6a5afd12 100644 --- a/provider/migration/migration_base.py +++ b/provider/migration/migration_base.py @@ -4,6 +4,7 @@ import signal # pylint: disable=W0611 from virttest import virsh # pylint: disable=W0611 +from virttest import utils_misc # pylint: disable=W0611 from virttest.utils_conn import TLSConnection from virttest.utils_libvirt import libvirt_network # pylint: disable=W0611 From 498376121702f4ad6c133dce733640fb8257bb08 Mon Sep 17 00:00:00 2001 From: Yan Fu Date: Wed, 17 Nov 2021 10:41:10 +0800 Subject: [PATCH 19/58] Modify to start 'virtnetworkd' to do the test in the modular daemon mode 1.Keep using 'libvirtd' in legacy libvirtd mode; 2.Using 'virtnetworkd' in modular daemon mode; Signed-off-by: Yan Fu --- libvirt/tests/src/libvirtd_start.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/libvirt/tests/src/libvirtd_start.py b/libvirt/tests/src/libvirtd_start.py index bf7ee48412..2fa42b1a20 100644 --- a/libvirt/tests/src/libvirtd_start.py +++ b/libvirt/tests/src/libvirtd_start.py @@ -119,6 +119,7 @@ def _check_errors(): errors = [] # Run libvirt session and collect errors in log. 
libvirtd_session = utils_libvirtd.LibvirtdSession( + service_name="virtnetworkd", logging_handler=_error_handler, logging_params=(errors,), logging_pattern=r'[-\d]+ [.:+\d]+ [:\d]+ error :', @@ -143,6 +144,8 @@ def _check_errors(): _check_errors() finally: logging.info('Recovering services status') + #Restart socket service after starting process at foreground + utils_libvirtd.Libvirtd("virtnetworkd.socket").restart() # If service do not exists, then backup status and current status # will all be none and nothing will be done if service_mgr.status('iptables') != backup_iptables_status: From 7fc3c6740d7d5b5e60aadaef6df487c415c3419d Mon Sep 17 00:00:00 2001 From: chunfuwen Date: Tue, 19 Oct 2021 20:58:44 -0400 Subject: [PATCH 20/58] Fix gluster disk device detach event issue When detach gluster disk device, device-removed may not be sent out if VM not completely booted Signed-off-by: chunfuwen --- libvirt/tests/src/virtual_disks/virtual_disks_gluster.py | 1 + 1 file changed, 1 insertion(+) diff --git a/libvirt/tests/src/virtual_disks/virtual_disks_gluster.py b/libvirt/tests/src/virtual_disks/virtual_disks_gluster.py index 30ff03c0ca..5938500f67 100644 --- a/libvirt/tests/src/virtual_disks/virtual_disks_gluster.py +++ b/libvirt/tests/src/virtual_disks/virtual_disks_gluster.py @@ -207,6 +207,7 @@ def test_pmsuspend(vm_name): if start_vm: if vm.is_dead(): vm.start() + vm.wait_for_login().close() else: if not vm.is_dead(): vm.destroy() From e52b5647cfa7860ea15ef709f4e09aba8f0fcfd3 Mon Sep 17 00:00:00 2001 From: chunfuwen Date: Thu, 18 Nov 2021 01:42:14 -0500 Subject: [PATCH 21/58] Fix change media matrix event not received issue Regarding hotplug device changes, it need ensure VM is completedly booted Signed-off-by: chunfuwen --- libvirt/tests/src/virsh_cmd/domain/virsh_change_media_matrix.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_change_media_matrix.py b/libvirt/tests/src/virsh_cmd/domain/virsh_change_media_matrix.py 
index a57c6cee60..b13b60c511 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_change_media_matrix.py +++ b/libvirt/tests/src/virsh_cmd/domain/virsh_change_media_matrix.py @@ -298,6 +298,8 @@ def check_result(vm_name, disk_source, disk_type, disk_target, wait_for_event = False else: wait_for_event = True + if vm.is_alive(): + vm.wait_for_login().close() ret = virsh.change_media(vm_ref, target_device, all_options, wait_for_event=wait_for_event, event_timeout=40, From 00322ceab59def7ba797172874d002a0471febc8 Mon Sep 17 00:00:00 2001 From: lcheng Date: Fri, 19 Nov 2021 00:27:13 +0800 Subject: [PATCH 22/58] virsh_qemu_monitor_command: Update error message From libvirt-7.9.0, run invalid qemu_monitor_command will return 0. For libvirt-7.9.0: # virsh qemu-monitor-command vm1 --cmd '{system_reset}' {"id":"libvirt-390","error":{"class":"CommandNotFound","desc":"The command {system_reset} has not been found"}} # echo $? 0 For libvirt-7.8.0: # virsh qemu-monitor-command vm1 --cmd '{system_reset}' error: internal error: cannot parse json {system_reset}: lexical error: invalid char in json text. {system_reset} (right here) ------^ # echo $? 
1 Signed-off-by: lcheng --- .../tests/src/virsh_cmd/domain/virsh_qemu_monitor_command.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_qemu_monitor_command.py b/libvirt/tests/src/virsh_cmd/domain/virsh_qemu_monitor_command.py index af4c23a1b0..688aabf34a 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_qemu_monitor_command.py +++ b/libvirt/tests/src/virsh_cmd/domain/virsh_qemu_monitor_command.py @@ -58,7 +58,8 @@ def run(test, params, env): if status_error: if not status: # Return status is 0 with unknown command - if "unknown command:" in output: + # From libvirt-7.9.0, return status is 0 with CommandNotFound + if "unknown command:" in output or "CommandNotFound" in output: logging.debug("Command failed: %s" % output) else: test.fail("Expect fail, but run successfully.") From d3c114cac5979ffc08bf0972184c3ca0b0e4c537 Mon Sep 17 00:00:00 2001 From: Meina Li Date: Thu, 18 Nov 2021 22:38:49 -0500 Subject: [PATCH 23/58] virsh_blockcopy.py: remove libvirtd status check to adapt to the split daemon mode Signed-off-by: Meina Li --- libvirt/tests/src/virsh_cmd/domain/virsh_blockcopy.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_blockcopy.py b/libvirt/tests/src/virsh_cmd/domain/virsh_blockcopy.py index a602d77bdb..b72438db41 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_blockcopy.py +++ b/libvirt/tests/src/virsh_cmd/domain/virsh_blockcopy.py @@ -443,7 +443,7 @@ def run(test, params, env): extra_dict = {'uri': uri, 'unprivileged_user': unprivileged_user, 'debug': True, 'ignore_status': True, 'timeout': timeout} - libvirtd_utl = utils_libvirtd.Libvirtd() + libvirtd_utl = utils_libvirtd.Libvirtd('virtqemud') libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(), "libvirt_daemons.log") libvirtd_conf_dict = {"log_filter": '"3:json 1:libvirt 1:qemu"', "log_outputs": '"1:file:%s"' % libvirtd_log_path} From 
2b67f9dbb3b473aaa3db274e5d70d0e721b92dc1 Mon Sep 17 00:00:00 2001 From: Yingshun Cui Date: Fri, 29 Oct 2021 16:04:00 +0800 Subject: [PATCH 24/58] vDPA: Add a case to test domain lifecycle This PR adds below case: RHEL-196259: Test vm lifecycle with vdpa type inteface Signed-off-by: Yingshun Cui --- .../virtual_interface/domain_lifecycle.cfg | 14 ++ .../src/virtual_interface/domain_lifecycle.py | 140 ++++++++++++++++++ provider/interface/check_points.py | 32 ++++ provider/interface/vdpa_base.py | 17 ++- 4 files changed, 197 insertions(+), 6 deletions(-) create mode 100644 libvirt/tests/cfg/virtual_interface/domain_lifecycle.cfg create mode 100644 libvirt/tests/src/virtual_interface/domain_lifecycle.py create mode 100644 provider/interface/check_points.py diff --git a/libvirt/tests/cfg/virtual_interface/domain_lifecycle.cfg b/libvirt/tests/cfg/virtual_interface/domain_lifecycle.cfg new file mode 100644 index 0000000000..634bda04c0 --- /dev/null +++ b/libvirt/tests/cfg/virtual_interface/domain_lifecycle.cfg @@ -0,0 +1,14 @@ +- iface.domain_lifecycle: + type = domain_lifecycle + start_vm = no + + variants dev_type: + - vdpa: + only x86_64 + func_supported_since_libvirt_ver = (7, 3, 0) + func_supported_since_qemu_kvm_ver = (6, 0, 0) + iface_dict = {"source": {'dev':'/dev/vhost-vdpa-0'}} + save_error = "yes" + variants test_target: + - simulator: + - mellanox: diff --git a/libvirt/tests/src/virtual_interface/domain_lifecycle.py b/libvirt/tests/src/virtual_interface/domain_lifecycle.py new file mode 100644 index 0000000000..a8d8277fbf --- /dev/null +++ b/libvirt/tests/src/virtual_interface/domain_lifecycle.py @@ -0,0 +1,140 @@ +import logging +import os + +from virttest import data_dir +from virttest import libvirt_version +from virttest import utils_misc +from virttest import utils_vdpa +from virttest import virsh +from virttest.libvirt_xml import vm_xml +from virttest.staging import service +from virttest.utils_libvirt import libvirt_vmxml +from virttest.utils_test 
import libvirt + +from provider.interface import interface_base +from provider.interface import check_points + +VIRSH_ARGS = {'debug': True, 'ignore_status': False} + + +def run(test, params, env): + """ + Test domain lifecycle + """ + + def setup_default(): + """ + Default setup + """ + logging.debug("Remove VM's interface devices.") + libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface') + + def teardown_default(): + """ + Default cleanup + """ + pass + + def setup_vdpa(): + """ + Setup vDPA environment + """ + setup_default() + test_env_obj = None + if test_target == "simulator": + test_env_obj = utils_vdpa.VDPASimulatorTest() + else: + pf_pci = utils_vdpa.get_vdpa_pci() + test_env_obj = utils_vdpa.VDPAOvsTest(pf_pci) + test_env_obj.setup() + return test_env_obj + + def teardown_vdpa(): + """ + Cleanup vDPA environment + """ + if test_target != "simulator": + service.Factory.create_service("NetworkManager").restart() + if test_obj: + test_obj.cleanup() + + def run_test(dev_type, params, test_obj=None): + """ + Test domain lifecycle + + 1) Start the vm and check network + 2) Destroy and start the VM, and check network + 3) Save and restore, and check network + 4) Suspend and resume, and check network + 5) Reboot the VM and check the network + """ + # Setup Iface device + vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) + iface_dict = eval(params.get('iface_dict', '{}')) + iface_dev = interface_base.create_iface(dev_type, iface_dict) + libvirt.add_vm_device(vmxml, iface_dev) + + logging.info("Start a VM with a '%s' type interface.", dev_type) + vm.start() + vm.wait_for_serial_login(timeout=240).close() + check_points.check_network_accessibility(vm, test_obj=test_obj, **params) + + logging.info("Destroy and start the VM.") + virsh.destroy(vm.name, **VIRSH_ARGS) + virsh.start(vm.name, **VIRSH_ARGS) + check_points.check_network_accessibility( + vm, test_obj=test_obj, config_vdpa=True, **params) + + logging.info("Save the VM.") + save_error = "yes" == 
params.get("save_error", "no") + save_path = os.path.join(data_dir.get_tmp_dir(), vm.name + '.save') + res = virsh.save(vm.name, 'sss', debug=True) + libvirt.check_exit_status(res, expect_error=save_error) + if not save_error: + logging.info("Restore vm.") + virsh.restore(save_path, **VIRSH_ARGS) + check_points.check_network_accessibility( + vm, test_obj=test_obj, config_vdpa=False, **params) + + logging.info("Suspend and resume the vm.") + virsh.suspend(vm.name, **VIRSH_ARGS) + if not libvirt.check_vm_state(vm_name, "paused"): + test.fail("VM should be paused!") + virsh.resume(vm.name, **VIRSH_ARGS) + if not libvirt.check_vm_state(vm_name, "running"): + test.fail("VM should be running!") + check_points.check_network_accessibility( + vm, test_obj=test_obj, config_vdpa=False, **params) + + logging.debug("Reboot VM and check network.") + virsh.reboot(vm.name, **VIRSH_ARGS) + check_points.check_network_accessibility( + vm, test_obj=test_obj, config_vdpa=False, **params) + + libvirt_version.is_libvirt_feature_supported(params) + utils_misc.is_qemu_function_supported(params) + + # Variable assignment + test_target = params.get('test_target', '') + dev_type = params.get('dev_type', '') + + vm_name = params.get('main_vm') + vm = env.get_vm(vm_name) + + vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) + backup_vmxml = vmxml.copy() + + setup_test = eval("setup_%s" % dev_type) if "setup_%s" % dev_type in \ + locals() else setup_default + teardown_test = eval("teardown_%s" % dev_type) if "teardown_%s" % \ + dev_type in locals() else teardown_default + + test_obj = None + try: + # Execute test + test_obj = setup_test() + run_test(dev_type, params, test_obj=test_obj) + + finally: + backup_vmxml.sync() + teardown_test() diff --git a/provider/interface/check_points.py b/provider/interface/check_points.py new file mode 100644 index 0000000000..5428f50a3f --- /dev/null +++ b/provider/interface/check_points.py @@ -0,0 +1,32 @@ +import logging +from avocado.core import exceptions + 
+from provider.interface import vdpa_base + + +def check_network_accessibility(vm, **kwargs): + """ + Check VM's network accessibility + + :param vm: VM object + """ + if kwargs.get("recreate_vm_session", "yes") == "yes": + logging.debug("Recreating vm session...") + vm.cleanup_serial_console() + vm.create_serial_console() + vm_session = vm.wait_for_serial_login() + else: + vm_session = vm.session + + dev_type = kwargs.get("dev_type") + if dev_type == "vdpa": + br_name = None + config_vdpa = True + test_target = kwargs.get("test_target") + if test_target == "mellanox": + if not kwargs.get("test_obj"): + raise exceptions.TestError("test_obj must be assigned!") + br_name = kwargs.get("test_obj").br_name + config_vdpa = kwargs.get("config_vdpa", True) + vdpa_base.check_vdpa_conn( + vm_session, test_target, br_name, config_vdpa=config_vdpa) diff --git a/provider/interface/vdpa_base.py b/provider/interface/vdpa_base.py index ca431b1f0f..7006e0eaf1 100644 --- a/provider/interface/vdpa_base.py +++ b/provider/interface/vdpa_base.py @@ -31,13 +31,14 @@ def config_vdpa_conn(vm_session, vm_iface, br_name, .format(vm_iface, vm_ip, cidr)) vm_session.cmd(cmd) logging.debug("Set ip address of the bridge.") - cmd = ("ip addr add {0}/{1} dev {2}; sleep 5;ip link set {2} up" - .format(br_ip_addr, cidr, br_name)) + cmd = ("ip addr del {0}/{1} dev {2}; sleep 5; ip addr add {0}/{1} dev {2};" + "sleep 5;ip link set {2} up".format(br_ip_addr, cidr, br_name)) process.run(cmd, shell=True) def check_vdpa_network(vm_session, vm_iface, br_name, - ping_dest="100.100.100.100"): + ping_dest="100.100.100.100", + config_vdpa=True): """ Check vdpa network connection @@ -45,8 +46,10 @@ def check_vdpa_network(vm_session, vm_iface, br_name, :param vm_iface: VM's interface :param br_name: Bridge name :param ping_dest: The ip address to ping + :config_vdpa: Whether to config vDPA connection """ - config_vdpa_conn(vm_session, vm_iface, br_name) + if config_vdpa: + config_vdpa_conn(vm_session, vm_iface, 
br_name) if not utils_misc.wait_for(lambda: not utils_test.ping( ping_dest, count=3, timeout=5, output_func=logging.debug, @@ -75,16 +78,18 @@ def check_rx_tx_packages(vm_session, vm_iface): raise exceptions.TestFail("The value of rx and tx should be same.") -def check_vdpa_conn(vm_session, test_target, br_name=None): +def check_vdpa_conn(vm_session, test_target, br_name=None, config_vdpa=True): """ Check vDPA connection :param vm_session: An session to VM :param test_target: Test target env, eg, "mellanox" or "simulator" :param br_name: Bridge name + :config_vdpa: Whether to config vDPA connection """ vm_iface = interface_base.get_vm_iface(vm_session) if test_target == "mellanox": - check_vdpa_network(vm_session, vm_iface, br_name) + check_vdpa_network(vm_session, vm_iface, br_name, + config_vdpa=config_vdpa) elif test_target == "simulator": check_rx_tx_packages(vm_session, vm_iface) From 4dfa358d2f2e187c39726eb8bf9bfe111256c70f Mon Sep 17 00:00:00 2001 From: Liu Yiding Date: Mon, 22 Nov 2021 03:38:13 -0500 Subject: [PATCH 25/58] virsh_snapshot_disk: Fix No more available PCI slot Signed-off-by: Liu Yiding --- libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_disk.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_disk.py b/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_disk.py index 9cf8404545..d422e8a058 100644 --- a/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_disk.py +++ b/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_disk.py @@ -14,6 +14,7 @@ from virttest import utils_libvirtd from virttest import gluster from virttest.utils_test import libvirt as utlv +from virttest.utils_libvirt import libvirt_pcicontr from virttest import libvirt_version @@ -189,6 +190,8 @@ def run(test, params, env): extra = "--persistent --subdriver %s" % image_format if not multi_gluster_disks: + # Fix No more available PCI slots + libvirt_pcicontr.reset_pci_num(vm_name, 15) # Do the attach action. 
out = process.run("qemu-img info %s" % img_path, shell=True) logging.debug("The img info is:\n%s" % out.stdout.strip()) From b3b84842f24e49f7d2dc36aadfb1dc71adaa46a5 Mon Sep 17 00:00:00 2001 From: Liu Yiding Date: Mon, 22 Nov 2021 04:47:10 -0500 Subject: [PATCH 26/58] numa_numanode_cpu_info: Run bash cmd in subshell '>' is a shell construct meaning that the command needs to be run as input to a shell. ``` echo {} > /sys/devices/system/node/node{}/hugepages/hugepages-2048kB/nr_hugepages ``` If we don't set shell=True, process just echo this cmd instead of writing the nr_hugepages. Signed-off-by: Liu Yiding --- libvirt/tests/src/numa/numa_numanode_cpu_info.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libvirt/tests/src/numa/numa_numanode_cpu_info.py b/libvirt/tests/src/numa/numa_numanode_cpu_info.py index 4c0d013a3b..adcecaf54e 100644 --- a/libvirt/tests/src/numa/numa_numanode_cpu_info.py +++ b/libvirt/tests/src/numa/numa_numanode_cpu_info.py @@ -40,13 +40,13 @@ def setup_host(online_nodes, pages_list): for pages in pages_list: ret = process.run( 'echo {} > /sys/devices/system/node/node{}/hugepages/hugepages-2048kB/nr_hugepages'. - format(pages, online_nodes[index])) + format(pages, online_nodes[index]), shell=True) if ret.exit_status: raise TestError('Cannot set {} hugepages on node {}'. format(pages, online_nodes[index])) ret = process.run( 'cat /sys/devices/system/node/node{}/hugepages/hugepages-2048kB/nr_hugepages'. - format(online_nodes[index])) + format(online_nodes[index]), shell=True) if pages not in ret.stdout_text: raise TestError('Setting {} hugepages on node {} was unsuccessful'. 
format(pages, online_nodes[index])) From 4990fe53363585f47174f06d9dd61e8c6eef9fa3 Mon Sep 17 00:00:00 2001 From: Liu Yiding Date: Mon, 22 Nov 2021 05:55:30 -0500 Subject: [PATCH 27/58] numa_numanode_cpu_info: Add cleanup for node page set Signed-off-by: Liu Yiding --- libvirt/tests/src/numa/numa_numanode_cpu_info.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/libvirt/tests/src/numa/numa_numanode_cpu_info.py b/libvirt/tests/src/numa/numa_numanode_cpu_info.py index adcecaf54e..dc1be68fde 100644 --- a/libvirt/tests/src/numa/numa_numanode_cpu_info.py +++ b/libvirt/tests/src/numa/numa_numanode_cpu_info.py @@ -28,16 +28,21 @@ def update_xml(vm_name, online_nodes, params): vmxml.sync() -def setup_host(online_nodes, pages_list): +def setup_host(online_nodes, pages_list, ori_page_set): """ Setup host for test - update number of hugepages and check :param online_nodes: List of all online nodes with memory available :param pages_list: List of required number of pages for particular nodes + :param ori_page_set: A dict used to save original node page """ index = 0 if len(online_nodes) > 2: for pages in pages_list: + ori_page_set[online_nodes[index]] = process.run( + 'cat /sys/devices/system/node/node{}/hugepages/hugepages-2048kB/nr_hugepages'. + format(online_nodes[index]), shell=True).stdout_text.strip() + logging.debug("ori_page_set is {}".format(ori_page_set)) ret = process.run( 'echo {} > /sys/devices/system/node/node{}/hugepages/hugepages-2048kB/nr_hugepages'. 
format(pages, online_nodes[index]), shell=True) @@ -67,7 +72,8 @@ def run(test, params, env): backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name) numa_info = utils_misc.NumaInfo() online_nodes = numa_info.get_online_nodes_withmem() - setup_host(online_nodes, pages_list) + ori_page_set = {} + setup_host(online_nodes, pages_list, ori_page_set) try: if vm.is_alive(): vm.destroy() @@ -80,4 +86,8 @@ def run(test, params, env): except Exception as e: test.error("Unexpected error: {}".format(e)) finally: + for node_index, ori_page in ori_page_set.items(): + process.run( + 'echo {} > /sys/devices/system/node/node{}/hugepages/hugepages-2048kB/nr_hugepages'. + format(ori_page, node_index), shell=True) backup_xml.sync() From a01310bfc63a984c8ee9f3c8c7c4b36dbc290218 Mon Sep 17 00:00:00 2001 From: Xiaodai Wang Date: Mon, 22 Nov 2021 16:59:59 +0800 Subject: [PATCH 28/58] Revert ""v2v: modify dir location for -o json"" This reverts commit 11e1f0d7037af8a55179d0d1328afe19db0e2e14. --- v2v/tests/cfg/function_test_esx.cfg | 1 - v2v/tests/src/function_test_esx.py | 10 ---------- 2 files changed, 11 deletions(-) diff --git a/v2v/tests/cfg/function_test_esx.cfg b/v2v/tests/cfg/function_test_esx.cfg index 5c31c0783f..736a64d288 100644 --- a/v2v/tests/cfg/function_test_esx.cfg +++ b/v2v/tests/cfg/function_test_esx.cfg @@ -39,7 +39,6 @@ - json: only dest_json only uefi.win2019,device_map,without_ip_option,env_leak,block_dev - base_os_directory="/var/lib/libvirt/images" variants: - default: no block_dev diff --git a/v2v/tests/src/function_test_esx.py b/v2v/tests/src/function_test_esx.py index 59b2ed8a9c..3bb0466def 100644 --- a/v2v/tests/src/function_test_esx.py +++ b/v2v/tests/src/function_test_esx.py @@ -781,14 +781,6 @@ def vm_check(status_error): if output_mode == 'libvirt': pvt.pre_pool(pool_name, pool_type, pool_target, '') - # Create json output dir - if output_mode == 'json': - base_os_directory = params_get(params, 'base_os_directory') - os_directory = None - os_directory = 
tempfile.TemporaryDirectory(prefix='v2v_test_', dir=base_os_directory) - logging.info("-os dir is %s", os_directory) - params['os_directory'] = os_directory.name - if 'root' in checkpoint and 'ask' in checkpoint: v2v_params['v2v_opts'] += ' --root ask' v2v_params['custom_inputs'] = params.get('choice', '2') @@ -1026,8 +1018,6 @@ def vm_check(status_error): v2v_sasl.close_session() if output_mode == 'libvirt': pvt.cleanup_pool(pool_name, pool_type, pool_target, '') - if output_mode == 'json' and os_directory: - os_directory.cleanup() if 'with_proxy' in checkpoint: logging.info('Unset http_proxy&https_proxy') os.environ.pop('http_proxy') From 089c29db665b0e1e1a38cba1a73d3c85316e4986 Mon Sep 17 00:00:00 2001 From: Xiaodai Wang Date: Tue, 23 Nov 2021 10:23:00 +0800 Subject: [PATCH 29/58] v2v: cleanup the os_directory content at the end If os_directory was set in cfg file, the directory would not be cleaned up correctly. This patch fixes the issue. Signed-off-by: Xiaodai Wang --- v2v/tests/src/function_test_esx.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/v2v/tests/src/function_test_esx.py b/v2v/tests/src/function_test_esx.py index 59b2ed8a9c..8846a6dba5 100644 --- a/v2v/tests/src/function_test_esx.py +++ b/v2v/tests/src/function_test_esx.py @@ -1034,5 +1034,7 @@ def vm_check(status_error): os.environ.pop('https_proxy') if unprivileged_user: process.system("userdel -fr %s" % unprivileged_user) + if params.get('os_directory') and os.path.isdir(params['os_directory']): + shutil.rmtree(params['os_directory'], ignore_errors=True) # Cleanup constant files utils_v2v.cleanup_constant_files(params) From 7a584b8b162bf7381bafab04c2a111a3db852770 Mon Sep 17 00:00:00 2001 From: Liu Yiding Date: Tue, 23 Nov 2021 11:29:12 +0800 Subject: [PATCH 30/58] nwfilter_binding_dumpxml: Fix No more available PCI slot Signed-off-by: Liu Yiding --- libvirt/tests/src/nwfilter/nwfilter_binding_dumpxml.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git 
a/libvirt/tests/src/nwfilter/nwfilter_binding_dumpxml.py b/libvirt/tests/src/nwfilter/nwfilter_binding_dumpxml.py index a4d9d02f92..0d94294965 100644 --- a/libvirt/tests/src/nwfilter/nwfilter_binding_dumpxml.py +++ b/libvirt/tests/src/nwfilter/nwfilter_binding_dumpxml.py @@ -6,6 +6,7 @@ from virttest import data_dir from virttest.utils_test import libvirt as utlv from virttest.libvirt_xml.devices import interface +from virttest.utils_libvirt import libvirt_pcicontr from avocado.utils import process @@ -67,8 +68,11 @@ def set_env(): set two interface with different network filter and change interface type """ - virsh.attach_interface(vm_name, option) + # Add enough PCI to attach interface + libvirt_pcicontr.reset_pci_num(vm_name) + virsh.attach_interface(vm_name, option, debug=True) vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name) + logging.debug("Guest xml is {}".format(vmxml)) devices = vmxml.get_devices('interface') iface_xml = devices[0] iface_xml_2 = devices[1] From abe110bc88e2040ee1e1df4c708751e32bedccc8 Mon Sep 17 00:00:00 2001 From: lcheng Date: Tue, 23 Nov 2021 16:26:56 +0800 Subject: [PATCH 31/58] live_migration: Add case about abort migration RHEL7-17410 - [Migration][domjobabort] Abort migration in PerformPhase on source host and migrate again - non-p2p migration Signed-off-by: lcheng --- libvirt/tests/cfg/migration/live_migration.cfg | 6 +++++- libvirt/tests/src/migration/live_migration.py | 8 +++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/libvirt/tests/cfg/migration/live_migration.cfg b/libvirt/tests/cfg/migration/live_migration.cfg index f94163685e..f94d20ad02 100644 --- a/libvirt/tests/cfg/migration/live_migration.cfg +++ b/libvirt/tests/cfg/migration/live_migration.cfg @@ -36,6 +36,9 @@ - p2p_live_undefinesource: only domjobabort virsh_migrate_options = "--live --p2p --persistent --undefinesource --verbose" + - non_p2p_live_undefinesource: + only domjobabort + virsh_migrate_options = "--live --persistent --undefinesource 
--verbose" variants: - migrateuri: migrate_speed = 15 @@ -96,7 +99,8 @@ err_msg = 'operation aborted: migration out job: canceled by client' migrate_again_status_error = 'no' vm_state_after_abort = "{'source': 'running', 'target': 'nonexist'}" - migrate_speed = 10 + migrate_speed = 1 + migrate_speed_again = 20 check_local_port = 'yes' return_port = 'yes' action_during_mig = '[{"func": "libvirt_network.check_established", "after_event": "iteration: '1'", "func_param": 'params'}, {"func": "virsh.domjobabort", "after_event": "iteration: '1'", "func_param": "'%s' % params.get('migrate_main_vm')"}]' diff --git a/libvirt/tests/src/migration/live_migration.py b/libvirt/tests/src/migration/live_migration.py index 3d8fe721f6..aa23eafadb 100644 --- a/libvirt/tests/src/migration/live_migration.py +++ b/libvirt/tests/src/migration/live_migration.py @@ -143,6 +143,7 @@ def run(test, params, env): stress_package = params.get("stress_package") action_during_mig = params.get("action_during_mig") migrate_speed = params.get("migrate_speed") + migrate_speed_again = params.get("migrate_speed_again") migrate_again = "yes" == params.get("migrate_again", "no") vm_state_after_abort = params.get("vm_state_after_abort") return_port = "yes" == params.get("return_port", "no") @@ -235,8 +236,8 @@ def run(test, params, env): if stress_package: migration_test.run_stress_in_vm(vm, params) + mode = 'both' if '--postcopy' in postcopy_options else 'precopy' if migrate_speed: - mode = 'both' if '--postcopy' in postcopy_options else 'precopy' migration_test.control_migrate_speed(vm_name, int(migrate_speed), mode) @@ -278,6 +279,11 @@ def run(test, params, env): params, test)) + if migrate_speed_again: + migration_test.control_migrate_speed(vm_name, + int(migrate_speed_again), + mode) + migration_base.do_migration(vm, migration_test, None, dest_uri, options, virsh_options, extra, action_during_mig, From c26c340dc76c9746a2f43afe9020f5559235fb4d Mon Sep 17 00:00:00 2001 From: lcheng Date: Tue, 23 Nov 
2021 18:23:14 +0800 Subject: [PATCH 32/58] migrate_vm: Fix variable 'uri' reference issue Fix error: UnboundLocalError: local variable 'uri' referenced before assignment Signed-off-by: lcheng --- libvirt/tests/src/migration/migrate_vm.py | 1 + 1 file changed, 1 insertion(+) diff --git a/libvirt/tests/src/migration/migrate_vm.py b/libvirt/tests/src/migration/migrate_vm.py index f61e8c48cc..8e48a339c1 100644 --- a/libvirt/tests/src/migration/migrate_vm.py +++ b/libvirt/tests/src/migration/migrate_vm.py @@ -1189,6 +1189,7 @@ def get_target_hugepage_num(params): test_dict = dict(params) vm_name = test_dict.get("main_vm") vm = env.get_vm(vm_name) + uri = params.get("desuri") start_vm = test_dict.get("start_vm", "no") transport = test_dict.get("transport") plus = test_dict.get("conn_plus", "+") From 47cefca2087b1624d617cc2c856dd0fa592c4983 Mon Sep 17 00:00:00 2001 From: Meina Li Date: Wed, 24 Nov 2021 03:20:01 -0500 Subject: [PATCH 33/58] virsh_vol_create.py: fix secret issue in storage volume with acl test Signed-off-by: Meina Li --- libvirt/tests/src/virsh_cmd/volume/virsh_vol_create.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libvirt/tests/src/virsh_cmd/volume/virsh_vol_create.py b/libvirt/tests/src/virsh_cmd/volume/virsh_vol_create.py index 56929b90c1..8acfe08dce 100644 --- a/libvirt/tests/src/virsh_cmd/volume/virsh_vol_create.py +++ b/libvirt/tests/src/virsh_cmd/volume/virsh_vol_create.py @@ -369,7 +369,7 @@ def attach_volume_disk(vm_name, src_pool_name, vol_name, params): for index, v in enumerate(rule): if v.find("secret") >= 0: nextline = rule[index + 1] - s = nextline.replace("QEMU", "secret").replace( + s = re.sub("QEMU|storage", "secret", nextline).replace( "pool_name", "secret_uuid").replace( "virt-dir-pool", "%s" % luks_secret_uuid) rule[index + 1] = s From e997800a07762d23144bba4800b5c04d8b06ffeb Mon Sep 17 00:00:00 2001 From: lcheng Date: Thu, 18 Nov 2021 00:08:25 +0800 Subject: [PATCH 34/58] migrate_with_various_hostname: Add 3 
cases about set hostname 1. [RHEL-123618] - [Migration] Do live VM migration - source and dest host with hostname "localhost.localdomain" 2. [RHEL-123620] - [Migration] Do live VM migration - source and dest with FQDN hostname(with "." in hostname) 3. [RHEL-123619] - [Migration] Do live VM migration - source and dest host with short hostname(no "." in hostname) Signed-off-by: lcheng --- .../migrate_with_various_hostname.cfg | 47 +++++ .../migrate_with_various_hostname.py | 173 ++++++++++++++++++ 2 files changed, 220 insertions(+) create mode 100644 libvirt/tests/cfg/migration/migrate_with_various_hostname.cfg create mode 100644 libvirt/tests/src/migration/migrate_with_various_hostname.py diff --git a/libvirt/tests/cfg/migration/migrate_with_various_hostname.cfg b/libvirt/tests/cfg/migration/migrate_with_various_hostname.cfg new file mode 100644 index 0000000000..6080891484 --- /dev/null +++ b/libvirt/tests/cfg/migration/migrate_with_various_hostname.cfg @@ -0,0 +1,47 @@ +- virsh.migrate_with_various_hostname: + type = migrate_with_various_hostname + migration_setup = "yes" + storage_type = 'nfs' + setup_local_nfs = 'yes' + disk_type = "file" + disk_source_protocol = "netfs" + mnt_path_name = ${nfs_mount_dir} + # Console output can only be monitored via virsh console output + only_pty = True + take_regular_screendumps = no + # Extra options to pass after + virsh_migrate_extra = "" + # SSH connection time out + ssh_timeout = 60 + # Local URI + virsh_migrate_connect_uri = "qemu:///system" + image_convert = 'no' + variants: + - without_postcopy: + postcopy_options = "" + variants: + - non_p2p_live: + virsh_migrate_options = "--live --verbose" + - p2p_live: + virsh_migrate_options = "--live --p2p --verbose" + variants: + - set_src_and_dst_hostname: + set_src_and_dst_hostname = 'yes' + status_error = 'yes' + migrate_again = 'yes' + migrate_again_status_error = 'no' + virsh_migrate_extra_mig_again = "--migrateuri tcp://${migrate_dest_host}" + virsh_migrate_src_state = 
"shut off" + variants: + - short: + src_hostname = "srctest" + dst_hostname = "dsttest" + err_msg = 'Name or service not known' + - fqdn: + src_hostname = "src.test.redhat.com" + dst_hostname = "dst.test.redhat.com" + err_msg = 'Name or service not known' + - localhost: + src_hostname = "localhost.localdomain" + dst_hostname = "localhost.localdomain" + err_msg = 'hostname on destination resolved to localhost, but migration requires an FQDN' diff --git a/libvirt/tests/src/migration/migrate_with_various_hostname.py b/libvirt/tests/src/migration/migrate_with_various_hostname.py new file mode 100644 index 0000000000..85c3e37f32 --- /dev/null +++ b/libvirt/tests/src/migration/migrate_with_various_hostname.py @@ -0,0 +1,173 @@ +import logging + +from avocado.utils import process + +from virttest import libvirt_vm +from virttest import migration +from virttest import remote as remote_old +from virttest import libvirt_version +from virttest import utils_libvirtd + +from virttest.libvirt_xml import vm_xml +from virttest.utils_test import libvirt + +from provider.migration import migration_base + + +def get_hostname(test, remote_params=None): + """ + Get hostname for source or dest host + + :param test: test object + :param remote_params: Dict of remote host parameters, which should + include: server_ip, server_user, server_pwd + """ + cmd = "hostname" + if remote_params: + ret = remote_old.run_remote_cmd(cmd, remote_params, ignore_status=False) + else: + ret = process.run(cmd, ignore_status=False, shell=True) + output = ret.stdout_text.strip() + if ret.exit_status: + test.fail("Failed to run '%s': %s" % (cmd, output)) + logging.info("Get hostname: %s" % output) + return output + + +def set_hostname(hostname, test, remote_params=None): + """ + Set hostname for source or dest host + + :param hostname: string, hostname + :param test: test object + :param remote_params: Dict of remote host parameters, which should + include: server_ip, server_user, server_pwd + """ + cmd = 
"hostnamectl set-hostname %s" % hostname + if remote_params: + ret = remote_old.run_remote_cmd(cmd, remote_params, ignore_status=False) + else: + ret = process.run(cmd, ignore_status=False, shell=True) + output = ret.stdout_text.strip() + if ret.exit_status: + test.fail("Failed to run '%s': %s" % (cmd, output)) + logging.info("Set hostname: %s" % hostname) + + +def run(test, params, env): + """ + Run the test + + :param test: test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + libvirt_version.is_libvirt_feature_supported(params) + + vm_name = params.get("migrate_main_vm") + vm = env.get_vm(vm_name) + vm.verify_alive() + bk_uri = vm.connect_uri + + migration_test = migration.MigrationTest() + migration_test.check_parameters(params) + extra_args = migration_test.update_virsh_migrate_extra_args(params) + + extra = params.get("virsh_migrate_extra") + postcopy_options = params.get("postcopy_options") + if postcopy_options: + extra = "%s %s" % (extra, postcopy_options) + params["virsh_migrate_desturi"] = libvirt_vm.complete_uri( + params.get("migrate_dest_host")) + dest_uri = params.get("virsh_migrate_desturi") + options = params.get("virsh_migrate_options", + "--live --p2p --persistent --verbose") + virsh_options = params.get("virsh_options", "") + migrate_again = "yes" == params.get("migrate_again", "no") + src_state = params.get("virsh_migrate_src_state", "shut off") + set_src_and_dst_hostname = "yes" == params.get("set_src_and_dst_hostname", "no") + src_hostname = params.get("src_hostname") + dst_hostname = params.get("dst_hostname") + server_ip = params.get("remote_ip") + server_user = params.get("remote_user", "root") + server_pwd = params.get("remote_pwd") + server_params = {'server_ip': server_ip, + 'server_user': server_user, + 'server_pwd': server_pwd} + + dst_session = None + dst_libvirtd = None + src_libvirtd = None + + # For safety reasons, we'd better back up xmlfile. 
+ new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) + orig_config_xml = new_xml.copy() + + try: + # Update guest disk xml + libvirt.set_vm_disk(vm, params) + + if set_src_and_dst_hostname: + old_dst_hostname = get_hostname(test, remote_params=server_params) + set_hostname(dst_hostname, test, remote_params=server_params) + dst_session = remote_old.wait_for_login('ssh', server_ip, '22', + server_user, server_pwd, + r"[\#\$]\s*$") + dst_libvirtd = utils_libvirtd.Libvirtd(session=dst_session) + dst_libvirtd.restart() + old_source_hostname = get_hostname(test) + set_hostname(src_hostname, test) + src_libvirtd = utils_libvirtd.Libvirtd() + src_libvirtd.restart() + + if not vm.is_alive(): + vm.start() + + logging.debug("Guest xml after starting:\n%s", + vm_xml.VMXML.new_from_dumpxml(vm_name)) + + vm.wait_for_login() + + # Execute migration process + migration_base.do_migration(vm, migration_test, None, dest_uri, + options, virsh_options, extra, + None, + extra_args) + + func_returns = dict(migration_test.func_ret) + migration_test.func_ret.clear() + logging.debug("Migration returns function results:%s", func_returns) + + if migrate_again: + if not vm.is_alive(): + vm.start() + vm.wait_for_login() + extra_args['status_error'] = params.get("migrate_again_status_error", "no") + + if params.get("virsh_migrate_extra_mig_again"): + extra = params.get("virsh_migrate_extra_mig_again") + + migration_base.do_migration(vm, migration_test, None, dest_uri, + options, virsh_options, + extra, None, + extra_args) + if int(migration_test.ret.exit_status) == 0: + migration_test.post_migration_check([vm], params, uri=dest_uri) + if not libvirt.check_vm_state(vm_name, state=src_state, uri=bk_uri): + test.fail("Can't get the expected vm state '%s'" % src_state) + finally: + logging.info("Recover test environment") + vm.connect_uri = bk_uri + # Clean VM on destination and source + migration_test.cleanup_vm(vm, dest_uri) + + if set_src_and_dst_hostname: + set_hostname(old_dst_hostname, 
test, remote_params=server_params) + if dst_libvirtd: + dst_libvirtd.restart() + if dst_session: + dst_session.close() + set_hostname(old_source_hostname, test) + if src_libvirtd: + src_libvirtd.restart() + orig_config_xml.sync() From a71777dd1b3366b5cf2200ef7c4b72e1d7281b18 Mon Sep 17 00:00:00 2001 From: Liu Yiding Date: Thu, 25 Nov 2021 13:36:54 +0800 Subject: [PATCH 35/58] numa_memory_spread: Fix session exception handling This test uses memhog to cost memory. The guest may be killed by host OS due to OOM. So we need to check whether the guest is alive during memhog running. Otherwise the cmd doesn't return when OOM and test will be aborted by TIMEOUT exception. Run a thread to run guest mmehog and keep checking whether guest is alive. If guest died, that means memory doesn't spread across multiple nodes, test fail. If cmd timeout, just raise an error. Increase guest memory or timeout may fix this error. Signed-off-by: Liu Yiding --- libvirt/tests/src/numa/numa_memory_spread.py | 36 +++++++++++++++++--- 1 file changed, 31 insertions(+), 5 deletions(-) diff --git a/libvirt/tests/src/numa/numa_memory_spread.py b/libvirt/tests/src/numa/numa_memory_spread.py index ab863cea30..f3b8af1882 100644 --- a/libvirt/tests/src/numa/numa_memory_spread.py +++ b/libvirt/tests/src/numa/numa_memory_spread.py @@ -1,5 +1,6 @@ import logging import re +import threading from avocado.utils import distro from avocado.utils import process @@ -172,6 +173,21 @@ def check_cgget_output(test, cgget_message): test.fail('{} not found in cgget output'.format(cgget_message)) +def run_memhog(test, session, test_memory, memhog_rt): + """ + Run memhog in guest to consume memory + + :param test: test object + :param session: guest session + :param test_memory: the memory that guest needs to consume + :param memhog_rt: save error message + """ + try: + session.cmd('memhog -r1 {}k'.format(test_memory), timeout=120) + except Exception as err: + memhog_rt["err"] = str(err) + + def run(test, params, env): 
""" Test Live update the numatune nodeset and memory can spread to other node @@ -181,6 +197,7 @@ def run(test, params, env): vm_name = params.get("main_vm") vm = env.get_vm(vm_name) backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name) + memhog_rt = {} try: # Prepare host @@ -204,11 +221,20 @@ def run(test, params, env): # And get the numastat prior the test total_prior = get_qemu_total_for_nodes() # Start test - result = session.cmd('memhog -r1 {}k'.format(memory_to_eat), - timeout=120) - logging.debug(result) - if vm.is_dead(): - test.fail('The VM crashed when memhog was executed.') + memhog_thread = threading.Thread(target=run_memhog, + args=(test, session, + memory_to_eat, memhog_rt)) + memhog_thread.setDaemon(True) + memhog_thread.start() + while True: + if memhog_thread.is_alive(): + if vm.is_dead(): + test.fail("The VM crashed when memhog was executed.") + else: + if memhog_rt: + test.fail("Failed to run memhog:{}". + format(memhog_rt["err"])) + break # Get the numastat after the test total_after = get_qemu_total_for_nodes() limit = int(params.get("limit_mb")) From 1f0abe325ce0cd0352aff48eea7543da79f60e91 Mon Sep 17 00:00:00 2001 From: Kamil Varga Date: Mon, 22 Nov 2021 23:06:56 +0100 Subject: [PATCH 36/58] remote_access:Add new test for TLS with multiple certs This commit is adding the tests for chained CA certificates for remote TLS connection. 
Signed-off-by: Kamil Varga --- .../remote_tls_multiple_certs.cfg | 15 + .../remote_tls_multiple_certs.py | 499 ++++++++++++++++++ spell.ignore | 2 + 3 files changed, 516 insertions(+) create mode 100644 libvirt/tests/cfg/remote_access/remote_tls_multiple_certs.cfg create mode 100644 libvirt/tests/src/remote_access/remote_tls_multiple_certs.py diff --git a/libvirt/tests/cfg/remote_access/remote_tls_multiple_certs.cfg b/libvirt/tests/cfg/remote_access/remote_tls_multiple_certs.cfg new file mode 100644 index 0000000000..4b57b6acd0 --- /dev/null +++ b/libvirt/tests/cfg/remote_access/remote_tls_multiple_certs.cfg @@ -0,0 +1,15 @@ +- virsh.remote_tls_multiple_certs: + type = remote_tls_multiple_certs + server_ip = ${remote_ip} + server_user = ${remote_user} + server_pwd = ${remote_pwd} + client_ip = ${local_ip} + client_user = root + client_pwd = ${local_pwd} + start_vm = "no" + port = "22" + tls_port = "16514" + variants: + - positive_testing: + err_msg = "The certificate hasn't got a known issuer" + diff --git a/libvirt/tests/src/remote_access/remote_tls_multiple_certs.py b/libvirt/tests/src/remote_access/remote_tls_multiple_certs.py new file mode 100644 index 0000000000..434f4f7041 --- /dev/null +++ b/libvirt/tests/src/remote_access/remote_tls_multiple_certs.py @@ -0,0 +1,499 @@ +import logging +import os + +import aexpect +from aexpect import remote + +from avocado.core import exceptions +from avocado.utils import process + +from virttest import data_dir +from virttest import libvirt_version +from virttest import utils_iptables +from virttest import utils_libvirtd +from virttest import utils_misc +from virttest import utils_split_daemons +from virttest import remote as remote_old +from virttest.utils_test import libvirt + + +def get_server_details(params): + """ + Get the server details from the configuration parameters + + :param params: avocado params object + :returns: required server information + """ + server_info = {'ip': params.get('server_ip'), + 'user': 
params.get('server_user'), + 'pwd': params.get('server_pwd')} + return server_info + + +def get_client_details(params): + """ + Get the client details from the configuration parameters + + :param params: avocado params object + :returns: required client information + """ + client_info = {'ip': params.get('client_ip'), + 'user': params.get('client_user'), + 'pwd': params.get('client_pwd')} + return client_info + + +def prepare_a_certs_dictionary(server_info): + """ + Prepare a dictionary with the required information for info files and + certificates. + + :param server_info: dictionary with the server information + :returns: dictionary with required information + """ + caroot = {'caroot': {'info': ['cn = Libvirt Root CA', 'ca', + 'cert_signing_key'], + 'ca_cert': '', + } + } + cachild1 = {'cachild1': {'info': ['cn = Libvirt Child CA 1', 'ca', + 'cert_signing_key'], + 'ca_cert': 'caroot', + } + } + cachild2 = {'cachild2': {'info': ['cn = Libvirt Child CA 2', 'ca', + 'cert_signing_key'], + 'ca_cert': 'caroot', + } + } + server1 = {'server1': {'info': ['organization = Red Hat', + 'cn = host1.example.com', + 'dns_name = host1.example.com', + 'ip_address = ' + server_info['ip'], + 'tls_www_server', 'encryption_key', + 'signing_key'], + 'ca_cert': 'cachild1', + } + } + server2 = {'server2': {'info': ['organization = Red Hat', + 'cn = host2.example.com', 'tls_www_server', + 'encryption_key', 'signing_key'], + 'ca_cert': 'cachild2', + } + } + client1 = {'client1': {'info': ['country = GB', 'state = London', + 'locality = London', + 'organization = Red Hat', 'cn = client1', + 'tls_www_client', 'encryption_key', + 'signing_key'], + 'ca_cert': 'cachild1', + } + } + client2 = {'client2': {'info': ['country = GB', 'state = London', + 'locality = London', + 'organization = Red Hat', 'cn = client2', + 'tls_www_client', 'encryption_key', + 'signing_key'], + 'ca_cert': 'cachild2', + } + } + + certs_dict = {**caroot, **cachild1, **cachild2, **server1, **server2, + **client1, 
**client2} + + return certs_dict + + +def prepare_info_files(certs_dict, dir_path): + """ + Prepare info files based on information provided + + :param certs_dict: dictionary with the required certificates information + :param dir_path: path pointing to required info files destination + :returns: None + """ + for name in certs_dict: + info_file = name+'.info' + with open(os.path.join(dir_path, info_file), 'w') as info_file: + for line in certs_dict[name]['info']: + info_file.write(line+'\n') + + +def generate_keys(certs_dir): + """ + Generate keys based on info files + + :param certs_dir: path pointing to directory with certificates + :returns: None + """ + for name in os.listdir(certs_dir): + try: + basename, extension = name.split('.') + except ValueError: + continue + if extension == 'info': + cert_name = basename + 'key.pem' + cmd = "certtool --generate-privkey --outfile={}".\ + format(os.path.join(certs_dir, cert_name)) + logging.debug('Command to generate keys: {}'.format(cmd)) + process.run(cmd, shell=True) + + +def generate_certificates(certs_dict, certs_dir): + """ + Generate certificates from the information provided + + :param certs_dict: dictionary with the required certificates information + :param certs_dir: path pointing to directory with certificates + :returns: None + """ + cwd = os.getcwd() + os.chdir(certs_dir) + for name in certs_dict: + cmd = 'certtool {generate_keyword} --load-privkey {name}key.pem ' \ + '{cacert} {ca_private_key} --template {name}.info --outfile ' \ + '{name}cert.pem'.\ + format(generate_keyword='--generate-certificate' if certs_dict[name]['ca_cert'] else '--generate-self-signed', + name=name, + cacert=' --load-ca-certificate '+certs_dict[name]['ca_cert']+'cert.pem' if certs_dict[name]['ca_cert'] else '', + ca_private_key='--load-ca-privkey '+certs_dict[name]['ca_cert']+'key.pem' if certs_dict[name]['ca_cert'] else '', + ) + logging.debug('Command to generate certificate:\n{}'.format(cmd)) + process.run(cmd, shell=True) + 
os.chdir(cwd) + + +def concatenate_certificates(certs_dir, *certificates): + """ + Concatenate certificates chain into one CA certificate + + :param certs_dir: path pointing to directory with certificates + :param certificates: multiple certificate names in required order + :returns: None + """ + cwd = os.getcwd() + os.chdir(certs_dir) + cacert_filename = 'cacert.pem' + if not os.path.exists(cacert_filename): + process.run('touch {}'.format(cacert_filename), shell=True) + cert_string = '' + for cert in certificates: + cert_string += cert + ' ' + cmd = "cat {}> {}".format(cert_string, cacert_filename) + process.run(cmd, shell=True) + os.chdir(cwd) + + +def copy_ca_certs_to_hosts(certs_dir, *host_info): + """ + Copy certificates to required destination path + + :param certs_dir: path pointing to directory with certificates + :param host_info: multiple dictionaries with the host information + :returns: path to destination CA certificate + """ + ca_cert_path = os.path.join(certs_dir, 'cacert.pem') + remote_ca_cert_path = '/etc/pki/CA/cacert.pem' + try: + for host in host_info: + remote.copy_files_to(host['ip'], 'scp', host['user'], + host['pwd'], '22', ca_cert_path, + remote_ca_cert_path) + except remote.SCPError as detail: + raise exceptions.TestError(detail) + return remote_ca_cert_path + + +def prepare_certs_and_keys_on_host(session, host, certs_dir, key_name): + """ + Prepare certificates and keys on the host + + :param session: RemoteSession object for host connection + :param host: dictionary with the host information + :param certs_dir: path pointing to directory with certificates + :param key_name: string with a name used for a key and certificate + :returns: tuple of paths for key and certificate + """ + libvirt_pki_private_dir = '/etc/pki/libvirt/private' + libvirt_pki_dir = '/etc/pki/libvirt' + cmd = "mkdir -p {}".format(libvirt_pki_private_dir) + status, output = session.cmd_status_output(cmd) + logging.debug("Making directory for certificates has failed 
due to: {}". + format(output)) + src_key_path = os.path.join(certs_dir, key_name + 'key.pem') + src_cert_path = os.path.join(certs_dir, key_name + 'cert.pem') + dest_key_path = os.path.join(libvirt_pki_private_dir, + key_name[:-1] + 'key.pem') + dest_cert_path = os.path.join(libvirt_pki_dir, + key_name[:-1] + 'cert.pem') + # SCP server cert and server key to server + remote.copy_files_to(host['ip'], 'scp', host['user'], host['pwd'], + '22', src_key_path, dest_key_path) + remote.copy_files_to(host['ip'], 'scp', host['user'], host['pwd'], + '22', src_cert_path, dest_cert_path) + return dest_key_path, dest_cert_path + + +def get_server_syslibvirtd(server_info): + """" + Get the RemoteFile object of the syslibvirtd file + + :param server_info: dictionary with the server information + :returns: RemoteFile object of the syslibvirtd file + """ + syslibvirtd = remote_old.RemoteFile( + address=server_info['ip'], + client='scp', + username=server_info['user'], + password=server_info['pwd'], + port='22', + remote_path='/etc/sysconfig/libvirtd') + return syslibvirtd + + +def get_daemon_configs(): + """ + Get the daemon configs + + :returns: daemon configs file path + """ + if utils_split_daemons.is_modular_daemon(): + daemon_conf = "/etc/libvirt/virtproxyd.conf" + daemon_socket_conf = "/usr/lib/systemd/system/virtproxyd-tls.socket" + else: + daemon_conf = "/etc/libvirt/libvirtd.conf" + daemon_socket_conf = "/usr/lib/systemd/system/libvirtd-tls.socket" + return daemon_conf, daemon_socket_conf + + +def get_server_libvirtdconf(server_info): + """ + Get the RemoteFile object of the libvirtdconf file + + :param server_info: dictionary with the server information + :returns: RemoteFile object of the libvirtdconf file + """ + daemon_conf, _daemon_socket_conf = get_daemon_configs() + server_libvirtdconf = remote_old.RemoteFile( + address=server_info['ip'], + client='scp', + username=server_info['user'], + password=server_info['pwd'], + port='22', + remote_path=daemon_conf) + return 
server_libvirtdconf + + +def restart_libvirtd_on_server(session): + """ + Restart libvirtd service(s) on the remote server to apply changes + + :param session: RemoteSession object for server connection + :returns: None + """ + if libvirt_version.version_compare(5, 6, 0, session): + tls_socket_service = utils_libvirtd.DaemonSocket( + "virtproxyd-tls.socket", session=session) + tls_socket_service.restart() + else: + libvirtd_service = utils_libvirtd.Libvirtd( + session=session) + libvirtd_service.restart() + + +def setup_libvirt_on_server(server_session, server_info): + """ + Setup libvirtd on remote server to allow TLS connection. + + :param server_session: RemoteSession object for server connection + :param server_info: dictionary with the server information + :returns: tuple of the RemoteFile objects with libvirtdconf and syslibvirtd + """ + libvirtdconf = get_server_libvirtdconf(server_info) + syslibvirtd = None + if not libvirt_version.version_compare(5, 6, 0, server_session): + syslibvirtd = get_server_syslibvirtd(server_info) + # edit the /etc/sysconfig/libvirtd to add --listen args in libvirtd + pattern_to_repl = { + r".*LIBVIRTD_ARGS\s*=\s*\"\s*--listen\s*\".*": + "LIBVIRTD_ARGS=\"--listen\"" + } + syslibvirtd.sub_else_add(pattern_to_repl) + # edit the /etc/libvirt/libvirtd.conf to add listen_tls=1 + pattern_to_repl = {r".*listen_tls\s*=\s*.*": "listen_tls=1"} + libvirtdconf.sub_else_add(pattern_to_repl) + + pattern_to_repl = {r".*auth_tls\s*=\s*.*": 'auth_tls="none"'} + libvirtdconf.sub_else_add(pattern_to_repl) + + try: + restart_libvirtd_on_server(server_session) + except (remote.LoginError, aexpect.ShellError) as detail: + raise exceptions.TestError(detail) + return libvirtdconf, syslibvirtd + + +def stop_iptables(): + """ + Clear iptables to make sure no rule prevents connection + + :returns: None + """ + cmd = "iptables -F" + process.run(cmd, shell=True) + + +def allow_port_in_fw(server_session): + """ + Allow the libvirt TLS port in the firewall on 
the remote server + + :param server_session: RemoteSession object for server connection + :returns: None + """ + firewalld_port = '16514' + firewall_cmd = utils_iptables.Firewall_cmd(server_session) + firewall_cmd.add_port(firewalld_port, 'tcp', permanent=True) + + +def connect_to_remote(server_info, err_msg=None): + """ + Try connection to the remote server with TLS + + :param server_info: dictionary with the server information + :param err_msg: expected error messages (if any) + :returns: None + """ + expected_fails = [err_msg] if err_msg else [] + result = process.run('virsh -c qemu+tls://{}/system'. + format(server_info['ip']), shell=True, + ignore_status=True) + libvirt.check_result(result, expected_fails=expected_fails, + check_both_on_error=True) + + +def get_log(server_info): + """ + Tail output appended data as the file /var/log/messages grows + + :param server_info: dictionary with the server information + :returns: the appended data tailed from /var/log/messages + """ + tailed_log_file = os.path.join(data_dir.get_tmp_dir(), 'tail_log') + tail_session = remote.remote_login('ssh', server_info['ip'], '22', + server_info['user'], + server_info['pwd'], + r"[\#\$]\s*$", + log_function=utils_misc.log_line, + log_filename=tailed_log_file) + tail_session.sendline('tail -f /var/log/messages') + return tail_session + + +def run(test, params, env): + """ + Test remote access with TLS connection and multiple CA certificates + """ + config_files = [] + server_files = [] + client_files = [] + ca_cert_file = None + + server_info = get_server_details(params) + server_session = remote.wait_for_login('ssh', server_info['ip'], '22', + server_info['user'], + server_info['pwd'], + r"[\#\$]\s*$") + client_info = get_client_details(params) + client_session = remote.wait_for_login('ssh', client_info['ip'], '22', + client_info['user'], + client_info['pwd'], + r"[\#\$]\s*$") + try: + # NOTE: The Test can be divided to multiple parts, however the first + # part - setup is a time 
consuming and it is therefore better to do it + # once only. + certs_dict = prepare_a_certs_dictionary(server_info) + certs_dir = os.getcwd() + + prepare_info_files(certs_dict, certs_dir) + generate_keys(certs_dir) + generate_certificates(certs_dict, certs_dir) + concatenate_certificates(certs_dir, + 'carootcert.pem', + 'cachild1cert.pem', + 'cachild2cert.pem') + ca_cert_file = copy_ca_certs_to_hosts(certs_dir, + server_info, + client_info) + server_files = prepare_certs_and_keys_on_host(server_session, + server_info, + certs_dir, + 'server1') + config_files = setup_libvirt_on_server(server_session, server_info) + stop_iptables() + allow_port_in_fw(server_session) + restart_libvirtd_on_server(server_session) + client_files = prepare_certs_and_keys_on_host(client_session, + client_info, + certs_dir, + 'client1') + # Connect to server1 hypervisor on client1 + connect_to_remote(server_info) + + # Test with other CA certificates order + for new_order in [ + ['cachild2cert.pem', 'carootcert.pem', 'cachild1cert.pem'], + ['cachild1cert.pem', 'carootcert.pem', 'cachild2cert.pem'], + ]: + concatenate_certificates(certs_dir, *new_order) + copy_ca_certs_to_hosts(certs_dir, server_info, client_info) + restart_libvirtd_on_server(server_session) + connect_to_remote(server_info) + + # Test with missing issuing CA + concatenate_certificates(certs_dir, + 'cachild2cert.pem', + 'carootcert.pem') + # Publish to server only + copy_ca_certs_to_hosts(certs_dir, server_info) + # Start reading the /var/log/messages on server + tail_messages = get_log(server_info) + restart_libvirtd_on_server(server_session) + err_msg = params.get('err_msg') + output = tail_messages.get_output() + tail_messages.close() + if err_msg not in output: + test.fail("Unexpected output of the /var/log/messages on remote " + "server: {}".format(output)) + # Fix the CA certificates + concatenate_certificates(certs_dir, + 'cachild2cert.pem', + 'carootcert.pem', + 'cachild1cert.pem') + # Copy to server + 
copy_ca_certs_to_hosts(certs_dir, server_info) + restart_libvirtd_on_server(server_session) + # Check if the connection can be established again + connect_to_remote(server_info) + # Create an invalid CA cert for client + concatenate_certificates(certs_dir, + 'cachild2cert.pem', + 'carootcert.pem') + # Copy to client + copy_ca_certs_to_hosts(certs_dir, client_info) + connect_to_remote(server_info, err_msg) + except Exception as e: + test.fail('Unexpected failure: {}'.format(e)) + finally: + if config_files: + for config in config_files: + del config + if server_files: + for file_path in server_files: + server_session.cmd_status_output('rm -f {}'.format(file_path)) + if client_files: + for file_path in client_files: + client_session.cmd_status_output('rm -f {}'.format(file_path)) + if ca_cert_file: + server_session.cmd_status_output('rm -f {}'.format(ca_cert_file)) + client_session.cmd_status_output('rm -f {}'.format(ca_cert_file)) diff --git a/spell.ignore b/spell.ignore index 35f6af3764..92b0f59aa3 100644 --- a/spell.ignore +++ b/spell.ignore @@ -480,6 +480,7 @@ libvird libvirt Libvirt libvirtd +libvirtdconf libvirtd's libvirt's libvirts @@ -935,6 +936,7 @@ sys sysconfig sysfs sysinfo +syslibvirtd syslog syspath sysprep From f0f53588ffca67fdb949f1935ae9dff4406b4ef4 Mon Sep 17 00:00:00 2001 From: Meina Li Date: Thu, 25 Nov 2021 22:24:29 -0500 Subject: [PATCH 37/58] virsh_boot.cfg: update error info of negative case Signed-off-by: Meina Li --- libvirt/tests/cfg/bios/virsh_boot.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libvirt/tests/cfg/bios/virsh_boot.cfg b/libvirt/tests/cfg/bios/virsh_boot.cfg index 210a8814a2..4c7a1f6857 100644 --- a/libvirt/tests/cfg/bios/virsh_boot.cfg +++ b/libvirt/tests/cfg/bios/virsh_boot.cfg @@ -183,7 +183,7 @@ template = "/usr/share/OVMF/OVMF_VARS.secboot.fd" uefi_device_bus = "sata" uefi_target_dev = "sda" - check_prompt = "error:.*has invalid signature" + check_prompt = ".*has invalid signature" || 
"'Verification failed: (0x1A) Security Violation'" - by_seabios: boot_type = "seabios" loader = "/usr/share/seabios/bios-256k.bin" From 161216e93377ecfccae7ae969e7cf2f9ccb4f98d Mon Sep 17 00:00:00 2001 From: Yanqiu Zhang Date: Tue, 23 Nov 2021 02:09:26 -0500 Subject: [PATCH 38/58] tpm_device: Expose error when test suite failed Expose error details when test suite failed, since it has more than one tests. Signed-off-by: Yanqiu Zhang --- libvirt/tests/src/virtual_device/tpm_device.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/libvirt/tests/src/virtual_device/tpm_device.py b/libvirt/tests/src/virtual_device/tpm_device.py index c506c78039..d3deb3f242 100644 --- a/libvirt/tests/src/virtual_device/tpm_device.py +++ b/libvirt/tests/src/virtual_device/tpm_device.py @@ -482,7 +482,10 @@ def run_test_suite_in_guest(session): for test_sh in ["test_smoke.sh", "test_space.sh"]: pattern = "ok .* selftests: tpm2: %s" % test_sh if not re.search(pattern, output) or ("not ok" in output): - test.fail("test suite check failed.") + if "ERROR" in output: + test.fail("test suite check failed: %s" % re.findall(r'test_.* ... ERROR', output)) + else: + test.fail("test suite check failed.") logging.info("------PASS on kernel test suite check------") def persistent_test(vm, vm_xml): From 9660893123aaa272a545c89c03785f8dd28d2704 Mon Sep 17 00:00:00 2001 From: Yingshun Cui Date: Mon, 29 Nov 2021 14:57:41 +0800 Subject: [PATCH 39/58] Fix up inspekt check failure Inspekt check fails with the latest pylint, so fix to a stable version as a workaround. 
Signed-off-by: Yingshun Cui --- requirements-travis.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements-travis.txt b/requirements-travis.txt index 3709b32645..7e2b9d656d 100644 --- a/requirements-travis.txt +++ b/requirements-travis.txt @@ -5,4 +5,5 @@ tox==1.5.0 virtualenv==1.9.1 simplejson==3.8.1 inspektor==0.5.2 +pylint==2.11.1 pyenchant From c72257ff21404242958bc20effa4465a070bcc6b Mon Sep 17 00:00:00 2001 From: Meina Li Date: Tue, 30 Nov 2021 00:58:37 -0500 Subject: [PATCH 40/58] virsh_pool.py: update to restart virtstoraged service Signed-off-by: Meina Li --- libvirt/tests/src/virsh_cmd/pool/virsh_pool.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/libvirt/tests/src/virsh_cmd/pool/virsh_pool.py b/libvirt/tests/src/virsh_cmd/pool/virsh_pool.py index 118e9fbf79..80d9589c46 100644 --- a/libvirt/tests/src/virsh_cmd/pool/virsh_pool.py +++ b/libvirt/tests/src/virsh_cmd/pool/virsh_pool.py @@ -297,7 +297,7 @@ def check_pool_info(pool_info, check_point, value): # Step (11) # Restart libvirtd and check the autostart pool utils_libvirtd.unmark_storage_autostarted() - utils_libvirtd.libvirtd_restart() + utils_libvirtd.Libvirtd("virtstoraged").restart() option = "--autostart --persistent" check_pool_list(pool_name, option) @@ -316,7 +316,7 @@ def check_pool_info(pool_info, check_point, value): # Step (14) # Repeat step (11) - utils_libvirtd.libvirtd_restart() + utils_libvirtd.Libvirtd("virtstoraged").restart() option = "--autostart" check_pool_list(pool_name, option, True) From b54064a4d8c8b64f5e9ba9bdb87c9a5050fcac7f Mon Sep 17 00:00:00 2001 From: Liu Yiding Date: Tue, 30 Nov 2021 05:18:21 -0500 Subject: [PATCH 41/58] Start guest after reset guest PCI Signed-off-by: Liu Yiding --- libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_disk.py | 1 + 1 file changed, 1 insertion(+) diff --git a/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_disk.py b/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_disk.py index 
d422e8a058..971141e0cc 100644 --- a/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_disk.py +++ b/libvirt/tests/src/virsh_cmd/snapshot/virsh_snapshot_disk.py @@ -192,6 +192,7 @@ def run(test, params, env): if not multi_gluster_disks: # Fix No more available PCI slots libvirt_pcicontr.reset_pci_num(vm_name, 15) + vm.start() # Do the attach action. out = process.run("qemu-img info %s" % img_path, shell=True) logging.debug("The img info is:\n%s" % out.stdout.strip()) From d806e040821efd4f260e89a2fe5159bec38885a3 Mon Sep 17 00:00:00 2001 From: nanli Date: Mon, 29 Nov 2021 03:44:04 -0500 Subject: [PATCH 42/58] Clean snap file and raise unclean xml error if previous case did not clean env completely --- .../src/virsh_cmd/domain/virsh_blockcommit.py | 31 +++++++++++++++++-- 1 file changed, 29 insertions(+), 2 deletions(-) diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_blockcommit.py b/libvirt/tests/src/virsh_cmd/domain/virsh_blockcommit.py index 0f14acffd1..cbe48ecea7 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_blockcommit.py +++ b/libvirt/tests/src/virsh_cmd/domain/virsh_blockcommit.py @@ -144,7 +144,7 @@ def make_disk_snapshot(postfix_n, snapshot_take, is_check_snapshot_tree=False, i snapshot_external_disks.append(disk_external) options += " %s,snapshot=external,file=%s" % (disk, disk_external) - + clean_snap_file(disk_external) if is_check_snapshot_tree: options = options.replace("--no-metadata", "") cmd_result = virsh.snapshot_create_as(vm_name, options, @@ -196,6 +196,33 @@ def check_snapshot_tree(): if is_check_snapshot_tree: check_snapshot_tree() + def check_vm_disk_file(vm): + """ + Check current vm disk source. + + :param vm: The vm to be checked + """ + image_name1, image_format = params.get("image_name", "image"), params.get("image_format", "qcow2") + image_dir = os.path.join(data_dir.get_data_dir(), image_name1) + original_image_path = image_dir + "."
+ image_format + logging.debug("Source file should be : %s", original_image_path) + + vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name) + disk = vmxml.get_devices('disk')[0] + logging.debug("Current disk info is : %s", disk) + if disk.source.attrs['file'] != original_image_path: + test.error("Please check current vm disk source") + + def clean_snap_file(snap_path): + """ + Clean the existed duplicate snap file. + + :param snap_path: snap file path + """ + if os.path.exists(snap_path): + os.remove(snap_path) + logging.debug("Cleaned snap file before creating :%s" % snap_path) + def get_first_disk_source(): """ Get disk source of first device @@ -389,7 +416,7 @@ def do_blockcommit_pivot_repeatedly(): if len(exsiting_snaps) != 0: test.fail("There are snapshots created for %s already" % vm_name) - + check_vm_disk_file(vm) snapshot_external_disks = [] cmd_session = None # Prepare a blank params to confirm if delete the configure at the end of the test From f32eb56874ebc9c5c11be6c18e6121e8fc3a11cf Mon Sep 17 00:00:00 2001 From: Dan Zheng Date: Wed, 1 Dec 2021 18:08:33 +0800 Subject: [PATCH 43/58] controller_functional: Fix model for ppc On ppc, there is only pci-root model, not pcie-root-port model. 
Signed-off-by: Dan Zheng --- libvirt/tests/src/controller/controller_functional.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libvirt/tests/src/controller/controller_functional.py b/libvirt/tests/src/controller/controller_functional.py index 0970687ec7..3c8b17d1b4 100644 --- a/libvirt/tests/src/controller/controller_functional.py +++ b/libvirt/tests/src/controller/controller_functional.py @@ -799,8 +799,9 @@ def check_multifunction(): if remove_nic: remove_devices(vm_xml, 'interface') # Get the max controller index in current vm xml + the_model = 'pci-root' if 'ppc' in platform.machine() else 'pcie-root-port' if add_contrl_list: - ret_indexes = libvirt_pcicontr.get_max_contr_indexes(vm_xml, 'pci', 'pcie-root-port') + ret_indexes = libvirt_pcicontr.get_max_contr_indexes(vm_xml, 'pci', the_model) if ret_indexes and len(ret_indexes) > 0: if auto_bus: new_index = "0x%02x" % (int(ret_indexes[0]) + 1) From 1825bde3b56490cfb5a8909dc592dab731a04d7a Mon Sep 17 00:00:00 2001 From: Yingshun Cui Date: Thu, 2 Dec 2021 09:29:31 +0800 Subject: [PATCH 44/58] migrate_vm: Add '-F' option to qemu-img create command To create a backing file, the option '-F' is required, so update accordingly. 
Signed-off-by: Yingshun Cui --- libvirt/tests/cfg/migration/migrate_vm.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libvirt/tests/cfg/migration/migrate_vm.cfg b/libvirt/tests/cfg/migration/migrate_vm.cfg index b4187fcb72..bd0ebc4925 100644 --- a/libvirt/tests/cfg/migration/migrate_vm.cfg +++ b/libvirt/tests/cfg/migration/migrate_vm.cfg @@ -758,7 +758,7 @@ target_image_name = "foo_bar_test.${target_image_format}" new_disk_source = "${target_image_path}/${target_image_name}" # create disk image based on ${nfs_mount_dir}/${image_name} both local and remote host - create_local_disk_backfile_cmd = "qemu-img create ${new_disk_source} -f qcow2 -b ${nfs_mount_dir}/" + create_local_disk_backfile_cmd = "qemu-img create ${new_disk_source} -f qcow2 -F ${target_image_format} -b ${nfs_mount_dir}/" create_remote_disk_backfile_cmd = "${create_local_disk_backfile_cmd}" # create disk image in the VM dd_image_count = 100000 From 10a69ee816248bd433e85c612e9bee2f0d866ad6 Mon Sep 17 00:00:00 2001 From: lcheng Date: Thu, 2 Dec 2021 11:56:17 +0800 Subject: [PATCH 45/58] Adjust the order to set hostname Signed-off-by: lcheng --- .../tests/src/migration/migrate_with_various_hostname.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/libvirt/tests/src/migration/migrate_with_various_hostname.py b/libvirt/tests/src/migration/migrate_with_various_hostname.py index 85c3e37f32..7d6ef26c95 100644 --- a/libvirt/tests/src/migration/migrate_with_various_hostname.py +++ b/libvirt/tests/src/migration/migrate_with_various_hostname.py @@ -109,16 +109,16 @@ def run(test, params, env): if set_src_and_dst_hostname: old_dst_hostname = get_hostname(test, remote_params=server_params) - set_hostname(dst_hostname, test, remote_params=server_params) + old_source_hostname = get_hostname(test) dst_session = remote_old.wait_for_login('ssh', server_ip, '22', server_user, server_pwd, r"[\#\$]\s*$") dst_libvirtd = utils_libvirtd.Libvirtd(session=dst_session) - 
dst_libvirtd.restart() - old_source_hostname = get_hostname(test) - set_hostname(src_hostname, test) src_libvirtd = utils_libvirtd.Libvirtd() + set_hostname(src_hostname, test) src_libvirtd.restart() + set_hostname(dst_hostname, test, remote_params=server_params) + dst_libvirtd.restart() if not vm.is_alive(): vm.start() From 7a5e6feee772ddebdab1c30de6855fbef450b6fd Mon Sep 17 00:00:00 2001 From: Yanqiu Zhang Date: Thu, 2 Dec 2021 00:10:56 -0500 Subject: [PATCH 46/58] virsh_event: metadata-change event output fix There're still 2 areas of metadata-change event output not updated. Fix them this time. Signed-off-by: Yanqiu Zhang --- .../tests/src/virsh_cmd/domain/virsh_event.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_event.py b/libvirt/tests/src/virsh_cmd/domain/virsh_event.py index 63e44c0143..b2e51aee76 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_event.py +++ b/libvirt/tests/src/virsh_cmd/domain/virsh_event.py @@ -434,16 +434,24 @@ def trigger_events(dom, events_list=[]): options="", key=metadata_key, **virsh_dargs) - expected_events_list.append("'metadata-change' for %s: " - "element http://app.org/") + if not libvirt_version.version_compare(7, 10, 0): + expected_events_list.append("'metadata-change' for %s: " + "element http://app.org/") + else: + expected_events_list.append("'metadata-change' for %s: " + "type element, uri http://app.org/") elif event == "metadata_remove": virsh.metadata(dom.name, metadata_uri, options="--remove", key=metadata_key, **virsh_dargs) - expected_events_list.append("'metadata-change' for %s: " - "element http://app.org/") + if not libvirt_version.version_compare(7, 10, 0): + expected_events_list.append("'metadata-change' for %s: " + "element http://app.org/") + else: + expected_events_list.append("'metadata-change' for %s: " + "type element, uri http://app.org/") elif event == "blockcommit": disk_path = dom.get_blk_devices()['vda']['source'] 
virsh.snapshot_create_as(dom.name, "s1 --disk-only --no-metadata", **virsh_dargs) From 003219610085f5ff388f1d6435a18366b48f722c Mon Sep 17 00:00:00 2001 From: chunfuwen Date: Mon, 27 Sep 2021 00:45:44 -0400 Subject: [PATCH 47/58] Add attaching ccw address devices test cases Cases come from Bugzilla: 1258361[1] RHEL7-110399,RHEL7-110400, RHEL7-110401, RHEL-116653 [1]https://bugzilla.redhat.com/show_bug.cgi?id=1258361 Signed-off-by: chunfuwen --- .../virtual_disks/virtual_disks_ccw_addr.cfg | 43 +++++ .../virtual_disks/virtual_disks_ccw_addr.py | 178 ++++++++++++++++++ 2 files changed, 221 insertions(+) create mode 100755 libvirt/tests/cfg/virtual_disks/virtual_disks_ccw_addr.cfg create mode 100755 libvirt/tests/src/virtual_disks/virtual_disks_ccw_addr.py diff --git a/libvirt/tests/cfg/virtual_disks/virtual_disks_ccw_addr.cfg b/libvirt/tests/cfg/virtual_disks/virtual_disks_ccw_addr.cfg new file mode 100755 index 0000000000..ca50219e0d --- /dev/null +++ b/libvirt/tests/cfg/virtual_disks/virtual_disks_ccw_addr.cfg @@ -0,0 +1,43 @@ +- virtual_disks.ccw_addr: + type = virtual_disks_ccw_addr + take_regular_screendumps = "no" + start_vm = "no" + target_bus = "virtio" + variants: + - start_vm: + variants: + - negative_test: + status_error = "yes" + define_error = "yes" + error_msg = "cannot use CCW address type for device" + variants: + - attach_no_exist_disk: + only coldplug + target_format = "raw" + type_name = "file" + target_dev = "vdb" + device_type = "disk" + driver_type = 'raw' + backend_device = "disk" + - attach_disk: + target_format = "raw" + virt_disk_device_source = "/var/lib/libvirt/images/test.img" + type_name = "file" + target_dev = "vdb" + device_type = "disk" + driver_type = 'raw' + backend_device = "disk" + addr_attrs = "{'type': 'ccw', 'cssid': '0x0', 'ssid': '0x0', 'devno': '0x0000'}" + - attach_controller: + backend_device = "controller" + addr_attrs = "{'type': 'ccw', 'cssid': '0x0', 'ssid': '0x0', 'devno': '0x0000'}" + - attach_rng: + backend_device = 
"rng" + backend_dev = "/dev/hwrng" + rng_model = "virtio" + addr_attrs = "{'type': 'ccw', 'cssid': '0x0', 'ssid': '0x0', 'devno': '0x0000'}" + variants: + - hotplug: + virt_device_hotplug = "yes" + - coldplug: + virt_device_hotplug = "no" diff --git a/libvirt/tests/src/virtual_disks/virtual_disks_ccw_addr.py b/libvirt/tests/src/virtual_disks/virtual_disks_ccw_addr.py new file mode 100755 index 0000000000..aecfa3bf63 --- /dev/null +++ b/libvirt/tests/src/virtual_disks/virtual_disks_ccw_addr.py @@ -0,0 +1,178 @@ +import logging +import os + +from avocado.utils import process + +from virttest import virt_vm +from virttest import virsh +from virttest import utils_libvirtd +from virttest import utils_split_daemons + +from virttest.libvirt_xml import vm_xml, xcepts + +from virttest.utils_test import libvirt +from virttest.utils_libvirt import libvirt_disk + + +def create_ccw_addr_controller(params): + """ + Create one ccw address controller device + + :param params: dict wrapped with params + """ + + contr_dict = {'controller_type': 'scsi', + 'controller_index': '10'} + + ccw_addr_controller = libvirt.create_controller_xml(contr_dict) + + addr_dict = eval(params.get("addr_attrs")) + ccw_addr_controller.address = ccw_addr_controller.new_controller_address( + **{"attrs": addr_dict}) + logging.debug("create_ccw_addr_controller xml: %s", ccw_addr_controller) + return ccw_addr_controller + + +def create_ccw_addr_rng(params): + """ + Create one ccw address rng device + + :param params: dict wrapped with params + """ + rng = libvirt.create_rng_xml(params) + addr_dict = eval(params.get("addr_attrs")) + rng.address = rng.new_rng_address( + **{"attrs": addr_dict}) + logging.debug("create_ccw_addr_rng xml: %s", rng) + return rng + + +def create_ccw_addr_disk(params): + """ + Create one ccw address disk + + :param params: dict wrapped with params + """ + type_name = params.get("type_name") + disk_device = params.get("device_type") + device_target = params.get("target_dev") + 
device_bus = params.get("target_bus") + device_format = params.get("target_format") + source_file_path = params.get("virt_disk_device_source") + disk_src_dict = {"attrs": {"file": source_file_path}} + addr_str = params.get("addr_attrs") + + if source_file_path: + libvirt.create_local_disk("file", source_file_path, 1, device_format) + ccw_addr_disk = libvirt_disk.create_primitive_disk_xml( + type_name, disk_device, + device_target, device_bus, + device_format, disk_src_dict, None) + if addr_str: + addr_dict = eval(addr_str) + ccw_addr_disk.address = ccw_addr_disk.new_disk_address( + **{"attrs": addr_dict}) + logging.debug("create_ccw_addr_disk xml: %s", ccw_addr_disk) + return ccw_addr_disk + + +def check_libvirtd_process_id(ori_pid_libvirtd, test): + """ + Check libvirtd process id not change + + :param params: original libvirtd process id + :param test: test assert object + """ + if not utils_split_daemons.is_modular_daemon(): + aft_pid_libvirtd = process.getoutput("pidof libvirtd") + if not utils_libvirtd.libvirtd_is_running() or ori_pid_libvirtd != aft_pid_libvirtd: + test.fail("Libvirtd crash after attaching ccw addr devices") + + +def run(test, params, env): + """ + Test attach device with ccw address option. + + 1.Prepare test environment,destroy or suspend a VM. + 2.Prepare test xml for different devices. + 3.Perform test operation. + 4.Recover test environment. + 5.Confirm the test result. + """ + + vm_name = params.get("main_vm") + vm = env.get_vm(vm_name) + virsh_dargs = {'debug': True, 'ignore_status': True} + + # Disk specific attributes. 
+ image_path = params.get("virt_disk_device_source", "/var/lib/libvirt/images/test.img") + backend_device = params.get("backend_device", "disk") + logging.debug("eval devei backed:%s", backend_device) + + hotplug = "yes" == params.get("virt_device_hotplug") + status_error = "yes" == params.get("status_error") + define_error = "yes" == params.get("define_error", "no") + expected_fails_msg = [] + error_msg = params.get("error_msg", "cannot use CCW address type for device") + expected_fails_msg.append(error_msg) + + device_obj = None + + # Back up xml file. + vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) + try: + vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) + if backend_device == "disk": + device_obj = create_ccw_addr_disk(params) + elif backend_device == "rng": + device_obj = create_ccw_addr_rng(params) + elif backend_device == "controller": + device_obj = create_ccw_addr_controller(params) + # Check libvirtd should not crash during the process + if not utils_split_daemons.is_modular_daemon(): + ori_pid_libvirtd = process.getoutput("pidof libvirtd") + if not hotplug: + # Sync VM xml. + vmxml.add_device(device_obj) + vmxml.sync() + vm.start() + vm.wait_for_login().close() + if status_error: + if hotplug: + logging.info("attaching devices, expecting error...") + result = virsh.attach_device(vm_name, device_obj.xml, debug=True) + libvirt.check_result(result, expected_fails=expected_fails_msg) + else: + test.fail("VM started unexpectedly.") + except virt_vm.VMStartError as e: + if status_error: + if hotplug: + test.fail("In hotplug scenario, VM should " + "start successfully but not." + "Error: %s", str(e)) + else: + logging.debug("VM failed to start as expected." + "Error: %s", str(e)) + else: + test.fail("VM failed to start." 
+ "Error: %s" % str(e)) + check_libvirtd_process_id(ori_pid_libvirtd, test) + except xcepts.LibvirtXMLError as xml_error: + if not define_error: + test.fail("Failed to define VM:\n%s" % xml_error) + else: + logging.info("As expected, failed to define VM") + check_libvirtd_process_id(ori_pid_libvirtd, test) + except Exception as ex: + test.fail("unexpected exception happen: %s" % str(ex)) + check_libvirtd_process_id(ori_pid_libvirtd, test) + finally: + # Recover VM. + if vm.is_alive(): + vm.destroy(gracefully=False) + logging.info("Restoring vm...") + vmxml_backup.sync() + # Clean up images + for file_path in [image_path]: + if os.path.exists(file_path): + os.remove(file_path) From 29247549997ce08c54cfdf9713b82be4de9abe66 Mon Sep 17 00:00:00 2001 From: Kyla Zhang Date: Fri, 3 Dec 2021 03:25:33 -0500 Subject: [PATCH 48/58] Add cmd status check for blockcopy As the old method will ignore the cmd err check so when it happened it will timeout Signed-off-by: Kyla Zhang --- libvirt/tests/src/virsh_cmd/domain/virsh_blockcopy.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_blockcopy.py b/libvirt/tests/src/virsh_cmd/domain/virsh_blockcopy.py index b72438db41..ef08049647 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_blockcopy.py +++ b/libvirt/tests/src/virsh_cmd/domain/virsh_blockcopy.py @@ -172,6 +172,9 @@ def finish_job(vm_name, target, timeout): """ job_time = 0 while job_time < timeout: + # Check cmd execute status and report error directly if have + virsh.blockjob(vm_name, target, "--info", debug=True, ignore_status=False) + # As BZ#1359679, blockjob may disappear during the process, # so we need check it all the time if utl.check_blockjob(vm_name, target, 'none', '0'): From 5e57445645a4118b30ea0b20325e8bd4943192a5 Mon Sep 17 00:00:00 2001 From: Sebastian Mitterle Date: Wed, 1 Dec 2021 11:24:57 -0500 Subject: [PATCH 49/58] virtual_disks_dasd: add test case Add test case to confirm dasd partition tables can be read 
when using virtio-blk. Signed-off-by: Sebastian Mitterle --- .../cfg/virtual_disks/virtual_disks_dasd.cfg | 6 + .../src/virtual_disks/virtual_disks_dasd.py | 146 ++++++++++++++++++ provider/vfio/ccw.py | 4 + 3 files changed, 156 insertions(+) create mode 100644 libvirt/tests/cfg/virtual_disks/virtual_disks_dasd.cfg create mode 100644 libvirt/tests/src/virtual_disks/virtual_disks_dasd.py diff --git a/libvirt/tests/cfg/virtual_disks/virtual_disks_dasd.cfg b/libvirt/tests/cfg/virtual_disks/virtual_disks_dasd.cfg new file mode 100644 index 0000000000..7af6a843cd --- /dev/null +++ b/libvirt/tests/cfg/virtual_disks/virtual_disks_dasd.cfg @@ -0,0 +1,6 @@ +- virtual_disks.dasd: + type = virtual_disks_dasd + only s390-virtio + variants: + - read_native_partition_table: + diff --git a/libvirt/tests/src/virtual_disks/virtual_disks_dasd.py b/libvirt/tests/src/virtual_disks/virtual_disks_dasd.py new file mode 100644 index 0000000000..f77a3486e7 --- /dev/null +++ b/libvirt/tests/src/virtual_disks/virtual_disks_dasd.py @@ -0,0 +1,146 @@ +# pylint: disable=spelling +# disable pylint spell checker to allow for dasda, fdasda, vdb, vda, virtio, blk +import logging +import re + +from avocado.core.exceptions import TestError + +from virttest import virsh +from virttest.utils_zchannels import SubchannelPaths +from virttest.utils_misc import cmd_status_output, wait_for +from virttest.libvirt_xml.vm_xml import VMXML + +from provider.vfio import ccw + +TEST_DASD_ID = None +TARGET = "vdb" # suppose guest has only one disk 'vda' + + +def get_partitioned_dasd_path(): + """ + Selects and prepares DASD for test case + + :return path: absolute path to block device, e.g. 
'/dev/dasda' + """ + paths = SubchannelPaths() + paths.get_info() + device = paths.get_first_unused_and_safely_removable() + if not device: + raise TestError("Couldn't find dasd device for test") + global TEST_DASD_ID + TEST_DASD_ID = device[paths.HEADER["Device"]] + enable_disk(TEST_DASD_ID) + disk_path = get_device_path(TEST_DASD_ID) + wait_for(lambda: ccw.format_dasd(disk_path, None), 10, first=1.0) + wait_for(lambda: ccw.make_dasd_part(disk_path, None), 10, first=1.0) + return disk_path + + +def enable_disk(disk_id): + """ + Enables the disk so it can be used + + :param id: disk id cssid.ssid.devno, e.g. 0.0.5000 + :raises: TestError if can't use disk + """ + + cmd = "chzdev -e %s" % disk_id + err, out = cmd_status_output(cmd, shell=True) + if err: + raise TestError("Couldn't enable dasd '%s'. %s" % (disk_id, out)) + + +def disable_disk(disk_id): + """ + Enables the disk so it can be used + + :param disk_id: disk id cssid.ssid.devno, e.g. 0.0.5000 + :raises: TestError if can't use disk + """ + + cmd = "chzdev -d %s" % disk_id + err, out = cmd_status_output(cmd, shell=True) + if err: + raise TestError("Couldn't disable dasd '%s'. %s" % (disk_id, out)) + + +def get_device_path(disk_id): + """ + Gets the device path for the DASD disk + + :param disk_id: disk id cssid.ssid.devno, e.g. 0.0.5000 + :return: absolute device path, e.g. '/dev/dasda' + """ + + cmd = "lszdev %s" % disk_id + err, out = cmd_status_output(cmd, shell=True) + if err: + raise TestError("Couldn't get device info. %s" % out) + """ Expected output looks like: + TYPE ID ON PERS NAMES + dasd-eckd 0.0.5000 yes yes dasda + """ + try: + info = out.split('\n') + values = re.split(r"\s+", info[1]) + name = values[-1] + return "/dev/" + name + except: + raise TestError("Couldn't create device path from '%s', '%s', '%s'" % + (out, info, values)) + + +def attach_disk(vm_name, target, path): + """ + Attaches the disk on path as block device + + :param vm_name: VM name + :param target: //target@dev, e.g. 
'vdb' + :param path: device path e.g. '/dev/dasda' + """ + + source_info = " --sourcetype block" + virsh.attach_disk(vm_name, path, target, source_info, ignore_status=False) + + +def check_dasd_partition_table(session, device_target): + """ + Checks that the partition table can be read + with 'fdasd' + + :param session: guest session, run command on host if None + :param device_target: the expected target device name, e.g. 'vdb' + """ + + cmd = "fdasd -p /dev/%s" % device_target + err, out = cmd_status_output(cmd, shell=True, session=session) + if err or not re.findall("reading vtoc.*ok", out): + raise TestError("Couldn't get partition table. %s" % out) + logging.debug("Confirmed partition table was read correctly:") + logging.debug(out) + + +def run(test, params, env): + """ + Confirm native 'dasd' partitions can be read + when attached via 'virtio-blk' + """ + + vm_name = params.get("main_vm") + vm = env.get_vm(vm_name) + + vmxml = VMXML.new_from_inactive_dumpxml(vm_name) + backup_xml = vmxml.copy() + + try: + disk_path = get_partitioned_dasd_path() + attach_disk(vm_name, TARGET, disk_path) + + session = vm.wait_for_login() + check_dasd_partition_table(session, TARGET) + finally: + # sync will release attached disk, precondition for disablement + backup_xml.sync() + global TEST_DASD_ID + if TEST_DASD_ID: + disable_disk(TEST_DASD_ID) diff --git a/provider/vfio/ccw.py b/provider/vfio/ccw.py index c489c8e1c8..315f2e5fcc 100644 --- a/provider/vfio/ccw.py +++ b/provider/vfio/ccw.py @@ -72,12 +72,14 @@ def make_dasd_part(path, session): :param path: dasd disk path, e.g. /dev/dasda :param session: guest session + :return: True if partitioning succeeded """ cmd = "fdasd -a %s" % path err, out = cmd_status_output(cmd, shell=True, session=session) if err: raise TestError("Couldn't create partition. %s" % out) + return True def make_dasd_fs(path, part, session): @@ -101,12 +103,14 @@ def format_dasd(path, session): :param path: dasd disk path, e.g. 
/dev/dasda :param session: guest session :raises TestError: if disk can't be formatted + :return: True if formatting succeeded """ cmd = "dasdfmt -b 4096 -M quick --force -p -y %s" % path err, out = cmd_status_output(cmd, shell=True, session=session) if err: raise TestError("Couldn't format disk. %s" % out) + return True def umount(session): From 72f3f1503b95df18df8e2e009690e818b860b506 Mon Sep 17 00:00:00 2001 From: lcheng Date: Mon, 6 Dec 2021 17:19:08 +0800 Subject: [PATCH 50/58] migrate_option_mix: Fix variable 'vm_xml_backup' reference issue Fix error: UnboundLocalError: local variable 'vm_xml_backup' referenced before assignment Signed-off-by: lcheng --- .../domain/virsh_migrate_option_mix.py | 25 +++++++++---------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_migrate_option_mix.py b/libvirt/tests/src/virsh_cmd/domain/virsh_migrate_option_mix.py index 95873ee2bc..ba7e479353 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_migrate_option_mix.py +++ b/libvirt/tests/src/virsh_cmd/domain/virsh_migrate_option_mix.py @@ -48,6 +48,18 @@ def cleanup_vm(vm, vm_name='', uri=''): if isinstance(v, string_types) and v.count("EXAMPLE"): test.cancel("Please set real value for %s" % v) + # Back up vm name for recovery in finally + vm_name_backup = params.get("migrate_main_vm") + # Get a vm object for migration + logging.debug("Get a vm object for migration") + vm = env.get_vm(vm_name_backup) + + # Back up vm xml for recovery in finally + logging.debug("Backup vm xml before migration") + vm_xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) + if not vm_xml_backup: + test.error("Backing up xmlfile failed.") + # Params for virsh migrate options: live_migration = params.get("live_migration") == "yes" offline_migration = params.get("offline_migration") == "yes" @@ -154,19 +166,6 @@ def cleanup_vm(vm, vm_name='', uri=''): transport_port = '16509' obj_migration.migrate_pre_setup(dest_uri, params, 
ports=transport_port) - # Back up vm name for recovery in finally - vm_name_backup = params.get("migrate_main_vm") - - # Get a vm object for migration - logging.debug("Get a vm object for migration") - vm = env.get_vm(vm_name_backup) - - # Back up vm xml for recovery in finally - logging.debug("Backup vm xml before migration") - vm_xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) - if not vm_xml_backup: - test.error("Backing up xmlfile failed.") - # Prepare shared disk in vm xml for live migration: # Change the source of the first disk of vm to shared disk if live_migration: From c9fb1d0ab57f1b085ef0eac06521bbf3413de054 Mon Sep 17 00:00:00 2001 From: Sebastian Mitterle Date: Tue, 7 Dec 2021 10:31:47 -0500 Subject: [PATCH 51/58] virt_install: test with fix mdev nodedev and manual log inspection Test case currently runs and can confirm in lscss output nodedev has been attached correctly TODO: 1. create nodedev inside code (same as for lbvirt-ccw_passthrough) 2. test if attached at expected address Signed-off-by: Sebastian Mitterle --- .../tests/cfg/virt_install/hostdev_mdev.cfg | 7 ++ .../tests/cfg/virt_install/s390x_hostdev.cfg | 5 -- .../tests/src/virt_install/hostdev_mdev.py | 73 +++++++++++++++++++ .../tests/src/virt_install/s390x_hostdev.py | 55 -------------- 4 files changed, 80 insertions(+), 60 deletions(-) create mode 100644 virttools/tests/cfg/virt_install/hostdev_mdev.cfg delete mode 100644 virttools/tests/cfg/virt_install/s390x_hostdev.cfg create mode 100644 virttools/tests/src/virt_install/hostdev_mdev.py delete mode 100644 virttools/tests/src/virt_install/s390x_hostdev.py diff --git a/virttools/tests/cfg/virt_install/hostdev_mdev.cfg b/virttools/tests/cfg/virt_install/hostdev_mdev.cfg new file mode 100644 index 0000000000..90ccc0fe3b --- /dev/null +++ b/virttools/tests/cfg/virt_install/hostdev_mdev.cfg @@ -0,0 +1,7 @@ +- virt_install.hostdev.mdev: + type = hostdev_mdev + variants: + - check_present_inside_guest: + only s390-virtio + 
mdev_type = vfio_ccw-io + address_type = ccw diff --git a/virttools/tests/cfg/virt_install/s390x_hostdev.cfg b/virttools/tests/cfg/virt_install/s390x_hostdev.cfg deleted file mode 100644 index 51be7040cf..0000000000 --- a/virttools/tests/cfg/virt_install/s390x_hostdev.cfg +++ /dev/null @@ -1,5 +0,0 @@ -- s390x_hostdev: - type = s390x_hostdev - only s390-virtio - variants: - - check_present_inside_guest: diff --git a/virttools/tests/src/virt_install/hostdev_mdev.py b/virttools/tests/src/virt_install/hostdev_mdev.py new file mode 100644 index 0000000000..26250fde68 --- /dev/null +++ b/virttools/tests/src/virt_install/hostdev_mdev.py @@ -0,0 +1,73 @@ +import os +import logging +from avocado.core import data_dir +from avocado.core.exceptions import TestError +from provider.vfio import ccw +from virttest.libvirt_xml.vm_xml import VMXML +from virttest.utils_misc import cmd_status_output +from virttest import storage + +def disk_for_import(vmxml): + """ + Returns the absolute path to a disk image for import. + Assume the boot image is the first disk and an image file. 
+ + :param vmxml: VMXML instance + """ + disks = vmxml.get_disk_all() + disk_list = list(disks.values()) + first_disk = disk_list[0] + return first_disk.find('source').get('file') + + +def mdev_nodedev_for(mdev_type): + """ creates and returns name of a nodedev of type mdev_type """ + return "mdev_59ce75a4_7419_4426_8689_8d0c2002f23c_0_0_26aa" + + +def virt_install_with_hostdev(vm_name, mdev_nodedev, target_address, disk_path): + """ Runs virt-install with hostdev""" + cmd = ("virt-install --import --name %s" + " --hostdev %s,%s" + " --disk %s" + " --vcpus 2 --memory 2048" + " --nographics --noautoconsole" % + (vm_name, mdev_nodedev, target_address, disk_path)) + err, out = cmd_status_output(cmd, shell=True, verbose=True) + if err: + raise TestError("Couldn't install vm with hostdev: %s" % out) + + +def target_address_for(address_type): + """ returns a valid target device address """ + return "address.type=ccw,address.cssid=0xfe,address.ssid=0x0,address.devno=0x1111" + + +def run(test, params, env): + """ + Confirm that a mediated device can be used by virt-install. + For this we import a disk we know will boot + and check the result inside the guest. 
+ """ + + vm_name = params.get("main_vm") + vm = env.get_vm(vm_name) + vmxml = VMXML.new_from_inactive_dumpxml(vm_name) + mdev_type = params.get("mdev_type", "vfio_ccw-io") + address_type = params.get("address_type", "ccw") + + try: + + vm.undefine() + disk = disk_for_import(vmxml) + mdev_nodedev = mdev_nodedev_for(mdev_type) + target_address = target_address_for(address_type) + virt_install_with_hostdev(vm_name, mdev_nodedev, target_address, disk) + + session = vm.wait_for_login() + _, out = cmd_status_output("lscss", session=session, + shell=True, verbose=True) + logging.debug("SMIT: %s" % out) + + finally: + vmxml.sync() diff --git a/virttools/tests/src/virt_install/s390x_hostdev.py b/virttools/tests/src/virt_install/s390x_hostdev.py deleted file mode 100644 index 4d304e2e86..0000000000 --- a/virttools/tests/src/virt_install/s390x_hostdev.py +++ /dev/null @@ -1,55 +0,0 @@ -import os -import logging -from avocado.core import data_dir -from virttest.utils_zcrypt import CryptoDeviceInfoBuilder, \ - APMaskHelper, load_vfio_ap, unload_vfio_ap -from provider.vfio import ccw -from uuid import uuid4 -from virttest import storage - -def run(test, env, params): - disk = storage.get_image_filename_filesytem(env, data_dir.get_data_dir())) - - pass - -def __fake_run(test, env, params): - """ - Import machine with supported --hostdevs on s390x. - Start the machine and confirm the passthrough. 
- """ - - ccw.assure_preconditions() - schid, chpids = ccw.get_device_info() - uuid = str(uuid4()) - ccw.set_override(schid) - ccw.start_device(uuid, schid) - - - load_vfio_ap() - info = CryptoDeviceInfoBuilder.get() - - devices = [info.domains[0]] - mask_helper = APMaskHelper.from_infos(devices) - matrix_dev = MatrixDevice.from_infos(devices) - - result = vires.nodedev_list(cap="mdev", debug=True) - logging.debug(result, str(result)) - - """ - vm.start() - - if not ccw.device_is_listed(session, chpids): - test.fail("CCW device not listed") - """ - try: - pass - finally: - if matrix_dev: - matrix_dev.unassign_all() - if mask_helper: - mask_helper.unassign_all() - unload_vfio_ap() - if uuid: - ccw.stop_device(uuid) - if schid: - ccw.unset_override(schid) From 1ffc580449644a4c8efca6759947a9906f440561 Mon Sep 17 00:00:00 2001 From: Sebastian Mitterle Date: Tue, 7 Dec 2021 12:07:34 -0500 Subject: [PATCH 52/58] virttools: add passing hostdev mdev test inspektor passes, need to check pylint Signed-off-by: Sebastian Mitterle --- provider/vfio/ccw.py | 15 ++ .../tests/cfg/virt_install/hostdev_mdev.cfg | 1 - .../tests/src/virt_install/hostdev_mdev.py | 139 +++++++++++++++--- 3 files changed, 134 insertions(+), 21 deletions(-) diff --git a/provider/vfio/ccw.py b/provider/vfio/ccw.py index c489c8e1c8..f548e98a2e 100644 --- a/provider/vfio/ccw.py +++ b/provider/vfio/ccw.py @@ -137,6 +137,21 @@ def mount(session): raise TestError("Couldn't mount partition. %s" % out) +def set_device_offline(device_id, session=None): + """ + Sets device offline + + :param device_id: cssid.ssid.devno, e.g. 0.0.560a + :param session: guest session, command is run on host if None + :raises TestError: if the device can't be set online + """ + + cmd = "chccwdev -d %s" % device_id + err, out = cmd_status_output(cmd, shell=True, session=session) + if err: + raise TestError("Could not set device offline. 
%s" % out) + + def set_device_online(device_id, session=None): """ Sets device online diff --git a/virttools/tests/cfg/virt_install/hostdev_mdev.cfg b/virttools/tests/cfg/virt_install/hostdev_mdev.cfg index 90ccc0fe3b..747e5cd64c 100644 --- a/virttools/tests/cfg/virt_install/hostdev_mdev.cfg +++ b/virttools/tests/cfg/virt_install/hostdev_mdev.cfg @@ -4,4 +4,3 @@ - check_present_inside_guest: only s390-virtio mdev_type = vfio_ccw-io - address_type = ccw diff --git a/virttools/tests/src/virt_install/hostdev_mdev.py b/virttools/tests/src/virt_install/hostdev_mdev.py index 26250fde68..37964b5cc5 100644 --- a/virttools/tests/src/virt_install/hostdev_mdev.py +++ b/virttools/tests/src/virt_install/hostdev_mdev.py @@ -1,11 +1,106 @@ -import os -import logging -from avocado.core import data_dir +from uuid import uuid4 from avocado.core.exceptions import TestError +from avocado.core.exceptions import TestFail from provider.vfio import ccw from virttest.libvirt_xml.vm_xml import VMXML from virttest.utils_misc import cmd_status_output -from virttest import storage +from virttest import virsh + + +class MdevHandler(object): + """ Base class for mdev type specific implementations """ + + def create_nodedev(self): + """ Creates the mdev and returns its name """ + raise NotImplementedError() + + def get_target_address(self): + """ Returns a target address to use for hostdev """ + raise NotImplementedError() + + def check_device_present_inside_guest(self, session): + """ + Checks if the host device is present inside the guest + + :param session: guest session + """ + raise NotImplementedError() + + def clean_up(self): + """ Stops the mediated device and returns resources to the host """ + raise NotImplementedError() + + @staticmethod + def from_type(mdev_type): + """ + Creates implementing instance for mdev_type + + :param mdev_type: The mediated device type as by nodedev API + """ + if mdev_type == "vfio_ccw-io": + return CcwMdevHandler() + else: + raise TestError("Test doesn't 
know how to handle %s." % mdev_type) + + +class CcwMdevHandler(MdevHandler): + """ Class implementing test methods for vfio_ccw-io """ + + def __init__(self): + self.uuid = None + self.chpids = None + self.schid = None + self.target_address = None + self.expected_device_address = None + self.device_id = None + + def create_nodedev(self): + """ + Creates a mediated device of a specific type + and returns its name from libvirt. + """ + self.schid, self.chpids = ccw.get_device_info() + self.device_id, _ = ccw.get_first_device_identifiers(self.chpids, None) + ccw.set_override(self.schid) + self.uuid = str(uuid4()) + ccw.start_device(self.uuid, self.schid) + + return first_mdev_nodedev_name() + + def get_target_address(self): + """ + Returns a valid target device address + + :param address_type: guest device address type + """ + self.target_address = "address.type=ccw,address.cssid=0xfe,address.ssid=0x0,address.devno=0x1111" + self.expected_device_address = "0.0.1111" + return self.target_address + + def check_device_present_inside_guest(self, session): + """ + Fails the test if the device can't be found inside the guest. + + :param session: guest session + :raises: TestFail if device not found + """ + device, _ = ccw.get_first_device_identifiers(self.chpids, session) + if not device == self.expected_device_address: + raise TestFail("Couldn't find device inside guest." + "Expected address %s, found %s." % + (self.expected_device_address, device)) + + def clean_up(self): + """ + Returns the mdev resources to the host. 
+ """ + if self.uuid: + ccw.stop_device(self.uuid) + if self.schid: + ccw.unset_override(self.schid) + if self.device_id: + ccw.set_device_offline(self.device_id) + def disk_for_import(vmxml): """ @@ -20,9 +115,14 @@ def disk_for_import(vmxml): return first_disk.find('source').get('file') -def mdev_nodedev_for(mdev_type): - """ creates and returns name of a nodedev of type mdev_type """ - return "mdev_59ce75a4_7419_4426_8689_8d0c2002f23c_0_0_26aa" +def first_mdev_nodedev_name(): + """ Returns the first nodedev of type mdev known to libvirt """ + result = virsh.nodedev_list(cap="mdev", debug=True) + device_names = result.stdout.strip().splitlines() + if result.exit_status or len(device_names) == 0: + raise TestError("Couldn't create nodedev. %s. %s." % + (result.stderr, result.stdout)) + return device_names[0] def virt_install_with_hostdev(vm_name, mdev_nodedev, target_address, disk_path): @@ -38,36 +138,35 @@ def virt_install_with_hostdev(vm_name, mdev_nodedev, target_address, disk_path): raise TestError("Couldn't install vm with hostdev: %s" % out) -def target_address_for(address_type): - """ returns a valid target device address """ - return "address.type=ccw,address.cssid=0xfe,address.ssid=0x0,address.devno=0x1111" - - def run(test, params, env): """ Confirm that a mediated device can be used by virt-install. - For this we import a disk we know will boot - and check the result inside the guest. + For this we import a disk we know will boot and check the + result inside the guest. + The mediated device is created by the test and assumed + to be the only mediated device in the test environment. 
""" vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vmxml = VMXML.new_from_inactive_dumpxml(vm_name) mdev_type = params.get("mdev_type", "vfio_ccw-io") - address_type = params.get("address_type", "ccw") + handler = None try: vm.undefine() + handler = MdevHandler.from_type(mdev_type) disk = disk_for_import(vmxml) - mdev_nodedev = mdev_nodedev_for(mdev_type) - target_address = target_address_for(address_type) + mdev_nodedev = handler.create_nodedev() + target_address = handler.get_target_address() + virt_install_with_hostdev(vm_name, mdev_nodedev, target_address, disk) session = vm.wait_for_login() - _, out = cmd_status_output("lscss", session=session, - shell=True, verbose=True) - logging.debug("SMIT: %s" % out) + handler.check_device_present_inside_guest(session) finally: vmxml.sync() + if handler: + handler.clean_up() From d110b02ed335c765b076176e9bb6ce91effdba4d Mon Sep 17 00:00:00 2001 From: Sebastian Mitterle Date: Fri, 8 Oct 2021 06:12:59 -0400 Subject: [PATCH 53/58] virttools: add type and first test Add new test type for virt tools on virt-manager repo: - virt-install - virt-clone - virt-xml - virt-manager Also, add first test for hostdev support of virt-install on s390x Signed-off-by: Sebastian Mitterle --- .../tests/cfg/virt_install/s390x_hostdev.cfg | 5 +++ .../tests/src/virt_install/s390x_hostdev.py | 45 +++++++++++++++++++ 2 files changed, 50 insertions(+) create mode 100644 virttools/tests/cfg/virt_install/s390x_hostdev.cfg create mode 100644 virttools/tests/src/virt_install/s390x_hostdev.py diff --git a/virttools/tests/cfg/virt_install/s390x_hostdev.cfg b/virttools/tests/cfg/virt_install/s390x_hostdev.cfg new file mode 100644 index 0000000000..51be7040cf --- /dev/null +++ b/virttools/tests/cfg/virt_install/s390x_hostdev.cfg @@ -0,0 +1,5 @@ +- s390x_hostdev: + type = s390x_hostdev + only s390-virtio + variants: + - check_present_inside_guest: diff --git a/virttools/tests/src/virt_install/s390x_hostdev.py 
b/virttools/tests/src/virt_install/s390x_hostdev.py new file mode 100644 index 0000000000..38ecd0bfba --- /dev/null +++ b/virttools/tests/src/virt_install/s390x_hostdev.py @@ -0,0 +1,45 @@ +import logging +from virttest.utils_zcrypt import CryptoDeviceInfoBuilder, \ + APMaskHelper, load_vfio_ap, unload_vfio_ap +from provider.vfio import ccw +from uuid import uuid4 + +def run(test, env, params): + """ + Import machine with supported --hostdevs on s390x. + Start the machine and confirm the passthrough. + """ + + ccw.assure_preconditions() + schid, chpids = ccw.get_device_info() + uuid = str(uuid4()) + ccw.set_override(schid) + ccw.start_device(uuid, schid) + + + load_vfio_ap() + info = CryptoDeviceInfoBuilder.get() + + devices = [info.domains[0]] + mask_helper = APMaskHelper.from_infos(devices) + matrix_dev = MatrixDevice.from_infos(devices) + + result = vires.nodedev_list(cap="mdev", debug=True) + logging.debug(result, str(result)) + + """ + vm.start() + + if not ccw.device_is_listed(session, chpids): + test.fail("CCW device not listed") + """ + finally: + if matrix_dev: + matrix_dev.unassign_all() + if mask_helper: + mask_helper.unassign_all() + unload_vfio_ap() + if uuid: + ccw.stop_device(uuid) + if schid: + ccw.unset_override(schid) From c79b09b35e48713d0f5b5a9012ee2875db0bc0d5 Mon Sep 17 00:00:00 2001 From: Sebastian Mitterle Date: Tue, 16 Nov 2021 12:33:02 -0500 Subject: [PATCH 54/58] Determine image path Signed-off-by: Sebastian Mitterle --- virttools/tests/src/virt_install/s390x_hostdev.py | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/virttools/tests/src/virt_install/s390x_hostdev.py b/virttools/tests/src/virt_install/s390x_hostdev.py index 38ecd0bfba..4d304e2e86 100644 --- a/virttools/tests/src/virt_install/s390x_hostdev.py +++ b/virttools/tests/src/virt_install/s390x_hostdev.py @@ -1,10 +1,18 @@ +import os import logging +from avocado.core import data_dir from virttest.utils_zcrypt import CryptoDeviceInfoBuilder, \ APMaskHelper, 
load_vfio_ap, unload_vfio_ap from provider.vfio import ccw from uuid import uuid4 +from virttest import storage def run(test, env, params): + disk = storage.get_image_filename_filesytem(env, data_dir.get_data_dir())) + + pass + +def __fake_run(test, env, params): """ Import machine with supported --hostdevs on s390x. Start the machine and confirm the passthrough. @@ -33,6 +41,8 @@ def run(test, env, params): if not ccw.device_is_listed(session, chpids): test.fail("CCW device not listed") """ + try: + pass finally: if matrix_dev: matrix_dev.unassign_all() From c3d2a3309882b3377b845d239454614f0879ac6e Mon Sep 17 00:00:00 2001 From: Sebastian Mitterle Date: Tue, 7 Dec 2021 10:31:47 -0500 Subject: [PATCH 55/58] virt_install: test with fix mdev nodedev and manual log inspection Test case currently runs and can confirm in lscss output nodedev has been attached correctly TODO: 1. create nodedev inside code (same as for lbvirt-ccw_passthrough) 2. test if attached at expected address Signed-off-by: Sebastian Mitterle --- .../tests/cfg/virt_install/hostdev_mdev.cfg | 7 ++ .../tests/cfg/virt_install/s390x_hostdev.cfg | 5 -- .../tests/src/virt_install/hostdev_mdev.py | 73 +++++++++++++++++++ .../tests/src/virt_install/s390x_hostdev.py | 55 -------------- 4 files changed, 80 insertions(+), 60 deletions(-) create mode 100644 virttools/tests/cfg/virt_install/hostdev_mdev.cfg delete mode 100644 virttools/tests/cfg/virt_install/s390x_hostdev.cfg create mode 100644 virttools/tests/src/virt_install/hostdev_mdev.py delete mode 100644 virttools/tests/src/virt_install/s390x_hostdev.py diff --git a/virttools/tests/cfg/virt_install/hostdev_mdev.cfg b/virttools/tests/cfg/virt_install/hostdev_mdev.cfg new file mode 100644 index 0000000000..90ccc0fe3b --- /dev/null +++ b/virttools/tests/cfg/virt_install/hostdev_mdev.cfg @@ -0,0 +1,7 @@ +- virt_install.hostdev.mdev: + type = hostdev_mdev + variants: + - check_present_inside_guest: + only s390-virtio + mdev_type = vfio_ccw-io + address_type = 
ccw diff --git a/virttools/tests/cfg/virt_install/s390x_hostdev.cfg b/virttools/tests/cfg/virt_install/s390x_hostdev.cfg deleted file mode 100644 index 51be7040cf..0000000000 --- a/virttools/tests/cfg/virt_install/s390x_hostdev.cfg +++ /dev/null @@ -1,5 +0,0 @@ -- s390x_hostdev: - type = s390x_hostdev - only s390-virtio - variants: - - check_present_inside_guest: diff --git a/virttools/tests/src/virt_install/hostdev_mdev.py b/virttools/tests/src/virt_install/hostdev_mdev.py new file mode 100644 index 0000000000..26250fde68 --- /dev/null +++ b/virttools/tests/src/virt_install/hostdev_mdev.py @@ -0,0 +1,73 @@ +import os +import logging +from avocado.core import data_dir +from avocado.core.exceptions import TestError +from provider.vfio import ccw +from virttest.libvirt_xml.vm_xml import VMXML +from virttest.utils_misc import cmd_status_output +from virttest import storage + +def disk_for_import(vmxml): + """ + Returns the absolute path to a disk image for import. + Assume the boot image is the first disk and an image file. 
+ + :param vmxml: VMXML instance + """ + disks = vmxml.get_disk_all() + disk_list = list(disks.values()) + first_disk = disk_list[0] + return first_disk.find('source').get('file') + + +def mdev_nodedev_for(mdev_type): + """ creates and returns name of a nodedev of type mdev_type """ + return "mdev_59ce75a4_7419_4426_8689_8d0c2002f23c_0_0_26aa" + + +def virt_install_with_hostdev(vm_name, mdev_nodedev, target_address, disk_path): + """ Runs virt-install with hostdev""" + cmd = ("virt-install --import --name %s" + " --hostdev %s,%s" + " --disk %s" + " --vcpus 2 --memory 2048" + " --nographics --noautoconsole" % + (vm_name, mdev_nodedev, target_address, disk_path)) + err, out = cmd_status_output(cmd, shell=True, verbose=True) + if err: + raise TestError("Couldn't install vm with hostdev: %s" % out) + + +def target_address_for(address_type): + """ returns a valid target device address """ + return "address.type=ccw,address.cssid=0xfe,address.ssid=0x0,address.devno=0x1111" + + +def run(test, params, env): + """ + Confirm that a mediated device can be used by virt-install. + For this we import a disk we know will boot + and check the result inside the guest. 
+ """ + + vm_name = params.get("main_vm") + vm = env.get_vm(vm_name) + vmxml = VMXML.new_from_inactive_dumpxml(vm_name) + mdev_type = params.get("mdev_type", "vfio_ccw-io") + address_type = params.get("address_type", "ccw") + + try: + + vm.undefine() + disk = disk_for_import(vmxml) + mdev_nodedev = mdev_nodedev_for(mdev_type) + target_address = target_address_for(address_type) + virt_install_with_hostdev(vm_name, mdev_nodedev, target_address, disk) + + session = vm.wait_for_login() + _, out = cmd_status_output("lscss", session=session, + shell=True, verbose=True) + logging.debug("SMIT: %s" % out) + + finally: + vmxml.sync() diff --git a/virttools/tests/src/virt_install/s390x_hostdev.py b/virttools/tests/src/virt_install/s390x_hostdev.py deleted file mode 100644 index 4d304e2e86..0000000000 --- a/virttools/tests/src/virt_install/s390x_hostdev.py +++ /dev/null @@ -1,55 +0,0 @@ -import os -import logging -from avocado.core import data_dir -from virttest.utils_zcrypt import CryptoDeviceInfoBuilder, \ - APMaskHelper, load_vfio_ap, unload_vfio_ap -from provider.vfio import ccw -from uuid import uuid4 -from virttest import storage - -def run(test, env, params): - disk = storage.get_image_filename_filesytem(env, data_dir.get_data_dir())) - - pass - -def __fake_run(test, env, params): - """ - Import machine with supported --hostdevs on s390x. - Start the machine and confirm the passthrough. 
- """ - - ccw.assure_preconditions() - schid, chpids = ccw.get_device_info() - uuid = str(uuid4()) - ccw.set_override(schid) - ccw.start_device(uuid, schid) - - - load_vfio_ap() - info = CryptoDeviceInfoBuilder.get() - - devices = [info.domains[0]] - mask_helper = APMaskHelper.from_infos(devices) - matrix_dev = MatrixDevice.from_infos(devices) - - result = vires.nodedev_list(cap="mdev", debug=True) - logging.debug(result, str(result)) - - """ - vm.start() - - if not ccw.device_is_listed(session, chpids): - test.fail("CCW device not listed") - """ - try: - pass - finally: - if matrix_dev: - matrix_dev.unassign_all() - if mask_helper: - mask_helper.unassign_all() - unload_vfio_ap() - if uuid: - ccw.stop_device(uuid) - if schid: - ccw.unset_override(schid) From df8e216bcc20af8cb5ac0e381470b54981b36c15 Mon Sep 17 00:00:00 2001 From: Sebastian Mitterle Date: Tue, 7 Dec 2021 12:07:34 -0500 Subject: [PATCH 56/58] virttools: add passing hostdev mdev test inspektor passes, need to check pylint Signed-off-by: Sebastian Mitterle --- provider/vfio/ccw.py | 15 ++ .../tests/cfg/virt_install/hostdev_mdev.cfg | 1 - .../tests/src/virt_install/hostdev_mdev.py | 139 +++++++++++++++--- 3 files changed, 134 insertions(+), 21 deletions(-) diff --git a/provider/vfio/ccw.py b/provider/vfio/ccw.py index 315f2e5fcc..29053680a9 100644 --- a/provider/vfio/ccw.py +++ b/provider/vfio/ccw.py @@ -141,6 +141,21 @@ def mount(session): raise TestError("Couldn't mount partition. %s" % out) +def set_device_offline(device_id, session=None): + """ + Sets device offline + + :param device_id: cssid.ssid.devno, e.g. 0.0.560a + :param session: guest session, command is run on host if None + :raises TestError: if the device can't be set online + """ + + cmd = "chccwdev -d %s" % device_id + err, out = cmd_status_output(cmd, shell=True, session=session) + if err: + raise TestError("Could not set device offline. 
%s" % out) + + def set_device_online(device_id, session=None): """ Sets device online diff --git a/virttools/tests/cfg/virt_install/hostdev_mdev.cfg b/virttools/tests/cfg/virt_install/hostdev_mdev.cfg index 90ccc0fe3b..747e5cd64c 100644 --- a/virttools/tests/cfg/virt_install/hostdev_mdev.cfg +++ b/virttools/tests/cfg/virt_install/hostdev_mdev.cfg @@ -4,4 +4,3 @@ - check_present_inside_guest: only s390-virtio mdev_type = vfio_ccw-io - address_type = ccw diff --git a/virttools/tests/src/virt_install/hostdev_mdev.py b/virttools/tests/src/virt_install/hostdev_mdev.py index 26250fde68..37964b5cc5 100644 --- a/virttools/tests/src/virt_install/hostdev_mdev.py +++ b/virttools/tests/src/virt_install/hostdev_mdev.py @@ -1,11 +1,106 @@ -import os -import logging -from avocado.core import data_dir +from uuid import uuid4 from avocado.core.exceptions import TestError +from avocado.core.exceptions import TestFail from provider.vfio import ccw from virttest.libvirt_xml.vm_xml import VMXML from virttest.utils_misc import cmd_status_output -from virttest import storage +from virttest import virsh + + +class MdevHandler(object): + """ Base class for mdev type specific implementations """ + + def create_nodedev(self): + """ Creates the mdev and returns its name """ + raise NotImplementedError() + + def get_target_address(self): + """ Returns a target address to use for hostdev """ + raise NotImplementedError() + + def check_device_present_inside_guest(self, session): + """ + Checks if the host device is present inside the guest + + :param session: guest session + """ + raise NotImplementedError() + + def clean_up(self): + """ Stops the mediated device and returns resources to the host """ + raise NotImplementedError() + + @staticmethod + def from_type(mdev_type): + """ + Creates implementing instance for mdev_type + + :param mdev_type: The mediated device type as by nodedev API + """ + if mdev_type == "vfio_ccw-io": + return CcwMdevHandler() + else: + raise TestError("Test doesn't 
know how to handle %s." % mdev_type) + + +class CcwMdevHandler(MdevHandler): + """ Class implementing test methods for vfio_ccw-io """ + + def __init__(self): + self.uuid = None + self.chpids = None + self.schid = None + self.target_address = None + self.expected_device_address = None + self.device_id = None + + def create_nodedev(self): + """ + Creates a mediated device of a specific type + and returns its name from libvirt. + """ + self.schid, self.chpids = ccw.get_device_info() + self.device_id, _ = ccw.get_first_device_identifiers(self.chpids, None) + ccw.set_override(self.schid) + self.uuid = str(uuid4()) + ccw.start_device(self.uuid, self.schid) + + return first_mdev_nodedev_name() + + def get_target_address(self): + """ + Returns a valid target device address + + :param address_type: guest device address type + """ + self.target_address = "address.type=ccw,address.cssid=0xfe,address.ssid=0x0,address.devno=0x1111" + self.expected_device_address = "0.0.1111" + return self.target_address + + def check_device_present_inside_guest(self, session): + """ + Fails the test if the device can't be found inside the guest. + + :param session: guest session + :raises: TestFail if device not found + """ + device, _ = ccw.get_first_device_identifiers(self.chpids, session) + if not device == self.expected_device_address: + raise TestFail("Couldn't find device inside guest." + "Expected address %s, found %s." % + (self.expected_device_address, device)) + + def clean_up(self): + """ + Returns the mdev resources to the host. 
+ """ + if self.uuid: + ccw.stop_device(self.uuid) + if self.schid: + ccw.unset_override(self.schid) + if self.device_id: + ccw.set_device_offline(self.device_id) + def disk_for_import(vmxml): """ @@ -20,9 +115,14 @@ def disk_for_import(vmxml): return first_disk.find('source').get('file') -def mdev_nodedev_for(mdev_type): - """ creates and returns name of a nodedev of type mdev_type """ - return "mdev_59ce75a4_7419_4426_8689_8d0c2002f23c_0_0_26aa" +def first_mdev_nodedev_name(): + """ Returns the first nodedev of type mdev known to libvirt """ + result = virsh.nodedev_list(cap="mdev", debug=True) + device_names = result.stdout.strip().splitlines() + if result.exit_status or len(device_names) == 0: + raise TestError("Couldn't create nodedev. %s. %s." % + (result.stderr, result.stdout)) + return device_names[0] def virt_install_with_hostdev(vm_name, mdev_nodedev, target_address, disk_path): @@ -38,36 +138,35 @@ def virt_install_with_hostdev(vm_name, mdev_nodedev, target_address, disk_path): raise TestError("Couldn't install vm with hostdev: %s" % out) -def target_address_for(address_type): - """ returns a valid target device address """ - return "address.type=ccw,address.cssid=0xfe,address.ssid=0x0,address.devno=0x1111" - - def run(test, params, env): """ Confirm that a mediated device can be used by virt-install. - For this we import a disk we know will boot - and check the result inside the guest. + For this we import a disk we know will boot and check the + result inside the guest. + The mediated device is created by the test and assumed + to be the only mediated device in the test environment. 
""" vm_name = params.get("main_vm") vm = env.get_vm(vm_name) vmxml = VMXML.new_from_inactive_dumpxml(vm_name) mdev_type = params.get("mdev_type", "vfio_ccw-io") - address_type = params.get("address_type", "ccw") + handler = None try: vm.undefine() + handler = MdevHandler.from_type(mdev_type) disk = disk_for_import(vmxml) - mdev_nodedev = mdev_nodedev_for(mdev_type) - target_address = target_address_for(address_type) + mdev_nodedev = handler.create_nodedev() + target_address = handler.get_target_address() + virt_install_with_hostdev(vm_name, mdev_nodedev, target_address, disk) session = vm.wait_for_login() - _, out = cmd_status_output("lscss", session=session, - shell=True, verbose=True) - logging.debug("SMIT: %s" % out) + handler.check_device_present_inside_guest(session) finally: vmxml.sync() + if handler: + handler.clean_up() From 69b0509df16e86be464ccd9cebaa0487885070dd Mon Sep 17 00:00:00 2001 From: Sebastian Mitterle Date: Tue, 7 Dec 2021 12:17:23 -0500 Subject: [PATCH 57/58] virt-install: fix tear down Signed-off-by: Sebastian Mitterle --- virttools/tests/src/virt_install/hostdev_mdev.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/virttools/tests/src/virt_install/hostdev_mdev.py b/virttools/tests/src/virt_install/hostdev_mdev.py index 37964b5cc5..2ce7a2e238 100644 --- a/virttools/tests/src/virt_install/hostdev_mdev.py +++ b/virttools/tests/src/virt_install/hostdev_mdev.py @@ -1,3 +1,4 @@ +from time import sleep from uuid import uuid4 from avocado.core.exceptions import TestError from avocado.core.exceptions import TestFail @@ -98,6 +99,9 @@ def clean_up(self): ccw.stop_device(self.uuid) if self.schid: ccw.unset_override(self.schid) + # need to sleep to avoid issue with setting device offline + # adding a wait_for would likely be more complicated + sleep(1) if self.device_id: ccw.set_device_offline(self.device_id) From 46c00770925f2ac7273eb5602ce1cfd3bd01c5e0 Mon Sep 17 00:00:00 2001 From: Sebastian Mitterle Date: Tue, 7 Dec 2021 12:25:42 -0500 
Subject: [PATCH 58/58] spelling: fix some parameter docstrings and add exceptions to spelling ignore file Signed-off-by: Sebastian Mitterle --- provider/vfio/ccw.py | 8 +- spell.ignore | 343 ++++++++++++++++++++++--------------------- 2 files changed, 179 insertions(+), 172 deletions(-) diff --git a/provider/vfio/ccw.py b/provider/vfio/ccw.py index 29053680a9..067bdeda9d 100644 --- a/provider/vfio/ccw.py +++ b/provider/vfio/ccw.py @@ -25,7 +25,7 @@ def read_write_operations_work(session, chpids, makefs=True): Per default the device gets a new filesystem setup. :param session: logged in guest session - :param chipds: string representing CHPIDs, e.g. 11122122 + :param chpids: string representing CHPIDs, e.g. 11122122 :param makefs: if False, the device is expected to have a valid filesystem already :return: True on success @@ -175,7 +175,7 @@ def get_first_device_identifiers(chpids, session): """ Gets the usual device identifier cssid.ssid.devno - :param chpids: chipids where the disk is connected, e.g. "11122122" + :param chpids: chpids where the disk is connected, e.g. "11122122" :param session: guest session :return: Pair of strings, "cssid.ssid.devno" "cssid.ssid.schid" :raises TestError: if the device can't be found inside guest @@ -186,7 +186,7 @@ def get_first_device_identifiers(chpids, session): devices_inside_guest = [x for x in paths.devices if x[paths.HEADER["CHPIDs"]] == chpids] if not devices_inside_guest: - raise TestError("Device with chipds %s wasn't" + raise TestError("Device with chpids %s wasn't" " found inside guest" % chpids) first = devices_inside_guest[0] return first[paths.HEADER["Device"]], first[paths.HEADER["Subchan."]] @@ -198,7 +198,7 @@ def device_is_listed(session, chpids): path ids. :param session: guest console session - :param chipds: chpids where the disk is connected, e.g. "11122122" + :param chpids: chpids where the disk is connected, e.g. 
"11122122" :return: True if device is listed """ diff --git a/spell.ignore b/spell.ignore index 92b0f59aa3..b60e40ad3f 100644 --- a/spell.ignore +++ b/spell.ignore @@ -1,20 +1,170 @@ -aa ABI +AES +Adapte +Analyse +Auth +Autostart +BZ +Backend +Blockcopy +Blockjob +CCW +CN +Ceph +Cgroup +Chunfu +Ckeck +Cleaup +CmdError +Codec +Coldplug +Coldunplug +Config +DAC +DAX +DMI +DNS +Dompmwakeup +EOI +ESC +ESX +Esx +FIXIT +FS +Fibre +Filt +Getlink +GiB +Gib +Gluster +Guestfish +Guestname +HBA +HPT +Hotplug +Hotplugged +Hotunplug +Hugepage +IDE +IOV +IP +IPs +Infiniband +Init +Iscsi +KSM +KVER +KVM +KiB +LUN +LXC +Libvirt +Lifecycle +Luks +MBs +MCS +MLS +MMU +Makesure +Managedsave +Maskable +Maxmem +Mbps +Metabytes +MiB +Miryala +Mitterle +Multicast +NFS +NMI +NUL +Nodedev +Normalise +Numa +OSP +OVS +Occupt +PCI +PF's +PFs +PID +PPC +Passthrough +Poweroff +Poweron +Pre +Prepareguest +Prepend +Prudhvi +QMP +QXL +RBD +RFE +RHV +RNG +RTC +Radix +Readlink +Refall +Reinstall +RemoteFile +Repeate +Resize +SMT +SSL +STP +SVE +Seabios +Setlink +Sparsify +Spinlock +Sriov +Stdout +Str +Subnet +SysRq +TCP +TLS +TODO +TPM +TestError +TestFail +Testcase +Tpm +Tuple +URI +UTC +Uefi +Unbridge +Unclassifed +Uncompress +Undefine +Unmark +Unprotect +Unregister +Unsets +Updae +VF's +VM +VMs +Virtio +XEN +Xen +Yalan +Zhang +aa accel acl acpi acpiphp -Adapte addr addrss -AES aexpect afeter aggregater agregators amd analyse -Analyse ap api args @@ -28,17 +178,14 @@ augeas augpath ausearch auth -Auth autodestroy autogenerated autologin automake autoport autostart -Autostart autotest backend -Backend backgroup backingchain backingstore @@ -53,20 +200,18 @@ binded bindwidth blk blkdev +blkdev's blkdevio blkdeviotune -blkdev's blkid blkio blkiotune blockcommit blockcopy -Blockcopy blockdev blockdevs blockjcopy blockjob -Blockjob blockjobs blockpull blockresize @@ -86,23 +231,20 @@ breakpoint btrfs buf bz -BZ cachetune +calc cartesian ccw -CCW cd cdrom cellno ceph -Ceph cfg cfq cgconfig cgget cgname 
cgroup -Cgroup cgstop chardev chardevs @@ -112,37 +254,29 @@ chipset chmod chnaged chown +chpids chronyc -Chunfu chwen cid cidr -Ckeck clas classname -Cleaup cli clocksource clocksources cloop cmd -CmdError cmdline cmds -CN codec -Codec codeset coldplug -Coldplug -Coldunplug commandline compat compcache condn conf config -Config configs coredump corescount @@ -162,11 +296,11 @@ ctrl currentvcpu curvcpu dac -DAC darget dargs +dasd +dasda datetime -DAX dbus de deduplicate @@ -176,10 +310,10 @@ desc dest desturi dev +dev's devname devno devnode -dev's devs df dhcp @@ -196,12 +330,10 @@ distros dmesg dmg dmi -DMI dmidecode dn dname dns -DNS dnsmaq dnsmasq docstring @@ -215,6 +347,7 @@ domblkstat domblkthreshold domcapabilities domcontrol +domdirtyrate domdisplay domfsfreeze domfsinfo @@ -231,7 +364,6 @@ domjobinfo dommemstat domname dompmsuspend -Dompmwakeup domrename domstate domstats @@ -253,12 +385,8 @@ emulatorpin enospace env eo -EOI errmsg -ESC esx -Esx -ESX ethernet ethtool exe @@ -270,13 +398,13 @@ failover fallocate fc fd +fdasd fdisk fds fdset ffree fialed fibre -Fibre fileio filename filepath @@ -284,14 +412,12 @@ filesize filesystem filesystems filetransfer -Filt filterbinding filterref filterrfer findfs finename firewalld -FIXIT flushbufs fmt freecell @@ -300,7 +426,6 @@ freepages freezed frontend fs -FS fsck fsfreeze fstab @@ -317,23 +442,17 @@ gcc genid getconf getlink -Getlink getrandom getspeed gfxcard -Gib -GiB gid gluster -Gluster glusterfs guestfish -Guestfish guestfs guestinfo guestkallsyms guestmount -Guestname guestvcpus guset gzip @@ -341,7 +460,6 @@ happends hardcode hardcoded hba -HBA hba's hbas hd @@ -359,22 +477,17 @@ hostfile hostname hostnuma hotplug -Hotplug hotpluggable hotplugged -Hotplugged hotplugging hotswap hotunplug -Hotunplug hotunplugged hpt -HPT htb htm htp hugepage -Hugepage hugepages hugetlbfs hwinfo @@ -387,7 +500,6 @@ ibm ibvirtd icmp ide -IDE idmap ie iface @@ -400,9 +512,7 @@ imagelabel img increated indice -Infiniband 
init -Init initctl initramfs initscripts @@ -428,19 +538,15 @@ iothreadpins iothreads iothreadsched iothreadset -IOV iowait iozone ip -IP ipaddress iperf -IPs iptable iptables isa iscsi -Iscsi iscsid isdigit ishotplug @@ -461,13 +567,9 @@ keypresses keyvalue keywards keywrap -KiB kms ksm -KSM -KVER kvm -KVM kwargs lan lchown @@ -478,15 +580,13 @@ libosinfo libs libvird libvirt -Libvirt +libvirt's libvirtd -libvirtdconf libvirtd's -libvirt's +libvirtdconf libvirts libvrtd lifecycle -Lifecycle linux localhost localvm @@ -508,9 +608,7 @@ lstatlist lsusb lszdev luks -Luks lun -LUN lun's lv lvcreate @@ -523,31 +621,25 @@ lvs lvuuid lvxxx lxc -LXC lzop macvlan macvtap -Makesure +makefs managedsave -Managedsave managedsaved maskable -Maskable maxdowntime maximumvcpu maxmem -Maxmem maxvcpu maxvcpus mb -Mbps mbr -MBs mbytes mcast -MCS md mdev +mdevctl mem memballoon memhog @@ -557,25 +649,19 @@ memorytune memspec memtune messege -Metabytes metadata metadatacache -MiB middletotop migratable migrateuri mins -Miryala -Mitterle mkfifo mkfs mkfs'ed mknod mkswap mlock -MLS mmconfig -MMU mnt modprobe monitorxml @@ -590,7 +676,6 @@ msg mtu multi multicast -Multicast multifd multifunction multipath @@ -627,16 +712,13 @@ newdisk newvm newvol nfs -NFS nic nmcli -NMI nodecpumap nodecpustats nodedev -Nodedev -nodedevice nodedev's +nodedevice nodei nodeinfo nodememstats @@ -648,17 +730,14 @@ nogroup nonexist noqueue normalise -Normalise nosharepages npiv nr nss nsswitch ntfs -NUL num numa -Numa numactl numad numanode @@ -668,7 +747,6 @@ nvdimm nvram nwfilter objs -Occupt ok olddisk oldvm @@ -679,14 +757,12 @@ optionstr orinial os osinfo -OSP outrange overcommit overcommits ovf ovirt ovs -OVS ovsbridge pagesize params @@ -697,22 +773,17 @@ partnum partprobe parttype passthrough -Passthrough passwd pci -PCI +pci's pcie pcihole -pci's pem perf perl persistency pfs -PFs -PF's pid -PID pids pingable pkgs @@ -728,28 +799,21 @@ postcopy postfix postprocess poweroff -Poweroff -Poweron powerpc 
powershell ppc -PPC ppid pre -Pre pread preallocates preallocation precopy predicatable -Prepareguest -Prepend prepended priveledge proc propoer proto -Prudhvi ps pseries pts @@ -770,27 +834,18 @@ qed qem qemu qmp -QMP quiesce qxl -QXL qxldod -Radix rases rawio rbd -RBD rdm -Readlink readonly readwrite realtime redhat redirdev -Refall -Reinstall -RemoteFile -Repeate repo requiste rereadpt @@ -798,27 +853,22 @@ resctrl resettable resie resize -Resize resized resizing restared restorecon ret retlist -RFE rhel rhelx rhev rhsrvany rhv -RHV rng -RNG rom rootfs rrunner rsyslog -RTC rtype runnable runtest @@ -832,6 +882,7 @@ sasl sata scenaries schedinfo +schid scp scsi sd @@ -839,7 +890,6 @@ sda sdb sdx seabios -Seabios seclabel sectorsize securetty @@ -850,7 +900,6 @@ sendkey servcie servercert setlink -Setlink setmaxdowntime setmaxmem setmem @@ -869,33 +918,28 @@ smbios smitterl smm smt -SMT snaplist snapname snapshotname socketscount sourse -Sparsify sparsing spasified specifice spicevm spicevmc spinlock -Spinlock spinlocking spinlocks src srcuri sriov -Sriov srv sshd sshfs sshkeys ssid ssl -SSL stacktrace startup startupPolicy @@ -903,16 +947,13 @@ statvfs stderr stdin stdout -Stdout stdout's stdouts -stdouts storagepool storagevol stp -STP str -Str +stressapptest struct subchannel subclasses @@ -920,13 +961,11 @@ subcommand subelement sublement subnet -Subnet suboption subprocess substring subtests sve -SVE svirt swaplabel swtpm @@ -941,7 +980,6 @@ syslog syspath sysprep sysrq -SysRq systemctl systemd systemlink @@ -952,17 +990,13 @@ tartget tb tc tcp -TCP tcpdump's tcsd teardown tempfile testcase -Testcase testcases testdir -TestError -TestFail testxml tftp tgtd @@ -974,16 +1008,12 @@ timedatectl timestamp tis tls -TLS tlsconnection tmp tmpfile tmpfs -TODO topo tpm -Tpm -TPM tseg ttcp tty @@ -991,26 +1021,20 @@ ttyconsole tunap tunnelled tuple -Tuple txt typervisor ubuntu udev udp uefi -Uefi uid umask umount umounted un unbridge -Unbridge -Unclassifed uncompress 
-Uncompress undefine -Undefine undefines undefinesource undefining @@ -1022,22 +1046,17 @@ unixbench unlink unlinked unmap -Unmark unmount unordered unpaused unpluging unprotect -Unprotect -Unregister unresettable untar upadte -Updae uperf uptime uri -URI uris url usb @@ -1047,17 +1066,16 @@ username userserial usr utc -UTC utils uuid uuidgen valiable valut vcpu +vcpu's vcpucount vcpuinfo vcpupin -vcpu's vcpus vcpuset vd @@ -1071,11 +1089,10 @@ vdx vepa verfiy vf +vf's vfat vfio -vf's vfs -VF's vg vgamem vgcreate @@ -1099,7 +1116,6 @@ virsh virt virtadmin virtio -Virtio virtiofs virtiofsd virtlogd @@ -1111,17 +1127,15 @@ virtualization virtualport vlan vm -VM +vm'infomation +vm'name +vm's vmcheck vmchecker vmdk -vm'infomation vmlinuz -vm'name vmname -vm's vms -VMs vmtype vmware vmxml @@ -1153,8 +1167,6 @@ wwns wwpn xattr xen -Xen -XEN xfs xfstest xhci @@ -1167,10 +1179,5 @@ xxxx xxxxx xyz xz -Yalan yalzhang yyyy -Zhang -stressapptest -calc -domdirtyrate