diff --git a/libvirt/tests/cfg/bios/virsh_boot.cfg b/libvirt/tests/cfg/bios/virsh_boot.cfg index 210a8814a2..4c7a1f6857 100644 --- a/libvirt/tests/cfg/bios/virsh_boot.cfg +++ b/libvirt/tests/cfg/bios/virsh_boot.cfg @@ -183,7 +183,7 @@ template = "/usr/share/OVMF/OVMF_VARS.secboot.fd" uefi_device_bus = "sata" uefi_target_dev = "sda" - check_prompt = "error:.*has invalid signature" + check_prompt = ".*has invalid signature" || "'Verification failed: (0x1A) Security Violation'" - by_seabios: boot_type = "seabios" loader = "/usr/share/seabios/bios-256k.bin" diff --git a/libvirt/tests/cfg/daemon/daemon_functional.cfg b/libvirt/tests/cfg/daemon/daemon_functional.cfg index 104ee8cbf0..cbe4aee1a2 100644 --- a/libvirt/tests/cfg/daemon/daemon_functional.cfg +++ b/libvirt/tests/cfg/daemon/daemon_functional.cfg @@ -6,6 +6,29 @@ exit_time_tolerance = 1 pseries: exit_time_tolerance = 2 + variants: + - legacy_daemon: + require_modular_daemon = "no" + variants: + - libvirtd: + daemon_name = "libvirtd" + - modular_daemon: + require_modular_daemon = "yes" + variants: + - virtnetworkd: + daemon_name = "virtnetworkd" + - virtnodedevd: + daemon_name = "virtnodedevd" + - virtsecretd: + daemon_name = "virtsecretd" + - virtstoraged: + daemon_name = "virtstoraged" + - virtinterfaced: + daemon_name = "virtinterfaced" + - virtnwfilterd: + daemon_name = "virtnwfilterd" + - virtqemud: + daemon_name = "virtqemud" variants: - no_opt: - opt_help: @@ -20,11 +43,12 @@ libvirtd_arg = '--timeout 3' expected_exit_time = 3 - opt_timeout_3_live_vm: + only virtqemud, libvirtd libvirtd_arg = '--timeout 3' start_vm = yes - opt_config: - libvirtd_arg = '--config /tmp/virt-test.conf' - expected_config_path = '/tmp/virt-test.conf' + test_config = 'yes' + libvirtd_arg = '--config ' - opt_pid: libvirtd_arg = '--pid-file /tmp/virt-test.pid' expected_pid_path = '/tmp/virt-test.pid' diff --git a/libvirt/tests/cfg/migration/live_migration.cfg b/libvirt/tests/cfg/migration/live_migration.cfg index 
43d0e1e2a6..f94d20ad02 100644 --- a/libvirt/tests/cfg/migration/live_migration.cfg +++ b/libvirt/tests/cfg/migration/live_migration.cfg @@ -36,6 +36,9 @@ - p2p_live_undefinesource: only domjobabort virsh_migrate_options = "--live --p2p --persistent --undefinesource --verbose" + - non_p2p_live_undefinesource: + only domjobabort + virsh_migrate_options = "--live --persistent --undefinesource --verbose" variants: - migrateuri: migrate_speed = 15 @@ -96,7 +99,8 @@ err_msg = 'operation aborted: migration out job: canceled by client' migrate_again_status_error = 'no' vm_state_after_abort = "{'source': 'running', 'target': 'nonexist'}" - migrate_speed = 10 + migrate_speed = 1 + migrate_speed_again = 20 check_local_port = 'yes' return_port = 'yes' action_during_mig = '[{"func": "libvirt_network.check_established", "after_event": "iteration: '1'", "func_param": 'params'}, {"func": "virsh.domjobabort", "after_event": "iteration: '1'", "func_param": "'%s' % params.get('migrate_main_vm')"}]' @@ -208,3 +212,10 @@ migrate_again = 'yes' migrate_again_status_error = 'no' action_during_mig = '[{"func": "virsh.destroy", "after_event": "iteration: '1'", "func_param": "'%s' % params.get('migrate_main_vm')"}]' + - migration_minport_occupied: + check_port = 'yes' + min_port = 49152 + qemu_conf_list = '["migration_port_min", "migration_port_max"]' + qemu_conf_path = '/etc/libvirt/qemu.conf' + migrate_speed = 10 + action_during_mig = '[{"func": "libvirt_network.check_established", "after_event": "iteration: '1'", "func_param": 'params'}]' diff --git a/libvirt/tests/cfg/migration/migrate_service_control.cfg b/libvirt/tests/cfg/migration/migrate_service_control.cfg new file mode 100644 index 0000000000..b06cde1f1e --- /dev/null +++ b/libvirt/tests/cfg/migration/migrate_service_control.cfg @@ -0,0 +1,46 @@ +- virsh.migrate_service_control: + type = migrate_service_control + migration_setup = "yes" + storage_type = 'nfs' + setup_local_nfs = 'yes' + disk_type = "file" + disk_source_protocol = 
"netfs" + mnt_path_name = ${nfs_mount_dir} + # Console output can only be monitored via virsh console output + only_pty = True + take_regular_screendumps = no + # Extra options to pass after + virsh_migrate_extra = "" + # SSH connection time out + ssh_timeout = 60 + # Local URI + virsh_migrate_connect_uri = "qemu:///system" + image_convert = 'no' + variants: + - without_postcopy: + postcopy_options = "" + variants: + - p2p_live: + virsh_migrate_options = "--live --p2p --persistent --verbose" + variants: + - kill_service: + kill_service = 'yes' + vm_state_after_abort = "{'source': 'running', 'target': 'nonexist'}" + migrate_speed = 10 + status_error = 'yes' + action_during_mig = '[{"func": "utils_misc.kill_service", "after_event": "migration-iteration", "before_event": "Suspended Migrated", "func_param": "params"}]' + migrate_again = 'yes' + migrate_again_status_error = 'no' + expected_image_ownership = 'qemu:qemu' + variants: + - kill_libvirtd_on_dst: + service_name = "libvirtd" + service_on_dst = "yes" + err_msg = 'client socket is closed' + - kill_qemu_on_dst: + service_name = "qemu-kvm" + service_on_dst = "yes" + err_msg = 'qemu unexpectedly closed the monitor|domain is no longer running' + - kill_libvirtd_on_src: + service_name = "libvirtd" + err_msg = 'End of file while reading data: Input/output error' diff --git a/libvirt/tests/cfg/migration/migrate_vm.cfg b/libvirt/tests/cfg/migration/migrate_vm.cfg index b4187fcb72..bd0ebc4925 100644 --- a/libvirt/tests/cfg/migration/migrate_vm.cfg +++ b/libvirt/tests/cfg/migration/migrate_vm.cfg @@ -758,7 +758,7 @@ target_image_name = "foo_bar_test.${target_image_format}" new_disk_source = "${target_image_path}/${target_image_name}" # create disk image based on ${nfs_mount_dir}/${image_name} both local and remote host - create_local_disk_backfile_cmd = "qemu-img create ${new_disk_source} -f qcow2 -b ${nfs_mount_dir}/" + create_local_disk_backfile_cmd = "qemu-img create ${new_disk_source} -f qcow2 -F ${target_image_format} 
-b ${nfs_mount_dir}/" create_remote_disk_backfile_cmd = "${create_local_disk_backfile_cmd}" # create disk image in the VM dd_image_count = 100000 diff --git a/libvirt/tests/cfg/migration/migrate_with_various_hostname.cfg b/libvirt/tests/cfg/migration/migrate_with_various_hostname.cfg new file mode 100644 index 0000000000..6080891484 --- /dev/null +++ b/libvirt/tests/cfg/migration/migrate_with_various_hostname.cfg @@ -0,0 +1,47 @@ +- virsh.migrate_with_various_hostname: + type = migrate_with_various_hostname + migration_setup = "yes" + storage_type = 'nfs' + setup_local_nfs = 'yes' + disk_type = "file" + disk_source_protocol = "netfs" + mnt_path_name = ${nfs_mount_dir} + # Console output can only be monitored via virsh console output + only_pty = True + take_regular_screendumps = no + # Extra options to pass after + virsh_migrate_extra = "" + # SSH connection time out + ssh_timeout = 60 + # Local URI + virsh_migrate_connect_uri = "qemu:///system" + image_convert = 'no' + variants: + - without_postcopy: + postcopy_options = "" + variants: + - non_p2p_live: + virsh_migrate_options = "--live --verbose" + - p2p_live: + virsh_migrate_options = "--live --p2p --verbose" + variants: + - set_src_and_dst_hostname: + set_src_and_dst_hostname = 'yes' + status_error = 'yes' + migrate_again = 'yes' + migrate_again_status_error = 'no' + virsh_migrate_extra_mig_again = "--migrateuri tcp://${migrate_dest_host}" + virsh_migrate_src_state = "shut off" + variants: + - short: + src_hostname = "srctest" + dst_hostname = "dsttest" + err_msg = 'Name or service not known' + - fqdn: + src_hostname = "src.test.redhat.com" + dst_hostname = "dst.test.redhat.com" + err_msg = 'Name or service not known' + - localhost: + src_hostname = "localhost.localdomain" + dst_hostname = "localhost.localdomain" + err_msg = 'hostname on destination resolved to localhost, but migration requires an FQDN' diff --git a/libvirt/tests/cfg/remote_access/remote_tls_multiple_certs.cfg 
b/libvirt/tests/cfg/remote_access/remote_tls_multiple_certs.cfg new file mode 100644 index 0000000000..4b57b6acd0 --- /dev/null +++ b/libvirt/tests/cfg/remote_access/remote_tls_multiple_certs.cfg @@ -0,0 +1,15 @@ +- virsh.remote_tls_multiple_certs: + type = remote_tls_multiple_certs + server_ip = ${remote_ip} + server_user = ${remote_user} + server_pwd = ${remote_pwd} + client_ip = ${local_ip} + client_user = root + client_pwd = ${local_pwd} + start_vm = "no" + port = "22" + tls_port = "16514" + variants: + - positive_testing: + err_msg = "The certificate hasn't got a known issuer" + diff --git a/libvirt/tests/cfg/remote_access/remote_with_unix.cfg b/libvirt/tests/cfg/remote_access/remote_with_unix.cfg index 44dc01b51e..b189fdb110 100644 --- a/libvirt/tests/cfg/remote_access/remote_with_unix.cfg +++ b/libvirt/tests/cfg/remote_access/remote_with_unix.cfg @@ -149,5 +149,7 @@ main_vm = "avocado-vt-vm1" status_error = "no" patterns_virsh_cmd = ".*authentication unavailable.*" + auth_pwd = "${local_pwd}" + error_pattern = 'System policy prevents management of local virtualized systems' polkit_pkla = "/etc/polkit-1/localauthority/50-local.d/polkit.pkla" polkit_pkla_cxt = "[Allow ${su_user} libvirt monitor permissions]\nIdentity=unix-user:${su_user}\nAction=org.libvirt.unix.monitor\nResultAny=yes\nResultInactive=yes\nResultActive=yes" diff --git a/libvirt/tests/cfg/virsh_cmd/domain/virsh_cpu_baseline.cfg b/libvirt/tests/cfg/virsh_cmd/domain/virsh_cpu_baseline.cfg index 48803e0818..1eff5b236b 100644 --- a/libvirt/tests/cfg/virsh_cmd/domain/virsh_cpu_baseline.cfg +++ b/libvirt/tests/cfg/virsh_cmd/domain/virsh_cpu_baseline.cfg @@ -24,6 +24,11 @@ vms = "avocado-vt-vm1" main_vm = "avocado-vt-vm1" config_guest = "yes" + - migratable: + only q35 + cpu_baseline_extra = '--migratable' + cpu_baseline_test_feature = "invtsc" + feature_exist = "no" - negative_test: status_error = "yes" variants: diff --git a/libvirt/tests/cfg/virsh_cmd/domain/virsh_domjobinfo.cfg 
b/libvirt/tests/cfg/virsh_cmd/domain/virsh_domjobinfo.cfg index 5f1f491241..e1f178e13c 100644 --- a/libvirt/tests/cfg/virsh_cmd/domain/virsh_domjobinfo.cfg +++ b/libvirt/tests/cfg/virsh_cmd/domain/virsh_domjobinfo.cfg @@ -21,9 +21,14 @@ domjobinfo_action = "dump" variants: - live_dump: - dump_opt="--live" + dump_opt = "--live" - crash_dump: - dump_opt="--crash" + dump_opt = "--crash" + - keep_complete_test: + only running_state + only vm_name + dump_opt = "--live" + keep_complete = "yes" - save_action: domjobinfo_action = "save" - managedsave_action: diff --git a/libvirt/tests/cfg/virsh_cmd/domain/virsh_event.cfg b/libvirt/tests/cfg/virsh_cmd/domain/virsh_event.cfg index a7c07e7d93..ae04540e50 100644 --- a/libvirt/tests/cfg/virsh_cmd/domain/virsh_event.cfg +++ b/libvirt/tests/cfg/virsh_cmd/domain/virsh_event.cfg @@ -46,6 +46,10 @@ panic_model = 'isa' addr_type = 'isa' addr_iobase = '0x505' + - kill_qemu: + only test_events + events_list = "kill_qemu" + signal = 'SIGKILL' - reboot_event: event_name = "reboot" events_list = "reset" diff --git a/libvirt/tests/cfg/virsh_cmd/network/virsh_net_update.cfg b/libvirt/tests/cfg/virsh_cmd/network/virsh_net_update.cfg index 367c50dc13..63318918b0 100644 --- a/libvirt/tests/cfg/virsh_cmd/network/virsh_net_update.cfg +++ b/libvirt/tests/cfg/virsh_cmd/network/virsh_net_update.cfg @@ -369,6 +369,11 @@ - add: only ip_dhcp_range update_command = "add" + - index_nonexist: + error_type = "index-nonexist" + parent_index = 1 + only ip_dhcp_range + update_command = "add" - delete_host_mismatch: only ip_dhcp_host error_type = "host-mismatch" diff --git a/libvirt/tests/cfg/virtual_disks/virtual_disks_ccw_addr.cfg b/libvirt/tests/cfg/virtual_disks/virtual_disks_ccw_addr.cfg new file mode 100755 index 0000000000..ca50219e0d --- /dev/null +++ b/libvirt/tests/cfg/virtual_disks/virtual_disks_ccw_addr.cfg @@ -0,0 +1,43 @@ +- virtual_disks.ccw_addr: + type = virtual_disks_ccw_addr + take_regular_screendumps = "no" + start_vm = "no" + target_bus = 
"virtio" + variants: + - start_vm: + variants: + - negative_test: + status_error = "yes" + define_error = "yes" + error_msg = "cannot use CCW address type for device" + variants: + - attach_no_exist_disk: + only coldplug + target_format = "raw" + type_name = "file" + target_dev = "vdb" + device_type = "disk" + driver_type = 'raw' + backend_device = "disk" + - attach_disk: + target_format = "raw" + virt_disk_device_source = "/var/lib/libvirt/images/test.img" + type_name = "file" + target_dev = "vdb" + device_type = "disk" + driver_type = 'raw' + backend_device = "disk" + addr_attrs = "{'type': 'ccw', 'cssid': '0x0', 'ssid': '0x0', 'devno': '0x0000'}" + - attach_controller: + backend_device = "controller" + addr_attrs = "{'type': 'ccw', 'cssid': '0x0', 'ssid': '0x0', 'devno': '0x0000'}" + - attach_rng: + backend_device = "rng" + backend_dev = "/dev/hwrng" + rng_model = "virtio" + addr_attrs = "{'type': 'ccw', 'cssid': '0x0', 'ssid': '0x0', 'devno': '0x0000'}" + variants: + - hotplug: + virt_device_hotplug = "yes" + - coldplug: + virt_device_hotplug = "no" diff --git a/libvirt/tests/cfg/virtual_disks/virtual_disks_dasd.cfg b/libvirt/tests/cfg/virtual_disks/virtual_disks_dasd.cfg new file mode 100644 index 0000000000..7af6a843cd --- /dev/null +++ b/libvirt/tests/cfg/virtual_disks/virtual_disks_dasd.cfg @@ -0,0 +1,6 @@ +- virtual_disks.dasd: + type = virtual_disks_dasd + only s390-virtio + variants: + - read_native_partition_table: + diff --git a/libvirt/tests/cfg/virtual_interface/domain_lifecycle.cfg b/libvirt/tests/cfg/virtual_interface/domain_lifecycle.cfg new file mode 100644 index 0000000000..634bda04c0 --- /dev/null +++ b/libvirt/tests/cfg/virtual_interface/domain_lifecycle.cfg @@ -0,0 +1,14 @@ +- iface.domain_lifecycle: + type = domain_lifecycle + start_vm = no + + variants dev_type: + - vdpa: + only x86_64 + func_supported_since_libvirt_ver = (7, 3, 0) + func_supported_since_qemu_kvm_ver = (6, 0, 0) + iface_dict = {"source": {'dev':'/dev/vhost-vdpa-0'}} + 
save_error = "yes" + variants test_target: + - simulator: + - mellanox: diff --git a/libvirt/tests/cfg/virtual_interface/interface_hotplug.cfg b/libvirt/tests/cfg/virtual_interface/interface_hotplug.cfg new file mode 100644 index 0000000000..9737882b57 --- /dev/null +++ b/libvirt/tests/cfg/virtual_interface/interface_hotplug.cfg @@ -0,0 +1,14 @@ +- interface_hotplug: + type = interface_hotplug + start_vm = no + repeat_times = 1 + + variants dev_type: + - vdpa: + only x86_64 + func_supported_since_libvirt_ver = (7, 3, 0) + func_supported_since_qemu_kvm_ver = (6, 0, 0) + iface_dict = {"source": {'dev':'/dev/vhost-vdpa-0'}} + variants test_target: + - simulator: + - mellanox: diff --git a/libvirt/tests/cfg/virtual_network/iface_network.cfg b/libvirt/tests/cfg/virtual_network/iface_network.cfg index 25ab47aaf7..0adadbc8eb 100644 --- a/libvirt/tests/cfg/virtual_network/iface_network.cfg +++ b/libvirt/tests/cfg/virtual_network/iface_network.cfg @@ -267,6 +267,13 @@ forward_iface = "eno1 eno2 eno2" net_forward = "{'mode':'bridge'}" define_error = "yes" + - direct_br_vlan: + create_network = "no" + change_iface_option = "yes" + iface_type = "direct" + iface_source = "{'dev':'eno1','mode':'bridge'}" + iface_vlan = "{'trunk': 'no', 'tags': [{'id': '47'}]}" + start_error = "yes" - net_bridge: change_iface_option = "yes" iface_source = "{'network':'nettest'}" diff --git a/libvirt/tests/src/controller/controller_functional.py b/libvirt/tests/src/controller/controller_functional.py index 0970687ec7..3c8b17d1b4 100644 --- a/libvirt/tests/src/controller/controller_functional.py +++ b/libvirt/tests/src/controller/controller_functional.py @@ -799,8 +799,9 @@ def check_multifunction(): if remove_nic: remove_devices(vm_xml, 'interface') # Get the max controller index in current vm xml + the_model = 'pci-root' if 'ppc' in platform.machine() else 'pcie-root-port' if add_contrl_list: - ret_indexes = libvirt_pcicontr.get_max_contr_indexes(vm_xml, 'pci', 'pcie-root-port') + ret_indexes = 
libvirt_pcicontr.get_max_contr_indexes(vm_xml, 'pci', the_model) if ret_indexes and len(ret_indexes) > 0: if auto_bus: new_index = "0x%02x" % (int(ret_indexes[0]) + 1) diff --git a/libvirt/tests/src/daemon/daemon_functional.py b/libvirt/tests/src/daemon/daemon_functional.py index 314c6dceb4..b2be12a43b 100644 --- a/libvirt/tests/src/daemon/daemon_functional.py +++ b/libvirt/tests/src/daemon/daemon_functional.py @@ -6,9 +6,10 @@ from virttest import virsh from virttest import utils_config +from virttest import utils_split_daemons +from virttest import virt_admin from virttest.utils_libvirtd import LibvirtdSession from virttest.utils_libvirtd import Libvirtd -from virttest.libvirt_xml import capability_xml from virttest import libvirt_version @@ -42,7 +43,7 @@ def check_version(params): Check whether the output is libvirtd version. """ expected_version = params.get('expected_version', 'no') == 'yes' - is_version = log[0].startswith('{} (libvirt)'.format(Libvirtd().service_list[0])) + is_version = log[0].startswith('{} (libvirt)'.format(daemon_name)) if expected_version != is_version: test.fail( 'Expected output version is %s, but get output:\n%s' % @@ -76,7 +77,7 @@ def check_pid_file(): with open(pid_path) as pid_file: pid = int(pid_file.readline()) - result = process.run('pgrep %s' % Libvirtd().service_list[0], + result = process.run('pgrep %s' % daemon_name, ignore_status=True, shell=True) expected_pid = int(result.stdout_text.strip().split()[0]) @@ -86,12 +87,20 @@ def check_pid_file(): def check_config_file(): """ - Check whether the config file take effects by checking UUID. + Check whether the config file take effects by checking max_clients. 
""" - cur_uuid = capability_xml.CapabilityXML()['uuid'] - if cur_uuid != check_uuid: - test.fail('Expected host UUID is %s, but got %s' % - (check_uuid, cur_uuid)) + if daemon_name == "libvirtd": + connect_uri = "qemu:///system" + else: + connect_uri = daemon_name + ":///system" + vp = virt_admin.VirtadminPersistent(uri=connect_uri) + result = vp.srv_clients_info(daemon_name, uri=connect_uri, ignore_status=True, debug=True) + output = result.stdout.strip().splitlines() + out_split = [item.split(':') for item in output] + out_dict = dict([[item[0].strip(), item[1].strip()] for item in out_split]) + if int(out_dict["nclients_max"]) != check_max_clients: + test.fail('Expected max_clients is %s, but got %s' % + (check_max_clients, out_dict["nclients_max"])) MAX_TIMEOUT = 10 arg_str = params.get("libvirtd_arg", "") @@ -99,7 +108,11 @@ def check_config_file(): expected_exit_time = float(params.get("expected_exit_time", 'inf')) config_path = params.get('expected_config_path', "") pid_path = params.get('expected_pid_path', "") + daemon_name = params.get('daemon_name', "") + test_config = params.get('test_config', "no") == "yes" + require_modular_daemon = params.get('require_modular_daemon', "no") == "yes" + utils_split_daemons.daemon_mode_check(require_modular_daemon) if expected_exit_time == float('inf'): timeout = MAX_TIMEOUT else: @@ -109,21 +122,21 @@ def check_config_file(): 'there exists living domain') timeout = expected_exit_time + time_tolerance - libvirtd = LibvirtdSession( - logging_handler=_logger, - ) + libvirtd = LibvirtdSession(service_name=daemon_name, logging_handler=_logger) # Setup config file. 
- check_uuid = '13371337-1337-1337-1337-133713371337' - if config_path: - open(config_path, 'a').close() - config = utils_config.LibvirtdConfig(config_path) - config.host_uuid = check_uuid + check_max_clients = int(101) + if test_config: + config = utils_config.get_conf_obj(daemon_name) + logging.debug(config.conf_path) + config_path = config.conf_path + config.max_clients = check_max_clients + arg_str = arg_str + config_path try: check_unix_socket_files() - Libvirtd().stop() + Libvirtd(daemon_name).stop() libvirtd.start(arg_str=arg_str, wait_for_working=False) start = time.time() @@ -150,20 +163,19 @@ def check_config_file(): (expected_exit_time, time_tolerance, wait_time)) not libvirt_version.version_compare(5, 6, 0) and check_unix_socket_files() - if config_path: + if test_config: check_config_file() if pid_path: check_pid_file() finally: libvirtd.exit() - Libvirtd().stop() - Libvirtd("libvirtd.socket").restart() - Libvirtd().start() + Libvirtd(daemon_name).stop() + socket_name = daemon_name + ".socket" + Libvirtd(socket_name).restart() + Libvirtd(daemon_name).start() # Clean up config file - if config_path: + if test_config: config.restore() - if os.path.exists(config_path): - os.remove(config_path) if os.path.exists(pid_path): os.remove(pid_path) diff --git a/libvirt/tests/src/libvirtd_start.py b/libvirt/tests/src/libvirtd_start.py index bf7ee48412..2fa42b1a20 100644 --- a/libvirt/tests/src/libvirtd_start.py +++ b/libvirt/tests/src/libvirtd_start.py @@ -119,6 +119,7 @@ def _check_errors(): errors = [] # Run libvirt session and collect errors in log. 
libvirtd_session = utils_libvirtd.LibvirtdSession( + service_name="virtnetworkd", logging_handler=_error_handler, logging_params=(errors,), logging_pattern=r'[-\d]+ [.:+\d]+ [:\d]+ error :', @@ -143,6 +144,8 @@ def _check_errors(): _check_errors() finally: logging.info('Recovering services status') + #Restart socket service after starting process at foreground + utils_libvirtd.Libvirtd("virtnetworkd.socket").restart() # If service do not exists, then backup status and current status # will all be none and nothing will be done if service_mgr.status('iptables') != backup_iptables_status: diff --git a/libvirt/tests/src/migration/live_migration.py b/libvirt/tests/src/migration/live_migration.py index 2ee22dbb7d..aa23eafadb 100644 --- a/libvirt/tests/src/migration/live_migration.py +++ b/libvirt/tests/src/migration/live_migration.py @@ -143,6 +143,7 @@ def run(test, params, env): stress_package = params.get("stress_package") action_during_mig = params.get("action_during_mig") migrate_speed = params.get("migrate_speed") + migrate_speed_again = params.get("migrate_speed_again") migrate_again = "yes" == params.get("migrate_again", "no") vm_state_after_abort = params.get("vm_state_after_abort") return_port = "yes" == params.get("return_port", "no") @@ -153,10 +154,17 @@ def run(test, params, env): setup_tls = "yes" == params.get("setup_tls", "no") qemu_conf_dest = params.get("qemu_conf_dest", "{}") migrate_tls_force_default = "yes" == params.get("migrate_tls_force_default", "no") - server_params = {'server_ip': params.get("migrate_dest_host"), - 'server_user': params.get("remote_user", "root"), - 'server_pwd': params.get("migrate_dest_pwd")} poweroff_src_vm = "yes" == params.get("poweroff_src_vm", "no") + check_port = "yes" == params.get("check_port", "no") + server_ip = params.get("migrate_dest_host") + server_user = params.get("remote_user", "root") + server_pwd = params.get("migrate_dest_pwd") + server_params = {'server_ip': server_ip, + 'server_user': server_user, + 
'server_pwd': server_pwd} + qemu_conf_list = eval(params.get("qemu_conf_list", "[]")) + qemu_conf_path = params.get("qemu_conf_path") + min_port = params.get("min_port") vm_session = None qemu_conf_remote = None @@ -181,6 +189,13 @@ def run(test, params, env): # Setup migrate_tls_force default value on local remove_key_local = libvirt_config.remove_key_in_conf(value_list, "qemu") + + if check_port: + server_params['file_path'] = qemu_conf_path + remove_key_remote = libvirt_config.remove_key_in_conf(qemu_conf_list, + "qemu", + remote_params=server_params) + # Update only remote qemu conf if qemu_conf_dest: qemu_conf_remote = libvirt_remote.update_remote_file( @@ -198,6 +213,13 @@ def run(test, params, env): else: remote_file_list.append(libvirt_disk.create_remote_disk_by_same_metadata(vm, params)) + if check_port: + # Create a remote runner + runner_on_target = remote_old.RemoteRunner(host=server_ip, + username=server_user, + password=server_pwd) + cmd = "nc -l -p %s &" % min_port + remote_old.run_remote_cmd(cmd, params, runner_on_target, ignore_status=False) if not vm.is_alive(): vm.start() @@ -214,8 +236,8 @@ def run(test, params, env): if stress_package: migration_test.run_stress_in_vm(vm, params) + mode = 'both' if '--postcopy' in postcopy_options else 'precopy' if migrate_speed: - mode = 'both' if '--postcopy' in postcopy_options else 'precopy' migration_test.control_migrate_speed(vm_name, int(migrate_speed), mode) @@ -230,6 +252,10 @@ def run(test, params, env): logging.debug("Migration returns function results:%s", func_returns) if return_port: port_used = get_used_port(func_returns) + if check_port: + port_used = get_used_port(func_returns) + if int(port_used) != int(min_port) + 1: + test.fail("Wrong port for migration.") if vm_state_after_abort: check_vm_state_after_abort(vm_name, vm_state_after_abort, @@ -253,6 +279,11 @@ def run(test, params, env): params, test)) + if migrate_speed_again: + migration_test.control_migrate_speed(vm_name, + 
int(migrate_speed_again), + mode) + migration_base.do_migration(vm, migration_test, None, dest_uri, options, virsh_options, extra, action_during_mig, diff --git a/libvirt/tests/src/migration/migrate_service_control.py b/libvirt/tests/src/migration/migrate_service_control.py new file mode 100644 index 0000000000..ee18c4a32b --- /dev/null +++ b/libvirt/tests/src/migration/migrate_service_control.py @@ -0,0 +1,176 @@ +import logging +import os + +from pwd import getpwuid +from grp import getgrgid + +from virttest import libvirt_vm +from virttest import migration +from virttest import virsh +from virttest import libvirt_version +from virttest import remote +from virttest import utils_libvirtd + +from virttest.libvirt_xml import vm_xml +from virttest.utils_test import libvirt + +from provider.migration import migration_base + + +def check_image_ownership(vm_name, exp_ownership, test): + """ + Check ownership of image + + :param vm_name: vm name + :param exp_ownership: the expected ownership + :param test: test object + """ + sourcelist = vm_xml.VMXML.get_disk_source(vm_name) + disk_source = sourcelist[0].find('source').get('file') + logging.debug("image file: %s" % disk_source) + image_ownership = "%s:%s" % (getpwuid(os.stat(disk_source).st_uid).pw_name, + getgrgid(os.stat(disk_source).st_gid).gr_name) + logging.debug("image ownership: %s" % image_ownership) + if image_ownership != exp_ownership: + test.fail("The ownership {} is not expected, it should be {}." 
+ .format(image_ownership, exp_ownership)) + + +def check_vm_state_after_abort(vm_name, vm_state_after_abort, src_uri, dest_uri, test): + """ + Check the VM state after domjobabort the migration + + :param vm_name: str, vm name + :param vm_state_after_abort: str, like "{'source': 'running', 'target': 'nonexist'}" + source: local host, target: remote host + :param src_uri: uri for source host + :param dest_uri: uri for target host + :param test: test object + """ + state_dict = eval(vm_state_after_abort) + logging.debug("Check guest state should be {} on source host".format(state_dict['source'])) + libvirt.check_vm_state(vm_name, state=state_dict['source'], uri=src_uri) + logging.debug("Check guest persistent on source host") + cmd_res = virsh.domstats(vm_name, '--list-persistent', debug=True, ignore_status=False) + if not cmd_res.stdout_text.count(vm_name): + test.fail("The guest is expected to be persistent on source host, but it isn't") + logging.debug("Check guest state should be {} on target host".format(state_dict['target'])) + if state_dict['target'] == 'nonexist': + if virsh.domain_exists(vm_name, uri=dest_uri): + test.fail("The domain on target host is found, but expected not") + else: + libvirt.check_vm_state(vm_name, state=state_dict['target'], uri=dest_uri) + + +def run(test, params, env): + """ + Run the test + + :param test: test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. 
+ """ + libvirt_version.is_libvirt_feature_supported(params) + + vm_name = params.get("migrate_main_vm") + vm = env.get_vm(vm_name) + vm.verify_alive() + bk_uri = vm.connect_uri + + migration_test = migration.MigrationTest() + migration_test.check_parameters(params) + extra_args = migration_test.update_virsh_migrate_extra_args(params) + + extra = params.get("virsh_migrate_extra") + postcopy_options = params.get("postcopy_options") + if postcopy_options: + extra = "%s %s" % (extra, postcopy_options) + params["virsh_migrate_desturi"] = libvirt_vm.complete_uri( + params.get("migrate_dest_host")) + dest_uri = params.get("virsh_migrate_desturi") + options = params.get("virsh_migrate_options", + "--live --p2p --persistent --verbose") + virsh_options = params.get("virsh_options", "") + action_during_mig = params.get("action_during_mig") + migrate_speed = params.get("migrate_speed") + migrate_again = "yes" == params.get("migrate_again", "no") + vm_state_after_abort = params.get("vm_state_after_abort") + + kill_service = "yes" == params.get("kill_service", "no") + expected_image_ownership = params.get("expected_image_ownership") + service_name = params.get("service_name", "libvirtd") + service_on_dst = "yes" == params.get("service_on_dst", "no") + server_ip = params.get("remote_ip") + server_user = params.get("remote_user", "root") + server_pwd = params.get("remote_pwd") + + # For safety reasons, we'd better back up xmlfile. 
+ new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) + orig_config_xml = new_xml.copy() + + try: + # Update guest disk xml + libvirt.set_vm_disk(vm, params) + + logging.debug("Guest xml after starting:\n%s", + vm_xml.VMXML.new_from_dumpxml(vm_name)) + + vm.wait_for_login().close() + + if kill_service: + check_image_ownership(vm_name, expected_image_ownership, test) + if service_name == "libvirtd": + if service_on_dst: + remote_session = remote.wait_for_login('ssh', server_ip, '22', + server_user, server_pwd, + r"[\#\$]\s*$") + service_name = utils_libvirtd.Libvirtd(session=remote_session).service_name + remote_session.close() + else: + service_name = utils_libvirtd.Libvirtd().service_name + params.update({'service_name': service_name}) + + if migrate_speed: + mode = 'both' if '--postcopy' in postcopy_options else 'precopy' + migration_test.control_migrate_speed(vm_name, + int(migrate_speed), + mode) + + if action_during_mig: + action_during_mig = migration_base.parse_funcs(action_during_mig, + test, params) + + # Execute migration process + migration_base.do_migration(vm, migration_test, None, dest_uri, + options, virsh_options, extra, + action_during_mig, + extra_args) + + func_returns = dict(migration_test.func_ret) + migration_test.func_ret.clear() + logging.debug("Migration returns function results:%s", func_returns) + + if vm_state_after_abort: + check_vm_state_after_abort(vm_name, vm_state_after_abort, + bk_uri, dest_uri, test) + + if kill_service: + check_image_ownership(vm_name, expected_image_ownership, test) + + if migrate_again: + action_during_mig = migration_base.parse_funcs(params.get('action_during_mig_again'), + test, params) + extra_args['status_error'] = params.get("migrate_again_status_error", "no") + migration_base.do_migration(vm, migration_test, None, dest_uri, + options, virsh_options, + extra, action_during_mig, + extra_args) + if int(migration_test.ret.exit_status) == 0: + migration_test.post_migration_check([vm], params, 
uri=dest_uri) + finally: + logging.info("Recover test environment") + vm.connect_uri = bk_uri + # Clean VM on destination and source + migration_test.cleanup_vm(vm, dest_uri) + + orig_config_xml.sync() diff --git a/libvirt/tests/src/migration/migrate_vm.py b/libvirt/tests/src/migration/migrate_vm.py index f61e8c48cc..8e48a339c1 100644 --- a/libvirt/tests/src/migration/migrate_vm.py +++ b/libvirt/tests/src/migration/migrate_vm.py @@ -1189,6 +1189,7 @@ def get_target_hugepage_num(params): test_dict = dict(params) vm_name = test_dict.get("main_vm") vm = env.get_vm(vm_name) + uri = params.get("desuri") start_vm = test_dict.get("start_vm", "no") transport = test_dict.get("transport") plus = test_dict.get("conn_plus", "+") diff --git a/libvirt/tests/src/migration/migrate_with_various_hostname.py b/libvirt/tests/src/migration/migrate_with_various_hostname.py new file mode 100644 index 0000000000..7d6ef26c95 --- /dev/null +++ b/libvirt/tests/src/migration/migrate_with_various_hostname.py @@ -0,0 +1,173 @@ +import logging + +from avocado.utils import process + +from virttest import libvirt_vm +from virttest import migration +from virttest import remote as remote_old +from virttest import libvirt_version +from virttest import utils_libvirtd + +from virttest.libvirt_xml import vm_xml +from virttest.utils_test import libvirt + +from provider.migration import migration_base + + +def get_hostname(test, remote_params=None): + """ + Get hostname for source or dest host + + :param test: test object + :param remote_params: Dict of remote host parameters, which should + include: server_ip, server_user, server_pwd + """ + cmd = "hostname" + if remote_params: + ret = remote_old.run_remote_cmd(cmd, remote_params, ignore_status=False) + else: + ret = process.run(cmd, ignore_status=False, shell=True) + output = ret.stdout_text.strip() + if ret.exit_status: + test.fail("Failed to run '%s': %s" % (cmd, output)) + logging.info("Get hostname: %s" % output) + return output + + +def 
set_hostname(hostname, test, remote_params=None): + """ + Set hostname for source or dest host + + :param hostname: string, hostname + :param test: test object + :param remote_params: Dict of remote host parameters, which should + include: server_ip, server_user, server_pwd + """ + cmd = "hostnamectl set-hostname %s" % hostname + if remote_params: + ret = remote_old.run_remote_cmd(cmd, remote_params, ignore_status=False) + else: + ret = process.run(cmd, ignore_status=False, shell=True) + output = ret.stdout_text.strip() + if ret.exit_status: + test.fail("Failed to run '%s': %s" % (cmd, output)) + logging.info("Set hostname: %s" % hostname) + + +def run(test, params, env): + """ + Run the test + + :param test: test object + :param params: Dictionary with the test parameters + :param env: Dictionary with test environment. + """ + libvirt_version.is_libvirt_feature_supported(params) + + vm_name = params.get("migrate_main_vm") + vm = env.get_vm(vm_name) + vm.verify_alive() + bk_uri = vm.connect_uri + + migration_test = migration.MigrationTest() + migration_test.check_parameters(params) + extra_args = migration_test.update_virsh_migrate_extra_args(params) + + extra = params.get("virsh_migrate_extra") + postcopy_options = params.get("postcopy_options") + if postcopy_options: + extra = "%s %s" % (extra, postcopy_options) + params["virsh_migrate_desturi"] = libvirt_vm.complete_uri( + params.get("migrate_dest_host")) + dest_uri = params.get("virsh_migrate_desturi") + options = params.get("virsh_migrate_options", + "--live --p2p --persistent --verbose") + virsh_options = params.get("virsh_options", "") + migrate_again = "yes" == params.get("migrate_again", "no") + src_state = params.get("virsh_migrate_src_state", "shut off") + set_src_and_dst_hostname = "yes" == params.get("set_src_and_dst_hostname", "no") + src_hostname = params.get("src_hostname") + dst_hostname = params.get("dst_hostname") + server_ip = params.get("remote_ip") + server_user = params.get("remote_user", 
"root") + server_pwd = params.get("remote_pwd") + server_params = {'server_ip': server_ip, + 'server_user': server_user, + 'server_pwd': server_pwd} + + dst_session = None + dst_libvirtd = None + src_libvirtd = None + + # For safety reasons, we'd better back up xmlfile. + new_xml = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name) + orig_config_xml = new_xml.copy() + + try: + # Update guest disk xml + libvirt.set_vm_disk(vm, params) + + if set_src_and_dst_hostname: + old_dst_hostname = get_hostname(test, remote_params=server_params) + old_source_hostname = get_hostname(test) + dst_session = remote_old.wait_for_login('ssh', server_ip, '22', + server_user, server_pwd, + r"[\#\$]\s*$") + dst_libvirtd = utils_libvirtd.Libvirtd(session=dst_session) + src_libvirtd = utils_libvirtd.Libvirtd() + set_hostname(src_hostname, test) + src_libvirtd.restart() + set_hostname(dst_hostname, test, remote_params=server_params) + dst_libvirtd.restart() + + if not vm.is_alive(): + vm.start() + + logging.debug("Guest xml after starting:\n%s", + vm_xml.VMXML.new_from_dumpxml(vm_name)) + + vm.wait_for_login() + + # Execute migration process + migration_base.do_migration(vm, migration_test, None, dest_uri, + options, virsh_options, extra, + None, + extra_args) + + func_returns = dict(migration_test.func_ret) + migration_test.func_ret.clear() + logging.debug("Migration returns function results:%s", func_returns) + + if migrate_again: + if not vm.is_alive(): + vm.start() + vm.wait_for_login() + extra_args['status_error'] = params.get("migrate_again_status_error", "no") + + if params.get("virsh_migrate_extra_mig_again"): + extra = params.get("virsh_migrate_extra_mig_again") + + migration_base.do_migration(vm, migration_test, None, dest_uri, + options, virsh_options, + extra, None, + extra_args) + if int(migration_test.ret.exit_status) == 0: + migration_test.post_migration_check([vm], params, uri=dest_uri) + if not libvirt.check_vm_state(vm_name, state=src_state, uri=bk_uri): + test.fail("Can't 
get the expected vm state '%s'" % src_state) + finally: + logging.info("Recover test environment") + vm.connect_uri = bk_uri + # Clean VM on destination and source + migration_test.cleanup_vm(vm, dest_uri) + + if set_src_and_dst_hostname: + set_hostname(old_dst_hostname, test, remote_params=server_params) + if dst_libvirtd: + dst_libvirtd.restart() + if dst_session: + dst_session.close() + set_hostname(old_source_hostname, test) + if src_libvirtd: + src_libvirtd.restart() + orig_config_xml.sync() diff --git a/libvirt/tests/src/numa/numa_memory_spread.py b/libvirt/tests/src/numa/numa_memory_spread.py index ab863cea30..f3b8af1882 100644 --- a/libvirt/tests/src/numa/numa_memory_spread.py +++ b/libvirt/tests/src/numa/numa_memory_spread.py @@ -1,5 +1,6 @@ import logging import re +import threading from avocado.utils import distro from avocado.utils import process @@ -172,6 +173,21 @@ def check_cgget_output(test, cgget_message): test.fail('{} not found in cgget output'.format(cgget_message)) +def run_memhog(test, session, test_memory, memhog_rt): + """ + Run memhog in guest to consume memory + + :param test: test object + :param session: guest session + :param test_memory: the memory that guest needs to consume + :param memhog_rt: save error message + """ + try: + session.cmd('memhog -r1 {}k'.format(test_memory), timeout=120) + except Exception as err: + memhog_rt["err"] = str(err) + + def run(test, params, env): """ Test Live update the numatune nodeset and memory can spread to other node @@ -181,6 +197,7 @@ def run(test, params, env): vm_name = params.get("main_vm") vm = env.get_vm(vm_name) backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name) + memhog_rt = {} try: # Prepare host @@ -204,11 +221,20 @@ def run(test, params, env): # And get the numastat prior the test total_prior = get_qemu_total_for_nodes() # Start test - result = session.cmd('memhog -r1 {}k'.format(memory_to_eat), - timeout=120) - logging.debug(result) - if vm.is_dead(): - test.fail('The VM crashed 
when memhog was executed.') + memhog_thread = threading.Thread(target=run_memhog, + args=(test, session, + memory_to_eat, memhog_rt)) + memhog_thread.daemon = True + memhog_thread.start() + while True: + if memhog_thread.is_alive(): + if vm.is_dead(): + test.fail("The VM crashed when memhog was executed.") + else: + if memhog_rt: + test.fail("Failed to run memhog:{}". + format(memhog_rt["err"])) + break # Get the numastat after the test total_after = get_qemu_total_for_nodes() limit = int(params.get("limit_mb")) diff --git a/libvirt/tests/src/numa/numa_numanode_cpu_info.py b/libvirt/tests/src/numa/numa_numanode_cpu_info.py index 4c0d013a3b..dc1be68fde 100644 --- a/libvirt/tests/src/numa/numa_numanode_cpu_info.py +++ b/libvirt/tests/src/numa/numa_numanode_cpu_info.py @@ -28,25 +28,30 @@ def update_xml(vm_name, online_nodes, params): vmxml.sync() -def setup_host(online_nodes, pages_list): +def setup_host(online_nodes, pages_list, ori_page_set): """ Setup host for test - update number of hugepages and check :param online_nodes: List of all online nodes with memory available :param pages_list: List of required number of pages for particular nodes + :param ori_page_set: A dict used to save original node page """ index = 0 if len(online_nodes) > 2: for pages in pages_list: + ori_page_set[online_nodes[index]] = process.run( + 'cat /sys/devices/system/node/node{}/hugepages/hugepages-2048kB/nr_hugepages'. + format(online_nodes[index]), shell=True).stdout_text.strip() + logging.debug("ori_page_set is {}".format(ori_page_set)) ret = process.run( 'echo {} > /sys/devices/system/node/node{}/hugepages/hugepages-2048kB/nr_hugepages'. - format(pages, online_nodes[index]), shell=True) if ret.exit_status: raise TestError('Cannot set {} hugepages on node {}'. format(pages, online_nodes[index])) ret = process.run( 'cat /sys/devices/system/node/node{}/hugepages/hugepages-2048kB/nr_hugepages'. 
- format(online_nodes[index])) + format(online_nodes[index]), shell=True) if pages not in ret.stdout_text: raise TestError('Setting {} hugepages on node {} was unsuccessful'. format(pages, online_nodes[index])) @@ -67,7 +72,8 @@ def run(test, params, env): backup_xml = libvirt_xml.VMXML.new_from_dumpxml(vm_name) numa_info = utils_misc.NumaInfo() online_nodes = numa_info.get_online_nodes_withmem() - setup_host(online_nodes, pages_list) + ori_page_set = {} + setup_host(online_nodes, pages_list, ori_page_set) try: if vm.is_alive(): vm.destroy() @@ -80,4 +86,8 @@ def run(test, params, env): except Exception as e: test.error("Unexpected error: {}".format(e)) finally: + for node_index, ori_page in ori_page_set.items(): + process.run( + 'echo {} > /sys/devices/system/node/node{}/hugepages/hugepages-2048kB/nr_hugepages'. + format(ori_page, node_index), shell=True) backup_xml.sync() diff --git a/libvirt/tests/src/nwfilter/nwfilter_binding_dumpxml.py b/libvirt/tests/src/nwfilter/nwfilter_binding_dumpxml.py index a4d9d02f92..0d94294965 100644 --- a/libvirt/tests/src/nwfilter/nwfilter_binding_dumpxml.py +++ b/libvirt/tests/src/nwfilter/nwfilter_binding_dumpxml.py @@ -6,6 +6,7 @@ from virttest import data_dir from virttest.utils_test import libvirt as utlv from virttest.libvirt_xml.devices import interface +from virttest.utils_libvirt import libvirt_pcicontr from avocado.utils import process @@ -67,8 +68,11 @@ def set_env(): set two interface with different network filter and change interface type """ - virsh.attach_interface(vm_name, option) + # Add enough PCI to attach interface + libvirt_pcicontr.reset_pci_num(vm_name) + virsh.attach_interface(vm_name, option, debug=True) vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm_name) + logging.debug("Guest xml is {}".format(vmxml)) devices = vmxml.get_devices('interface') iface_xml = devices[0] iface_xml_2 = devices[1] diff --git a/libvirt/tests/src/passthrough/ap/libvirt_ap_passthrough.py 
b/libvirt/tests/src/passthrough/ap/libvirt_ap_passthrough.py index 0efee5af77..78442f5a92 100644 --- a/libvirt/tests/src/passthrough/ap/libvirt_ap_passthrough.py +++ b/libvirt/tests/src/passthrough/ap/libvirt_ap_passthrough.py @@ -51,6 +51,7 @@ def run(test, params, env): load_vfio_ap() info = CryptoDeviceInfoBuilder.get() + logging.debug("Host lszcrypt got %s", info) if not info.entries or int(info.domains[0].hwtype) < MIN_HWTYPE: test.error("vfio-ap requires at least HWTYPE %s." % MIN_HWTYPE) diff --git a/libvirt/tests/src/remote_access/remote_access.py b/libvirt/tests/src/remote_access/remote_access.py index 01f7449c05..73608d9225 100644 --- a/libvirt/tests/src/remote_access/remote_access.py +++ b/libvirt/tests/src/remote_access/remote_access.py @@ -60,8 +60,15 @@ def remote_access(params, test): fp.close() logging.info("Succeed to connect libvirt daemon.") else: - test.fail("Failed to connect libvirt daemon!!output: {}" - .format(output)) + if error_pattern: + if error_pattern in output: + logging.info("Expected libvirt output!!") + else: + test.fail("Unexpected output: {}, when looking for: {} " + "pattern".format(output, error_pattern)) + else: + test.fail("Failed to connect libvirt daemon!!output: {}" + .format(output)) else: if not ret: if error_pattern: diff --git a/libvirt/tests/src/remote_access/remote_tls_multiple_certs.py b/libvirt/tests/src/remote_access/remote_tls_multiple_certs.py new file mode 100644 index 0000000000..434f4f7041 --- /dev/null +++ b/libvirt/tests/src/remote_access/remote_tls_multiple_certs.py @@ -0,0 +1,499 @@ +import logging +import os + +import aexpect +from aexpect import remote + +from avocado.core import exceptions +from avocado.utils import process + +from virttest import data_dir +from virttest import libvirt_version +from virttest import utils_iptables +from virttest import utils_libvirtd +from virttest import utils_misc +from virttest import utils_split_daemons +from virttest import remote as remote_old +from 
virttest.utils_test import libvirt + + +def get_server_details(params): + """ + Get the server details from the configuration parameters + + :param params: avocado params object + :returns: required server information + """ + server_info = {'ip': params.get('server_ip'), + 'user': params.get('server_user'), + 'pwd': params.get('server_pwd')} + return server_info + + +def get_client_details(params): + """ + Get the client details from the configuration parameters + + :param params: avocado params object + :returns: required client information + """ + client_info = {'ip': params.get('client_ip'), + 'user': params.get('client_user'), + 'pwd': params.get('client_pwd')} + return client_info + + +def prepare_a_certs_dictionary(server_info): + """ + Prepare a dictionary with the required information for info files and + certificates. + + :param server_info: dictionary with the server information + :returns: dictionary with required information + """ + caroot = {'caroot': {'info': ['cn = Libvirt Root CA', 'ca', + 'cert_signing_key'], + 'ca_cert': '', + } + } + cachild1 = {'cachild1': {'info': ['cn = Libvirt Child CA 1', 'ca', + 'cert_signing_key'], + 'ca_cert': 'caroot', + } + } + cachild2 = {'cachild2': {'info': ['cn = Libvirt Child CA 2', 'ca', + 'cert_signing_key'], + 'ca_cert': 'caroot', + } + } + server1 = {'server1': {'info': ['organization = Red Hat', + 'cn = host1.example.com', + 'dns_name = host1.example.com', + 'ip_address = ' + server_info['ip'], + 'tls_www_server', 'encryption_key', + 'signing_key'], + 'ca_cert': 'cachild1', + } + } + server2 = {'server2': {'info': ['organization = Red Hat', + 'cn = host2.example.com', 'tls_www_server', + 'encryption_key', 'signing_key'], + 'ca_cert': 'cachild2', + } + } + client1 = {'client1': {'info': ['country = GB', 'state = London', + 'locality = London', + 'organization = Red Hat', 'cn = client1', + 'tls_www_client', 'encryption_key', + 'signing_key'], + 'ca_cert': 'cachild1', + } + } + client2 = {'client2': {'info': 
['country = GB', 'state = London', + 'locality = London', + 'organization = Red Hat', 'cn = client2', + 'tls_www_client', 'encryption_key', + 'signing_key'], + 'ca_cert': 'cachild2', + } + } + + certs_dict = {**caroot, **cachild1, **cachild2, **server1, **server2, + **client1, **client2} + + return certs_dict + + +def prepare_info_files(certs_dict, dir_path): + """ + Prepare info files based on information provided + + :param certs_dict: dictionary with the required certificates information + :param dir_path: path pointing to required info files destination + :returns: None + """ + for name in certs_dict: + info_file = name+'.info' + with open(os.path.join(dir_path, info_file), 'w') as info_file: + for line in certs_dict[name]['info']: + info_file.write(line+'\n') + + +def generate_keys(certs_dir): + """ + Generate keys based on info files + + :param certs_dir: path pointing to directory with certificates + :returns: None + """ + for name in os.listdir(certs_dir): + try: + basename, extension = name.split('.') + except ValueError: + continue + if extension == 'info': + cert_name = basename + 'key.pem' + cmd = "certtool --generate-privkey --outfile={}".\ + format(os.path.join(certs_dir, cert_name)) + logging.debug('Command to generate keys: {}'.format(cmd)) + process.run(cmd, shell=True) + + +def generate_certificates(certs_dict, certs_dir): + """ + Generate certificates from the information provided + + :param certs_dict: dictionary with the required certificates information + :param certs_dir: path pointing to directory with certificates + :returns: None + """ + cwd = os.getcwd() + os.chdir(certs_dir) + for name in certs_dict: + cmd = 'certtool {generate_keyword} --load-privkey {name}key.pem ' \ + '{cacert} {ca_private_key} --template {name}.info --outfile ' \ + '{name}cert.pem'.\ + format(generate_keyword='--generate-certificate' if certs_dict[name]['ca_cert'] else '--generate-self-signed', + name=name, + cacert=' --load-ca-certificate 
'+certs_dict[name]['ca_cert']+'cert.pem' if certs_dict[name]['ca_cert'] else '', + ca_private_key='--load-ca-privkey '+certs_dict[name]['ca_cert']+'key.pem' if certs_dict[name]['ca_cert'] else '', + ) + logging.debug('Command to generate certificate:\n{}'.format(cmd)) + process.run(cmd, shell=True) + os.chdir(cwd) + + +def concatenate_certificates(certs_dir, *certificates): + """ + Concatenate certificates chain into one CA certificate + + :param certs_dir: path pointing to directory with certificates + :param certificates: multiple certificate names in required order + :returns: None + """ + cwd = os.getcwd() + os.chdir(certs_dir) + cacert_filename = 'cacert.pem' + if not os.path.exists(cacert_filename): + process.run('touch {}'.format(cacert_filename), shell=True) + cert_string = '' + for cert in certificates: + cert_string += cert + ' ' + cmd = "cat {}> {}".format(cert_string, cacert_filename) + process.run(cmd, shell=True) + os.chdir(cwd) + + +def copy_ca_certs_to_hosts(certs_dir, *host_info): + """ + Copy certificates to required destination path + + :param certs_dir: path pointing to directory with certificates + :param host_info: multiple dictionaries with the host information + :returns: path to destination CA certificate + """ + ca_cert_path = os.path.join(certs_dir, 'cacert.pem') + remote_ca_cert_path = '/etc/pki/CA/cacert.pem' + try: + for host in host_info: + remote.copy_files_to(host['ip'], 'scp', host['user'], + host['pwd'], '22', ca_cert_path, + remote_ca_cert_path) + except remote.SCPError as detail: + raise exceptions.TestError(detail) + return remote_ca_cert_path + + +def prepare_certs_and_keys_on_host(session, host, certs_dir, key_name): + """ + Prepare certificates and keys on the host + + :param session: RemoteSession object for host connection + :param host: dictionary with the host information + :param certs_dir: path pointing to directory with certificates + :param key_name: string with a name used for a key and certificate + :returns: tuple 
of paths for key and certificate + """ + libvirt_pki_private_dir = '/etc/pki/libvirt/private' + libvirt_pki_dir = '/etc/pki/libvirt' + cmd = "mkdir -p {}".format(libvirt_pki_private_dir) + status, output = session.cmd_status_output(cmd) + if status: + logging.debug("Making directory for certificates has failed due to: {}".format(output)) + src_key_path = os.path.join(certs_dir, key_name + 'key.pem') + src_cert_path = os.path.join(certs_dir, key_name + 'cert.pem') + dest_key_path = os.path.join(libvirt_pki_private_dir, + key_name[:-1] + 'key.pem') + dest_cert_path = os.path.join(libvirt_pki_dir, + key_name[:-1] + 'cert.pem') + # SCP server cert and server key to server + remote.copy_files_to(host['ip'], 'scp', host['user'], host['pwd'], + '22', src_key_path, dest_key_path) + remote.copy_files_to(host['ip'], 'scp', host['user'], host['pwd'], + '22', src_cert_path, dest_cert_path) + return dest_key_path, dest_cert_path + + +def get_server_syslibvirtd(server_info): + """ + Get the RemoteFile object of the syslibvirtd file + + :param server_info: dictionary with the server information + :returns: RemoteFile object of the syslibvirtd file + """ + syslibvirtd = remote_old.RemoteFile( + address=server_info['ip'], + client='scp', + username=server_info['user'], + password=server_info['pwd'], + port='22', + remote_path='/etc/sysconfig/libvirtd') + return syslibvirtd + + +def get_daemon_configs(): + """ + Get the daemon configs + + :returns: daemon configs file path + """ + if utils_split_daemons.is_modular_daemon(): + daemon_conf = "/etc/libvirt/virtproxyd.conf" + daemon_socket_conf = "/usr/lib/systemd/system/virtproxyd-tls.socket" + else: + daemon_conf = "/etc/libvirt/libvirtd.conf" + daemon_socket_conf = "/usr/lib/systemd/system/libvirtd-tls.socket" + return daemon_conf, daemon_socket_conf + + +def get_server_libvirtdconf(server_info): + """ + Get the RemoteFile object of the libvirtdconf file + + :param server_info: dictionary with the server information + :returns: RemoteFile 
object of the libvirtdconf file + """ + daemon_conf, _daemon_socket_conf = get_daemon_configs() + server_libvirtdconf = remote_old.RemoteFile( + address=server_info['ip'], + client='scp', + username=server_info['user'], + password=server_info['pwd'], + port='22', + remote_path=daemon_conf) + return server_libvirtdconf + + +def restart_libvirtd_on_server(session): + """ + Restart libvirtd service(s) on the remote server to apply changes + + :param session: RemoteSession object for server connection + :returns: None + """ + if libvirt_version.version_compare(5, 6, 0, session): + tls_socket_service = utils_libvirtd.DaemonSocket( + "virtproxyd-tls.socket", session=session) + tls_socket_service.restart() + else: + libvirtd_service = utils_libvirtd.Libvirtd( + session=session) + libvirtd_service.restart() + + +def setup_libvirt_on_server(server_session, server_info): + """ + Setup libvirtd on remote server to allow TLS connection. + + :param server_session: RemoteSession object for server connection + :param server_info: dictionary with the server information + :returns: tuple of the RemoteFile objects with libvirtdconf and syslibvirtd + """ + libvirtdconf = get_server_libvirtdconf(server_info) + syslibvirtd = None + if not libvirt_version.version_compare(5, 6, 0, server_session): + syslibvirtd = get_server_syslibvirtd(server_info) + # edit the /etc/sysconfig/libvirtd to add --listen args in libvirtd + pattern_to_repl = { + r".*LIBVIRTD_ARGS\s*=\s*\"\s*--listen\s*\".*": + "LIBVIRTD_ARGS=\"--listen\"" + } + syslibvirtd.sub_else_add(pattern_to_repl) + # edit the /etc/libvirt/libvirtd.conf to add listen_tls=1 + pattern_to_repl = {r".*listen_tls\s*=\s*.*": "listen_tls=1"} + libvirtdconf.sub_else_add(pattern_to_repl) + + pattern_to_repl = {r".*auth_tls\s*=\s*.*": 'auth_tls="none"'} + libvirtdconf.sub_else_add(pattern_to_repl) + + try: + restart_libvirtd_on_server(server_session) + except (remote.LoginError, aexpect.ShellError) as detail: + raise exceptions.TestError(detail) + 
return libvirtdconf, syslibvirtd + + +def stop_iptables(): + """ + Clear iptables to make sure no rule prevents connection + + :returns: None + """ + cmd = "iptables -F" + process.run(cmd, shell=True) + + +def allow_port_in_fw(server_session): + """ + Allow the libvirt TLS port in the firewall on the remote server + + :param server_session: RemoteSession object for server connection + :returns: None + """ + firewalld_port = '16514' + firewall_cmd = utils_iptables.Firewall_cmd(server_session) + firewall_cmd.add_port(firewalld_port, 'tcp', permanent=True) + + +def connect_to_remote(server_info, err_msg=None): + """ + Try connection to the remote server with TLS + + :param server_info: dictionary with the server information + :param err_msg: expected error messages (if any) + :returns: None + """ + expected_fails = [err_msg] if err_msg else [] + result = process.run('virsh -c qemu+tls://{}/system'. + format(server_info['ip']), shell=True, + ignore_status=True) + libvirt.check_result(result, expected_fails=expected_fails, + check_both_on_error=True) + + +def get_log(server_info): + """ + Tail output appended data as the file /var/log/messages grows + + :param server_info: dictionary with the server information + :returns: the appended data tailed from /var/log/messages + """ + tailed_log_file = os.path.join(data_dir.get_tmp_dir(), 'tail_log') + tail_session = remote.remote_login('ssh', server_info['ip'], '22', + server_info['user'], + server_info['pwd'], + r"[\#\$]\s*$", + log_function=utils_misc.log_line, + log_filename=tailed_log_file) + tail_session.sendline('tail -f /var/log/messages') + return tail_session + + +def run(test, params, env): + """ + Test remote access with TLS connection and multiple CA certificates + """ + config_files = [] + server_files = [] + client_files = [] + ca_cert_file = None + + server_info = get_server_details(params) + server_session = remote.wait_for_login('ssh', server_info['ip'], '22', + server_info['user'], + server_info['pwd'], + 
r"[\#\$]\s*$") + client_info = get_client_details(params) + client_session = remote.wait_for_login('ssh', client_info['ip'], '22', + client_info['user'], + client_info['pwd'], + r"[\#\$]\s*$") + try: + # NOTE: The Test can be divided to multiple parts, however the first + # part - setup is a time consuming and it is therefore better to do it + # once only. + certs_dict = prepare_a_certs_dictionary(server_info) + certs_dir = os.getcwd() + + prepare_info_files(certs_dict, certs_dir) + generate_keys(certs_dir) + generate_certificates(certs_dict, certs_dir) + concatenate_certificates(certs_dir, + 'carootcert.pem', + 'cachild1cert.pem', + 'cachild2cert.pem') + ca_cert_file = copy_ca_certs_to_hosts(certs_dir, + server_info, + client_info) + server_files = prepare_certs_and_keys_on_host(server_session, + server_info, + certs_dir, + 'server1') + config_files = setup_libvirt_on_server(server_session, server_info) + stop_iptables() + allow_port_in_fw(server_session) + restart_libvirtd_on_server(server_session) + client_files = prepare_certs_and_keys_on_host(client_session, + client_info, + certs_dir, + 'client1') + # Connect to server1 hypervisor on client1 + connect_to_remote(server_info) + + # Test with other CA certificates order + for new_order in [ + ['cachild2cert.pem', 'carootcert.pem', 'cachild1cert.pem'], + ['cachild1cert.pem', 'carootcert.pem', 'cachild2cert.pem'], + ]: + concatenate_certificates(certs_dir, *new_order) + copy_ca_certs_to_hosts(certs_dir, server_info, client_info) + restart_libvirtd_on_server(server_session) + connect_to_remote(server_info) + + # Test with missing issuing CA + concatenate_certificates(certs_dir, + 'cachild2cert.pem', + 'carootcert.pem') + # Publish to server only + copy_ca_certs_to_hosts(certs_dir, server_info) + # Start reading the /var/log/messages on server + tail_messages = get_log(server_info) + restart_libvirtd_on_server(server_session) + err_msg = params.get('err_msg') + output = tail_messages.get_output() + 
tail_messages.close() + if err_msg not in output: + test.fail("Unexpected output of the /var/log/messages on remote " + "server: {}".format(output)) + # Fix the CA certificates + concatenate_certificates(certs_dir, + 'cachild2cert.pem', + 'carootcert.pem', + 'cachild1cert.pem') + # Copy to server + copy_ca_certs_to_hosts(certs_dir, server_info) + restart_libvirtd_on_server(server_session) + # Check if the connection can be established again + connect_to_remote(server_info) + # Create an invalid CA cert for client + concatenate_certificates(certs_dir, + 'cachild2cert.pem', + 'carootcert.pem') + # Copy to client + copy_ca_certs_to_hosts(certs_dir, client_info) + connect_to_remote(server_info, err_msg) + except Exception as e: + test.fail('Unexpected failure: {}'.format(e)) + finally: + if config_files: + for config in config_files: + del config + if server_files: + for file_path in server_files: + server_session.cmd_status_output('rm -f {}'.format(file_path)) + if client_files: + for file_path in client_files: + client_session.cmd_status_output('rm -f {}'.format(file_path)) + if ca_cert_file: + server_session.cmd_status_output('rm -f {}'.format(ca_cert_file)) + client_session.cmd_status_output('rm -f {}'.format(ca_cert_file)) diff --git a/libvirt/tests/src/svirt/default_dac_check.py b/libvirt/tests/src/svirt/default_dac_check.py index 873d6995d6..a381299488 100644 --- a/libvirt/tests/src/svirt/default_dac_check.py +++ b/libvirt/tests/src/svirt/default_dac_check.py @@ -5,6 +5,7 @@ from virttest import utils_libvirtd from virttest import utils_misc +from virttest import libvirt_version from virttest.libvirt_xml import vm_xml from virttest.staging import utils_memory from virttest.staging.utils_memory import drop_caches @@ -86,7 +87,10 @@ def check_ownership(filename): result = process.run("ls -ld %s" % filename, shell=True).stdout_text.strip().split(' ') ownership = "%s:%s" % (result[2], result[3]) logging.debug(ownership) - expect_result = "qemu:qemu" + if 
libvirt_version.version_compare(7, 8, 0) and filename == "/var/cache/libvirt/qemu": + expect_result = "root:root" + else: + expect_result = "qemu:qemu" if ownership != expect_result: test.fail("The ownership of %s is %s" % (filename, ownership)) diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_blockcommit.py b/libvirt/tests/src/virsh_cmd/domain/virsh_blockcommit.py index 0f14acffd1..cbe48ecea7 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_blockcommit.py +++ b/libvirt/tests/src/virsh_cmd/domain/virsh_blockcommit.py @@ -144,7 +144,7 @@ def make_disk_snapshot(postfix_n, snapshot_take, is_check_snapshot_tree=False, i snapshot_external_disks.append(disk_external) options += " %s,snapshot=external,file=%s" % (disk, disk_external) - + clean_snap_file(disk_external) if is_check_snapshot_tree: options = options.replace("--no-metadata", "") cmd_result = virsh.snapshot_create_as(vm_name, options, @@ -196,6 +196,33 @@ def check_snapshot_tree(): if is_check_snapshot_tree: check_snapshot_tree() + def check_vm_disk_file(vm): + """ + Check current vm disk source. + + :param vm: The vm to be checked + """ + image_name1, image_format = params.get("image_name", "image"), params.get("image_format", "qcow2") + image_dir = os.path.join(data_dir.get_data_dir(), image_name1) + original_image_path = image_dir + "." + image_format + logging.debug("Source file should be : %s", original_image_path) + + vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name) + disk = vmxml.get_devices('disk')[0] + logging.debug("Current disk info is : %s", disk) + if disk.source.attrs['file'] != original_image_path: + test.error("Please check current vm disk source") + + def clean_snap_file(snap_path): + """ + Clean the existed duplicate snap file. 
+ + :param snap_path: snap file path + """ + if os.path.exists(snap_path): + os.remove(snap_path) + logging.debug("Cleaned snap file before creating :%s" % snap_path) + def get_first_disk_source(): """ Get disk source of first device @@ -389,7 +416,7 @@ def do_blockcommit_pivot_repeatedly(): if len(exsiting_snaps) != 0: test.fail("There are snapshots created for %s already" % vm_name) - + check_vm_disk_file(vm) snapshot_external_disks = [] cmd_session = None # Prepare a blank params to confirm if delete the configure at the end of the test diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_blockcopy.py b/libvirt/tests/src/virsh_cmd/domain/virsh_blockcopy.py index a602d77bdb..ef08049647 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_blockcopy.py +++ b/libvirt/tests/src/virsh_cmd/domain/virsh_blockcopy.py @@ -172,6 +172,9 @@ def finish_job(vm_name, target, timeout): """ job_time = 0 while job_time < timeout: + # Check cmd execute status and report error directly if have + virsh.blockjob(vm_name, target, "--info", debug=True, ignore_status=False) + # As BZ#1359679, blockjob may disappear during the process, # so we need check it all the time if utl.check_blockjob(vm_name, target, 'none', '0'): @@ -443,7 +446,7 @@ def run(test, params, env): extra_dict = {'uri': uri, 'unprivileged_user': unprivileged_user, 'debug': True, 'ignore_status': True, 'timeout': timeout} - libvirtd_utl = utils_libvirtd.Libvirtd() + libvirtd_utl = utils_libvirtd.Libvirtd('virtqemud') libvirtd_log_path = os.path.join(data_dir.get_tmp_dir(), "libvirt_daemons.log") libvirtd_conf_dict = {"log_filter": '"3:json 1:libvirt 1:qemu"', "log_outputs": '"1:file:%s"' % libvirtd_log_path} diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_change_media_matrix.py b/libvirt/tests/src/virsh_cmd/domain/virsh_change_media_matrix.py index a57c6cee60..b13b60c511 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_change_media_matrix.py +++ 
b/libvirt/tests/src/virsh_cmd/domain/virsh_change_media_matrix.py @@ -298,6 +298,8 @@ def check_result(vm_name, disk_source, disk_type, disk_target, wait_for_event = False else: wait_for_event = True + if vm.is_alive(): + vm.wait_for_login().close() ret = virsh.change_media(vm_ref, target_device, all_options, wait_for_event=wait_for_event, event_timeout=40, diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_cpu_baseline.py b/libvirt/tests/src/virsh_cmd/domain/virsh_cpu_baseline.py index aac9437ba1..4c7c0be999 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_cpu_baseline.py +++ b/libvirt/tests/src/virsh_cmd/domain/virsh_cpu_baseline.py @@ -46,12 +46,30 @@ def create_attach_xml(cpu_xmlfile, test_feature): with open(cpu_xmlfile, 'w') as xmlfile: xmlfile.write(content) + def validate_host(to_file, test_feature): + """ + Validate the host meets the test requirement which includes a + certain feature + + :param to_file: the output to be written to + :param test_feature: feature name to be searched + :raises: test.cancel if the host does not include the tested feature + """ + + output = virsh.capabilities(to_file=to_file, + ignore_status=False, + debug=True) + if not check_xml(output, test_feature): + test.cancel("The capabilities do not include feature '%s'. " + "Skip the test" % test_feature) + def check_xml(xml_output, test_feature): """ Check if result output contains tested feature. :param xml_output: virsh cpu-baseline command's result. :param test_feature: Test feature element. + :return: a match object if the feature exists, otherwise None """ feature_name = "" dom = parseString(xml_output) @@ -59,8 +77,7 @@ def check_xml(xml_output, test_feature): for names in feature: feature_name += names.getAttribute("name") dom.unlink() - if not re.search(test_feature, feature_name): - test.fail("Cannot see '%s' feature" % test_feature) + return re.search(test_feature, feature_name) # Get all parameters. 
file_name = params.get("cpu_baseline_cpu_file", "cpu.xml") @@ -69,9 +86,13 @@ def check_xml(xml_output, test_feature): test_feature = params.get("cpu_baseline_test_feature", "acpi") status_error = "yes" == params.get("status_error", "no") cpu_xmlfile = os.path.join(data_dir.get_tmp_dir(), file_name) + exp_feature_exist = "yes" == params.get("feature_exist", "yes") - # Prepare a xml file. - create_attach_xml(cpu_xmlfile, test_feature) + if '--migratable' not in extra: + # Prepare a xml file. + create_attach_xml(cpu_xmlfile, test_feature) + else: + validate_host(cpu_xmlfile, test_feature) if cpu_ref == "file": cpu_ref = cpu_xmlfile @@ -92,7 +113,11 @@ def check_xml(xml_output, test_feature): else: if status != 0: test.fail("Run failed with right command") - check_xml(output, test_feature) + feature_found = check_xml(output, test_feature) + if exp_feature_exist and not feature_found: + test.fail("Cannot see '%s' feature" % test_feature) + if not exp_feature_exist and feature_found: + test.fail("Can see '%s' feature, but not expected" % test_feature) # Use the output to config VM config_guest = "yes" == params.get("config_guest", "no") diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_cpu_stats.py b/libvirt/tests/src/virsh_cmd/domain/virsh_cpu_stats.py index 7e7875debb..5b40196d34 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_cpu_stats.py +++ b/libvirt/tests/src/virsh_cmd/domain/virsh_cpu_stats.py @@ -123,7 +123,7 @@ def run(test, params, env): system_time = int(total_list[7]) # check Total cpu_time >= User + System cpu_time - if user_time + system_time >= total_time: + if user_time + system_time > total_time: test.fail("total cpu_time < user_time + " "system_time") logging.debug("Check total cpu_time %d >= user + system " diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_domjobinfo.py b/libvirt/tests/src/virsh_cmd/domain/virsh_domjobinfo.py index 1fb468b8fe..07e092f971 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_domjobinfo.py +++ 
b/libvirt/tests/src/virsh_cmd/domain/virsh_domjobinfo.py @@ -64,7 +64,7 @@ def cmp_jobinfo(result, info_list, job_type, actions): if out_dict["Job type"].strip() != job_type: test.fail("Expect %s Job type but got %s" % (job_type, out_dict["Job type"].strip())) - if out_dict["Operation"].strip() != actions.capitalize(): + if job_type != "None" and out_dict["Operation"].strip() != actions.capitalize(): test.fail("Expect %s Operation but got %s" % (actions.capitalize(), out_dict["Operation"].strip())) @@ -89,6 +89,7 @@ def cmp_jobinfo(result, info_list, job_type, actions): act_opt = params.get("dump_opt", "") vm_ref = params.get("domjobinfo_vm_ref") status_error = params.get("status_error", "no") + keep_complete = "yes" == params.get("keep_complete", "no") libvirtd = params.get("libvirtd", "on") # Use tmp_pipe to act as target file for job operation in subprocess, # such as vm.dump, vm.save, etc. @@ -183,7 +184,13 @@ def cmp_jobinfo(result, info_list, job_type, actions): except OSError: pass - # Get completed domjobinfo + # Get completed domjobinfo with --keep-completed option, next completed domjobinfo gathering will still get statistics. + if keep_complete: + time.sleep(5) + vm_ref_tmp = "%s --completed --keep-completed" % vm_ref + virsh.domjobinfo(vm_ref_tmp, ignore_status=False, debug=True) + + # Get completed domjobinfo.(Without -keep-completed option, later completed domjobinfo gathering will get None.) if status_error == "no": time.sleep(5) if act_opt != "--live" and vm_ref == domid: @@ -193,6 +200,10 @@ def cmp_jobinfo(result, info_list, job_type, actions): ret_cmplt = virsh.domjobinfo(vm_ref, ignore_status=True, debug=True) status_cmplt = ret_cmplt.exit_status + # Get completed domjobinfo again, get None. + if keep_complete: + ret_cmplt_later = virsh.domjobinfo(vm_ref, ignore_status=True, debug=True) + # Recover the environment. 
if actions == "managedsave": virsh.managedsave_remove(vm_name, ignore_status=True) @@ -225,3 +236,7 @@ def cmp_jobinfo(result, info_list, job_type, actions): info_list[info_list.index("Expected downtime")] = "Total downtime" logging.debug("The expected info_list for completed job is %s", info_list) cmp_jobinfo(ret_cmplt, info_list, "Completed", actions) + # Check output of later "virsh domjobinfo --completed" + if keep_complete: + info_list = ["Job type"] + cmp_jobinfo(ret_cmplt_later, info_list, "None", actions) diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_event.py b/libvirt/tests/src/virsh_cmd/domain/virsh_event.py index 4a2ea6adfc..b2e51aee76 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_event.py +++ b/libvirt/tests/src/virsh_cmd/domain/virsh_event.py @@ -434,16 +434,24 @@ def trigger_events(dom, events_list=[]): options="", key=metadata_key, **virsh_dargs) - expected_events_list.append("'metadata-change' for %s: " - "element http://app.org/") + if not libvirt_version.version_compare(7, 10, 0): + expected_events_list.append("'metadata-change' for %s: " + "element http://app.org/") + else: + expected_events_list.append("'metadata-change' for %s: " + "type element, uri http://app.org/") elif event == "metadata_remove": virsh.metadata(dom.name, metadata_uri, options="--remove", key=metadata_key, **virsh_dargs) - expected_events_list.append("'metadata-change' for %s: " - "element http://app.org/") + if not libvirt_version.version_compare(7, 10, 0): + expected_events_list.append("'metadata-change' for %s: " + "element http://app.org/") + else: + expected_events_list.append("'metadata-change' for %s: " + "type element, uri http://app.org/") elif event == "blockcommit": disk_path = dom.get_blk_devices()['vda']['source'] virsh.snapshot_create_as(dom.name, "s1 --disk-only --no-metadata", **virsh_dargs) @@ -539,6 +547,10 @@ def trigger_events(dom, events_list=[]): ret = virsh.domstate(dom.name, "--reason", **virsh_dargs) if ret.stdout.strip() != "paused 
(I/O error)": test.fail("Domain state should still be paused due to I/O error!") + elif event == "kill_qemu": + os.kill(dom.get_pid(), getattr(signal, signal_name)) + expected_events_list.append("'lifecycle' for %s:" + " Stopped Failed") else: test.error("Unsupported event: %s" % event) # Event may not received immediately diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_migrate_option_mix.py b/libvirt/tests/src/virsh_cmd/domain/virsh_migrate_option_mix.py index 95873ee2bc..ba7e479353 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_migrate_option_mix.py +++ b/libvirt/tests/src/virsh_cmd/domain/virsh_migrate_option_mix.py @@ -48,6 +48,18 @@ def cleanup_vm(vm, vm_name='', uri=''): if isinstance(v, string_types) and v.count("EXAMPLE"): test.cancel("Please set real value for %s" % v) + # Back up vm name for recovery in finally + vm_name_backup = params.get("migrate_main_vm") + # Get a vm object for migration + logging.debug("Get a vm object for migration") + vm = env.get_vm(vm_name_backup) + + # Back up vm xml for recovery in finally + logging.debug("Backup vm xml before migration") + vm_xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) + if not vm_xml_backup: + test.error("Backing up xmlfile failed.") + # Params for virsh migrate options: live_migration = params.get("live_migration") == "yes" offline_migration = params.get("offline_migration") == "yes" @@ -154,19 +166,6 @@ def cleanup_vm(vm, vm_name='', uri=''): transport_port = '16509' obj_migration.migrate_pre_setup(dest_uri, params, ports=transport_port) - # Back up vm name for recovery in finally - vm_name_backup = params.get("migrate_main_vm") - - # Get a vm object for migration - logging.debug("Get a vm object for migration") - vm = env.get_vm(vm_name_backup) - - # Back up vm xml for recovery in finally - logging.debug("Backup vm xml before migration") - vm_xml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) - if not vm_xml_backup: - test.error("Backing up xmlfile failed.") - # 
Prepare shared disk in vm xml for live migration: # Change the source of the first disk of vm to shared disk if live_migration: diff --git a/libvirt/tests/src/virsh_cmd/domain/virsh_qemu_monitor_command.py b/libvirt/tests/src/virsh_cmd/domain/virsh_qemu_monitor_command.py index af4c23a1b0..688aabf34a 100644 --- a/libvirt/tests/src/virsh_cmd/domain/virsh_qemu_monitor_command.py +++ b/libvirt/tests/src/virsh_cmd/domain/virsh_qemu_monitor_command.py @@ -58,7 +58,8 @@ def run(test, params, env): if status_error: if not status: # Return status is 0 with unknown command - if "unknown command:" in output: + # From libvirt-7.9.0, return status is 0 with CommandNotFound + if "unknown command:" in output or "CommandNotFound" in output: logging.debug("Command failed: %s" % output) else: test.fail("Expect fail, but run successfully.") diff --git a/libvirt/tests/src/virsh_cmd/network/virsh_net_update.py b/libvirt/tests/src/virsh_cmd/network/virsh_net_update.py index a2db663a67..cd8b8ea188 100644 --- a/libvirt/tests/src/virsh_cmd/network/virsh_net_update.py +++ b/libvirt/tests/src/virsh_cmd/network/virsh_net_update.py @@ -447,6 +447,11 @@ def section_update(ori_pre, new_pre): without_ip_dhcp == "no"): test_xml.del_element(element="/ip/dhcp", index=section_index) + if error_type == "index-nonexist": + for idx in [3, 2, 1]: + test_xml.del_element(element="/ip", index=idx) + test_xml.del_element(element="/route") + if loop == 0: try: # Define and start network @@ -563,6 +568,10 @@ def section_update(ori_pre, new_pre): # range-mismatch error info err_dic["range-mismatch"] = "couldn't locate a matching dhcp " + \ "range entry in network " + # index-nonexist error info + err_dic["index-nonexist"] = "couldn't update dhcp host entry " + \ + "- no = 0: nextline = rule[index + 1] - s = nextline.replace("QEMU", "secret").replace( + s = re.sub("QEMU|storage", "secret", nextline).replace( "pool_name", "secret_uuid").replace( "virt-dir-pool", "%s" % luks_secret_uuid) rule[index + 1] = s diff 
def create_ccw_addr_controller(params):
    """
    Build one controller device XML that carries a ccw address.

    :param params: dict of test parameters; "addr_attrs" must hold the
                   ccw address attributes as a dict literal string
    :return: controller xml object with the ccw address applied
    """
    # The test always exercises a scsi controller at index 10.
    controller_xml = libvirt.create_controller_xml(
        {'controller_type': 'scsi', 'controller_index': '10'})

    # "addr_attrs" comes from the trusted test configuration, hence eval().
    address_attrs = eval(params.get("addr_attrs"))
    controller_xml.address = controller_xml.new_controller_address(
        attrs=address_attrs)

    logging.debug("create_ccw_addr_controller xml: %s", controller_xml)
    return controller_xml
def create_ccw_addr_rng(params):
    """
    Create one rng device whose address is of type ccw.

    :param params: dict wrapped with params; "addr_attrs" holds the
                   ccw address attributes as a dict literal string
    :return: rng device xml object
    """
    rng = libvirt.create_rng_xml(params)
    addr_dict = eval(params.get("addr_attrs"))
    rng.address = rng.new_rng_address(**{"attrs": addr_dict})
    logging.debug("create_ccw_addr_rng xml: %s", rng)
    return rng


def create_ccw_addr_disk(params):
    """
    Create one disk whose address is of type ccw.

    :param params: dict wrapped with params describing the disk
    :return: disk device xml object
    """
    type_name = params.get("type_name")
    disk_device = params.get("device_type")
    device_target = params.get("target_dev")
    device_bus = params.get("target_bus")
    device_format = params.get("target_format")
    source_file_path = params.get("virt_disk_device_source")
    disk_src_dict = {"attrs": {"file": source_file_path}}
    addr_str = params.get("addr_attrs")

    if source_file_path:
        # Back the disk with a 1G local image in the requested format.
        libvirt.create_local_disk("file", source_file_path, 1, device_format)
    ccw_addr_disk = libvirt_disk.create_primitive_disk_xml(
        type_name, disk_device,
        device_target, device_bus,
        device_format, disk_src_dict, None)
    if addr_str:
        addr_dict = eval(addr_str)
        ccw_addr_disk.address = ccw_addr_disk.new_disk_address(
            **{"attrs": addr_dict})
    logging.debug("create_ccw_addr_disk xml: %s", ccw_addr_disk)
    return ccw_addr_disk


def check_libvirtd_process_id(ori_pid_libvirtd, test):
    """
    Check that the libvirtd process id did not change.

    :param ori_pid_libvirtd: original libvirtd process id, or None when it
        was never captured (modular-daemon hosts)
    :param test: test assert object
    """
    # Only meaningful on monolithic-daemon hosts; modular daemons have no
    # single libvirtd pid to compare.
    if not utils_split_daemons.is_modular_daemon():
        aft_pid_libvirtd = process.getoutput("pidof libvirtd")
        if not utils_libvirtd.libvirtd_is_running() or ori_pid_libvirtd != aft_pid_libvirtd:
            test.fail("Libvirtd crash after attaching ccw addr devices")


def run(test, params, env):
    """
    Test attach device with ccw address option.

    1.Prepare test environment, destroy or suspend a VM.
    2.Prepare test xml for different devices.
    3.Perform test operation.
    4.Recover test environment.
    5.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    virsh_dargs = {'debug': True, 'ignore_status': True}

    # Disk specific attributes.
    image_path = params.get("virt_disk_device_source",
                            "/var/lib/libvirt/images/test.img")
    backend_device = params.get("backend_device", "disk")
    logging.debug("backend device: %s", backend_device)

    hotplug = "yes" == params.get("virt_device_hotplug")
    status_error = "yes" == params.get("status_error")
    define_error = "yes" == params.get("define_error", "no")
    error_msg = params.get("error_msg", "cannot use CCW address type for device")
    expected_fails_msg = [error_msg]

    device_obj = None
    # Fix: initialize so the except handlers below cannot hit a NameError
    # on modular-daemon hosts, where the pid is never captured.
    ori_pid_libvirtd = None

    # Back up xml file.
    vmxml_backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm_name)
    try:
        vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name)
        if backend_device == "disk":
            device_obj = create_ccw_addr_disk(params)
        elif backend_device == "rng":
            device_obj = create_ccw_addr_rng(params)
        elif backend_device == "controller":
            device_obj = create_ccw_addr_controller(params)
        # Check libvirtd should not crash during the process.
        if not utils_split_daemons.is_modular_daemon():
            ori_pid_libvirtd = process.getoutput("pidof libvirtd")
        if not hotplug:
            # Cold plug: put the device into the persistent XML before boot.
            vmxml.add_device(device_obj)
            vmxml.sync()
        vm.start()
        vm.wait_for_login().close()
        if status_error:
            if hotplug:
                logging.info("attaching devices, expecting error...")
                result = virsh.attach_device(vm_name, device_obj.xml, debug=True)
                libvirt.check_result(result, expected_fails=expected_fails_msg)
            else:
                test.fail("VM started unexpectedly.")
    except virt_vm.VMStartError as e:
        if status_error:
            if hotplug:
                # Fix: the message was passed as two positional arguments
                # ("...", str(e)), which is not a valid test.fail() call.
                test.fail("In hotplug scenario, VM should start "
                          "successfully but not. Error: %s" % str(e))
            else:
                logging.debug("VM failed to start as expected. Error: %s",
                              str(e))
        else:
            test.fail("VM failed to start. Error: %s" % str(e))
        # Runs only when no test.fail() above raised (expected-failure path).
        check_libvirtd_process_id(ori_pid_libvirtd, test)
    except xcepts.LibvirtXMLError as xml_error:
        if not define_error:
            test.fail("Failed to define VM:\n%s" % xml_error)
        else:
            logging.info("As expected, failed to define VM")
        check_libvirtd_process_id(ori_pid_libvirtd, test)
    except Exception as ex:
        test.fail("unexpected exception happen: %s" % str(ex))
        # NOTE(review): unreachable - test.fail() raises; kept to mirror
        # the original control flow.
        check_libvirtd_process_id(ori_pid_libvirtd, test)
    finally:
        # Recover VM.
        if vm.is_alive():
            vm.destroy(gracefully=False)
        logging.info("Restoring vm...")
        vmxml_backup.sync()
        # Clean up images.
        for file_path in [image_path]:
            if os.path.exists(file_path):
                os.remove(file_path)
def enable_disk(disk_id):
    """
    Enable the disk so it can be used.

    :param disk_id: disk id cssid.ssid.devno, e.g. 0.0.5000
    :raises: TestError if the disk can't be enabled
    """
    cmd = "chzdev -e %s" % disk_id
    err, out = cmd_status_output(cmd, shell=True)
    if err:
        raise TestError("Couldn't enable dasd '%s'. %s" % (disk_id, out))


def disable_disk(disk_id):
    """
    Disable the disk so it is released again.

    Fix: the docstring previously said "Enables" (copy/paste error).

    :param disk_id: disk id cssid.ssid.devno, e.g. 0.0.5000
    :raises: TestError if the disk can't be disabled
    """
    cmd = "chzdev -d %s" % disk_id
    err, out = cmd_status_output(cmd, shell=True)
    if err:
        raise TestError("Couldn't disable dasd '%s'. %s" % (disk_id, out))


def get_device_path(disk_id):
    """
    Get the device node path for the DASD disk.

    :param disk_id: disk id cssid.ssid.devno, e.g. 0.0.5000
    :return: absolute device path, e.g. '/dev/dasda'
    :raises: TestError if the lszdev output can't be parsed
    """
    cmd = "lszdev %s" % disk_id
    err, out = cmd_status_output(cmd, shell=True)
    if err:
        raise TestError("Couldn't get device info. %s" % out)
    # Expected output looks like:
    #   TYPE       ID        ON   PERS  NAMES
    #   dasd-eckd  0.0.5000  yes  yes   dasda
    try:
        second_line = out.split('\n')[1]
        name = re.split(r"\s+", second_line)[-1]
        return "/dev/" + name
    except IndexError as details:
        # Fix: the original bare 'except:' re-raised with locals that may
        # be unbound when the parse failed, turning the intended TestError
        # into a NameError.
        raise TestError("Couldn't create device path from '%s' (%s)"
                        % (out, details))
def check_dasd_partition_table(session, device_target):
    """
    Verify the DASD partition table can be read with 'fdasd'.

    :param session: guest session; the command runs on the host if None
    :param device_target: expected target device name, e.g. 'vdb'
    :raises: TestError when the VTOC cannot be read
    """
    err, out = cmd_status_output("fdasd -p /dev/%s" % device_target,
                                 shell=True, session=session)
    if err or not re.findall("reading vtoc.*ok", out):
        raise TestError("Couldn't get partition table. %s" % out)
    logging.debug("Confirmed partition table was read correctly:")
    logging.debug(out)


def run(test, params, env):
    """
    Confirm native 'dasd' partitions can be read
    when attached via 'virtio-blk'
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)

    backup_xml = VMXML.new_from_inactive_dumpxml(vm_name).copy()

    try:
        dasd_path = get_partitioned_dasd_path()
        attach_disk(vm_name, TARGET, dasd_path)

        guest_session = vm.wait_for_login()
        check_dasd_partition_table(guest_session, TARGET)
    finally:
        # sync will release the attached disk, a precondition for disablement
        backup_xml.sync()
        global TEST_DASD_ID
        if TEST_DASD_ID:
            disable_disk(TEST_DASD_ID)
+++ b/libvirt/tests/src/virtual_interface/domain_lifecycle.py @@ -0,0 +1,140 @@ +import logging +import os + +from virttest import data_dir +from virttest import libvirt_version +from virttest import utils_misc +from virttest import utils_vdpa +from virttest import virsh +from virttest.libvirt_xml import vm_xml +from virttest.staging import service +from virttest.utils_libvirt import libvirt_vmxml +from virttest.utils_test import libvirt + +from provider.interface import interface_base +from provider.interface import check_points + +VIRSH_ARGS = {'debug': True, 'ignore_status': False} + + +def run(test, params, env): + """ + Test domain lifecycle + """ + + def setup_default(): + """ + Default setup + """ + logging.debug("Remove VM's interface devices.") + libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface') + + def teardown_default(): + """ + Default cleanup + """ + pass + + def setup_vdpa(): + """ + Setup vDPA environment + """ + setup_default() + test_env_obj = None + if test_target == "simulator": + test_env_obj = utils_vdpa.VDPASimulatorTest() + else: + pf_pci = utils_vdpa.get_vdpa_pci() + test_env_obj = utils_vdpa.VDPAOvsTest(pf_pci) + test_env_obj.setup() + return test_env_obj + + def teardown_vdpa(): + """ + Cleanup vDPA environment + """ + if test_target != "simulator": + service.Factory.create_service("NetworkManager").restart() + if test_obj: + test_obj.cleanup() + + def run_test(dev_type, params, test_obj=None): + """ + Test domain lifecycle + + 1) Start the vm and check network + 2) Destroy and start the VM, and check network + 3) Save and restore, and check network + 4) Suspend and resume, and check network + 5) Reboot the VM and check the network + """ + # Setup Iface device + vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) + iface_dict = eval(params.get('iface_dict', '{}')) + iface_dev = interface_base.create_iface(dev_type, iface_dict) + libvirt.add_vm_device(vmxml, iface_dev) + + logging.info("Start a VM with a '%s' type interface.", dev_type) + 
vm.start() + vm.wait_for_serial_login(timeout=240).close() + check_points.check_network_accessibility(vm, test_obj=test_obj, **params) + + logging.info("Destroy and start the VM.") + virsh.destroy(vm.name, **VIRSH_ARGS) + virsh.start(vm.name, **VIRSH_ARGS) + check_points.check_network_accessibility( + vm, test_obj=test_obj, config_vdpa=True, **params) + + logging.info("Save the VM.") + save_error = "yes" == params.get("save_error", "no") + save_path = os.path.join(data_dir.get_tmp_dir(), vm.name + '.save') + res = virsh.save(vm.name, 'sss', debug=True) + libvirt.check_exit_status(res, expect_error=save_error) + if not save_error: + logging.info("Restore vm.") + virsh.restore(save_path, **VIRSH_ARGS) + check_points.check_network_accessibility( + vm, test_obj=test_obj, config_vdpa=False, **params) + + logging.info("Suspend and resume the vm.") + virsh.suspend(vm.name, **VIRSH_ARGS) + if not libvirt.check_vm_state(vm_name, "paused"): + test.fail("VM should be paused!") + virsh.resume(vm.name, **VIRSH_ARGS) + if not libvirt.check_vm_state(vm_name, "running"): + test.fail("VM should be running!") + check_points.check_network_accessibility( + vm, test_obj=test_obj, config_vdpa=False, **params) + + logging.debug("Reboot VM and check network.") + virsh.reboot(vm.name, **VIRSH_ARGS) + check_points.check_network_accessibility( + vm, test_obj=test_obj, config_vdpa=False, **params) + + libvirt_version.is_libvirt_feature_supported(params) + utils_misc.is_qemu_function_supported(params) + + # Variable assignment + test_target = params.get('test_target', '') + dev_type = params.get('dev_type', '') + + vm_name = params.get('main_vm') + vm = env.get_vm(vm_name) + + vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) + backup_vmxml = vmxml.copy() + + setup_test = eval("setup_%s" % dev_type) if "setup_%s" % dev_type in \ + locals() else setup_default + teardown_test = eval("teardown_%s" % dev_type) if "teardown_%s" % \ + dev_type in locals() else teardown_default + + test_obj = None + 
try: + # Execute test + test_obj = setup_test() + run_test(dev_type, params, test_obj=test_obj) + + finally: + backup_vmxml.sync() + teardown_test() diff --git a/libvirt/tests/src/virtual_interface/interface_hotplug.py b/libvirt/tests/src/virtual_interface/interface_hotplug.py new file mode 100644 index 0000000000..4dc6f27731 --- /dev/null +++ b/libvirt/tests/src/virtual_interface/interface_hotplug.py @@ -0,0 +1,104 @@ +import logging + +from virttest import libvirt_version +from virttest import utils_misc +from virttest import utils_vdpa +from virttest.libvirt_xml import vm_xml +from virttest.staging import service +from virttest.utils_libvirt import libvirt_vmxml + +from provider.interface import interface_base +from provider.interface import vdpa_base + + +def run(test, params, env): + """ + Test Hotplug/unplug interface device(s) + """ + + def setup_default(): + """ + Default setup + """ + logging.debug("Remove VM's interface devices.") + libvirt_vmxml.remove_vm_devices_by_type(vm, 'interface') + + def teardown_default(): + """ + Default cleanup + """ + pass + + def setup_vdpa(): + """ + Setup vDPA environment + """ + setup_default() + test_env_obj = None + if test_target == "simulator": + test_env_obj = utils_vdpa.VDPASimulatorTest() + else: + pf_pci = utils_vdpa.get_vdpa_pci() + test_env_obj = utils_vdpa.VDPAOvsTest(pf_pci) + test_env_obj.setup() + return test_env_obj + + def teardown_vdpa(): + """ + Cleanup vDPA environment + """ + if test_target != "simulator": + service.Factory.create_service("NetworkManager").restart() + if test_obj: + test_obj.cleanup() + + def test_vdpa(): + """ + Hotplug/unplug vDPA type interface + + 1) Start the vm, hotplug the interface + 2) Login to the vm and check the network function + 3) Hot-unplug the interface + """ + vm.start() + vm_session = vm.wait_for_serial_login(timeout=240) + + br_name = None + if test_target == "mellanox": + br_name = test_obj.br_name + for _i in range(eval(params.get('repeat_times', '1'))): + 
interface_base.attach_iface_device(vm_name, dev_type, params) + vdpa_base.check_vdpa_conn(vm_session, test_target, br_name) + interface_base.detach_iface_device(vm_name, dev_type) + + libvirt_version.is_libvirt_feature_supported(params) + supported_qemu_ver = eval(params.get('func_supported_since_qemu_kvm_ver', '()')) + if supported_qemu_ver: + if not utils_misc.compare_qemu_version(*supported_qemu_ver, False): + test.cancel("Current qemu version doesn't support this test!") + + # Variable assignment + test_target = params.get('test_target', '') + dev_type = params.get('dev_type', '') + + vm_name = params.get('main_vm') + vm = env.get_vm(vm_name) + + vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) + backup_vmxml = vmxml.copy() + + run_test = eval("test_%s" % dev_type) + setup_test = eval("setup_%s" % dev_type) if "setup_%s" % dev_type in \ + locals() else setup_default + teardown_test = eval("teardown_%s" % dev_type) if "teardown_%s" % \ + dev_type in locals() else teardown_default + + test_obj = None + try: + # Execute test + test_obj = setup_test() + run_test() + + finally: + backup_vmxml.sync() + teardown_test() diff --git a/libvirt/tests/src/virtual_network/iface_network.py b/libvirt/tests/src/virtual_network/iface_network.py index c6a9f71560..f960533e66 100644 --- a/libvirt/tests/src/virtual_network/iface_network.py +++ b/libvirt/tests/src/virtual_network/iface_network.py @@ -129,6 +129,8 @@ def modify_iface_xml(sync=True): source['dev'] = net_ifs[0] del iface.source iface.source = source + if iface_vlan: + iface.vlan = iface.new_vlan(**iface_vlan) if iface_model: iface.model = get_iface_model(iface_model, host_arch) if iface_rom: @@ -617,6 +619,7 @@ def dig_test(session): iface_boot = params.get("iface_boot") iface_model = params.get("iface_model") iface_driver = params.get("iface_driver") + iface_vlan = eval(params.get("iface_vlan", "None")) multiple_guests = params.get("multiple_guests") create_network = "yes" == params.get("create_network", "no") 
def check_network_accessibility(vm, **kwargs):
    """
    Check VM's network accessibility

    :param vm: VM object
    """
    # A fresh serial session is the default; callers may opt out via
    # recreate_vm_session="no" and supply vm.session instead.
    if kwargs.get("recreate_vm_session", "yes") == "yes":
        logging.debug("Recreating vm session...")
        vm.cleanup_serial_console()
        vm.create_serial_console()
        vm_session = vm.wait_for_serial_login()
    else:
        vm_session = vm.session

    if kwargs.get("dev_type") != "vdpa":
        return

    test_target = kwargs.get("test_target")
    br_name, config_vdpa = None, True
    if test_target == "mellanox":
        if not kwargs.get("test_obj"):
            raise exceptions.TestError("test_obj must be assigned!")
        br_name = kwargs.get("test_obj").br_name
        config_vdpa = kwargs.get("config_vdpa", True)
    vdpa_base.check_vdpa_conn(
        vm_session, test_target, br_name, config_vdpa=config_vdpa)
def get_vm_iface(vm_session):
    """
    Get VM's 1st interface

    :param vm_session: An session to VM
    :return: name of VM's first interface
    :raises: TestFail if no interface is found
    """
    p_iface, _v_ifc = utils_net.get_remote_host_net_ifs(vm_session)
    # Fix: plain emptiness check + first-element access instead of the
    # odd '[:1:]' slice followed by an index.
    if not p_iface:
        raise exceptions.TestFail("Failed to get vm's iface!")
    return p_iface[0]


def attach_iface_device(vm_name, dev_type, params):
    """
    Attach an interface to VM

    :param vm_name: VM's name
    :param dev_type: Interface device type
    :param params: Dictionary with the test parameters
    """
    iface_dict = eval(params.get('iface_dict', '{}'))
    status_error = "yes" == params.get('status_error', 'no')

    iface = create_iface(dev_type, iface_dict)
    res = virsh.attach_device(vm_name, iface.xml, debug=True)
    libvirt.check_exit_status(res, status_error)
    libvirt_vmxml.check_guest_xml(vm_name, dev_type)
    # FIXME: Sleep for 20 secs to make iface work properly
    time.sleep(20)


def detach_iface_device(vm_name, dev_type):
    """
    Detach an interface from VM

    :param vm_name: VM's name
    :param dev_type: Interface device type
    """
    # Fix: dropped the dead 'iface = interface.Interface(dev_type)'
    # assignment that was immediately overwritten by the dumpxml lookup.
    iface = vm_xml.VMXML.new_from_dumpxml(vm_name).devices.by_device_tag(
        "interface")[0]
    virsh.detach_device(vm_name, iface.xml, wait_for_event=True,
                        debug=True, ignore_status=False)
    libvirt_vmxml.check_guest_xml(vm_name, dev_type, status_error=True)
br_ip_addr='100.100.100.100', cidr='24'): + """ + Config vdpa connection + + :param vm_session: An session to VM + :param vm_iface: VM's interface + :param br_name: Bridge name + :param br_ip_addr: IP address of the bridge + :param cidr: CIDR + """ + vm_ip = re.sub('\d+$', '60', br_ip_addr) + service.Factory.create_service("NetworkManager").stop() + + logging.debug("Config static ip %s for vm.", vm_ip) + cmd = ("nmcli con del {0}; nmcli con add type ethernet ifname {0} " + "con-name {0} ipv4.method manual ipv4.address {1}/{2}" + .format(vm_iface, vm_ip, cidr)) + vm_session.cmd(cmd) + logging.debug("Set ip address of the bridge.") + cmd = ("ip addr del {0}/{1} dev {2}; sleep 5; ip addr add {0}/{1} dev {2};" + "sleep 5;ip link set {2} up".format(br_ip_addr, cidr, br_name)) + process.run(cmd, shell=True) + + +def check_vdpa_network(vm_session, vm_iface, br_name, + ping_dest="100.100.100.100", + config_vdpa=True): + """ + Check vdpa network connection + + :param vm_session: An session to VM + :param vm_iface: VM's interface + :param br_name: Bridge name + :param ping_dest: The ip address to ping + :config_vdpa: Whether to config vDPA connection + """ + if config_vdpa: + config_vdpa_conn(vm_session, vm_iface, br_name) + + if not utils_misc.wait_for(lambda: not utils_test.ping( + ping_dest, count=3, timeout=5, output_func=logging.debug, + session=vm_session)[0], first=5, timeout=30): + raise exceptions.TestFail("Failed to ping %s." 
% ping_dest) + + +def check_rx_tx_packages(vm_session, vm_iface): + """ + Check rx and tx package + + :param vm_session: An session to VM + :param vm_iface: VM's interface + """ + cmd = "ip -s -json link show %s" % vm_iface + status, stdout = vm_session.cmd_status_output(cmd) + if status or not stdout: + raise exceptions.TestFail("Failed to run cmd - {}, status - {}, " + "output - {}.".format(cmd, status, stdout)) + ip_info = eval(stdout.strip()) + logging.debug("VM iface's info: %s.", ip_info) + + tx_info = ip_info[0]['stats64']['tx']['packets'] + rx_info = ip_info[0]['stats64']['rx']['packets'] + if rx_info != tx_info: + raise exceptions.TestFail("The value of rx and tx should be same.") + + +def check_vdpa_conn(vm_session, test_target, br_name=None, config_vdpa=True): + """ + Check vDPA connection + + :param vm_session: An session to VM + :param test_target: Test target env, eg, "mellanox" or "simulator" + :param br_name: Bridge name + :config_vdpa: Whether to config vDPA connection + """ + vm_iface = interface_base.get_vm_iface(vm_session) + if test_target == "mellanox": + check_vdpa_network(vm_session, vm_iface, br_name, + config_vdpa=config_vdpa) + elif test_target == "simulator": + check_rx_tx_packages(vm_session, vm_iface) diff --git a/provider/migration/migration_base.py b/provider/migration/migration_base.py index 8e873379f5..8d6a5afd12 100644 --- a/provider/migration/migration_base.py +++ b/provider/migration/migration_base.py @@ -4,6 +4,7 @@ import signal # pylint: disable=W0611 from virttest import virsh # pylint: disable=W0611 +from virttest import utils_misc # pylint: disable=W0611 from virttest.utils_conn import TLSConnection from virttest.utils_libvirt import libvirt_network # pylint: disable=W0611 diff --git a/provider/v2v_vmcheck_helper.py b/provider/v2v_vmcheck_helper.py index bd753d81e3..9c00287bc5 100644 --- a/provider/v2v_vmcheck_helper.py +++ b/provider/v2v_vmcheck_helper.py @@ -316,8 +316,8 @@ def _when_target_ovirt(): # Video model will 
change to QXL for Windows2008r2 and windows7 if self.target == 'libvirt': video_model = _when_target_libvirt(has_qxldod) - if not has_virtio_win: - video_model = 'cirrus' + if not has_virtio_win: + video_model = 'cirrus' return video_model diff --git a/provider/vfio/ccw.py b/provider/vfio/ccw.py index c489c8e1c8..067bdeda9d 100644 --- a/provider/vfio/ccw.py +++ b/provider/vfio/ccw.py @@ -25,7 +25,7 @@ def read_write_operations_work(session, chpids, makefs=True): Per default the device gets a new filesystem setup. :param session: logged in guest session - :param chipds: string representing CHPIDs, e.g. 11122122 + :param chpids: string representing CHPIDs, e.g. 11122122 :param makefs: if False, the device is expected to have a valid filesystem already :return: True on success @@ -72,12 +72,14 @@ def make_dasd_part(path, session): :param path: dasd disk path, e.g. /dev/dasda :param session: guest session + :return: True if partitioning succeeded """ cmd = "fdasd -a %s" % path err, out = cmd_status_output(cmd, shell=True, session=session) if err: raise TestError("Couldn't create partition. %s" % out) + return True def make_dasd_fs(path, part, session): @@ -101,12 +103,14 @@ def format_dasd(path, session): :param path: dasd disk path, e.g. /dev/dasda :param session: guest session :raises TestError: if disk can't be formatted + :return: True if formatting succeeded """ cmd = "dasdfmt -b 4096 -M quick --force -p -y %s" % path err, out = cmd_status_output(cmd, shell=True, session=session) if err: raise TestError("Couldn't format disk. %s" % out) + return True def umount(session): @@ -137,6 +141,21 @@ def mount(session): raise TestError("Couldn't mount partition. %s" % out) +def set_device_offline(device_id, session=None): + """ + Sets device offline + + :param device_id: cssid.ssid.devno, e.g. 
0.0.560a + :param session: guest session, command is run on host if None + :raises TestError: if the device can't be set online + """ + + cmd = "chccwdev -d %s" % device_id + err, out = cmd_status_output(cmd, shell=True, session=session) + if err: + raise TestError("Could not set device offline. %s" % out) + + def set_device_online(device_id, session=None): """ Sets device online @@ -156,7 +175,7 @@ def get_first_device_identifiers(chpids, session): """ Gets the usual device identifier cssid.ssid.devno - :param chpids: chipids where the disk is connected, e.g. "11122122" + :param chpids: chpids where the disk is connected, e.g. "11122122" :param session: guest session :return: Pair of strings, "cssid.ssid.devno" "cssid.ssid.schid" :raises TestError: if the device can't be found inside guest @@ -167,7 +186,7 @@ def get_first_device_identifiers(chpids, session): devices_inside_guest = [x for x in paths.devices if x[paths.HEADER["CHPIDs"]] == chpids] if not devices_inside_guest: - raise TestError("Device with chipds %s wasn't" + raise TestError("Device with chpids %s wasn't" " found inside guest" % chpids) first = devices_inside_guest[0] return first[paths.HEADER["Device"]], first[paths.HEADER["Subchan."]] @@ -179,7 +198,7 @@ def device_is_listed(session, chpids): path ids. :param session: guest console session - :param chipds: chpids where the disk is connected, e.g. "11122122" + :param chpids: chpids where the disk is connected, e.g. 
"11122122" :return: True if device is listed """ diff --git a/requirements-travis.txt b/requirements-travis.txt index 3709b32645..7e2b9d656d 100644 --- a/requirements-travis.txt +++ b/requirements-travis.txt @@ -5,4 +5,5 @@ tox==1.5.0 virtualenv==1.9.1 simplejson==3.8.1 inspektor==0.5.2 +pylint==2.11.1 pyenchant diff --git a/spell.ignore b/spell.ignore index 35f6af3764..b60e40ad3f 100644 --- a/spell.ignore +++ b/spell.ignore @@ -1,20 +1,170 @@ -aa ABI +AES +Adapte +Analyse +Auth +Autostart +BZ +Backend +Blockcopy +Blockjob +CCW +CN +Ceph +Cgroup +Chunfu +Ckeck +Cleaup +CmdError +Codec +Coldplug +Coldunplug +Config +DAC +DAX +DMI +DNS +Dompmwakeup +EOI +ESC +ESX +Esx +FIXIT +FS +Fibre +Filt +Getlink +GiB +Gib +Gluster +Guestfish +Guestname +HBA +HPT +Hotplug +Hotplugged +Hotunplug +Hugepage +IDE +IOV +IP +IPs +Infiniband +Init +Iscsi +KSM +KVER +KVM +KiB +LUN +LXC +Libvirt +Lifecycle +Luks +MBs +MCS +MLS +MMU +Makesure +Managedsave +Maskable +Maxmem +Mbps +Metabytes +MiB +Miryala +Mitterle +Multicast +NFS +NMI +NUL +Nodedev +Normalise +Numa +OSP +OVS +Occupt +PCI +PF's +PFs +PID +PPC +Passthrough +Poweroff +Poweron +Pre +Prepareguest +Prepend +Prudhvi +QMP +QXL +RBD +RFE +RHV +RNG +RTC +Radix +Readlink +Refall +Reinstall +RemoteFile +Repeate +Resize +SMT +SSL +STP +SVE +Seabios +Setlink +Sparsify +Spinlock +Sriov +Stdout +Str +Subnet +SysRq +TCP +TLS +TODO +TPM +TestError +TestFail +Testcase +Tpm +Tuple +URI +UTC +Uefi +Unbridge +Unclassifed +Uncompress +Undefine +Unmark +Unprotect +Unregister +Unsets +Updae +VF's +VM +VMs +Virtio +XEN +Xen +Yalan +Zhang +aa accel acl acpi acpiphp -Adapte addr addrss -AES aexpect afeter aggregater agregators amd analyse -Analyse ap api args @@ -28,17 +178,14 @@ augeas augpath ausearch auth -Auth autodestroy autogenerated autologin automake autoport autostart -Autostart autotest backend -Backend backgroup backingchain backingstore @@ -53,20 +200,18 @@ binded bindwidth blk blkdev +blkdev's blkdevio blkdeviotune -blkdev's blkid blkio 
blkiotune blockcommit blockcopy -Blockcopy blockdev blockdevs blockjcopy blockjob -Blockjob blockjobs blockpull blockresize @@ -86,23 +231,20 @@ breakpoint btrfs buf bz -BZ cachetune +calc cartesian ccw -CCW cd cdrom cellno ceph -Ceph cfg cfq cgconfig cgget cgname cgroup -Cgroup cgstop chardev chardevs @@ -112,37 +254,29 @@ chipset chmod chnaged chown +chpids chronyc -Chunfu chwen cid cidr -Ckeck clas classname -Cleaup cli clocksource clocksources cloop cmd -CmdError cmdline cmds -CN codec -Codec codeset coldplug -Coldplug -Coldunplug commandline compat compcache condn conf config -Config configs coredump corescount @@ -162,11 +296,11 @@ ctrl currentvcpu curvcpu dac -DAC darget dargs +dasd +dasda datetime -DAX dbus de deduplicate @@ -176,10 +310,10 @@ desc dest desturi dev +dev's devname devno devnode -dev's devs df dhcp @@ -196,12 +330,10 @@ distros dmesg dmg dmi -DMI dmidecode dn dname dns -DNS dnsmaq dnsmasq docstring @@ -215,6 +347,7 @@ domblkstat domblkthreshold domcapabilities domcontrol +domdirtyrate domdisplay domfsfreeze domfsinfo @@ -231,7 +364,6 @@ domjobinfo dommemstat domname dompmsuspend -Dompmwakeup domrename domstate domstats @@ -253,12 +385,8 @@ emulatorpin enospace env eo -EOI errmsg -ESC esx -Esx -ESX ethernet ethtool exe @@ -270,13 +398,13 @@ failover fallocate fc fd +fdasd fdisk fds fdset ffree fialed fibre -Fibre fileio filename filepath @@ -284,14 +412,12 @@ filesize filesystem filesystems filetransfer -Filt filterbinding filterref filterrfer findfs finename firewalld -FIXIT flushbufs fmt freecell @@ -300,7 +426,6 @@ freepages freezed frontend fs -FS fsck fsfreeze fstab @@ -317,23 +442,17 @@ gcc genid getconf getlink -Getlink getrandom getspeed gfxcard -Gib -GiB gid gluster -Gluster glusterfs guestfish -Guestfish guestfs guestinfo guestkallsyms guestmount -Guestname guestvcpus guset gzip @@ -341,7 +460,6 @@ happends hardcode hardcoded hba -HBA hba's hbas hd @@ -359,22 +477,17 @@ hostfile hostname hostnuma hotplug -Hotplug hotpluggable 
hotplugged -Hotplugged hotplugging hotswap hotunplug -Hotunplug hotunplugged hpt -HPT htb htm htp hugepage -Hugepage hugepages hugetlbfs hwinfo @@ -387,7 +500,6 @@ ibm ibvirtd icmp ide -IDE idmap ie iface @@ -400,9 +512,7 @@ imagelabel img increated indice -Infiniband init -Init initctl initramfs initscripts @@ -428,19 +538,15 @@ iothreadpins iothreads iothreadsched iothreadset -IOV iowait iozone ip -IP ipaddress iperf -IPs iptable iptables isa iscsi -Iscsi iscsid isdigit ishotplug @@ -461,13 +567,9 @@ keypresses keyvalue keywards keywrap -KiB kms ksm -KSM -KVER kvm -KVM kwargs lan lchown @@ -478,14 +580,13 @@ libosinfo libs libvird libvirt -Libvirt +libvirt's libvirtd libvirtd's -libvirt's +libvirtdconf libvirts libvrtd lifecycle -Lifecycle linux localhost localvm @@ -507,9 +608,7 @@ lstatlist lsusb lszdev luks -Luks lun -LUN lun's lv lvcreate @@ -522,31 +621,25 @@ lvs lvuuid lvxxx lxc -LXC lzop macvlan macvtap -Makesure +makefs managedsave -Managedsave managedsaved maskable -Maskable maxdowntime maximumvcpu maxmem -Maxmem maxvcpu maxvcpus mb -Mbps mbr -MBs mbytes mcast -MCS md mdev +mdevctl mem memballoon memhog @@ -556,25 +649,19 @@ memorytune memspec memtune messege -Metabytes metadata metadatacache -MiB middletotop migratable migrateuri mins -Miryala -Mitterle mkfifo mkfs mkfs'ed mknod mkswap mlock -MLS mmconfig -MMU mnt modprobe monitorxml @@ -589,7 +676,6 @@ msg mtu multi multicast -Multicast multifd multifunction multipath @@ -626,16 +712,13 @@ newdisk newvm newvol nfs -NFS nic nmcli -NMI nodecpumap nodecpustats nodedev -Nodedev -nodedevice nodedev's +nodedevice nodei nodeinfo nodememstats @@ -647,17 +730,14 @@ nogroup nonexist noqueue normalise -Normalise nosharepages npiv nr nss nsswitch ntfs -NUL num numa -Numa numactl numad numanode @@ -667,7 +747,6 @@ nvdimm nvram nwfilter objs -Occupt ok olddisk oldvm @@ -678,14 +757,12 @@ optionstr orinial os osinfo -OSP outrange overcommit overcommits ovf ovirt ovs -OVS ovsbridge pagesize params @@ -696,22 +773,17 
@@ partnum partprobe parttype passthrough -Passthrough passwd pci -PCI +pci's pcie pcihole -pci's pem perf perl persistency pfs -PFs -PF's pid -PID pids pingable pkgs @@ -727,28 +799,21 @@ postcopy postfix postprocess poweroff -Poweroff -Poweron powerpc powershell ppc -PPC ppid pre -Pre pread preallocates preallocation precopy predicatable -Prepareguest -Prepend prepended priveledge proc propoer proto -Prudhvi ps pseries pts @@ -769,27 +834,18 @@ qed qem qemu qmp -QMP quiesce qxl -QXL qxldod -Radix rases rawio rbd -RBD rdm -Readlink readonly readwrite realtime redhat redirdev -Refall -Reinstall -RemoteFile -Repeate repo requiste rereadpt @@ -797,27 +853,22 @@ resctrl resettable resie resize -Resize resized resizing restared restorecon ret retlist -RFE rhel rhelx rhev rhsrvany rhv -RHV rng -RNG rom rootfs rrunner rsyslog -RTC rtype runnable runtest @@ -831,6 +882,7 @@ sasl sata scenaries schedinfo +schid scp scsi sd @@ -838,7 +890,6 @@ sda sdb sdx seabios -Seabios seclabel sectorsize securetty @@ -849,7 +900,6 @@ sendkey servcie servercert setlink -Setlink setmaxdowntime setmaxmem setmem @@ -868,33 +918,28 @@ smbios smitterl smm smt -SMT snaplist snapname snapshotname socketscount sourse -Sparsify sparsing spasified specifice spicevm spicevmc spinlock -Spinlock spinlocking spinlocks src srcuri sriov -Sriov srv sshd sshfs sshkeys ssid ssl -SSL stacktrace startup startupPolicy @@ -902,16 +947,13 @@ statvfs stderr stdin stdout -Stdout stdout's stdouts -stdouts storagepool storagevol stp -STP str -Str +stressapptest struct subchannel subclasses @@ -919,13 +961,11 @@ subcommand subelement sublement subnet -Subnet suboption subprocess substring subtests sve -SVE svirt swaplabel swtpm @@ -935,11 +975,11 @@ sys sysconfig sysfs sysinfo +syslibvirtd syslog syspath sysprep sysrq -SysRq systemctl systemd systemlink @@ -950,17 +990,13 @@ tartget tb tc tcp -TCP tcpdump's tcsd teardown tempfile testcase -Testcase testcases testdir -TestError -TestFail testxml tftp tgtd @@ -972,16 
+1008,12 @@ timedatectl timestamp tis tls -TLS tlsconnection tmp tmpfile tmpfs -TODO topo tpm -Tpm -TPM tseg ttcp tty @@ -989,26 +1021,20 @@ ttyconsole tunap tunnelled tuple -Tuple txt typervisor ubuntu udev udp uefi -Uefi uid umask umount umounted un unbridge -Unbridge -Unclassifed uncompress -Uncompress undefine -Undefine undefines undefinesource undefining @@ -1020,22 +1046,17 @@ unixbench unlink unlinked unmap -Unmark unmount unordered unpaused unpluging unprotect -Unprotect -Unregister unresettable untar upadte -Updae uperf uptime uri -URI uris url usb @@ -1045,17 +1066,16 @@ username userserial usr utc -UTC utils uuid uuidgen valiable valut vcpu +vcpu's vcpucount vcpuinfo vcpupin -vcpu's vcpus vcpuset vd @@ -1069,11 +1089,10 @@ vdx vepa verfiy vf +vf's vfat vfio -vf's vfs -VF's vg vgamem vgcreate @@ -1097,7 +1116,6 @@ virsh virt virtadmin virtio -Virtio virtiofs virtiofsd virtlogd @@ -1109,17 +1127,15 @@ virtualization virtualport vlan vm -VM +vm'infomation +vm'name +vm's vmcheck vmchecker vmdk -vm'infomation vmlinuz -vm'name vmname -vm's vms -VMs vmtype vmware vmxml @@ -1151,8 +1167,6 @@ wwns wwpn xattr xen -Xen -XEN xfs xfstest xhci @@ -1165,10 +1179,5 @@ xxxx xxxxx xyz xz -Yalan yalzhang yyyy -Zhang -stressapptest -calc -domdirtyrate diff --git a/v2v/tests/cfg/function_test_esx.cfg b/v2v/tests/cfg/function_test_esx.cfg index 5c31c0783f..2019731b62 100644 --- a/v2v/tests/cfg/function_test_esx.cfg +++ b/v2v/tests/cfg/function_test_esx.cfg @@ -39,7 +39,6 @@ - json: only dest_json only uefi.win2019,device_map,without_ip_option,env_leak,block_dev - base_os_directory="/var/lib/libvirt/images" variants: - default: no block_dev @@ -112,6 +111,12 @@ checkpoint = 'fstrim_warning' msg_content = 'virt-v2v: warning: fstrim on guest filesystem /dev/.*? failed. 
Usually' expect_msg = yes + - schtask: + only esx_70 + checkpoint = "ogac" + virtio_win_path = "/usr/share/virtio-win/" + os_version = VM_VERSION_WIN_SCHTASKS_V2V_EXAMPLE + main_vm = VM_NAME_WIN_SCHTASKS_V2V_EXAMPLE - OGAC: only esx_67 checkpoint = "ogac" diff --git a/v2v/tests/src/function_test_esx.py b/v2v/tests/src/function_test_esx.py index 59b2ed8a9c..58fd485f10 100644 --- a/v2v/tests/src/function_test_esx.py +++ b/v2v/tests/src/function_test_esx.py @@ -781,14 +781,6 @@ def vm_check(status_error): if output_mode == 'libvirt': pvt.pre_pool(pool_name, pool_type, pool_target, '') - # Create json output dir - if output_mode == 'json': - base_os_directory = params_get(params, 'base_os_directory') - os_directory = None - os_directory = tempfile.TemporaryDirectory(prefix='v2v_test_', dir=base_os_directory) - logging.info("-os dir is %s", os_directory) - params['os_directory'] = os_directory.name - if 'root' in checkpoint and 'ask' in checkpoint: v2v_params['v2v_opts'] += ' --root ask' v2v_params['custom_inputs'] = params.get('choice', '2') @@ -1026,13 +1018,13 @@ def vm_check(status_error): v2v_sasl.close_session() if output_mode == 'libvirt': pvt.cleanup_pool(pool_name, pool_type, pool_target, '') - if output_mode == 'json' and os_directory: - os_directory.cleanup() if 'with_proxy' in checkpoint: logging.info('Unset http_proxy&https_proxy') os.environ.pop('http_proxy') os.environ.pop('https_proxy') if unprivileged_user: process.system("userdel -fr %s" % unprivileged_user) + if params.get('os_directory') and os.path.isdir(params['os_directory']): + shutil.rmtree(params['os_directory'], ignore_errors=True) # Cleanup constant files utils_v2v.cleanup_constant_files(params) diff --git a/virttools/tests/cfg/virt_install/hostdev_mdev.cfg b/virttools/tests/cfg/virt_install/hostdev_mdev.cfg new file mode 100644 index 0000000000..747e5cd64c --- /dev/null +++ b/virttools/tests/cfg/virt_install/hostdev_mdev.cfg @@ -0,0 +1,6 @@ +- virt_install.hostdev.mdev: + type = hostdev_mdev 
+ variants: + - check_present_inside_guest: + only s390-virtio + mdev_type = vfio_ccw-io diff --git a/virttools/tests/src/virt_install/hostdev_mdev.py b/virttools/tests/src/virt_install/hostdev_mdev.py new file mode 100644 index 0000000000..2ce7a2e238 --- /dev/null +++ b/virttools/tests/src/virt_install/hostdev_mdev.py @@ -0,0 +1,176 @@ +from time import sleep +from uuid import uuid4 +from avocado.core.exceptions import TestError +from avocado.core.exceptions import TestFail +from provider.vfio import ccw +from virttest.libvirt_xml.vm_xml import VMXML +from virttest.utils_misc import cmd_status_output +from virttest import virsh + + +class MdevHandler(object): + """ Base class for mdev type specific implementations """ + + def create_nodedev(self): + """ Creates the mdev and returns its name """ + raise NotImplementedError() + + def get_target_address(self): + """ Returns a target address to use for hostdev """ + raise NotImplementedError() + + def check_device_present_inside_guest(self, session): + """ + Checks if the host device is present inside the guest + + :param session: guest session + """ + raise NotImplementedError() + + def clean_up(self): + """ Stops the mediated device and returns resources to the host """ + raise NotImplementedError() + + @staticmethod + def from_type(mdev_type): + """ + Creates implementing instance for mdev_type + + :param mdev_type: The mediated device type as by nodedev API + """ + if mdev_type == "vfio_ccw-io": + return CcwMdevHandler() + else: + raise TestError("Test doesn't know how to handle %s." % mdev_type) + + +class CcwMdevHandler(MdevHandler): + """ Class implementing test methods for vfio_ccw-io """ + + def __init__(self): + self.uuid = None + self.chpids = None + self.schid = None + self.target_address = None + self.expected_device_address = None + self.device_id = None + + def create_nodedev(self): + """ + Creates a mediated device of a specific type + and returns its name from libvirt. 
+ """ + self.schid, self.chpids = ccw.get_device_info() + self.device_id, _ = ccw.get_first_device_identifiers(self.chpids, None) + ccw.set_override(self.schid) + self.uuid = str(uuid4()) + ccw.start_device(self.uuid, self.schid) + + return first_mdev_nodedev_name() + + def get_target_address(self): + """ + Returns a valid target device address + + :param address_type: guest device address type + """ + self.target_address = "address.type=ccw,address.cssid=0xfe,address.ssid=0x0,address.devno=0x1111" + self.expected_device_address = "0.0.1111" + return self.target_address + + def check_device_present_inside_guest(self, session): + """ + Fails the test if the device can't be found inside the guest. + + :param session: guest session + :raises: TestFail if device not found + """ + device, _ = ccw.get_first_device_identifiers(self.chpids, session) + if not device == self.expected_device_address: + raise TestFail("Couldn't find device inside guest." + "Expected address %s, found %s." % + (self.expected_device_address, device)) + + def clean_up(self): + """ + Returns the mdev resources to the host. + """ + if self.uuid: + ccw.stop_device(self.uuid) + if self.schid: + ccw.unset_override(self.schid) + # need to sleep to avoid issue with setting device offline + # adding a wait_for would likely be more complicated + sleep(1) + if self.device_id: + ccw.set_device_offline(self.device_id) + + +def disk_for_import(vmxml): + """ + Returns the absolute path to a disk image for import. + Assume the boot image is the first disk and an image file. 
+ + :param vmxml: VMXML instance + """ + disks = vmxml.get_disk_all() + disk_list = list(disks.values()) + first_disk = disk_list[0] + return first_disk.find('source').get('file') + + +def first_mdev_nodedev_name(): + """ Returns the first nodedev of type mdev known to libvirt """ + result = virsh.nodedev_list(cap="mdev", debug=True) + device_names = result.stdout.strip().splitlines() + if result.exit_status or len(device_names) == 0: + raise TestError("Couldn't create nodedev. %s. %s." % + (result.stderr, result.stdout)) + return device_names[0] + + +def virt_install_with_hostdev(vm_name, mdev_nodedev, target_address, disk_path): + """ Runs virt-install with hostdev""" + cmd = ("virt-install --import --name %s" + " --hostdev %s,%s" + " --disk %s" + " --vcpus 2 --memory 2048" + " --nographics --noautoconsole" % + (vm_name, mdev_nodedev, target_address, disk_path)) + err, out = cmd_status_output(cmd, shell=True, verbose=True) + if err: + raise TestError("Couldn't install vm with hostdev: %s" % out) + + +def run(test, params, env): + """ + Confirm that a mediated device can be used by virt-install. + For this we import a disk we know will boot and check the + result inside the guest. + The mediated device is created by the test and assumed + to be the only mediated device in the test environment. + """ + + vm_name = params.get("main_vm") + vm = env.get_vm(vm_name) + vmxml = VMXML.new_from_inactive_dumpxml(vm_name) + mdev_type = params.get("mdev_type", "vfio_ccw-io") + handler = None + + try: + + vm.undefine() + handler = MdevHandler.from_type(mdev_type) + disk = disk_for_import(vmxml) + mdev_nodedev = handler.create_nodedev() + target_address = handler.get_target_address() + + virt_install_with_hostdev(vm_name, mdev_nodedev, target_address, disk) + + session = vm.wait_for_login() + handler.check_device_present_inside_guest(session) + + finally: + vmxml.sync() + if handler: + handler.clean_up()