From aa044c8633354ad7b5488d7377a584bc12193a57 Mon Sep 17 00:00:00 2001 From: Beraldo Leal Date: Fri, 8 Oct 2021 10:03:50 -0300 Subject: [PATCH] virttests: Uses the new logging namespace format Because of recent logging changes in Avocado itself, let's follow suit of commits adbfa5686 and b26e8032b. Signed-off-by: Beraldo Leal Signed-off-by: Jan Richter --- virttest/asset.py | 61 ++-- virttest/base_installer.py | 35 +- virttest/build_helper.py | 103 +++--- virttest/cartesian_config.py | 9 +- virttest/ceph.py | 20 +- virttest/cpu.py | 130 ++++---- virttest/data_dir.py | 7 +- virttest/env_process.py | 189 +++++------ virttest/gluster.py | 26 +- virttest/guest_agent.py | 17 +- virttest/http_server.py | 7 +- virttest/ip_sniffing.py | 30 +- virttest/iscsi.py | 43 +-- virttest/kernel_interface.py | 7 +- virttest/libvirt_cgroup.py | 32 +- virttest/libvirt_installer.py | 12 +- virttest/libvirt_remote.py | 5 +- virttest/libvirt_storage.py | 82 ++--- virttest/libvirt_version.py | 9 +- virttest/libvirt_vm.py | 308 +++++++++--------- virttest/libvirt_xml/domcapability_xml.py | 6 +- virttest/libvirt_xml/network_xml.py | 10 +- virttest/libvirt_xml/pool_xml.py | 20 +- virttest/libvirt_xml/vm_xml.py | 71 ++-- virttest/libvirtd_decorator.py | 20 +- virttest/logging_manager.py | 4 +- virttest/lvm.py | 54 +-- virttest/lvsb.py | 6 +- virttest/lvsb_base.py | 9 +- virttest/migration.py | 92 +++--- virttest/migration_template.py | 100 +++--- virttest/nfs.py | 45 ++- virttest/openvswitch.py | 29 +- virttest/ovirt.py | 181 +++++----- virttest/ovs_utils.py | 5 +- virttest/postprocess_iozone.py | 21 +- virttest/ppm_utils.py | 4 +- virttest/qemu_devices/qcontainer.py | 97 +++--- virttest/qemu_devices/qdevices.py | 22 +- virttest/qemu_installer.py | 37 ++- virttest/qemu_qtree.py | 21 +- virttest/qemu_storage.py | 96 +++--- virttest/qemu_virtio_port.py | 234 +++++++------ virttest/qemu_vm.py | 278 ++++++++-------- virttest/remote.py | 36 +- virttest/remote_build.py | 13 +- 
virttest/shared/deps/run_autotest/boottool.py | 22 +- .../kernel_install/kernelinstall.py | 20 +- virttest/ssh_key.py | 38 ++- virttest/staging/lv_utils.py | 88 ++--- virttest/staging/service.py | 10 +- virttest/staging/utils_cgroup.py | 24 +- virttest/staging/utils_koji.py | 33 +- virttest/staging/utils_memory.py | 6 +- virttest/step_editor.py | 8 +- virttest/storage.py | 50 +-- virttest/syslog_server.py | 6 +- virttest/test_setup.py | 189 ++++++----- virttest/tests/unattended_install.py | 135 ++++---- virttest/unittests/test_utils_test__init__.py | 4 +- virttest/utils_backup.py | 17 +- virttest/utils_config.py | 6 +- virttest/utils_conn.py | 57 ++-- virttest/utils_disk.py | 69 ++-- virttest/utils_env.py | 15 +- virttest/utils_gdb.py | 20 +- virttest/utils_hotplug.py | 5 +- virttest/utils_iptables.py | 14 +- virttest/utils_kernel_module.py | 28 +- virttest/utils_libguestfs.py | 29 +- virttest/utils_libvirt/libvirt_ceph_utils.py | 4 +- virttest/utils_libvirt/libvirt_config.py | 16 +- virttest/utils_libvirt/libvirt_cpu.py | 4 +- virttest/utils_libvirt/libvirt_disk.py | 38 ++- .../utils_libvirt/libvirt_embedded_qemu.py | 4 +- virttest/utils_libvirt/libvirt_keywrap.py | 6 +- virttest/utils_libvirt/libvirt_misc.py | 4 +- virttest/utils_libvirt/libvirt_nested.py | 8 +- virttest/utils_libvirt/libvirt_network.py | 18 +- virttest/utils_libvirt/libvirt_numa.py | 14 +- virttest/utils_libvirt/libvirt_nwfilter.py | 4 +- virttest/utils_libvirt/libvirt_pcicontr.py | 16 +- virttest/utils_libvirt/libvirt_vfio.py | 4 +- virttest/utils_libvirt/libvirt_vmxml.py | 6 +- virttest/utils_libvirtd.py | 32 +- virttest/utils_misc.py | 228 ++++++------- virttest/utils_nbd.py | 12 +- virttest/utils_net.py | 191 ++++++----- virttest/utils_netperf.py | 30 +- virttest/utils_npiv.py | 49 +-- virttest/utils_package.py | 13 +- virttest/utils_pyvmomi.py | 26 +- virttest/utils_sasl.py | 12 +- virttest/utils_secret.py | 8 +- virttest/utils_selinux.py | 38 ++- virttest/utils_spice.py | 82 ++--- 
virttest/utils_split_daemons.py | 26 +- virttest/utils_sriov.py | 10 +- virttest/utils_stress.py | 10 +- virttest/utils_sys.py | 10 +- virttest/utils_test/__init__.py | 242 +++++++------- virttest/utils_test/libguestfs.py | 112 +++---- virttest/utils_test/libvirt.py | 267 +++++++-------- virttest/utils_test/libvirt_domjobinfo.py | 8 +- virttest/utils_test/qemu/__init__.py | 84 ++--- virttest/utils_test/qemu/migration.py | 121 +++---- virttest/utils_time.py | 24 +- virttest/utils_v2v.py | 152 ++++----- virttest/utils_virtio_port.py | 8 +- virttest/utils_windows/virtio_win.py | 6 +- virttest/video_maker.py | 24 +- virttest/virsh.py | 72 ++-- virttest/virt_admin.py | 24 +- virttest/virt_vm.py | 52 +-- virttest/xml_utils.py | 28 +- 115 files changed, 2919 insertions(+), 2764 deletions(-) diff --git a/virttest/asset.py b/virttest/asset.py index 42fc150136..eacf1a274c 100644 --- a/virttest/asset.py +++ b/virttest/asset.py @@ -21,6 +21,8 @@ from virttest import data_dir +LOG = logging.getLogger('avocado.' 
+ __name__) + class ConfigLoader: @@ -317,8 +319,8 @@ def download_test_provider(provider, update=False): pass except Exception: if not dir_existed and os.path.isdir(download_dst): - logging.error('Cleaning up provider %s download dir %s', provider, - download_dst) + LOG.error('Cleaning up provider %s download dir %s', provider, + download_dst) shutil.rmtree(download_dst) raise @@ -327,8 +329,8 @@ def download_test_provider(provider, update=False): os.chdir(download_dst) process.system('git log -1') except process.CmdError: - logging.error('Something is unexpectedly wrong with the git repository at %s', - download_dst) + LOG.error('Something is unexpectedly wrong with the git repository at %s', + download_dst) raise finally: os.chdir(original_dir) @@ -371,7 +373,7 @@ def get_file_asset(title, src_path, destination): for ext in (".xz", ".gz", ".7z", ".bz2"): if os.path.exists(src_path + ext): destination = destination + ext - logging.debug('Found source image %s', destination) + LOG.debug('Found source image %s', destination) return { 'url': None, 'sha1_url': None, 'destination': src_path + ext, 'destination_uncompressed': destination, @@ -379,7 +381,7 @@ def get_file_asset(title, src_path, destination): 'downloaded': True} if os.path.exists(src_path): - logging.debug('Found source image %s', destination) + LOG.debug('Found source image %s', destination) return {'url': src_path, 'sha1_url': None, 'destination': destination, 'destination_uncompressed': None, 'uncompress_cmd': None, 'shortname': title, 'title': title, @@ -455,13 +457,13 @@ def uncompress_asset(asset_info, force=False): if os.path.isfile(destination) and force: os.chdir(os.path.dirname(destination_uncompressed)) - logging.debug('Uncompressing %s -> %s', destination, - destination_uncompressed) + LOG.debug('Uncompressing %s -> %s', destination, + destination_uncompressed) process.run(uncompress_cmd, shell=True) backup_file = destination_uncompressed + '.backup' if os.path.isfile(backup_file): - 
logging.debug('Copying %s -> %s', destination_uncompressed, - backup_file) + LOG.debug('Copying %s -> %s', destination_uncompressed, + backup_file) shutil.copy(destination_uncompressed, backup_file) @@ -486,13 +488,13 @@ def download_file(asset_info, interactive=False, force=False): if sha1_url is not None: try: - logging.info("Verifying expected SHA1 sum from %s", sha1_url) + LOG.info("Verifying expected SHA1 sum from %s", sha1_url) sha1_file = urllib.request.urlopen(sha1_url) sha1_contents = astring.to_text(sha1_file.read()) sha1 = sha1_contents.split(" ")[0] - logging.info("Expected SHA1 sum: %s", sha1) + LOG.info("Expected SHA1 sum: %s", sha1) except Exception as e: - logging.error("Failed to get SHA1 from file: %s", e) + LOG.error("Failed to get SHA1 from file: %s", e) else: sha1 = None @@ -501,7 +503,7 @@ def download_file(asset_info, interactive=False, force=False): os.makedirs(destination_dir) if not os.path.isfile(destination): - logging.warning("File %s not found", destination) + LOG.warning("File %s not found", destination) if interactive: answer = genio.ask("Would you like to download it from %s?" 
% url) else: @@ -512,12 +514,12 @@ def download_file(asset_info, interactive=False, force=False): "Downloading %s" % title) had_to_download = True except Exception as download_failure: - logging.error("Check your internet connection: %s", - download_failure) + LOG.error("Check your internet connection: %s", + download_failure) else: - logging.warning("Missing file %s", destination) + LOG.warning("Missing file %s", destination) else: - logging.info("Found %s", destination) + LOG.info("Found %s", destination) if sha1 is None: answer = 'n' else: @@ -526,27 +528,27 @@ def download_file(asset_info, interactive=False, force=False): if answer == 'y': actual_sha1 = crypto.hash_file(destination, algorithm='sha1') if actual_sha1 != sha1: - logging.info("Actual SHA1 sum: %s", actual_sha1) + LOG.info("Actual SHA1 sum: %s", actual_sha1) if interactive: answer = genio.ask("The file seems corrupted or outdated. " "Would you like to download it?") else: - logging.info("The file seems corrupted or outdated") + LOG.info("The file seems corrupted or outdated") answer = 'y' if answer == 'y': - logging.info("Updating image to the latest available...") + LOG.info("Updating image to the latest available...") while not file_ok: try: download.url_download_interactive(url, destination, title) except Exception as download_failure: - logging.error("Check your internet connection: %s", - download_failure) + LOG.error("Check your internet connection: %s", + download_failure) sha1_post_download = crypto.hash_file(destination, algorithm='sha1') had_to_download = True if sha1_post_download != sha1: - logging.error("Actual SHA1 sum: %s", actual_sha1) + LOG.error("Actual SHA1 sum: %s", actual_sha1) if interactive: answer = genio.ask("The file downloaded %s is " "corrupted. 
Would you like " @@ -556,8 +558,7 @@ def download_file(asset_info, interactive=False, force=False): answer = 'n' if answer == 'n': problems_ignored = True - logging.error("File %s is corrupted" % - destination) + LOG.error("File %s is corrupted" % destination) file_ok = True else: file_ok = False @@ -565,15 +566,15 @@ def download_file(asset_info, interactive=False, force=False): file_ok = True else: file_ok = True - logging.info("SHA1 sum check OK") + LOG.info("SHA1 sum check OK") else: problems_ignored = True - logging.info("File %s present, but did not verify integrity", - destination) + LOG.info("File %s present, but did not verify integrity", + destination) if file_ok: if not problems_ignored: - logging.info("%s present, with proper checksum", destination) + LOG.info("%s present, with proper checksum", destination) uncompress_asset(asset_info=asset_info, force=force or had_to_download) diff --git a/virttest/base_installer.py b/virttest/base_installer.py index 64c3de5beb..78a3448ca9 100644 --- a/virttest/base_installer.py +++ b/virttest/base_installer.py @@ -20,6 +20,8 @@ from . import arch from .staging import utils_koji +LOG = logging.getLogger('avocado.' 
+ __name__) + class NoModuleError(Exception): @@ -180,14 +182,14 @@ def _set_param_cleanup(self): self.cleanup = True cleanup = self.params.get('installer_cleanup', 'yes') if cleanup == 'no': - logging.debug("Setting installer cleanup attribute to False") + LOG.debug("Setting installer cleanup attribute to False") self.cleanup = False def set_install_params(self, test=None, params=None): """ Called by test to setup parameters from the configuration file """ - logging.info("calling set install params") + LOG.info("calling set install params") if test is not None: self._set_test_dirs(test, params) @@ -350,7 +352,7 @@ def write_version_keyval(self, test): except AttributeError: version = "Unknown" sw_version = {('software_version_%s' % self.name): version} - logging.debug("Writing test keyval %s", sw_version) + LOG.debug("Writing test keyval %s", sw_version) test.write_test_keyval(sw_version) def load_modules(self, module_list=None): @@ -370,8 +372,7 @@ def load_modules(self, module_list=None): if not module_list: raise NoModuleError("Module list empty") - logging.info("Loading modules from default locations through " - "modprobe") + LOG.info("Loading modules from default locations through modprobe") for module in module_list: process.system("modprobe %s" % module) @@ -384,7 +385,7 @@ def unload_modules(self, module_list=None): """ if module_list is None: module_list = self.module_list - logging.info("Unloading kernel modules: %s" % " ".join(module_list)) + LOG.info("Unloading kernel modules: %s" % " ".join(module_list)) for module in module_list: linux_modules.unload_module(module) @@ -470,8 +471,8 @@ def __init__(self, mode, name, test=None, params=None): super(NoopInstaller, self).__init__(mode, name, test, params) def install(self): - logging.info("Assuming virtualization software to be already " - "installed. Doing nothing") + LOG.info("Assuming virtualization software to be already " + "installed. 
Doing nothing") class YumInstaller(BaseInstaller): @@ -560,7 +561,7 @@ def _expand_koji_pkgs_with_debuginfo(self): :return: None """ - logging.debug("Koji package list to be updated with debuginfo pkgs") + LOG.debug("Koji package list to be updated with debuginfo pkgs") koji_pkgs_with_debug = [] for pkg_text in self.koji_pkgs: @@ -575,8 +576,8 @@ def _expand_koji_pkgs_with_debuginfo(self): pkg.subpackages.append(debuginfo_pkg_name) pkg_with_debug_text = pkg.to_text() - logging.debug("KojiPkgSpec with debuginfo package added: %s", - pkg_with_debug_text) + LOG.debug("KojiPkgSpec with debuginfo package added: %s", + pkg_with_debug_text) koji_pkgs_with_debug.append(pkg_with_debug_text) # swap current koji_pkgs with on that includes debuginfo pkgs @@ -623,8 +624,8 @@ def _install_phase_download(self): if pkg.is_valid(): koji_client.get_pkgs(pkg, dst_dir=self.test_workdir) else: - logging.error('Package specification (%s) is invalid: %s' % - (pkg, pkg.describe_invalid())) + LOG.error('Package specification (%s) is invalid: %s' % + (pkg, pkg.describe_invalid())) for pkg_text in self.koji_scratch_pkgs: pkg = utils_koji.KojiScratchPkgSpec(pkg_text) koji_client.get_scratch_pkgs(pkg, dst_dir=self.test_workdir) @@ -633,8 +634,8 @@ def _install_phase_install(self): if self.koji_yumrepo_baseurl is not None: repo = yumrepo.YumRepo(self.param_key_prefix, self.koji_yumrepo_baseurl) - logging.debug('Enabling YUM Repo "%s" at "%s"', - self.param_key_prefix, self.koji_yumrepo_baseurl) + LOG.debug('Enabling YUM Repo "%s" at "%s"', + self.param_key_prefix, self.koji_yumrepo_baseurl) repo.save() os.chdir(self.test_workdir) @@ -642,8 +643,8 @@ def _install_phase_install(self): process.system("yum --nogpgcheck -y install %s" % rpm_file_names) if self.koji_yumrepo_baseurl is not None: - logging.debug('Disabling YUM Repo "%s" at "%s"', - self.param_key_prefix, self.koji_yumrepo_baseurl) + LOG.debug('Disabling YUM Repo "%s" at "%s"', + self.param_key_prefix, self.koji_yumrepo_baseurl) 
repo.remove() diff --git a/virttest/build_helper.py b/virttest/build_helper.py index 3fa3fb24a0..6b0fa25a0e 100644 --- a/virttest/build_helper.py +++ b/virttest/build_helper.py @@ -13,6 +13,9 @@ from virttest import data_dir +LOG = logging.getLogger('avocado.' + __name__) + + def _force_copy(src, dest): """ Replace dest with a new copy of src, even if it exists @@ -64,52 +67,51 @@ def _parse_params(self): __init__(). """ config_prefix = 'git_repo_%s' % self.name - logging.debug('Parsing parameters for git repo %s, configuration ' - 'prefix is %s' % (self.name, config_prefix)) + LOG.debug('Parsing parameters for git repo %s, configuration ' + 'prefix is %s' % (self.name, config_prefix)) self.base_uri = self.params.get('%s_base_uri' % config_prefix) if self.base_uri is None: - logging.debug('Git repo %s base uri is not set' % self.name) + LOG.debug('Git repo %s base uri is not set' % self.name) else: - logging.debug('Git repo %s base uri: %s' % (self.name, - self.base_uri)) + LOG.debug('Git repo %s base uri: %s' % (self.name, self.base_uri)) self.uri = self.params.get('%s_uri' % config_prefix) - logging.debug('Git repo %s uri: %s' % (self.name, self.uri)) + LOG.debug('Git repo %s uri: %s' % (self.name, self.uri)) self.branch = self.params.get('%s_branch' % config_prefix, 'master') - logging.debug('Git repo %s branch: %s' % (self.name, self.branch)) + LOG.debug('Git repo %s branch: %s' % (self.name, self.branch)) self.lbranch = self.params.get('%s_lbranch' % config_prefix) if self.lbranch is None: self.lbranch = self.branch - logging.debug('Git repo %s lbranch: %s' % (self.name, self.lbranch)) + LOG.debug('Git repo %s lbranch: %s' % (self.name, self.lbranch)) self.commit = self.params.get('%s_commit' % config_prefix) if self.commit is None: - logging.debug('Git repo %s commit is not set' % self.name) + LOG.debug('Git repo %s commit is not set' % self.name) else: - logging.debug('Git repo %s commit: %s' % (self.name, self.commit)) + LOG.debug('Git repo %s commit: %s' % 
(self.name, self.commit)) self.tag = self.params.get('%s_tag' % config_prefix) if self.tag is None: - logging.debug('Git repo %s tag is not set' % self.name) + LOG.debug('Git repo %s tag is not set' % self.name) else: - logging.debug('Git repo %s tag: %s' % (self.name, self.tag)) + LOG.debug('Git repo %s tag: %s' % (self.name, self.tag)) self.key_file = None tag_signed = self.params.get('%s_tag_signed' % config_prefix) if tag_signed is None: - logging.warning('Git repo %s tag is not signed' % self.name) - logging.warning('This means we will not verify if the key was ' - 'made by whomever claims to have made it ' - '(dangerous)') + LOG.warning('Git repo %s tag is not signed' % self.name) + LOG.warning('This means we will not verify if the key was ' + 'made by whomever claims to have made it ' + '(dangerous)') else: self.key_file = os.path.join(data_dir.get_data_dir(), 'gpg', tag_signed) if os.path.isfile(self.key_file): - logging.debug('Git repo %s tag %s will be verified with public ' - 'key file %s', self.name, self.tag, self.key_file) + LOG.debug('Git repo %s tag %s will be verified with public ' + 'key file %s', self.name, self.tag, self.key_file) else: raise exceptions.TestError('GPG public key file %s not found, ' 'will not proceed with testing' % @@ -140,8 +142,8 @@ def execute(self): os.makedirs(gnupg_home) os.environ['GNUPGHOME'] = gnupg_home process.system('gpg --import %s' % self.key_file) - logging.debug('Verifying if tag is actually signed with ' - 'GPG key ID %s' % self.key_file) + LOG.debug('Verifying if tag is actually signed with ' + 'GPG key ID %s' % self.key_file) process.system('git tag -v %s' % self.tag) except process.CmdError: raise exceptions.TestError("GPG signature check for git repo " @@ -209,12 +211,11 @@ def _parse_params(self): Parses the params items for entries related to source dir """ config_prefix = 'local_src_%s' % self.name - logging.debug('Parsing parameters for local source %s, configuration ' - 'prefix is %s' % (self.name, 
config_prefix)) + LOG.debug('Parsing parameters for local source %s, configuration ' + 'prefix is %s' % (self.name, config_prefix)) self.path = self.params.get('%s_path' % config_prefix) - logging.debug('Local source directory %s path: %s' % (self.name, - self.path)) + LOG.debug('Local source directory %s path: %s' % (self.name, self.path)) self.source = self.path self.destination = self.destination_dir @@ -241,8 +242,8 @@ def extract(self): name = os.path.basename(self.destination) temp_dir = os.path.join(os.path.dirname(self.destination), '%s.tmp' % name) - logging.debug('Temporary directory for extracting tarball is %s' % - temp_dir) + LOG.debug('Temporary directory for extracting tarball is %s' % + temp_dir) if not os.path.isdir(temp_dir): os.makedirs(temp_dir) @@ -309,12 +310,11 @@ def _parse_params(self): Parses the params items for entries related to this local tar helper """ config_prefix = 'local_tar_%s' % self.name - logging.debug('Parsing parameters for local tar %s, configuration ' - 'prefix is %s' % (self.name, config_prefix)) + LOG.debug('Parsing parameters for local tar %s, configuration ' + 'prefix is %s' % (self.name, config_prefix)) self.path = self.params.get('%s_path' % config_prefix) - logging.debug('Local source tar %s path: %s' % (self.name, - self.path)) + LOG.debug('Local source tar %s path: %s' % (self.name, self.path)) self.source = self.path self.destination = self.destination_dir @@ -377,12 +377,11 @@ def _parse_params(self): Parses the params items for entries related to this remote tar helper """ config_prefix = 'remote_tar_%s' % self.name - logging.debug('Parsing parameters for remote tar %s, configuration ' - 'prefix is %s' % (self.name, config_prefix)) + LOG.debug('Parsing parameters for remote tar %s, configuration ' + 'prefix is %s' % (self.name, config_prefix)) self.uri = self.params.get('%s_uri' % config_prefix) - logging.debug('Remote source tar %s uri: %s' % (self.name, - self.uri)) + LOG.debug('Remote source tar %s uri: %s' 
% (self.name, self.uri)) self.source = self.uri self.destination = self.destination_dir @@ -464,16 +463,16 @@ def _parse_params(self): methods. That means it's not strictly necessary to call parent's __init__(). """ - logging.debug('Parsing patch parameters for prefix %s' % self.prefix) + LOG.debug('Parsing patch parameters for prefix %s' % self.prefix) patches_param_key = '%s_patches' % self.prefix self.patches_str = self.params.get(patches_param_key, '[]') - logging.debug('Patches config for prefix %s: %s' % (self.prefix, - self.patches_str)) + LOG.debug('Patches config for prefix %s: %s' % (self.prefix, + self.patches_str)) self.patches = eval(self.patches_str) - logging.debug('Patches for prefix %s: %s' % (self.prefix, - ", ".join(self.patches))) + LOG.debug('Patches for prefix %s: %s' % (self.prefix, + ", ".join(self.patches))) class GnuSourceBuildInvalidSource(Exception): @@ -561,7 +560,7 @@ def include_pkg_config_path(self): else: os.environ[env_var] = ':'.join(include_paths) - logging.debug('PKG_CONFIG_PATH is: %s' % os.environ['PKG_CONFIG_PATH']) + LOG.debug('PKG_CONFIG_PATH is: %s' % os.environ['PKG_CONFIG_PATH']) def get_configure_path(self): """ @@ -609,8 +608,8 @@ def enable_debug_symbols(self): enable_debug_option = "--disable-strip" if enable_debug_option in self.get_available_configure_options(): self.configure_options.append(enable_debug_option) - logging.debug('Enabling debug symbols with option: %s' % - enable_debug_option) + LOG.debug('Enabling debug symbols with option: %s' % + enable_debug_option) def get_configure_command(self): """ @@ -629,7 +628,7 @@ def configure(self): Runs the "configure" script passing appropriate command line options """ configure_command = self.get_configure_command() - logging.info('Running configure on build dir') + LOG.info('Running configure on build dir') os.chdir(self.build_dir) process.system(configure_command) @@ -639,7 +638,7 @@ def make_parallel(self): """ parallel_make_jobs = multiprocessing.cpu_count() 
make_command = "make -j %s" % parallel_make_jobs - logging.info("Running parallel make on build dir") + LOG.info("Running parallel make on build dir") os.chdir(self.build_dir) process.system(make_command) @@ -747,16 +746,15 @@ def _parse_params(self): self.kernel_path = self.params.get(kernel_path_key, default_kernel_path) - logging.info('Parsing Linux kernel build parameters for %s', - self.prefix) + LOG.info('Parsing Linux kernel build parameters for %s', self.prefix) def make_guest_kernel(self): """ Runs "make", using a single job """ os.chdir(self.source) - logging.info("Building guest kernel") - logging.debug("Kernel config is %s" % self.config) + LOG.info("Building guest kernel") + LOG.debug("Kernel config is %s" % self.config) download.get_file(self.config, '.config') # FIXME currently no support for builddir @@ -765,7 +763,7 @@ def make_guest_kernel(self): parallel_make_jobs = multiprocessing.cpu_count() make_command = "make -j %s %s" % ( parallel_make_jobs, self.build_target) - logging.info("Running parallel make on src dir") + LOG.info("Running parallel make on src dir") process.system(make_command) def make_clean(self): @@ -841,13 +839,12 @@ def _parse_params(self): methods. That means it's not strictly necessary to call parent's __init__(). 
""" - logging.debug('Parsing gnu_autotools build parameters for %s' % - self.name) + LOG.debug('Parsing gnu_autotools build parameters for %s' % self.name) configure_opt_key = '%s_configure_options' % self.name configure_options = self.params.get(configure_opt_key, '').split() - logging.debug('Configure options for %s: %s' % (self.name, - configure_options)) + LOG.debug('Configure options for %s: %s' % (self.name, + configure_options)) self.source = self.destination_dir self.build_dir = self.destination_dir diff --git a/virttest/cartesian_config.py b/virttest/cartesian_config.py index 572102702c..7552e9cdd1 100755 --- a/virttest/cartesian_config.py +++ b/virttest/cartesian_config.py @@ -144,6 +144,9 @@ num_failed_cases = 5 +LOG = logging.getLogger('avocado.' + __name__) + + class ParserError(Exception): def __init__(self, msg, line=None, filename=None, linenum=None): @@ -1455,10 +1458,10 @@ def __init__(self, filename=None, defaults=False, expand_defaults=[], def _debug(self, s, *args): if self.debug: - logging.debug(s, *args) + LOG.debug(s, *args) def _warn(self, s, *args): - logging.warn(s, *args) + LOG.warn(s, *args) def parse_file(self, filename): """ @@ -2307,7 +2310,7 @@ def postfix_parse(dic): parser.error("filename required") if options.debug: - logging.basicConfig(level=logging.DEBUG) + LOG.setLevel(logging.DEBUG) expand = [] if options.expand: diff --git a/virttest/ceph.py b/virttest/ceph.py index 3b06b70177..a0476b72d9 100644 --- a/virttest/ceph.py +++ b/virttest/ceph.py @@ -18,6 +18,8 @@ from virttest import error_context from virttest import utils_misc +LOG = logging.getLogger('avocado.' 
+ __name__) + class CephError(Exception): pass @@ -68,7 +70,7 @@ def rbd_image_create(ceph_monitor, rbd_pool_name, rbd_image_name, image=rbd_image_name, size=size, keyring=keyring) process.system(cmd, verbose=True) else: - logging.debug("Image already exist skip the create.") + LOG.debug("Image already exist skip the create.") @error_context.context_aware @@ -95,7 +97,7 @@ def rbd_image_rm(ceph_monitor, rbd_pool_name, rbd_image_name, image=rbd_image_name, keyring=keyring) process.run(cmd, verbose=True) else: - logging.debug("Image not exist, skip to remove it.") + LOG.debug("Image not exist, skip to remove it.") @error_context.context_aware @@ -123,7 +125,7 @@ def rbd_image_exist(ceph_monitor, rbd_pool_name, output = process.run(cmd, ignore_status=True, verbose=True).stdout_text - logging.debug("Response from rbd ls command is: %s" % output) + LOG.debug("Response from rbd ls command is: %s" % output) return (rbd_image_name.strip() in output.splitlines()) @@ -183,7 +185,7 @@ def rbd_image_map(ceph_monitor, rbd_pool_name, rbd_image_name): if os.path.exist(os.path.join("/dev/rbd", rbd_pool_name, rbd_image_name)): return os.path.join("/dev/rbd", rbd_pool_name, rbd_image_name) else: - logging.debug("Failed to map image to local: %s" % output) + LOG.debug("Failed to map image to local: %s" % output) return None @@ -197,7 +199,7 @@ def rbd_image_unmap(rbd_pool_name, rbd_image_name): cmd = "rbd unmap /dev/rbd/%s/%s" % (rbd_pool_name, rbd_image_name) output = process.run(cmd, verbose=True).stdout_text if os.path.exist(os.path.join("/dev/rbd", rbd_pool_name, rbd_image_name)): - logging.debug("Failed to unmap image from local: %s" % output) + LOG.debug("Failed to unmap image from local: %s" % output) @error_context.context_aware @@ -264,9 +266,9 @@ def cephfs_mount(ceph_uri, mount_point, options=None, verbose=False, session=Non try: utils_misc.make_dirs(mount_point) except OSError as dirError: - logging.debug("Creation of the directory:%s failed:%s", mount_point, 
str(dirError)) + LOG.debug("Creation of the directory:%s failed:%s", mount_point, str(dirError)) else: - logging.debug("Successfully created the directory %s", mount_point) + LOG.debug("Successfully created the directory %s", mount_point) process.system(mount_cmd, verbose=verbose) @@ -291,6 +293,6 @@ def cephfs_umount(ceph_uri, mount_point, verbose=False, session=None): try: utils_misc.safe_rmdir(mount_point) except OSError as dirError: - logging.debug("Delete of the directory:%s failed:%s", mount_point, str(dirError)) + LOG.debug("Delete of the directory:%s failed:%s", mount_point, str(dirError)) else: - logging.debug("Successfully deleted the directory %s", mount_point) + LOG.debug("Successfully deleted the directory %s", mount_point) diff --git a/virttest/cpu.py b/virttest/cpu.py index 93b61f6b9c..a0a68fed7b 100644 --- a/virttest/cpu.py +++ b/virttest/cpu.py @@ -38,6 +38,8 @@ from virttest import data_dir +LOG = logging.getLogger('avocado.' + __name__) + ARCH = platform.machine() CPU_TYPES = {"AuthenticAMD": ["EPYC-Milan", "EPYC-Rome", "EPYC", "Opteron_G5", @@ -118,7 +120,7 @@ def get_cpu_xmldata(vm, options=""): try: cpu_xmldata['current_vcpu'] = int(vm_xml.current_vcpu) except LibvirtXMLNotFoundError: - logging.debug("current vcpu value not present in xml, set as max value") + LOG.debug("current vcpu value not present in xml, set as max value") cpu_xmldata['current_vcpu'] = int(vm_xml.vcpu) cpu_xmldata['vcpu'] = int(vm_xml.vcpu) return cpu_xmldata @@ -192,7 +194,7 @@ def affinity_from_xml(vm): vmxml = libvirt_xml.VMXML.new_from_dumpxml(vm.name) xml_affinity_list = vmxml['cputune'].vcpupins except LibvirtXMLNotFoundError: - logging.debug("No element find in domain xml") + LOG.debug("No element find in domain xml") return xml_affinity # Store xml_affinity_list to a dict for vcpu in xml_affinity_list: @@ -259,7 +261,7 @@ def get_vcpucount_details(vm, options): result = virsh.vcpucount(vm.name, options, ignore_status=True, debug=True) if result.exit_status: - 
logging.debug("vcpu count command failed") + LOG.debug("vcpu count command failed") return (result, vcpucount_details) if options: @@ -312,20 +314,19 @@ def check_affinity(vm, expect_vcpupin): expect_affinity = cpus_string_to_affinity_list(str(expect_vcpupin[vcpu]), host_cpu_count) # Check for vcpuinfo affinity if affinity_vcpuinfo[int(vcpu)] != expect_affinity: - logging.error("CPU affinity in virsh vcpuinfo output" - " is unexpected") + LOG.error("CPU affinity in virsh vcpuinfo output is unexpected") result = False # Check for vcpupin affinity if affinity_vcpupin[int(vcpu)] != expect_affinity: - logging.error("Virsh vcpupin output is unexpected") + LOG.error("Virsh vcpupin output is unexpected") result = False # Check for affinity in Domain xml if affinity_xml: if affinity_xml[vcpu] != expect_affinity: - logging.error("Affinity in domain XML is unexpected") + LOG.error("Affinity in domain XML is unexpected") result = False if result: - logging.debug("Vcpupin info check pass") + LOG.debug("Vcpupin info check pass") return result @@ -350,28 +351,28 @@ def check_vcpucount(vm, exp_vcpu, option="", guest_agent=False): result = False if vcpucount_option == "--guest" and guest_agent: if vcpucount_result['guest_live'] != exp_vcpu['guest_live']: - logging.error("Virsh vcpucount output is unexpected\nExpected: " - "%s\nActual: %s", exp_vcpu, vcpucount_result) + LOG.error("Virsh vcpucount output is unexpected\nExpected: " + "%s\nActual: %s", exp_vcpu, vcpucount_result) result = False else: # Check for config option results if vm.is_dead(): if (exp_vcpu['max_config'] != vcpucount_result['max_config'] or exp_vcpu['cur_config'] != vcpucount_result['cur_config']): - logging.error("Virsh vcpucount output is unexpected\nExpected" - ":%s\nActual:%s", exp_vcpu, vcpucount_result) + LOG.error("Virsh vcpucount output is unexpected\nExpected" + ":%s\nActual:%s", exp_vcpu, vcpucount_result) result = False else: if (exp_vcpu['max_config'] != vcpucount_result['max_config'] or 
exp_vcpu['max_live'] != vcpucount_result['max_live'] or exp_vcpu['cur_config'] != vcpucount_result['cur_config'] or exp_vcpu['cur_live'] != vcpucount_result['cur_live']): - logging.error("Virsh vcpucount output is unexpected\n " - "Expected:%s\nActual:%s", exp_vcpu, - vcpucount_result) + LOG.error("Virsh vcpucount output is unexpected\n " + "Expected:%s\nActual:%s", exp_vcpu, + vcpucount_result) result = False if result: - logging.debug("Command vcpucount check pass") + LOG.debug("Command vcpucount check pass") return result @@ -394,11 +395,11 @@ def check_vcpuinfo(vm, exp_vcpu): affinity_vcpuinfo = affinity_from_vcpuinfo(vm) vcpuinfo_num = len(affinity_vcpuinfo) if vcpuinfo_num != exp_vcpu[idx]: - logging.error("Vcpu number in virsh vcpuinfo is unexpected\n" - "Expected: %s\nActual: %s", exp_vcpu[idx], vcpuinfo_num) + LOG.error("Vcpu number in virsh vcpuinfo is unexpected\n" + "Expected: %s\nActual: %s", exp_vcpu[idx], vcpuinfo_num) result = False else: - logging.debug("Command vcpuinfo check pass") + LOG.debug("Command vcpuinfo check pass") return result @@ -421,18 +422,18 @@ def check_xmlcount(vm, exp_vcpu, option): exp_key = "cur_live" if cpu_xml['current_vcpu'] != exp_vcpu[exp_key]: if cpu_xml['current_vcpu'] != exp_vcpu['cur_config']: - logging.error("currrent vcpu number mismatch in xml\n" - "Expected: %s\nActual:%s", exp_vcpu[exp_key], - cpu_xml['current_vcpu']) + LOG.error("currrent vcpu number mismatch in xml\n" + "Expected: %s\nActual:%s", exp_vcpu[exp_key], + cpu_xml['current_vcpu']) result = False else: - logging.debug("current vcpu count in xml check pass") + LOG.debug("current vcpu count in xml check pass") if cpu_xml['vcpu'] != exp_vcpu['max_config']: - logging.error("vcpu count mismatch in xml\nExpected: %s\nActual: %s", - exp_vcpu['max_config'], cpu_xml['vcpu']) + LOG.error("vcpu count mismatch in xml\nExpected: %s\nActual: %s", + exp_vcpu['max_config'], cpu_xml['vcpu']) result = False else: - logging.debug("vcpu count in xml check pass") + 
LOG.debug("vcpu count in xml check pass") return result @@ -454,8 +455,7 @@ def get_cpustats(vm, cpu=None): option = "--start %s --count 1" % cpu result = virsh.cpu_stats(vm.name, option) if result.exit_status != 0: - logging.error("cpu stats command failed: %s", - result.stderr_text) + LOG.error("cpu stats command failed: %s", result.stderr_text) return None output = result.stdout_text.strip().split() if re.match("CPU%s" % cpu, output[0]): @@ -469,8 +469,7 @@ def get_cpustats(vm, cpu=None): option = "--start %s --count 1" % host_cpu_online[i] result = virsh.cpu_stats(vm.name, option) if result.exit_status != 0: - logging.error("cpu stats command failed: %s", - result.stderr_text) + LOG.error("cpu stats command failed: %s", result.stderr_text) return None output = result.stdout_text.strip().split() if re.match("CPU%s" % host_cpu_online[i], output[0]): @@ -480,8 +479,7 @@ def get_cpustats(vm, cpu=None): result = virsh.cpu_stats(vm.name, "--total") cpustats["total"] = [] if result.exit_status != 0: - logging.error("cpu stats command failed: %s", - result.stderr_text) + LOG.error("cpu stats command failed: %s", result.stderr_text) return None output = result.stdout_text.strip().split() cpustats["total"] = [float(output[2])] # cputime @@ -519,12 +517,12 @@ def check_vcpu_domstats(vm, exp_vcpu): exp_cur_max = exp_vcpu['max_config'] if exp_cur_vcpu != cur_vcpu: status = False - logging.error("Mismatch in current vcpu in domstats output, " - "Expected: %s Actual: %s", exp_cur_vcpu, cur_vcpu) + LOG.error("Mismatch in current vcpu in domstats output, " + "Expected: %s Actual: %s", exp_cur_vcpu, cur_vcpu) if exp_cur_max != max_vcpu: status = False - logging.error("Mismatch in maximum vcpu in domstats output, Expected:" - " %s Actual: %s", exp_cur_max, max_vcpu) + LOG.error("Mismatch in maximum vcpu in domstats output, Expected:" + " %s Actual: %s", exp_cur_max, max_vcpu) return status @@ -549,7 +547,7 @@ def check_vcpu_value(vm, exp_vcpu, vcpupin=None, option="", 
guest_agent=False): False if not """ final_result = True - logging.debug("Expect vcpu number: %s", exp_vcpu) + LOG.debug("Expect vcpu number: %s", exp_vcpu) # 1.1 Check virsh vcpucount output if not check_vcpucount(vm, exp_vcpu, option, guest_agent): @@ -611,7 +609,7 @@ def guest_numa_check(vm, exp_vcpu): :param exp_vcpu: dict of expected vcpus :return: True if check succeed, False otherwise """ - logging.debug("Check guest numa") + LOG.debug("Check guest numa") session = vm.wait_for_login() vm_cpu_info = get_cpu_info(session) session.close() @@ -653,18 +651,18 @@ def guest_numa_check(vm, exp_vcpu): # Check cpu if node_cpu_xml != node_cpu_guest: status = False - logging.error("Mismatch in cpus in node %s: xml %s guest %s", node, - node_cpu_xml, node_cpu_guest) + LOG.error("Mismatch in cpus in node %s: xml %s guest %s", node, + node_cpu_xml, node_cpu_guest) # Check memory if int(node_mem_xml) != node_mem_guest: status = False - logging.error("Mismatch in memory in node %s: xml %s guest %s", node, - node_mem_xml, node_mem_guest) + LOG.error("Mismatch in memory in node %s: xml %s guest %s", node, + node_mem_xml, node_mem_guest) # Check no. 
of nodes if exp_num_nodes != node_num_guest: status = False - logging.error("Mismatch in numa nodes expected nodes: %s guest: %s", exp_num_nodes, - node_num_guest) + LOG.error("Mismatch in numa nodes expected nodes: %s guest: %s", + exp_num_nodes, node_num_guest) return status @@ -738,7 +736,7 @@ def get_cpu_info(session=None): try: output_raw = session.cmd_output(cmd) output = re.sub('\n[\s]+', '', output_raw).splitlines() - logging.info("output is %s" % output) + LOG.info("output is %s" % output) finally: session.close() cpu_info = dict(map(lambda x: [i.strip() for i in x.split(":", 1)], output)) @@ -858,7 +856,7 @@ def get_cpu_vendor(cpu_info="", verbose=True): else: vendor = vendor[0] if verbose: - logging.debug("Detected CPU vendor as '%s'", vendor) + LOG.debug("Detected CPU vendor as '%s'", vendor) return vendor @@ -920,7 +918,7 @@ def _make_up_pattern(flags): cpu_model = cpu_type cpu_support_model.append(cpu_model) else: - logging.warn("Can not Get cpu flags from cpuinfo") + LOG.warn("Can not Get cpu flags from cpuinfo") return cpu_support_model @@ -951,9 +949,9 @@ def check_model_list(pattern): e_msg = ("CPU models reported by qemu -cpu ? not supported by avocado-vt. " "Please work with us to add support for it") - logging.error(e_msg) + LOG.error(e_msg) for line in qemu_cpu_help_text.splitlines(): - logging.error(line) + LOG.error(line) raise UnsupportedCPU(e_msg) @@ -979,10 +977,10 @@ def check_if_vm_vcpu_match(vcpu_desire, vm, connect_uri=None, session=None): if isinstance(vcpu_desire, str) and vcpu_desire.isdigit(): vcpu_desire = int(vcpu_desire) if vcpu_desire != vcpu_actual: - logging.debug("CPU quantity mismatched !!! guest said it got %s " - "but we assigned %s" % (vcpu_actual, vcpu_desire)) + LOG.debug("CPU quantity mismatched !!! 
guest said it got %s " + "but we assigned %s" % (vcpu_actual, vcpu_desire)) return False - logging.info("CPU quantity matched: %s" % vcpu_actual) + LOG.info("CPU quantity matched: %s" % vcpu_actual) return True @@ -1031,16 +1029,16 @@ def get_model_features(model_name): features.append(feature.get('name')) break except ET.ParseError as error: - logging.warn("Configuration file %s has wrong xml format" % conf) + LOG.warn("Configuration file %s has wrong xml format" % conf) raise except AttributeError as elem_attr: - logging.warn("No attribute %s in file %s" % (str(elem_attr), conf)) + LOG.warn("No attribute %s in file %s" % (str(elem_attr), conf)) raise except Exception: # Other exceptions like IOError when open/read configuration file, # capture here - logging.warn("Some other exceptions, like configuration file is not " - "found or not file: %s" % conf) + LOG.warn("Some other exceptions, like configuration file is not " + "found or not file: %s" % conf) raise return features @@ -1066,8 +1064,8 @@ def cpus_string_to_affinity_list(cpus_string, num_cpus): single_pattern, between_pattern) pattern = r"^((%s),)*(%s)$" % (sub_pattern, sub_pattern) if not re.match(pattern, cpus_string): - logging.debug("Cpus_string=%s is not a supported format for cpu_list." - % cpus_string) + LOG.debug("Cpus_string=%s is not a supported format for cpu_list." + % cpus_string) # Init a list for result. affinity = [] for i in range(int(num_cpus)): @@ -1175,8 +1173,7 @@ def hotplug_domain_vcpu(vm, count, by_virsh=True, hotplug=True): if result.exit_status != 0: raise exceptions.TestFail(result.stderr_text) else: - logging.debug("Command output:\n%s", - result.stdout_text.strip()) + LOG.debug("Command output:\n%s", result.stdout_text.strip()) return result @@ -1208,8 +1205,8 @@ def cpus_parser(cpulist): try: commas.append(int(cpulist)) except ValueError: - logging.error("The cpulist has to be an " - "integer. (%s)", cpulist) + LOG.error("The cpulist has to be an " + "integer. 
(%s)", cpulist) elif "-" in cpulist: tmp = re.split("-", cpulist) hyphens = list(range(int(tmp[0]), int(tmp[-1]) + 1)) @@ -1221,8 +1218,7 @@ def cpus_parser(cpulist): others.append(int(cpulist)) return others except ValueError: - logging.error("The cpulist has to be an " - "integer. (%s)", cpulist) + LOG.error("The cpulist has to be an integer. (%s)", cpulist) cpus_set = set(hyphens).union(set(commas)).difference(set(carets)) @@ -1322,7 +1318,7 @@ def get_cpu_info_from_virsh(params): try: path.find_command("virsh") except path.CmdNotFoundError: - logging.warning("Virsh executable not set or found on path") + LOG.warning("Virsh executable not set or found on path") return xml = """ @@ -1339,11 +1335,11 @@ def get_cpu_info_from_virsh(params): with open(xml_file, "w") as f: f.write(xml) try: - logging.info("Get cpu model and features from virsh") + LOG.info("Get cpu model and features from virsh") virsh.define(xml_file) virsh.start(name) except Exception as err: - logging.error(err) + LOG.error(err) return else: cpu_info = get_cpu_info_from_virsh_qemu_cli(name) diff --git a/virttest/data_dir.py b/virttest/data_dir.py index f2c053f343..f524d87446 100755 --- a/virttest/data_dir.py +++ b/virttest/data_dir.py @@ -30,6 +30,9 @@ BACKING_DATA_DIR = None +LOG = logging.getLogger('avocado.' 
+ __name__) + + class MissingDepsDirError(Exception): pass @@ -207,8 +210,8 @@ def get_tmp_dir(public=True): if distro.detect().name == 'Ubuntu': tmp_dir = "/var/lib/libvirt/images" if not utils_path.usable_rw_dir(tmp_dir): - logging.warning("Unable to write in '/var/lib/libvirt/images' " - "on Ubuntu, apparmor might complain...") + LOG.warning("Unable to write in '/var/lib/libvirt/images' " + "on Ubuntu, apparmor might complain...") tmp_dir = None tmp_dir = data_dir.get_tmp_dir(basedir=tmp_dir) if public: diff --git a/virttest/env_process.py b/virttest/env_process.py index 7e1e556a69..872abfb2a6 100644 --- a/virttest/env_process.py +++ b/virttest/env_process.py @@ -93,6 +93,8 @@ THREAD_ERROR = False +LOG = logging.getLogger('avocado.' + __name__) + def _get_qemu_version(qemu_cmd): """ @@ -123,7 +125,7 @@ def preprocess_image(test, params, image_name, vm_process_status=None): base_dir = params.get("images_base_dir", data_dir.get_data_dir()) if not storage.preprocess_image_backend(base_dir, params, image_name): - logging.error("Backend can't be prepared correctly.") + LOG.error("Backend can't be prepared correctly.") image_filename = storage.get_image_filename(params, base_dir) @@ -144,7 +146,7 @@ def preprocess_image(test, params, image_name, vm_process_status=None): # force create a new image. storage.file_remove(params, image_filename) image = qemu_storage.QemuImg(params, base_dir, image_name) - logging.info("Create image on %s." % image.storage_type) + LOG.info("Create image on %s." % image.storage_type) image.create(params) @@ -179,7 +181,7 @@ def preprocess_fs_source(test, params, fs_name, vm_process_status=None): if create_fs_source: if os.path.exists(fs_source): shutil.rmtree(fs_source, ignore_errors=True) - logging.info("Create filesystem source %s." % fs_source) + LOG.info("Create filesystem source %s." 
% fs_source) os.makedirs(fs_source) else: test.cancel('Unsupport the type of filesystem "%s"' % fs_type) @@ -373,7 +375,7 @@ def preprocess_vm(test, params, env, name): debug_msg += "There is no serial console in VM." if debug_msg: debug_msg += " Skip the kernel command line check." - logging.warn(debug_msg) + LOG.warn(debug_msg) return cmd_line = params.get("kernel_cmd_line_str", "Command line:") try: @@ -399,12 +401,11 @@ def preprocess_vm(test, params, env, name): err_msg += " serial output is %s" % kernel_cmd_line raise exceptions.TestError(err_msg) - logging.info("Kernel command line get from serial port is" - " as expect") + LOG.info("Kernel command line get from serial port is as expect") except Exception as err: - logging.warn("Did not get the kernel command line from serial " - "port output. Skip the kernel command line check." - "Error is %s" % err) + LOG.warn("Did not get the kernel command line from serial " + "port output. Skip the kernel command line check." + "Error is %s" % err) def check_image(test, params, image_name, vm_process_status=None): @@ -423,7 +424,7 @@ def check_image(test, params, image_name, vm_process_status=None): if vm_process_status == "running" and check_image_flag: if params.get("skip_image_check_during_running") == "yes": - logging.debug("Guest is still running, skip the image check.") + LOG.debug("Guest is still running, skip the image check.") check_image_flag = False else: image_info_output = image.info(force_share=True) @@ -434,14 +435,14 @@ def check_image(test, params, image_name, vm_process_status=None): if len(option) == 2: image_info[option[0].strip()] = option[1].strip() else: - logging.debug("Can not find matched image for selected guest " - "os, skip the image check.") + LOG.debug("Can not find matched image for selected guest " + "os, skip the image check.") check_image_flag = False if ("lazy refcounts" in image_info and image_info["lazy refcounts"] == "true"): - logging.debug("Should not check image while guest is 
alive" - " when the image is create with lazy refcounts." - " Skip the image check.") + LOG.debug("Should not check image while guest is alive" + " when the image is create with lazy refcounts." + " Skip the image check.") check_image_flag = False # Save the potential bad image when the test is not passed. @@ -454,7 +455,7 @@ def check_image(test, params, image_name, vm_process_status=None): image_name, hsh, image.image_format)) image.save_image(params, name) else: - logging.error("Not saving images, VM is not stopped.") + LOG.error("Not saving images, VM is not stopped.") if check_image_flag: try: @@ -468,7 +469,7 @@ def check_image(test, params, image_name, vm_process_status=None): params["img_check_failed"] = "yes" if (params.get("skip_cluster_leak_warn") == "yes" and "Leaked clusters" in six.text_type(e)): - logging.warn(six.text_type(e)) + LOG.warn(six.text_type(e)) else: raise e @@ -485,8 +486,8 @@ def postprocess_image(test, params, image_name, vm_process_status=None): or None for no vm exist. """ if vm_process_status == "running": - logging.warn("Skipped processing image '%s' since " - "the VM is running!" % image_name) + LOG.warn("Skipped processing image '%s' since " + "the VM is running!" % image_name) return restored, removed = (False, False) @@ -523,7 +524,7 @@ def postprocess_image(test, params, image_name, vm_process_status=None): removed = True if (not removed and params.get("remove_image", "yes") == "yes"): - logging.info("Remove image on %s." % image.storage_type) + LOG.info("Remove image on %s." % image.storage_type) if clone_master is None: image.remove() elif clone_master == "yes": @@ -545,8 +546,8 @@ def postprocess_fs_source(test, params, fs_name, vm_process_status=None): running, dead or None for no vm exist. """ if vm_process_status == "running": - logging.warn("Skipped processing filesystem '%s' since " - "the VM is running!" % fs_name) + LOG.warn("Skipped processing filesystem '%s' since " + "the VM is running!" 
% fs_name) return fs_type = params.get('fs_source_type', 'mount') @@ -557,11 +558,11 @@ def postprocess_fs_source(test, params, fs_name, vm_process_status=None): fs_source = os.path.join(base_dir, fs_source) if params.get("remove_fs_source") == 'yes': - logging.info("Remove filesystem source %s." % fs_source) + LOG.info("Remove filesystem source %s." % fs_source) shutil.rmtree(fs_source, ignore_errors=True) else: - logging.info("Skipped processing filesystem '%s' since " - "unsupported type '%s'." % (fs_name, fs_type)) + LOG.info("Skipped processing filesystem '%s' since " + "unsupported type '%s'." % (fs_name, fs_type)) def postprocess_vm(test, params, env, name): @@ -618,8 +619,8 @@ def postprocess_vm(test, params, env, name): try: vm.copy_files_from(dump_path, vm_extra_dumps) except: - logging.error("Could not copy the extra dump '%s' from the vm '%s'", - dump_path, vm.name) + LOG.error("Could not copy the extra dump '%s' from the vm '%s'", + dump_path, vm.name) if params.get("kill_vm") == "yes": kill_vm_timeout = float(params.get("kill_vm_timeout", 0)) @@ -661,7 +662,7 @@ def process_command(test, params, env, command, command_timeout, (test.bindir, command), shell=True) except a_process.CmdError as e: if command_noncritical: - logging.warn(e) + LOG.warn(e) else: raise @@ -744,7 +745,7 @@ def _process_images_serial(image_func, test, images, params, exit_event=None, image_params = params.object_params(image_name) image_func(test, image_params, image_name, vm_process_status) if exit_event and exit_event.is_set(): - logging.error("Received exit_event, stop processing of images.") + LOG.error("Received exit_event, stop processing of images.") break @@ -772,7 +773,7 @@ def _process_images_parallel(image_func, test, params, vm_process_status=None): thread.join() if exit_event.is_set(): # Failure in some thread - logging.error("Image processing failed:") + LOG.error("Image processing failed:") for thread in threads: if thread.exc_info: # Throw the first failure 
six.reraise(thread.exc_info[1], None, thread.exc_info[2]) @@ -941,7 +942,7 @@ def preprocess(test, params, env): try: cpu_family = cpu_utils.get_family() if hasattr(cpu_utils, 'get_family') else cpu_utils.get_cpu_arch() except Exception: - logging.warning("Could not get host cpu family") + LOG.warning("Could not get host cpu family") migration_setup = params.get("migration_setup", "no") == "yes" if cpu_family is not None and "power" in cpu_family: pvr_cmd = "grep revision /proc/cpuinfo | awk '{print $3}' | head -n 1" @@ -975,9 +976,9 @@ def preprocess(test, params, env): test_setup.switch_indep_threads_mode(state="N", params=params) test_setup.switch_smt(state="off", params=params) if pvr != remote_pvr: - logging.warning("Source and destinations system PVR " - "does not match\n PVR:\nSource: %s" - "\nDestination: %s", pvr, remote_pvr) + LOG.warning("Source and destinations system PVR " + "does not match\n PVR:\nSource: %s" + "\nDestination: %s", pvr, remote_pvr) # First, let's verify if this test does require root or not. If it # does and the test suite is running as a regular user, we shall just # throw a TestSkipError exception, which will skip the test. 
@@ -1164,8 +1165,8 @@ def preprocess(test, params, env): continue if vm.name not in requested_vms: if keep_unrequested_vms: - logging.debug("The vm %s is registered in the env and disregarded " - "in the current test", vm.name) + LOG.debug("The vm %s is registered in the env and disregarded " + "in the current test", vm.name) else: vm.destroy() del env[key] @@ -1191,16 +1192,16 @@ def preprocess(test, params, env): warning_msg = "KVM module not loaded" if params.get("enable_kvm", "yes") == "yes": test.cancel(warning_msg) - logging.warning(warning_msg) + LOG.warning(warning_msg) kvm_version = "Unknown" - logging.debug("KVM version: %s" % kvm_version) + LOG.debug("KVM version: %s" % kvm_version) version_info["kvm_version"] = str(kvm_version) # Checking required kernel, if not satisfied, cancel test if params.get("required_kernel"): required_kernel = params.get("required_kernel") - logging.info("Test requires kernel version: %s" % required_kernel) + LOG.info("Test requires kernel version: %s" % required_kernel) match = re.search(r'[0-9]+\.[0-9]+\.[0-9]+(\-[0-9]+)?', kvm_version) if match is None: test.cancel("Can not get host kernel version.") @@ -1223,16 +1224,16 @@ def preprocess(test, params, env): kvm_userspace_version = _get_qemu_version(qemu_path) qemu_dst_path = utils_misc.get_qemu_dst_binary(params) if qemu_dst_path and qemu_dst_path != qemu_path: - logging.debug("KVM userspace dst version(qemu): %s", - _get_qemu_version(qemu_dst_path)) + LOG.debug("KVM userspace dst version(qemu): %s", + _get_qemu_version(qemu_dst_path)) - logging.debug("KVM userspace version(qemu): %s", kvm_userspace_version) + LOG.debug("KVM userspace version(qemu): %s", kvm_userspace_version) version_info["qemu_version"] = str(kvm_userspace_version) # Checking required qemu, if not satisfied, cancel test if params.get("required_qemu"): required_qemu = params.get("required_qemu") - logging.info("Test requires qemu version: %s" % required_qemu) + LOG.info("Test requires qemu version: %s" % 
required_qemu) match = re.search(r'[0-9]+\.[0-9]+\.[0-9]+(\-[0-9]+)?', kvm_userspace_version) if match is None: @@ -1251,7 +1252,7 @@ def preprocess(test, params, env): except a_process.CmdError: libvirt_version = "Unknown" version_info["libvirt_version"] = str(libvirt_version) - logging.debug("KVM userspace version(libvirt): %s" % libvirt_version) + LOG.debug("KVM userspace version(libvirt): %s" % libvirt_version) # Write it as a keyval test.write_test_keyval(version_info) @@ -1301,11 +1302,11 @@ def preprocess(test, params, env): try: pol.setup() except test_setup.PolkitWriteLibvirtdConfigError as e: - logging.error(str(e)) + LOG.error(str(e)) except test_setup.PolkitRulesSetupError as e: - logging.error(str(e)) + LOG.error(str(e)) except Exception as e: - logging.error("Unexpected error: '%s'" % str(e)) + LOG.error("Unexpected error: '%s'" % str(e)) if libvirtd_inst is None: libvirtd_inst = utils_libvirtd.Libvirtd("virtqemud") libvirtd_inst.restart() @@ -1321,10 +1322,10 @@ def preprocess(test, params, env): image_filename = storage.get_image_filename(params, base_dir) sysprep_options = params.get("sysprep_options", "--operations machine-id") # backup the original master image before customization - logging.info("Backup the master image before sysprep") + LOG.info("Backup the master image before sysprep") image_obj = qemu_storage.QemuImg(params, base_dir, image_filename) image_obj.backup_image(params, base_dir, "backup", True, True) - logging.info("Syspreping the image as requested before cloning.") + LOG.info("Syspreping the image as requested before cloning.") try: utils_libguestfs.virt_sysprep_cmd( image_filename, options=sysprep_options, ignore_status=False) @@ -1360,8 +1361,8 @@ def preprocess(test, params, env): if cpu_info: break except Exception as err: - logging.error("Failed to get cpu info with policy %s: %s" - % (policy, err)) + LOG.error("Failed to get cpu info with policy %s: %s" + % (policy, err)) continue else: raise exceptions.TestCancel("Failed 
to get cpu info with " @@ -1421,7 +1422,7 @@ def thread_func(obj): try: obj.run_avocado() except Exception as info: - logging.error(info) + LOG.error(info) THREAD_ERROR = True nest_params = params.copy() nested_params = eval(nest_params.get("nested_params", "{}")) @@ -1453,7 +1454,7 @@ def thread_func(obj): nest_params["vt_extra_params"] += (" nested_guest_max_level=\"L%s\"" % int(max_level.lstrip("L"))) nest_params["vt_extra_params"] += " run_nested_guest_test=\"yes\"" - logging.debug("Test is running in Guest level: %s", current_level) + LOG.debug("Test is running in Guest level: %s", current_level) for vm in nest_vms: # params with nested level specific configuration new_params = nest_params.object_params(current_level) @@ -1505,7 +1506,7 @@ def postprocess(test, params, env): postprocess_vm_on_hook(test, params, env) # pylint: disable=E1102 except Exception as details: err += "\nPostprocessing living vm hook: %s" % str(details).replace('\\n', '\n ') - logging.error(details) + LOG.error(details) migration_setup = params.get("migration_setup", "no") == "yes" if params.get("verify_guest_dmesg", "yes") == "yes" and params.get("start_vm", "no") == "yes": @@ -1526,7 +1527,7 @@ def postprocess(test, params, env): base_dir = data_dir.get_data_dir() # if sysprep was requested in preprocess then restore back the original image if params.get("sysprep_required", "no") == "yes": - logging.info("Restoring the original master image.") + LOG.info("Restoring the original master image.") image_filename = storage.get_image_filename(params, base_dir) image_obj = qemu_storage.QemuImg(params, base_dir, image_filename) image_obj.backup_image(params, base_dir, "restore", True) @@ -1537,7 +1538,7 @@ def postprocess(test, params, env): living_vms = [vm for vm in env.get_all_vms() if (vm.is_alive() and not vm.is_paused())] for vm in living_vms: sosreport_path = vm.sosreport() - logging.info("Sosreport for guest: %s", sosreport_path) + LOG.info("Sosreport for guest: %s", sosreport_path) # 
Collect code coverage report for qemu if enabled if params.get("gcov_qemu", "no") == "yes": @@ -1557,15 +1558,15 @@ def postprocess(test, params, env): archive.compress("gcov_qemu.tar.gz", gcov_qemu_dir) shutil.rmtree(gcov_qemu_dir, ignore_errors=True) else: - logging.warning("Check either qemu build directory availablilty" - " or install gcovr package for qemu coverage report") + LOG.warning("Check either qemu build directory availablilty" + " or install gcovr package for qemu coverage report") # Postprocess all VMs and images try: process(test, params, env, postprocess_image, postprocess_vm, True, postprocess_fs_source) except Exception as details: err += "\nPostprocess: %s" % str(details).replace('\\n', '\n ') - logging.error(details) + LOG.error(details) # Terminate the screendump thread global _screendump_thread, _screendump_thread_termination_event @@ -1594,11 +1595,11 @@ def postprocess(test, params, env): else: video_file = "%s.ogg" % screendump_dir video_file = os.path.join(test.debugdir, video_file) - logging.debug("Encoding video file %s", video_file) + LOG.debug("Encoding video file %s", video_file) video.encode(screendump_dir, video_file) except Exception as detail: - logging.info( + LOG.info( "Video creation failed for %s: %s", screendump_dir, detail) # Warn about corrupt PPM files @@ -1611,7 +1612,7 @@ def postprocess(test, params, env): ppm_file_rex = "*_iter%s.ppm" % test.iteration for f in glob.glob(os.path.join(screendump_temp_dir, ppm_file_rex)): if not ppm_utils.image_verify_ppm_file(f): - logging.warn("Found corrupt PPM file: %s", f) + LOG.warn("Found corrupt PPM file: %s", f) # Should we convert PPM files to PNG format? 
if params.get("convert_ppm_files_to_png", "no") == "yes": @@ -1664,7 +1665,7 @@ def postprocess(test, params, env): timeout=vm.LOGIN_WAIT_TIMEOUT) session.close() except (remote.LoginError, virt_vm.VMError, IndexError) as e: - logging.warn(e) + LOG.warn(e) vm.destroy(gracefully=False) # Kill VMs with deleted disks @@ -1675,7 +1676,7 @@ def postprocess(test, params, env): if params.object_params(image).get('remove_image') == 'yes': destroy = True if destroy and not vm.is_dead(): - logging.debug( + LOG.debug( 'Image of VM %s was removed, destroying it.', vm.name) vm.destroy() @@ -1689,7 +1690,7 @@ def postprocess(test, params, env): # collect sosreport of host/remote host during postprocess if enabled if params.get("enable_host_sosreport", "no") == "yes": sosreport_path = utils_misc.get_sosreport(sosreport_name="host") - logging.info("Sosreport for host: %s", sosreport_path) + LOG.info("Sosreport for host: %s", sosreport_path) if params.get("enable_remote_host_sosreport", "no") == "yes": remote_params = {'server_ip': params['remote_ip'], 'server_pwd': params['remote_pwd']} remote_params['server_user'] = params['remote_user'] @@ -1699,7 +1700,7 @@ def postprocess(test, params, env): remote_pwd=params['remote_pwd'], remote_user=params['remote_user'], sosreport_name="host_remote") - logging.info("Sosreport for remote host: %s", sosreport_path) + LOG.info("Sosreport for remote host: %s", sosreport_path) living_vms = [vm for vm in env.get_all_vms() if vm.is_alive()] # Close all monitor socket connections of living vm. 
if not params.get_boolean("keep_env_vms", False): @@ -1720,7 +1721,7 @@ def postprocess(test, params, env): try: cpu_family = cpu_utils.get_family() if hasattr(cpu_utils, 'get_family') else cpu_utils.get_cpu_arch() except Exception: - logging.warning("Could not get host cpu family") + LOG.warning("Could not get host cpu family") if cpu_family is not None and "power" in cpu_family: pvr_cmd = "grep revision /proc/cpuinfo | awk '{print $3}' | head -n 1" pvr = float(a_process.system_output(pvr_cmd, shell=True).strip()) @@ -1757,7 +1758,7 @@ def postprocess(test, params, env): libvirtd_inst.restart() except Exception as details: err += "\nHP cleanup: %s" % str(details).replace('\\n', '\n ') - logging.error(details) + LOG.error(details) else: _post_hugepages_surp = h.ext_hugepages_surp @@ -1767,7 +1768,7 @@ def postprocess(test, params, env): thp.cleanup() except Exception as details: err += "\nTHP cleanup: %s" % str(details).replace('\\n', '\n ') - logging.error(details) + LOG.error(details) for kvm_module in KVM_MODULE_HANDLERS: kvm_module.restore() @@ -1778,7 +1779,7 @@ def postprocess(test, params, env): ksm.cleanup(env) except Exception as details: err += "\nKSM cleanup: %s" % str(details).replace('\\n', '\n ') - logging.error(details) + LOG.error(details) if params.get("setup_egd") == "yes" and params.get("kill_vm") == "yes": try: @@ -1786,7 +1787,7 @@ def postprocess(test, params, env): egd.cleanup() except Exception as details: err += "\negd.pl cleanup: %s" % str(details).replace('\\n', '\n ') - logging.error(details) + LOG.error(details) if vm_type == "libvirt": if params.get("setup_libvirt_polkit") == "yes": @@ -1798,11 +1799,11 @@ def postprocess(test, params, env): libvirtd_inst.restart() except test_setup.PolkitConfigCleanupError as e: err += "\nPolkit cleanup: %s" % str(e).replace('\\n', '\n ') - logging.error(e) + LOG.error(e) except Exception as details: err += "\nPolkit cleanup: %s" % str(details ).replace('\\n', '\n ') - logging.error("Unexpected error: 
%s" % details) + LOG.error("Unexpected error: %s" % details) if params.get("enable_libvirtd_debug_log", "yes") == "yes": libvirtd_debug_log = test_setup.LibvirtdDebugLog(test) libvirtd_debug_log.disable() @@ -1816,7 +1817,7 @@ def postprocess(test, params, env): except Exception as details: err += "\nPostprocess command: %s" % str(details).replace('\n', '\n ') - logging.error(details) + LOG.error(details) if params.get("storage_type") == "iscsi": try: @@ -1824,7 +1825,7 @@ def postprocess(test, params, env): iscsidev.cleanup() except Exception as details: err += "\niscsi cleanup: %s" % str(details).replace('\\n', '\n ') - logging.error(details) + LOG.error(details) if params.get("storage_type") == "lvm": try: @@ -1832,7 +1833,7 @@ def postprocess(test, params, env): lvmdev.cleanup() except Exception as details: err += "\nLVM cleanup: %s" % str(details).replace('\\n', '\n ') - logging.error(details) + LOG.error(details) env.unregister_lvmdev("lvm_%s" % params["main_vm"]) if params.get("storage_type") == "nfs": @@ -1882,7 +1883,7 @@ def postprocess(test, params, env): brcfg.cleanup() except Exception as details: err += "\nPB cleanup: %s" % str(details).replace('\\n', '\n ') - logging.error(details) + LOG.error(details) if params.get("verify_host_dmesg", "yes") == "yes": dmesg_log_file = params.get("host_dmesg_logfile", "host_dmesg.log") @@ -1905,7 +1906,7 @@ def postprocess(test, params, env): postprocess_vm_off_hook(test, params, env) # pylint: disable=E1102 except Exception as details: err += "\nPostprocessing dead vm hook: %s" % str(details).replace('\\n', '\n ') - logging.error(details) + LOG.error(details) if err: raise RuntimeError("Failures occurred while postprocess:\n%s" % err) @@ -1959,16 +1960,16 @@ def _take_screendumps(test, params, env): try: vm.screendump(filename=temp_filename, debug=False) except qemu_monitor.MonitorError as e: - logging.warn(e) + LOG.warn(e) continue except AttributeError as e: - logging.warn(e) + LOG.warn(e) continue if not 
os.path.exists(temp_filename): - logging.warn("VM '%s' failed to produce a screendump", vm.name) + LOG.warn("VM '%s' failed to produce a screendump", vm.name) continue if not ppm_utils.image_verify_ppm_file(temp_filename): - logging.warn("VM '%s' produced an invalid screendump", vm.name) + LOG.warn("VM '%s' produced an invalid screendump", vm.name) os.unlink(temp_filename) continue screendump_dir = "screendumps_%s_%s_iter%s" % (vm.name, vm_pid, @@ -1994,12 +1995,12 @@ def _take_screendumps(test, params, env): raise virt_vm.VMScreenInactiveError(vm, time_inactive) except virt_vm.VMScreenInactiveError: - logging.error(msg) + LOG.error(msg) # Let's reset the counter inactivity[vm.instance] = time.time() test.background_errors.put(sys.exc_info()) elif inactivity_watcher == 'log': - logging.debug(msg) + LOG.debug(msg) else: inactivity[vm.instance] = time.time() cache[image_hash] = screendump_filename @@ -2011,8 +2012,8 @@ def _take_screendumps(test, params, env): image.save(screendump_filename, format="JPEG", quality=quality) except (IOError, OSError) as error_detail: - logging.warning("VM '%s' failed to produce a " - "screendump: %s", vm.name, error_detail) + LOG.warning("VM '%s' failed to produce a " + "screendump: %s", vm.name, error_detail) # Decrement the counter as we in fact failed to # produce a converted screendump counter[vm.instance] -= 1 @@ -2053,10 +2054,10 @@ def store_vm_info(vm, log_filename, info_cmd='registers', try: output = vm.catch_monitor.info(info_cmd, debug=False) except qemu_monitor.MonitorError as err: - logging.warn(err) + LOG.warn(err) return False except AttributeError as err: - logging.warn(err) + LOG.warn(err) return False elif vmtype == "libvirt": try: @@ -2065,7 +2066,7 @@ def store_vm_info(vm, log_filename, info_cmd='registers', "--hmp", debug=False) output = result.stdout except Exception as details: - logging.warn(details) + LOG.warn(details) return False log_filename = "%s_%s" % (log_filename, timestamp) @@ -2090,7 +2091,7 @@ def 
report_result(status, cmd, results): results[vm_instance]) if msg != "%s." % status: - logging.debug(msg) + LOG.debug(msg) global _vm_info_thread_termination_event delay = float(params.get("vm_info_delay", 5)) @@ -2108,7 +2109,7 @@ def report_result(status, cmd, results): if not vm.is_alive(): if cmd_details[cmd]['vm_info_error_count'][vm.instance] < 1: - logging.warning( + LOG.warning( "%s is not alive. Can't query the %s status", cmd, vm.name) cmd_details[cmd]['vm_info_error_count'][vm.instance] += 1 continue @@ -2128,9 +2129,9 @@ def report_result(status, cmd, results): vmtype = params.get("vm_type") stored_log = store_vm_info(vm, vr_filename, cmd, vmtype=vmtype) if cmd_details[cmd]['vm_info_error_count'][vm.instance] >= 1: - logging.debug("%s alive now. Used to failed to get register" - " info from guest %s" - " times", vm.name, cmd_details[cmd]['vm_info_error_count'][vm.instance]) + LOG.debug("%s alive now. Used to failed to get register" + " info from guest %s" + " times", vm.name, cmd_details[cmd]['vm_info_error_count'][vm.instance]) cmd_details[cmd]['vm_info_error_count'][vm.instance] = 0 if stored_log: cmd_details[cmd]['counter'][vm.instance] += 1 diff --git a/virttest/gluster.py b/virttest/gluster.py index 9146f61059..a7250b353c 100644 --- a/virttest/gluster.py +++ b/virttest/gluster.py @@ -23,6 +23,8 @@ from virttest import utils_net from virttest import error_context +LOG = logging.getLogger('avocado.' 
+ __name__) + class GlusterError(Exception): pass @@ -182,7 +184,7 @@ def gluster_brick_create(brick_path, force=False, session=None): eval(cmd2_str) return True except OSError as details: - logging.error("Not able to create brick folder %s", details) + LOG.error("Not able to create brick folder %s", details) def gluster_brick_delete(brick_path, session=None): @@ -205,7 +207,7 @@ def gluster_brick_delete(brick_path, session=None): eval(cmd2_str) return True except OSError as details: - logging.error("Not able to delete brick folder %s", details) + LOG.error("Not able to delete brick folder %s", details) @error_context.context_aware @@ -298,7 +300,7 @@ def create_gluster_vol(params): if_up = utils_net.get_net_if(state="UP") for i in if_up: ipv4_value = utils_net.get_net_if_addrs(i)["ipv4"] - logging.debug("ipv4_value is %s", ipv4_value) + LOG.debug("ipv4_value is %s", ipv4_value) if ipv4_value != []: ip_addr = ipv4_value[0] break @@ -394,8 +396,8 @@ def file_exists(params, filename_path): if os.path.exists(mount_filename_path): ret = True except Exception as e: - logging.error("Failed to mount gluster volume %s to" - " mount dir %s: %s" % (sg_uri, tmpdir_path, e)) + LOG.error("Failed to mount gluster volume %s to" + " mount dir %s: %s" % (sg_uri, tmpdir_path, e)) finally: if glusterfs_umount(sg_uri, tmpdir_path): try: @@ -403,8 +405,8 @@ def file_exists(params, filename_path): except OSError: pass else: - logging.warning("Unable to unmount tmp directory %s with glusterfs" - " mount.", tmpdir_path) + LOG.warning("Unable to unmount tmp directory %s with glusterfs" + " mount.", tmpdir_path) return ret @@ -462,9 +464,9 @@ def add_rpc_insecure(filepath): cmd = "cat %s" % filepath content = process.run(cmd).stdout_text match = re.findall(r'rpc-auth-allow-insecure on', content) - logging.info("match is %s", match) + LOG.info("match is %s", match) if not match: - logging.info("not match") + LOG.info("not match") cmd = "sed -i '/end-volume/i \ \ \ \ option 
rpc-auth-allow-insecure on' %s" % filepath process.system(cmd, shell=True) process.system("service glusterd restart; sleep 2", shell=True) @@ -538,13 +540,13 @@ def setup_or_cleanup_gluster(is_setup, vol_name, brick_path="", pool_name="", ip_addr = utils_net.get_host_ip_address() add_rpc_insecure(file_path) glusterd_start() - logging.debug("finish start gluster") - logging.debug("The contents of %s: \n%s", file_path, open(file_path).read()) + LOG.debug("finish start gluster") + LOG.debug("The contents of %s: \n%s", file_path, open(file_path).read()) gluster_vol_create(vol_name, ip_addr, brick_path, True, session) gluster_allow_insecure(vol_name, session) gluster_nfs_disable(vol_name, session) - logging.debug("finish vol create in gluster") + LOG.debug("finish vol create in gluster") if session: session.close() return ip_addr diff --git a/virttest/guest_agent.py b/virttest/guest_agent.py index a48d013ab4..e3b6f110f4 100644 --- a/virttest/guest_agent.py +++ b/virttest/guest_agent.py @@ -23,6 +23,8 @@ import six +LOG = logging.getLogger('avocado.' + __name__) + class VAgentError(MonitorError): pass @@ -188,7 +190,7 @@ def __init__(self, vm, name, serial_type, gagent_params, except VAgentError as e: self._close_sock() if suppress_exceptions: - logging.warn(e) + LOG.warn(e) else: raise @@ -326,7 +328,7 @@ def _get_supported_cmds(self): if not self._supported_cmds: # If initiation fails, set supported list to a None-only list. self._supported_cmds = [None] - logging.warn("Could not get supported guest agent cmds list") + LOG.warn("Could not get supported guest agent cmds list") def check_has_command(self, cmd): """ @@ -358,8 +360,8 @@ def _log_command(self, cmd, debug=True, extra_str=""): :param extra_str: Extra string would be printed in log. 
""" if self.debug_log or debug: - logging.debug("(vagent %s) Sending command '%s' %s", - self.name, cmd, extra_str) + LOG.debug("(vagent %s) Sending command '%s' %s", + self.name, cmd, extra_str) def _log_response(self, cmd, resp, debug=True): """ @@ -370,8 +372,7 @@ def _log_response(self, cmd, resp, debug=True): :param debug: Whether to print the commands. """ def _log_output(o, indent=0): - logging.debug("(vagent %s) %s%s", - self.name, " " * indent, o) + LOG.debug("(vagent %s) %s%s", self.name, " " * indent, o) def _dump_list(li, indent=0): for l in li: @@ -394,8 +395,8 @@ def _dump_dict(di, indent=0): _log_output(o, indent) if self.debug_log or debug: - logging.debug("(vagent %s) Response to '%s' " - "(re-formatted)", self.name, cmd) + LOG.debug("(vagent %s) Response to '%s' " + "(re-formatted)", self.name, cmd) if isinstance(resp, dict): _dump_dict(resp) elif isinstance(resp, list): diff --git a/virttest/http_server.py b/virttest/http_server.py index 739b4bff25..c1d0c0c618 100644 --- a/virttest/http_server.py +++ b/virttest/http_server.py @@ -15,6 +15,9 @@ from avocado.utils.astring import to_text +LOG = logging.getLogger('avocado.' + __name__) + + class HTTPRequestHandler(SimpleHTTPRequestHandler): def do_GET(self): @@ -120,8 +123,8 @@ def address_string(self): return self.client_address[0] def log_message(self, fmt, *args): - logging.debug("builtin http server handling request from %s: %s" % - (self.address_string(), fmt % args)) + LOG.debug("builtin http server handling request from %s: %s" % + (self.address_string(), fmt % args)) def http_server(port=8000, cwd=None, terminate_callable=None): diff --git a/virttest/ip_sniffing.py b/virttest/ip_sniffing.py index a006d94958..5c711f21fd 100644 --- a/virttest/ip_sniffing.py +++ b/virttest/ip_sniffing.py @@ -21,6 +21,8 @@ from virttest.utils_misc import log_line from virttest.utils_version import VersionInterval +LOG = logging.getLogger('avocado.' 
+ __name__) + class AddrCache(object): @@ -55,8 +57,8 @@ def __setitem__(self, hwaddr, ipaddr): if self._data.get(hwaddr) == ipaddr: return self._data[hwaddr] = ipaddr - logging.debug("Updated HWADDR (%s)<->(%s) IP pair " - "into address cache", hwaddr, ipaddr) + LOG.debug("Updated HWADDR (%s)<->(%s) IP pair " + "into address cache", hwaddr, ipaddr) def __getitem__(self, hwaddr): hwaddr = self._format_hwaddr(hwaddr) @@ -71,7 +73,7 @@ def __delitem__(self, hwaddr): if hwaddr not in self._data: return del self._data[hwaddr] - logging.debug("Dropped the address cache of HWADDR (%s)", hwaddr) + LOG.debug("Dropped the address cache of HWADDR (%s)", hwaddr) def get(self, hwaddr): """ @@ -112,7 +114,7 @@ def clear(self): """Clear all the address caches.""" with self._lock: self._data.clear() - logging.debug("Clean out all the address caches") + LOG.debug("Clean out all the address caches") class Sniffer(object): @@ -174,7 +176,7 @@ def _output_logger_handler(self, line): try: log_line(self._logfile, line) except Exception as e: - logging.warn("Can't log ip sniffer output: '%s'", e) + LOG.warn("Can't log ip sniffer output: '%s'", e) if self._output_handler(line): return # We can check whether the process is terminated unexpectedly @@ -182,14 +184,14 @@ def _output_logger_handler(self, line): match = self._re_sniffer_finished.match(line) if match: if match.group(1) != "0": - logging.error("IP sniffer (%s) terminated unexpectedly! " - "please check the log to get the details " - "(status: %s)", self.command, match.group(1)) + LOG.error("IP sniffer (%s) terminated unexpectedly! 
" + "please check the log to get the details " + "(status: %s)", self.command, match.group(1)) def _start_remote(self): address, port, username, password, prompt = self._remote_opts cmd = "%s %s" % (self.command, self.options) - logging.debug("Run '%s' on host '%s'", cmd, address) + LOG.debug("Run '%s' on host '%s'", cmd, address) login_cmd = ("ssh -o UserKnownHostsFile=/dev/null " "-o StrictHostKeyChecking=no " "-o PreferredAuthentications=password -p %s %s@%s" % @@ -302,7 +304,7 @@ def is_supported(cls, session=None): return False version = cls._get_version(session) if not version: - logging.warning("Couldn't get the version of '%s'", cls.command) + LOG.warning("Couldn't get the version of '%s'", cls.command) return False return version in cls.supported_versions @@ -342,10 +344,10 @@ def _output_handler(self, line): if re.match(r"[0-9a-fA-F]{1,4}:\S+", packet[0]): # TODO: support DHCPv6 if not self.__dict__.setdefault("_ip6_warned", False): - logging.warn("IPv6 address sniffing is not supported yet by " - "using TShark, please fallback to use other " - "sniffers by uninstalling TShark when testing " - "with IPv6") + LOG.warn("IPv6 address sniffing is not supported yet by " + "using TShark, please fallback to use other " + "sniffers by uninstalling TShark when testing " + "with IPv6") self._ip6_warned = True return True diff --git a/virttest/iscsi.py b/virttest/iscsi.py index 7b2e15b1be..524f1cff72 100644 --- a/virttest/iscsi.py +++ b/virttest/iscsi.py @@ -24,6 +24,7 @@ from virttest import utils_package from virttest.staging import service +LOG = logging.getLogger('avocado.' 
+ __name__) ISCSI_CONFIG_FILE = "/etc/iscsi/initiatorname.iscsi" @@ -135,8 +136,8 @@ def iscsi_node_del(target_name=None): process.system(cmd, ignore_status=True) break if not cmd: - logging.error("The target '%s' for delete is not in target node" - " record", target_name) + LOG.error("The target '%s' for delete is not in target node" + " record", target_name) else: for node_tup in node_list: cmd = "iscsiadm -m node -o delete -T %s " % node_tup[1] @@ -164,7 +165,7 @@ def iscsi_logout(target_name=None): # This failure makes no sense when target name is not specified stderr = detail.result.stderr_text if not target_name and 'No matching sessions' in stderr: - logging.info("%s: %s", detail, stderr) + LOG.info("%s: %s", detail, stderr) else: raise @@ -186,7 +187,7 @@ def iscsi_discover(portal_ip): session = "" if "Invalid" in output: - logging.debug(output) + LOG.debug(output) else: session = output return session @@ -276,7 +277,7 @@ def set_initiatorName(self, id, name): back up and set up the InitiatorName """ if os.path.isfile("%s" % ISCSI_CONFIG_FILE): - logging.debug("Try to update iscsi initiatorname") + LOG.debug("Try to update iscsi initiatorname") # Don't override the backup file if not os.path.isfile("%s-%s" % (ISCSI_CONFIG_FILE, id)): cmd = "mv %s %s-%s" % (ISCSI_CONFIG_FILE, ISCSI_CONFIG_FILE, id) @@ -327,10 +328,10 @@ def get_device_name(self): try: device_name = "/dev/%s" % device_name[0] except IndexError: - logging.error( + LOG.error( "Can not find target '%s' after login.", self.target) else: - logging.error("Session is not logged in yet.") + LOG.error("Session is not logged in yet.") return device_name def set_chap_auth_initiator(self): @@ -346,7 +347,7 @@ def set_chap_auth_initiator(self): try: process.system(cmd) except process.CmdError: - logging.error("Fail to set CHAP authentication for initiator") + LOG.error("Fail to set CHAP authentication for initiator") def logout(self): """ @@ -373,7 +374,7 @@ def cleanup(self, confirmed=False): if 
path.find_command("targetcli"): cmd = "targetcli clearconfig confirm=true" if process.system(cmd, shell=True) != 0: - logging.error("targetcli configuration unable to clear") + LOG.error("targetcli configuration unable to clear") class IscsiTGT(_IscsiComm): @@ -428,11 +429,11 @@ def add_chap_account(self): cmd += " --password %s" % self.chap_passwd process.system(cmd) except process.CmdError as err: - logging.error("Fail to add account: %s", err) + LOG.error("Fail to add account: %s", err) # Check the new add account exist if self.chap_user not in self.get_chap_accounts(): - logging.error("Can't find account %s" % self.chap_user) + LOG.error("Can't find account %s" % self.chap_user) def delete_chap_account(self): """ @@ -466,8 +467,8 @@ def set_chap_auth_target(self): if self.chap_user not in self.get_chap_accounts(): self.add_chap_account() if self.chap_user in self.get_target_account_info(): - logging.debug("Target %s already has account %s", self.target, - self.chap_user) + LOG.debug("Target %s already has account %s", self.target, + self.chap_user) else: cmd = "tgtadm --lld iscsi --op bind --mode account" cmd += " --tid %s --user %s" % (self.emulated_id, self.chap_user) @@ -493,7 +494,7 @@ def export_target(self): restart_tgtd() output = process.run(cmd).stdout_text if not re.findall("%s$" % self.target, output, re.M): - logging.debug("Need to export target in host") + LOG.debug("Need to export target in host") # Set selinux to permissive mode to make sure iscsi target # export successfully @@ -528,7 +529,7 @@ def export_target(self): # Create a LUN with emulated image if re.findall(self.emulated_image, output, re.M): # Exist already - logging.debug("Exported image already exists.") + LOG.debug("Exported image already exists.") self.export_flag = True else: tgt_str = re.search(r'.*(Target\s+\d+:\s+%s\s*.*)$' % self.target, @@ -599,7 +600,7 @@ def get_target_id(self): try: target = re.findall("iqn[\.]\S+:\S+", line)[0] except IndexError: - logging.info("No found 
target in %s", line) + LOG.info("No found target in %s", line) continue else: continue @@ -700,7 +701,7 @@ def export_target(self): cmd = "targetcli ls /iscsi 1" output = process.run(cmd).stdout_text if not re.findall("%s$" % self.target, output, re.M): - logging.debug("Need to export target in host") + LOG.debug("Need to export target in host") # Set selinux to permissive mode to make sure # iscsi target export successfully @@ -787,7 +788,7 @@ def export_target(self): self.export_flag = True else: - logging.info("Target %s has already existed!" % self.target) + LOG.info("Target %s has already existed!" % self.target) if self.chap_flag: # Set CHAP authentication on the exported target @@ -808,7 +809,7 @@ def export_target(self): "generate_node_acls=1", "cache_dynamic_acls=1")) output = process.run(auth_cmd + attr_cmd).stdout_text - logging.info("Define access rights: %s" % output) + LOG.info("Define access rights: %s" % output) # Discovery the target self.portal_visible() @@ -869,8 +870,8 @@ def create_iSCSI(params, root_dir=data_dir.get_tmp_dir()): # Install linux iscsi target software targetcli iscsi_package = ["targetcli"] if not utils_package.package_install(iscsi_package): - logging.error("Failed to install targetcli trying with scsi-" - "target-utils or tgt package") + LOG.error("Failed to install targetcli trying with scsi-" + "target-utils or tgt package") # try with scsi target utils if targetcli is not available if ubuntu: iscsi_package = ["tgt"] diff --git a/virttest/kernel_interface.py b/virttest/kernel_interface.py index f3ad5a0c89..049a6cc8a2 100644 --- a/virttest/kernel_interface.py +++ b/virttest/kernel_interface.py @@ -5,6 +5,9 @@ from avocado.utils import process +LOG = logging.getLogger('avocado.' 
+ __name__) + + class FS(object): """ Base class for proc/sys FS set and get @@ -56,8 +59,8 @@ def fs_value(self, value): cmd = "echo %s > %s" % (value, self.fs) status, output = self.func(cmd) if status != 0: - logging.error("Failed to set %s to %s, error: %s", self.fs, - value, output.strip()) + LOG.error("Failed to set %s to %s, error: %s", self.fs, + value, output.strip()) return False return True diff --git a/virttest/libvirt_cgroup.py b/virttest/libvirt_cgroup.py index d76d7bbe63..c08f552dfc 100644 --- a/virttest/libvirt_cgroup.py +++ b/virttest/libvirt_cgroup.py @@ -53,6 +53,8 @@ "iothread_period": "/cpu.max", "iothread_quota": "/cpu.max"} +LOG = logging.getLogger('avocado.' + __name__) + #cgroup related functions class CgroupTest(object): @@ -101,7 +103,7 @@ def get_cgroup_path(self, controller=None): cgroup_path = utils_cgroup.resolve_task_cgroup_path( int(self.__vm_pid), controller) if not os.path.exists(cgroup_path): - logging.error("cgroup path '%s' doesn't exist" % cgroup_path) + LOG.error("cgroup path '%s' doesn't exist" % cgroup_path) return None return cgroup_path @@ -119,9 +121,9 @@ def __get_cpu_subdirs(self, controller_path=None, dir_keyword=None): if dir_keyword in filename: dir_names.append(filename) if not dir_names and "iothread" in dir_keyword: - logging.debug("No sub dirs found with keyword: '%s'. " - "Pls check if you've executed virsh cmd " - "'iothreadadd'.", dir_keyword) + LOG.debug("No sub dirs found with keyword: '%s'. 
" + "Pls check if you've executed virsh cmd " + "'iothreadadd'.", dir_keyword) return None return sorted(dir_names) @@ -176,12 +178,12 @@ def __get_cg_file_path(cg_key, cg_path, cg_file_name): cgroup_path = self.get_cgroup_path("memory") cmd = "getconf PAGE_SIZE" page_size = process.run(cmd, ignore_status=True, shell=True).stdout_text.strip() - logging.debug("page_size is %d" % int(page_size)) + LOG.debug("page_size is %d" % int(page_size)) if int(page_size) == 65536: max_mem_value = "9223372036854710272" else: max_mem_value = "9223372036854771712" - logging.debug("max_mem_value is %s" % max_mem_value) + LOG.debug("max_mem_value is %s" % max_mem_value) for cg_key, cg_file_name in list(CGROUP_V1_MEM_FILE_MAPPING.items()): with open(os.path.join(cgroup_path, cg_file_name), 'r') as cg_file: cg_file_value = cg_file.read().strip() @@ -213,7 +215,7 @@ def __get_cg_file_path(cg_key, cg_path, cg_file_name): continue standardized_cgroup_info[cg_key] = cg_file_value else: - logging.error("You've provided a wrong virsh cmd: %s", virsh_cmd) + LOG.error("You've provided a wrong virsh cmd: %s", virsh_cmd) return standardized_cgroup_info def __get_standardized_cgroup2_info(self, virsh_cmd=None): @@ -275,7 +277,7 @@ def __get_standardized_cgroup2_info(self, virsh_cmd=None): list_index = 1 standardized_cgroup_info[cg_key] = cg_file_values[list_index] else: - logging.error("You've provided a wrong virsh cmd: %s", virsh_cmd) + LOG.error("You've provided a wrong virsh cmd: %s", virsh_cmd) return standardized_cgroup_info def get_cgroup_file_mapping(self, virsh_cmd): @@ -331,7 +333,7 @@ def get_virsh_output_dict(self, vm_name=None, virsh_cmd=None): elif virsh_cmd == "schedinfo": func = virsh.schedinfo else: - logging.error("There is no virsh cmd '%s'", virsh_cmd) + LOG.error("There is no virsh cmd '%s'", virsh_cmd) return None result = func(vm_name, ignore_status=True) return self.convert_virsh_output_to_dict(result) @@ -367,7 +369,7 @@ def __get_dev_major_minor(self, 
dev_path="/dev/sda"): :param dev_path: The path to the device """ if not os.path.exists(dev_path): - logging.debug("device '%s' not existing", dev_path) + LOG.debug("device '%s' not existing", dev_path) return None dev = os.stat(dev_path) return "%s:%s" % (os.major(dev.st_rdev), os.minor(dev.st_rdev)) @@ -409,9 +411,9 @@ def get_standardized_virsh_info(self, virsh_cmd=None, virsh_dict=None): standardized_virsh_output_info[mem_item] = str(int(mem_item_value) * 1024) else: standardized_virsh_output_info[mem_item] = mem_item_value - logging.debug("memtune: the value '%s' for '%s' is " - "new to us, pls check.", - mem_item_value, mem_item) + LOG.debug("memtune: the value '%s' for '%s' is " + "new to us, pls check.", + mem_item_value, mem_item) elif virsh_cmd == "schedinfo": for schedinfo_item, schedinfo_value in list(virsh_dict.items()): if schedinfo_item.lower() in ["scheduler"]: @@ -424,8 +426,8 @@ def get_standardized_virsh_info(self, virsh_cmd=None, virsh_dict=None): continue standardized_virsh_output_info[schedinfo_item] = schedinfo_value else: - logging.error("You've provided an unsupported virsh cmd: %s", - virsh_cmd) + LOG.error("You've provided an unsupported virsh cmd: %s", + virsh_cmd) return None return standardized_virsh_output_info diff --git a/virttest/libvirt_installer.py b/virttest/libvirt_installer.py index cfe0c82c55..19100fc3b9 100644 --- a/virttest/libvirt_installer.py +++ b/virttest/libvirt_installer.py @@ -15,6 +15,8 @@ __all__ = ['GitRepoInstaller', 'LocalSourceDirInstaller', 'LocalSourceTarInstaller', 'RemoteSourceTarInstaller'] +LOG = logging.getLogger('avocado.' 
+ __name__) + class LIBVIRTBaseInstaller(base_installer.BaseInstaller): @@ -40,14 +42,14 @@ def _install_phase_package(self): self.rpmbuild_path = self.params.get("rpmbuild_path", "/root/rpmbuild/") if os.path.isdir(self.rpmbuild_path): process.system("rm -rf %s/*" % self.rpmbuild_path) - logging.debug("Build libvirt rpms") + LOG.debug("Build libvirt rpms") process.system("make rpm", allow_output_check="combined") def _install_phase_package_verify(self): """ Check if rpms are generated """ - logging.debug("Check for libvirt rpms") + LOG.debug("Check for libvirt rpms") found = False for fl in os.listdir('%s/RPMS/%s/' % (self.rpmbuild_path, platform.machine())): @@ -60,7 +62,7 @@ def _install_phase_install(self): """ Install libvirt package """ - logging.debug("Install libvirt rpms") + LOG.debug("Install libvirt rpms") package_install_cmd = "rpm -Uvh --nodeps --replacepkgs" package_install_cmd += " --replacefiles --oldpackage" package_install_cmd += " %s/RPMS/%s/libvirt*" % (self.rpmbuild_path, @@ -73,7 +75,7 @@ def _install_phase_init(self): :return: None """ - logging.debug("Initialize installed libvirt package") + LOG.debug("Initialize installed libvirt package") process.system("service libvirtd restart", allow_output_check="combined") def _install_phase_init_verify(self): @@ -82,7 +84,7 @@ def _install_phase_init_verify(self): :return: None """ - logging.debug("Check libvirt package install") + LOG.debug("Check libvirt package install") process.system("service libvirtd status", allow_output_check="combined") process.system("virsh capabilities", allow_output_check="combined") diff --git a/virttest/libvirt_remote.py b/virttest/libvirt_remote.py index 1e2c385ed5..0fc362a881 100644 --- a/virttest/libvirt_remote.py +++ b/virttest/libvirt_remote.py @@ -11,6 +11,9 @@ from virttest.utils_test import libvirt +LOG = logging.getLogger('avocado.' 
+ __name__) + + def update_remote_file(params, value, file_path='/etc/libvirt/libvirtd.conf', restart_libvirt=True): @@ -26,7 +29,7 @@ def update_remote_file(params, value, """ try: tmp_value = eval(value) - logging.debug("Update file {} with: {}".format(file_path, value)) + LOG.debug("Update file {} with: {}".format(file_path, value)) remote_ip = params.get("server_ip", params.get("remote_ip")) remote_pwd = params.get("server_pwd", params.get("remote_pwd")) remote_user = params.get("server_user", params.get("remote_user")) diff --git a/virttest/libvirt_storage.py b/virttest/libvirt_storage.py index 8ac7bd642e..0cdf65b165 100644 --- a/virttest/libvirt_storage.py +++ b/virttest/libvirt_storage.py @@ -15,6 +15,8 @@ from virttest import storage from virttest import virsh +LOG = logging.getLogger('avocado.' + __name__) + class QemuImg(storage.QemuImg): @@ -255,7 +257,7 @@ def delete_pool(self, name): # TODO: Allow pool_destroy to raise exception. # Because some testcase rely on this function, # I should start this work after this module is accepted. 
- logging.error("Destroy pool '%s' failed.", name) + LOG.error("Destroy pool '%s' failed.", name) return False # Undefine pool anyway @@ -263,9 +265,9 @@ def delete_pool(self, name): self.virsh_instance.pool_undefine(name, ignore_status=False) except process.CmdError as detail: if self.pool_exists(name): - logging.error("Undefine pool '%s' failed:%s", name, detail) + LOG.error("Undefine pool '%s' failed:%s", name, detail) return False - logging.info("Deleted pool '%s'", name) + LOG.info("Deleted pool '%s'", name) return True def set_pool_autostart(self, name, extra=""): @@ -275,9 +277,9 @@ def set_pool_autostart(self, name, extra=""): try: self.virsh_instance.pool_autostart(name, extra, ignore_status=False) except process.CmdError: - logging.error("Autostart pool '%s' failed.", name) + LOG.error("Autostart pool '%s' failed.", name) return False - logging.info("Set pool '%s' autostart.", name) + LOG.info("Set pool '%s' autostart.", name) return True def build_pool(self, name, options="", **dargs): @@ -287,9 +289,9 @@ def build_pool(self, name, options="", **dargs): try: self.virsh_instance.pool_build(name, options, **dargs) except process.CmdError: - logging.error("Build pool '%s' failed.", name) + LOG.error("Build pool '%s' failed.", name) return False - logging.info("Built pool '%s'", name) + LOG.info("Built pool '%s'", name) return True def start_pool(self, name): @@ -297,14 +299,14 @@ def start_pool(self, name): Start pool if it is inactive. 
""" if self.is_pool_active(name): - logging.info("Pool '%s' is already active.", name) + LOG.info("Pool '%s' is already active.", name) return True try: self.virsh_instance.pool_start(name, ignore_status=False) except process.CmdError as details: - logging.error("Start pool '%s' failed: %s", name, details) + LOG.error("Start pool '%s' failed: %s", name, details) return False - logging.info("Started pool '%s'", name) + LOG.info("Started pool '%s'", name) return True def destroy_pool(self, name): @@ -312,7 +314,7 @@ def destroy_pool(self, name): Destroy pool if it is active. """ if not self.is_pool_active(name): - logging.info("pool '%s' is already inactive.", name) + LOG.info("pool '%s' is already inactive.", name) return True return self.virsh_instance.pool_destroy(name) @@ -324,9 +326,9 @@ def define_dir_pool(self, name, target_path): self.virsh_instance.pool_define_as(name, "dir", target_path, ignore_status=False) except process.CmdError: - logging.error("Define dir pool '%s' failed.", name) + LOG.error("Define dir pool '%s' failed.", name) return False - logging.info("Defined pool '%s'", name) + LOG.info("Defined pool '%s'", name) return True def define_fs_pool(self, name, block_device, target_path): @@ -338,9 +340,9 @@ def define_fs_pool(self, name, block_device, target_path): extra="--source-dev %s" % block_device, ignore_status=False) except process.CmdError: - logging.error("Define fs pool '%s' failed.", name) + LOG.error("Define fs pool '%s' failed.", name) return False - logging.info("Defined pool '%s'", name) + LOG.info("Defined pool '%s'", name) return True def define_lvm_pool(self, name, block_device, vg_name, target_path): @@ -353,9 +355,9 @@ def define_lvm_pool(self, name, block_device, vg_name, target_path): self.virsh_instance.pool_define_as(name, "logical", target_path, extra, ignore_status=False) except process.CmdError: - logging.error("Define logic pool '%s' failed.", name) + LOG.error("Define logic pool '%s' failed.", name) return False - 
logging.info("Defined pool '%s'", name) + LOG.info("Defined pool '%s'", name) return True def define_disk_pool(self, name, block_device, target_path): @@ -367,9 +369,9 @@ def define_disk_pool(self, name, block_device, target_path): self.virsh_instance.pool_define_as(name, "disk", target_path, extra, ignore_status=False) except process.CmdError: - logging.error("Define disk pool '%s' failed.", name) + LOG.error("Define disk pool '%s' failed.", name) return False - logging.info("Defined pool '%s'", name) + LOG.info("Defined pool '%s'", name) return True def define_iscsi_pool(self, name, source_host, source_dev, target_path): @@ -382,9 +384,9 @@ def define_iscsi_pool(self, name, source_host, source_dev, target_path): self.virsh_instance.pool_define_as(name, "iscsi", target_path, extra, ignore_status=False) except process.CmdError: - logging.error("Define iscsi pool '%s' failed.", name) + LOG.error("Define iscsi pool '%s' failed.", name) return False - logging.info("Define pool '%s'", name) + LOG.info("Define pool '%s'", name) return True def define_netfs_pool(self, name, source_host, source_path, target_path): @@ -397,9 +399,9 @@ def define_netfs_pool(self, name, source_host, source_path, target_path): self.virsh_instance.pool_define_as(name, "netfs", target_path, extra, ignore_status=False) except process.CmdError: - logging.error("Define netfs pool '%s' failed.", name) + LOG.error("Define netfs pool '%s' failed.", name) return False - logging.info("Define pool '%s'", name) + LOG.info("Define pool '%s'", name) return True def define_rbd_pool(self, name, source_host, source_name, extra=""): @@ -412,9 +414,9 @@ def define_rbd_pool(self, name, source_host, source_name, extra=""): self.virsh_instance.pool_define_as(name, "rbd", "", extra, ignore_status=False) except process.CmdError: - logging.error("Define rbd pool '%s' failed.", name) + LOG.error("Define rbd pool '%s' failed.", name) return False - logging.info("Define pool '%s'", name) + LOG.info("Define pool '%s'", 
name) return True @@ -435,7 +437,7 @@ def list_volumes(self): result = self.virsh_instance.vol_list(self.pool_name, ignore_status=False) except process.CmdError as detail: - logging.error('List volume failed: %s', detail) + LOG.error('List volume failed: %s', detail) return volumes lines = result.stdout_text.strip().splitlines() @@ -469,7 +471,7 @@ def volume_info(self, name): result = self.virsh_instance.vol_info(name, self.pool_name, ignore_status=False) except process.CmdError as detail: - logging.error("Get volume information failed: %s", detail) + LOG.error("Get volume information failed: %s", detail) return info for line in result.stdout_text.strip().splitlines(): @@ -484,19 +486,18 @@ def create_volume(self, name, capability, Create a volume in pool. """ if self.volume_exists(name): - logging.debug("Volume '%s' already exists.", name) + LOG.debug("Volume '%s' already exists.", name) return False try: self.virsh_instance.vol_create_as(name, self.pool_name, capability, allocation, frmt, ignore_status=False, debug=True) except process.CmdError as detail: - logging.error("Create volume failed:%s", detail) + LOG.error("Create volume failed:%s", detail) return False if not self.volume_exists(name): - logging.error("Created volume does not exist:%s", - self.list_volumes()) + LOG.error("Created volume does not exist:%s", self.list_volumes()) return False return True @@ -509,16 +510,16 @@ def delete_volume(self, name): self.virsh_instance.vol_delete(name, self.pool_name, ignore_status=False) except process.CmdError as detail: - logging.error("Delete volume failed:%s", detail) + LOG.error("Delete volume failed:%s", detail) return False if not self.volume_exists(name): - logging.debug("Volume '%s' has been deleted.", name) + LOG.debug("Volume '%s' has been deleted.", name) return True else: - logging.debug("Delete volume '%s' failed.", name) + LOG.debug("Delete volume '%s' failed.", name) return False else: - logging.info("Volume '%s' does not exist.", name) + 
LOG.info("Volume '%s' does not exist.", name) return True # Return True for expected result def clone_volume(self, old_name, new_name): @@ -531,18 +532,17 @@ def clone_volume(self, old_name, new_name): self.pool_name, ignore_status=False) except process.CmdError as detail: - logging.error("Clone volume failed:%s", detail) + LOG.error("Clone volume failed:%s", detail) return False if self.volume_exists(new_name): - logging.debug("Volume '%s' has been created by clone.", - new_name) + LOG.debug("Volume '%s' has been created by clone.", new_name) return True else: - logging.debug("Volume '%s' clone failed.", old_name) + LOG.debug("Volume '%s' clone failed.", old_name) return False else: - logging.info("Volume '%s' does not exist or '%s' has been exist." - % (old_name, new_name)) + LOG.info("Volume '%s' does not exist or '%s' has been exist." + % (old_name, new_name)) return False diff --git a/virttest/libvirt_version.py b/virttest/libvirt_version.py index 19354985fb..77bb709b52 100644 --- a/virttest/libvirt_version.py +++ b/virttest/libvirt_version.py @@ -11,6 +11,9 @@ from avocado.utils.astring import to_text +LOG = logging.getLogger('avocado.' 
+ __name__) + + def version_compare(major, minor, update, session=None): """ Determine/use the current libvirt library version on the system @@ -56,12 +59,12 @@ def version_compare(major, minor, update, session=None): int(mobj.group(3)) break except (ValueError, TypeError, AttributeError): - logging.warning("Error determining libvirt version") + LOG.warning("Error determining libvirt version") return False compare_version = major * 1000000 + minor * 1000 + update if LIBVIRT_LIB_VERSION == 0: - logging.error("Unable to get virtqemud/libvirtd version!") + LOG.error("Unable to get virtqemud/libvirtd version!") elif LIBVIRT_LIB_VERSION >= compare_version: return True return False @@ -95,7 +98,7 @@ def is_libvirt_feature_supported(params, ignore_error=False): if func_supported_since_libvirt_ver: if not version_compare(*func_supported_since_libvirt_ver): if ignore_error: - logging.error(unspported_err_msg) + LOG.error(unspported_err_msg) return False else: raise exceptions.TestCancel(unspported_err_msg) diff --git a/virttest/libvirt_vm.py b/virttest/libvirt_vm.py index b3b65671c3..2889235527 100644 --- a/virttest/libvirt_vm.py +++ b/virttest/libvirt_vm.py @@ -8,7 +8,7 @@ import time import string import os -import logging as log +import logging import fcntl import re import shutil @@ -38,7 +38,7 @@ # Using as lower capital is not the best way to do, but this is just a # workaround to avoid changing the entire file. -logging = log.getLogger('avocado.' + __name__) +LOG = logging.getLogger('avocado.' 
+ __name__) def normalize_connect_uri(connect_uri): @@ -207,8 +207,8 @@ def __init__(self, name, params, root_dir, address_cache, state=None): self.monitor = Monitor(self.name) # virtnet init depends on vm_type/driver_type being set w/in params super(VM, self).__init__(name, params) - logging.info("Libvirt VM '%s', driver '%s', uri '%s'", - self.name, self.driver_type, self.connect_uri) + LOG.info("Libvirt VM '%s', driver '%s', uri '%s'", + self.name, self.driver_type, self.connect_uri) def is_lxc(self): """ @@ -306,7 +306,7 @@ def undefine(self, options=None): virsh.undefine(self.name, options=options, uri=self.connect_uri, ignore_status=False) except process.CmdError as detail: - logging.error("Undefined VM %s failed:\n%s", self.name, detail) + LOG.error("Undefined VM %s failed:\n%s", self.name, detail) return False return True @@ -315,13 +315,13 @@ def define(self, xml_file): Define the VM. """ if not os.path.exists(xml_file): - logging.error("File %s not found." % xml_file) + LOG.error("File %s not found." 
% xml_file) return False try: virsh.define(xml_file, uri=self.connect_uri, ignore_status=False) except process.CmdError as detail: - logging.error("Defined VM from %s failed:\n%s", xml_file, detail) + LOG.error("Defined VM from %s failed:\n%s", xml_file, detail) return False return True @@ -366,7 +366,7 @@ def backup_xml(self, active=False): except Exception as detail: if os.path.exists(xml_file): os.remove(xml_file) - logging.error("Failed to backup xml file:\n%s", detail) + LOG.error("Failed to backup xml file:\n%s", detail) return "" def clone(self, name=None, params=None, root_dir=None, address_cache=None, @@ -470,7 +470,7 @@ def add_hvm_or_pv(help_text, hvm_or_pv): elif hvm_or_pv == "pv": return " --paravirt" else: - logging.warning("Unknown virt type hvm_or_pv, using default.") + LOG.warning("Unknown virt type hvm_or_pv, using default.") return "" def add_mem(help_text, mem, maxmem=None, hugepage=False, @@ -479,25 +479,25 @@ def add_mem(help_text, mem, maxmem=None, hugepage=False, cmd = " --memory=%s" % mem if maxmem: if not has_sub_option('memory', 'maxmemory'): - logging.warning("maxmemory option not supported by " - "virt-install") + LOG.warning("maxmemory option not supported by " + "virt-install") else: cmd += ",maxmemory=%s" % maxmem if hugepage: if not has_sub_option('memory', 'hugepages'): - logging.warning("hugepages option not supported by " - "virt-install") + LOG.warning("hugepages option not supported by " + "virt-install") else: cmd += ",hugepages=yes" if hotplugmaxmem: if not has_sub_option('memory', 'hotplugmemorymax'): - logging.warning("hotplugmemorymax option not supported" - "by virt-install") + LOG.warning("hotplugmemorymax option not supported" + "by virt-install") else: cmd += ",hotplugmemorymax=%s" % hotplugmaxmem if not has_sub_option('memory', 'hotplugmemoryslots'): - logging.warning("hotplugmemoryslots option not " - "supported by virt-install") + LOG.warning("hotplugmemoryslots option not " + "supported by virt-install") else: cmd 
+= ",hotplugmemoryslots=%d" % hotplugmemslots return cmd @@ -529,7 +529,7 @@ def add_numa(): :return: appended numa parameter to virt-install cmd """ if not has_sub_option('cpu', 'cell'): - logging.warning("virt-install version does not support numa cmd line") + LOG.warning("virt-install version does not support numa cmd line") return "" cmd = " --cpu" cell = "cell%s.cpus=%s,cell%s.id=%s,cell%s.memory=%s" @@ -613,8 +613,8 @@ def pin_numa(help_text, host_numa_node_list): host_numa_list = host_numa.split(',') for each_numa in host_numa_list: if each_numa not in host_numa_node_list: - logging.error("host numa node - %s is not online or " - "doesn't have memory", each_numa) + LOG.error("host numa node - %s is not online or " + "doesn't have memory", each_numa) host_numa_list.remove(each_numa) if host_numa_list: host_numa = ','.join(map(str, host_numa_list)) @@ -895,8 +895,8 @@ def add_nic(help_text, nic_params): result += ',driver_name=%s' % nic_driver elif mac: # possible to specify --mac w/o --network result += " --mac=%s" % mac - logging.debug("vm.make_create_command.add_nic returning: %s", - result) + LOG.debug("vm.make_create_command.add_nic returning: %s", + result) return result def add_memballoon(help_text, memballoon_model): @@ -910,9 +910,9 @@ def add_memballoon(help_text, memballoon_model): if has_option(help_text, "memballoon"): result = " --memballoon model=%s" % memballoon_model else: - logging.warning("memballoon is not supported") + LOG.warning("memballoon is not supported") result = "" - logging.debug("vm.add_memballoon returning: %s", result) + LOG.debug("vm.add_memballoon returning: %s", result) return result def add_kernel(help_text, cmdline, kernel_path=None, initrd_path=None, @@ -939,7 +939,7 @@ def add_kernel(help_text, cmdline, kernel_path=None, initrd_path=None, result += "kernel_args=\"%s\"," % kernel_args else: result = "" - logging.warning("boot option is not supported") + LOG.warning("boot option is not supported") return result.rstrip(',') 
def add_cputune(vcpu_cputune=""): @@ -971,10 +971,10 @@ def add_tpmdevice(help_text, device_path, model=None, type=None): """ result = "" if not has_option(help_text, "tpm"): - logging.warning("tpm option is not supported in virt-install") + LOG.warning("tpm option is not supported in virt-install") return result if not (device_path and os.path.exists(device_path)): - logging.warning("Given TPM device is not valid or not present") + LOG.warning("Given TPM device is not valid or not present") return result result = " --tpm path=%s" % device_path if has_sub_option("tpm", "model") and model: @@ -1017,8 +1017,8 @@ def add_tpmdevice(help_text, device_path, model=None, type=None): arch_name = params.get("vm_arch_name", platform.machine()) support_machine_type = libvirt.get_machine_types(arch_name, hvm_or_pv, ignore_status=False) - logging.debug("Machine types supported for %s/%s: %s", - hvm_or_pv, arch_name, support_machine_type) + LOG.debug("Machine types supported for %s/%s: %s", + hvm_or_pv, arch_name, support_machine_type) # Start constructing the qemu command virt_install_cmd = "" @@ -1093,12 +1093,12 @@ def add_tpmdevice(help_text, device_path, model=None, type=None): # Numa might be online but if it doesn't have free memory, # skip it if free_mem == 0: - logging.debug("Host numa node: %s doesn't have memory", - each_numa) + LOG.debug("Host numa node: %s doesn't have memory", + each_numa) host_numa_node_list.remove(each_numa) if not host_numa_node_list: - logging.error("Host Numa nodes are not online or doesn't " - "have memory to pin") + LOG.error("Host Numa nodes are not online or doesn't " + "have memory to pin") else: virt_install_cmd += pin_numa(help_text, host_numa_node_list) @@ -1118,8 +1118,8 @@ def add_tpmdevice(help_text, device_path, model=None, type=None): guest_numa = ','.join(map(str, list(range(guest_numa)))) virt_install_cmd += pin_hugepage(help_text, hp_size, guest_numa) else: - logging.error("Can't pin hugepage without hugepage enabled" - "and 
Numa enabled") + LOG.error("Can't pin hugepage without hugepage enabled" + "and Numa enabled") cpu_mode = params.get("virt_cpu_mode", '') if cpu_mode: @@ -1162,8 +1162,7 @@ def add_tpmdevice(help_text, device_path, model=None, type=None): if os.path.islink(pxeboot_link): os.unlink(pxeboot_link) if os.path.isdir(pxeboot_link): - logging.info("Removed old %s leftover directory", - pxeboot_link) + LOG.info("Removed old %s leftover directory", pxeboot_link) shutil.rmtree(pxeboot_link) os.symlink(kernel_dir, pxeboot_link) @@ -1292,14 +1291,14 @@ def add_tpmdevice(help_text, device_path, model=None, type=None): if params.get("use_libvirt_cdrom_switch") == 'yes': # we don't want to skip the winutils iso if not cdrom == 'winutils': - logging.debug( + LOG.debug( "Using --cdrom instead of --disk for install") - logging.debug("Skipping CDROM:%s:%s", cdrom, iso) + LOG.debug("Skipping CDROM:%s:%s", cdrom, iso) continue if params.get("medium") == 'cdrom': if iso == params.get("cdrom_cd1"): - logging.debug("Using cdrom or url for install") - logging.debug("Skipping CDROM: %s", iso) + LOG.debug("Using cdrom or url for install") + LOG.debug("Skipping CDROM: %s", iso) continue if iso: @@ -1340,8 +1339,8 @@ def add_tpmdevice(help_text, device_path, model=None, type=None): for nic in vm.virtnet: # make_create_command can be called w/o vm.create() nic = vm.add_nic(**dict(nic)) - logging.debug("make_create_command() setting up command for" - " nic: %s" % str(nic)) + LOG.debug("make_create_command() setting up command for" + " nic: %s" % str(nic)) virt_install_cmd += add_nic(help_text, nic) if params.get("use_no_reboot") == "yes": @@ -1356,14 +1355,14 @@ def add_tpmdevice(help_text, device_path, model=None, type=None): emulator_path = params.get("emulator_path", None) if emulator_path: if not has_sub_option('boot', 'emulator'): - logging.warning("emulator option not supported by virt-install") + LOG.warning("emulator option not supported by virt-install") else: virt_install_cmd += " 
--boot emulator=%s" % emulator_path bios_path = params.get("bios_path", None) if bios_path: if not has_sub_option('boot', 'loader'): - logging.warning("bios option not supported by virt-install") + LOG.warning("bios option not supported by virt-install") else: if "--boot" in virt_install_cmd: virt_install_cmd += "," @@ -1460,13 +1459,13 @@ def _create_serial_console(self): output_filename = self.get_serial_console_filename(self.serial_ports[0]) output_params = (output_filename,) prompt = self.params.get("shell_prompt", "[\#\$]") - logging.debug("Command used to create serial console: %s", cmd) + LOG.debug("Command used to create serial console: %s", cmd) self.serial_console = aexpect.ShellSession(command=cmd, auto_close=False, output_func=output_func, output_params=output_params, prompt=prompt) if not self.serial_console.is_alive(): - logging.error("Failed to create serial_console") + LOG.error("Failed to create serial_console") # Cause serial_console.close() to close open log file self.serial_console.set_log_file(output_filename) self.serial_console_log = os.path.join(utils_misc.get_log_file_dir(), @@ -1482,7 +1481,7 @@ def set_root_serial_console(self, device, remove=False): try: session = self.login() except (remote.LoginError, virt_vm.VMError) as e: - logging.debug(e) + LOG.debug(e) else: try: securetty_output = session.cmd_output("cat /etc/securetty") @@ -1494,11 +1493,11 @@ def set_root_serial_console(self, device, remove=False): if remove: session.sendline("sed -i -e /%s/d /etc/securetty" % device) - logging.debug("Set root login for %s successfully.", device) + LOG.debug("Set root login for %s successfully.", device) return True finally: session.close() - logging.debug("Set root login for %s failed.", device) + LOG.debug("Set root login for %s failed.", device) return False def set_kernel_console(self, device, speed=None, remove=False, @@ -1521,7 +1520,7 @@ def set_kernel_console(self, device, speed=None, remove=False, else: 
utils_test.update_boot_option(self, args_added=kernel_params, guest_arch_name=guest_arch_name) - logging.debug("Set kernel params for %s is successful", device) + LOG.debug("Set kernel params for %s is successful", device) return True def set_kernel_param(self, parameter, value=None, remove=False): @@ -1534,7 +1533,7 @@ def set_kernel_param(self, parameter, value=None, remove=False): :return: True if succeed of False if failed. """ if self.is_dead(): - logging.error("Can't set kernel param on a dead VM.") + LOG.error("Can't set kernel param on a dead VM.") return False session = self.wait_for_login() @@ -1546,8 +1545,8 @@ def set_kernel_param(self, parameter, value=None, remove=False): kernel_lines = [l.strip() for l in grub_text.splitlines() if re.match(r"\s*(linux|kernel).*", l)] if not kernel_lines: - logging.error("Can't find any kernel lines in grub " - "file %s:\n%s" % (grub_path, grub_text)) + LOG.error("Can't find any kernel lines in grub " + "file %s:\n%s" % (grub_path, grub_text)) return False for line in kernel_lines: @@ -1577,21 +1576,21 @@ def set_kernel_param(self, parameter, value=None, remove=False): new_line = " ".join((line, new_string)) line_patt = "\s*".join(line.split()) - logging.debug("Substituting grub line '%s' to '%s'." % - (line, new_line)) + LOG.debug("Substituting grub line '%s' to '%s'." 
% + (line, new_line)) stat_sed, output = session.cmd_status_output( "sed -i --follow-symlinks -e \"s@%s@%s@g\" %s" % (line_patt, new_line, grub_path)) if stat_sed: - logging.error("Failed to substitute grub file:\n%s" % - output) + LOG.error("Failed to substitute grub file:\n%s" % + output) return False if remove: - logging.debug("Remove kernel params %s successfully.", - parameter) + LOG.debug("Remove kernel params %s successfully.", + parameter) else: - logging.debug("Set kernel params %s to %s successfully.", - parameter, value) + LOG.debug("Set kernel params %s to %s successfully.", + parameter, value) return True finally: session.close() @@ -1605,7 +1604,7 @@ def set_boot_kernel(self, index, debug_kernel=False): :return: default kernel """ if self.is_dead(): - logging.error("Can't set kernel param on a dead VM.") + LOG.error("Can't set kernel param on a dead VM.") return False session = self.wait_for_login() @@ -1622,19 +1621,19 @@ def set_boot_kernel(self, index, debug_kernel=False): output = session.cmd("cat %s |grep initramfs" % grub_path) kernel_list = re.findall("-.*", output) if index >= len(kernel_list): - logging.error("Index out of kernel list") + LOG.error("Index out of kernel list") return - logging.debug("kernel list of vm:") - logging.debug(kernel_list) + LOG.debug("kernel list of vm:") + LOG.debug(kernel_list) if debug_kernel: index = -1 - logging.info("Setting debug kernel as default") + LOG.info("Setting debug kernel as default") for i in range(len(kernel_list)): if "debug" in kernel_list[i] and 'rescue' not in kernel_list[i].lower(): index = i break if index == -1: - logging.error("No debug kernel in grub file!") + LOG.error("No debug kernel in grub file!") return if grub == 1: cmd_set_grub = "sed -i 's/default=./default=%d/' " % index @@ -1655,7 +1654,7 @@ def has_swap(self): :return : True if swap is on or False otherwise. 
""" if self.is_dead(): - logging.error("Can't check swap on a dead VM.") + LOG.error("Can't check swap on a dead VM.") return False session = self.wait_for_login() @@ -1678,7 +1677,7 @@ def create_swap_partition(self, swap_path=None): :param swap_path: Swap image path. """ if self.is_dead(): - logging.error("Can't create swap on a dead VM.") + LOG.error("Can't create swap on a dead VM.") return False if not swap_path: @@ -1698,7 +1697,7 @@ def create_swap_partition(self, swap_path=None): return True finally: session.close() - logging.error("Failed to create a swap partition.") + LOG.error("Failed to create a swap partition.") return False def create_swap_file(self, swapfile='/swapfile'): @@ -1711,7 +1710,7 @@ def create_swap_file(self, swapfile='/swapfile'): :param swapfile: Swap file path in VM to be created. """ if self.is_dead(): - logging.error("Can't create swap on a dead VM.") + LOG.error("Can't create swap on a dead VM.") return False session = self.wait_for_login() @@ -1725,8 +1724,8 @@ def create_swap_file(self, swapfile='/swapfile'): "mkswap {1}".format(swap_size, swapfile)) stat_create, output = session.cmd_status_output(cmd) if stat_create: - logging.error("Fail to create swap file in guest." - "\n%s" % output) + LOG.error("Fail to create swap file in guest." + "\n%s" % output) return False self.created_swap_file = swapfile @@ -1775,17 +1774,17 @@ def create_swap_file(self, swapfile='/swapfile'): cmd = "swapon %s" % swapfile stat_swapon, output = session.cmd_status_output(cmd) if stat_create: - logging.error("Fail to activate swap file in guest." - "\n%s" % output) + LOG.error("Fail to activate swap file in guest." + "\n%s" % output) return False finally: session.close() if self.has_swap(): - logging.debug("Successfully created swapfile %s." % swapfile) + LOG.debug("Successfully created swapfile %s." 
% swapfile) return True else: - logging.error("Failed to create swap file.") + LOG.error("Failed to create swap file.") return False def cleanup_swap(self): @@ -1794,7 +1793,7 @@ def cleanup_swap(self): create_swap_file(). """ if self.is_dead(): - logging.error("Can't cleanup swap on a dead VM.") + LOG.error("Can't cleanup swap on a dead VM.") return False # Remove kernel parameters. @@ -1832,7 +1831,7 @@ def set_console_getty(self, device, getty="mgetty", remove=False): try: session = self.login() except (remote.LoginError, virt_vm.VMError) as e: - logging.debug(e) + LOG.debug(e) else: try: # Only configurate RHEL5 and below @@ -1841,7 +1840,7 @@ def set_console_getty(self, device, getty="mgetty", remove=False): regex += "|inittab is no longer used when using systemd" output = session.cmd_output("cat /etc/inittab") if re.search(regex, output): - logging.debug("Skip setting inittab for %s", device) + LOG.debug("Skip setting inittab for %s", device) return True getty_str = "co:2345:respawn:/sbin/%s %s" % (getty, device) matched_str = "respawn:/sbin/*getty %s" % device @@ -1852,11 +1851,11 @@ def set_console_getty(self, device, getty="mgetty", remove=False): if remove: session.sendline("sed -i -e /%s/d " "/etc/inittab" % matched_str) - logging.debug("Set inittab for %s successfully.", device) + LOG.debug("Set inittab for %s successfully.", device) return True finally: session.close() - logging.debug("Set inittab for %s failed.", device) + LOG.debug("Set inittab for %s failed.", device) return False def cleanup_serial_console(self): @@ -1968,29 +1967,29 @@ def create(self, name=None, params=None, root_dir=None, timeout=5.0, raise virt_vm.VMImageMissingError(iso) compare = False if cdrom_params.get("skip_hash", "no") == "yes": - logging.debug("Skipping hash comparison") + LOG.debug("Skipping hash comparison") elif cdrom_params.get("md5sum_1m"): - logging.debug("Comparing expected MD5 sum with MD5 sum of " - "first MB of ISO file...") + LOG.debug("Comparing expected MD5 
sum with MD5 sum of " + "first MB of ISO file...") actual_hash = crypto.hash_file( iso, 1048576, algorithm="md5") expected_hash = cdrom_params.get("md5sum_1m") compare = True elif cdrom_params.get("md5sum"): - logging.debug("Comparing expected MD5 sum with MD5 sum of " - "ISO file...") + LOG.debug("Comparing expected MD5 sum with MD5 sum of " + "ISO file...") actual_hash = crypto.hash_file(iso, algorithm="md5") expected_hash = cdrom_params.get("md5sum") compare = True elif cdrom_params.get("sha1sum"): - logging.debug("Comparing expected SHA1 sum with SHA1 sum " - "of ISO file...") + LOG.debug("Comparing expected SHA1 sum with SHA1 sum " + "of ISO file...") actual_hash = crypto.hash_file(iso, algorithm="sha1") expected_hash = cdrom_params.get("sha1sum") compare = True if compare: if actual_hash == expected_hash: - logging.debug("Hashes match") + LOG.debug("Hashes match") else: raise virt_vm.VMHashMismatchError(actual_hash, expected_hash) @@ -2043,21 +2042,21 @@ def create(self, name=None, params=None, root_dir=None, timeout=5.0, if mac_source is not None: # Will raise exception if source doesn't # have corresponding nic - logging.debug("Copying mac for nic %s from VM %s", - nic.nic_name, mac_source.name) + LOG.debug("Copying mac for nic %s from VM %s", + nic.nic_name, mac_source.name) nic_params['mac'] = mac_source.get_mac_address( nic.nic_name) # make_create_command() calls vm.add_nic (i.e. 
on a copy) nic = self.add_nic(**nic_params) - logging.debug('VM.create activating nic %s' % nic) + LOG.debug('VM.create activating nic %s' % nic) self.activate_nic(nic.nic_name) # Make qemu command install_command = self.make_create_command() - logging.info("Running libvirt command (reformatted):") + LOG.info("Running libvirt command (reformatted):") for item in install_command.replace(" -", " \n -").splitlines(): - logging.info("%s", item) + LOG.info("%s", item) try: process.run(install_command, verbose=True, shell=True) except process.CmdError as details: @@ -2112,8 +2111,8 @@ def migrate(self, dest_uri="", option="--live --timeout 60", extra="", :param dargs: Standardized virsh function API keywords :return: True if command succeeded """ - logging.info("Migrating VM %s from %s to %s" % - (self.name, self.connect_uri, dest_uri)) + LOG.info("Migrating VM %s from %s to %s" % + (self.name, self.connect_uri, dest_uri)) result = virsh.migrate(self.name, dest_uri, option, extra, uri=self.connect_uri, **dargs) @@ -2160,9 +2159,9 @@ def attach_disk(self, source, target=None, prefix="vd", extra="", ignore_status=ignore_status, debug=debug) if result.exit_status: - logging.error("Failed to attach disk %s to VM." - "Detail: %s." - % (source, result.stderr_text)) + LOG.error("Failed to attach disk %s to VM." + "Detail: %s." + % (source, result.stderr_text)) return None return target @@ -2215,26 +2214,26 @@ def destroy(self, gracefully=True, free_mac_addresses=True): try: # Is it already dead? 
if self.is_alive(): - logging.debug("Destroying VM") + LOG.debug("Destroying VM") if self.is_paused(): self.resume() if (not self.is_lxc() and gracefully and self.params.get("shutdown_command")): # Try to destroy with shell command - logging.debug("Trying to shutdown VM with shell command") + LOG.debug("Trying to shutdown VM with shell command") try: session = self.login() except (remote.LoginError, virt_vm.VMError) as e: - logging.debug(e) + LOG.debug(e) else: try: # Send the shutdown command session.sendline( self.params.get("shutdown_command")) - logging.debug("Shutdown command sent; waiting for VM " - "to go down...") + LOG.debug("Shutdown command sent; waiting for VM " + "to go down...") if utils_misc.wait_for(self.is_dead, 60, 1, 1): - logging.debug("VM is down") + LOG.debug("VM is down") return finally: session.close() @@ -2249,10 +2248,10 @@ def destroy(self, gracefully=True, free_mac_addresses=True): self.cleanup_serial_console() if free_mac_addresses: if self.is_persistent(): - logging.warning("Requested MAC address release from " - "persistent vm %s. Ignoring." % self.name) + LOG.warning("Requested MAC address release from " + "persistent vm %s. Ignoring." % self.name) else: - logging.debug("Releasing MAC addresses for vm %s." % self.name) + LOG.debug("Releasing MAC addresses for vm %s." 
% self.name) for nic_name in self.virtnet.nic_name_list(): self.virtnet.free_mac_address(nic_name) @@ -2261,7 +2260,7 @@ def remove(self): if not self.undefine(): raise virt_vm.VMRemoveError("VM '%s' undefine error" % self.name) self.destroy(gracefully=False, free_mac_addresses=True) - logging.debug("VM '%s' was removed", self.name) + LOG.debug("VM '%s' was removed", self.name) def remove_with_storage(self): """ @@ -2354,12 +2353,12 @@ def get_pid(self): pid_file_contents = open(pid_file).read() pid = int(pid_file_contents) except IOError: - logging.error("Could not read %s to get PID", pid_file) + LOG.error("Could not read %s to get PID", pid_file) except TypeError: - logging.error("PID file %s has invalid contents: '%s'", - pid_file, pid_file_contents) + LOG.error("PID file %s has invalid contents: '%s'", + pid_file, pid_file_contents) else: - logging.debug("PID file %s not present", pid_file) + LOG.debug("PID file %s not present", pid_file) return pid @@ -2391,7 +2390,7 @@ def get_shared_meminfo(self): :return: Shared memory used by VM (MB) """ if self.is_dead(): - logging.error("Could not get shared memory info from dead VM.") + LOG.error("Could not get shared memory info from dead VM.") return None filename = "/proc/%d/statm" % self.get_pid() @@ -2408,7 +2407,7 @@ def get_cpu_topology_in_cmdline(self): cpu_topology = {} vm_pid = self.get_pid() if vm_pid is None: - logging.error("Fail to get VM pid") + LOG.error("Fail to get VM pid") else: cmdline = open("/proc/%d/cmdline" % vm_pid).read() values = re.findall("sockets=(\d+),cores=(\d+),threads=(\d+)", @@ -2449,7 +2448,7 @@ def reboot(self, session=None, method="shell", nic_index=0, timeout=240, :param serial: Just use to unify api in virt_vm module. :return: A new shell session object. 
""" - error_context.base_context("rebooting '%s'" % self.name, logging.info) + error_context.base_context("rebooting '%s'" % self.name, LOG.info) error_context.context("before reboot") session = session or self.login(timeout=timeout) error_context.context() @@ -2459,21 +2458,21 @@ def reboot(self, session=None, method="shell", nic_index=0, timeout=240, else: raise virt_vm.VMRebootError("Unknown reboot method: %s" % method) - error_context.context("waiting for guest to go down", logging.info) + error_context.context("waiting for guest to go down", LOG.info) if not utils_misc.wait_for(lambda: not session.is_responsive(timeout=30), 120, 0, 1): raise virt_vm.VMRebootError("Guest refuses to go down") session.close() - error_context.context("logging in after reboot", logging.info) + error_context.context("logging in after reboot", LOG.info) if serial: return self.wait_for_serial_login(timeout=timeout) return self.wait_for_login(nic_index, timeout=timeout) def screendump(self, filename, debug=False): if debug: - logging.debug("Requesting screenshot %s" % filename) + LOG.debug("Requesting screenshot %s" % filename) return virsh.screenshot(self.name, filename, uri=self.connect_uri) def start(self, autoconsole=True): @@ -2483,7 +2482,7 @@ def start(self, autoconsole=True): uid_result = virsh.domuuid(self.name, uri=self.connect_uri) self.uuid = uid_result.stdout_text.strip() - logging.debug("Starting vm '%s'", self.name) + LOG.debug("Starting vm '%s'", self.name) result = virsh.start(self.name, uri=self.connect_uri) if not result.exit_status: # Wait for the domain to be created @@ -2499,7 +2498,7 @@ def start(self, autoconsole=True): if autoconsole: self.create_serial_console() else: - logging.error("VM fails to start with:%s", result) + LOG.error("VM fails to start with:%s", result) raise virt_vm.VMStartError(self.name, result.stderr_text.strip()) @@ -2508,17 +2507,16 @@ def start(self, autoconsole=True): try: mac = self.get_virsh_mac_address(index) if 'mac' not in nic: - 
logging.debug("Updating nic %d with mac %s on vm %s" - % (index, mac, self.name)) + LOG.debug("Updating nic %d with mac %s on vm %s" + % (index, mac, self.name)) nic.mac = mac elif nic.mac != mac: - logging.warning("Requested mac %s doesn't match mac %s " - "as defined for vm %s", nic.mac, mac, - self.name) + LOG.warning("Requested mac %s doesn't match mac %s " + "as defined for vm %s", nic.mac, mac, self.name) # TODO: Checkout/Set nic_model, nettype, netdst also except virt_vm.VMMACAddressMissingError: - logging.warning("Nic %d requested by test but not defined for" - " vm %s" % (index, self.name)) + LOG.warning("Nic %d requested by test but not defined for" + " vm %s" % (index, self.name)) def wait_for_shutdown(self, count=60): """ @@ -2535,11 +2533,11 @@ def wait_for_shutdown(self, count=60): # check every 5 seconds if count % 5 == 0: if virsh.is_dead(self.name, uri=self.connect_uri): - logging.debug("Shutdown took %d seconds", timeout - count) + LOG.debug("Shutdown took %d seconds", timeout - count) return True count -= 1 time.sleep(1) - logging.debug("Waiting for guest to shutdown %d", count) + LOG.debug("Waiting for guest to shutdown %d", count) return False def shutdown(self): @@ -2550,14 +2548,14 @@ def shutdown(self): if self.state() != 'shut off': virsh.shutdown(self.name, uri=self.connect_uri) if self.wait_for_shutdown(): - logging.debug("VM %s shut down", self.name) + LOG.debug("VM %s shut down", self.name) self.cleanup_serial_console() return True else: - logging.error("VM %s failed to shut down", self.name) + LOG.error("VM %s failed to shut down", self.name) return False except process.CmdError: - logging.error("VM %s failed to shut down", self.name) + LOG.error("VM %s failed to shut down", self.name) return False def pause(self): @@ -2568,19 +2566,19 @@ def pause(self): self.name, uri=self.connect_uri, ignore_status=False) return True except Exception: - logging.error("VM %s failed to suspend", self.name) + LOG.error("VM %s failed to suspend", 
self.name) return False def resume(self): try: virsh.resume(self.name, ignore_status=False, uri=self.connect_uri) if self.is_alive(): - logging.debug("Resumed VM %s", self.name) + LOG.debug("Resumed VM %s", self.name) return True else: return False except process.CmdError as detail: - logging.error("Resume VM %s failed:\n%s", self.name, detail) + LOG.error("Resume VM %s failed:\n%s", self.name, detail) return False def save_to_file(self, path): @@ -2590,7 +2588,7 @@ def save_to_file(self, path): if self.is_dead(): raise virt_vm.VMStatusError( "Cannot save a VM that is %s" % self.state()) - logging.debug("Saving VM %s to %s" % (self.name, path)) + LOG.debug("Saving VM %s to %s" % (self.name, path)) result = virsh.save(self.name, path, uri=self.connect_uri) if result.exit_status: raise virt_vm.VMError("Save VM to %s failed.\n" @@ -2607,7 +2605,7 @@ def restore_from_file(self, path): if self.is_alive(): raise virt_vm.VMStatusError( "Can not restore VM that is %s" % self.state()) - logging.debug("Restoring VM from %s" % path) + LOG.debug("Restoring VM from %s" % path) result = virsh.restore(path, uri=self.connect_uri) if result.exit_status: raise virt_vm.VMError("Restore VM from %s failed.\n" @@ -2625,7 +2623,7 @@ def managedsave(self): if self.is_dead(): raise virt_vm.VMStatusError( "Cannot save a VM that is %s" % self.state()) - logging.debug("Managed saving VM %s" % self.name) + LOG.debug("Managed saving VM %s" % self.name) result = virsh.managedsave(self.name, uri=self.connect_uri) if result.exit_status: raise virt_vm.VMError("Managed save VM failed.\n" @@ -2642,7 +2640,7 @@ def pmsuspend(self, target='mem', duration=0): if self.is_dead(): raise virt_vm.VMStatusError( "Cannot pmsuspend a VM that is %s" % self.state()) - logging.debug("PM suspending VM %s" % self.name) + LOG.debug("PM suspending VM %s" % self.name) result = virsh.dompmsuspend(self.name, target=target, duration=duration, uri=self.connect_uri) if result.exit_status: @@ -2658,7 +2656,7 @@ def 
pmwakeup(self): if self.is_dead(): raise virt_vm.VMStatusError( "Cannot pmwakeup a VM that is %s" % self.state()) - logging.debug("PM waking up VM %s" % self.name) + LOG.debug("PM waking up VM %s" % self.name) result = virsh.dompmwakeup(self.name, uri=self.connect_uri) if result.exit_status: raise virt_vm.VMError("PM waking up VM failed.\n" @@ -2748,7 +2746,7 @@ def get_blk_devices(self): uri=self.connect_uri) blklist = result.stdout_text.strip().splitlines() if result.exit_status != 0: - logging.info("Get vm devices failed.") + LOG.info("Get vm devices failed.") else: blklist = blklist[2:] for line in blklist: @@ -2782,7 +2780,7 @@ def get_first_disk_devices(self): uri=self.connect_uri) blklist = result.stdout_text.strip().splitlines() if result.exit_status != 0: - logging.info("Get vm devices failed.") + LOG.info("Get vm devices failed.") else: blklist = blklist[2:] linesplit = blklist[0].split(None, 4) @@ -2798,7 +2796,7 @@ def get_device_details(self, device_target): uri=self.connect_uri) details = result.stdout_text.strip().splitlines() if result.exit_status != 0: - logging.info("Get vm device details failed.") + LOG.info("Get vm device details failed.") else: for line in details: attrs = line.split(":") @@ -2827,7 +2825,7 @@ def domjobabort(self): """ result = virsh.domjobabort(self.name, ignore_status=True) if result.exit_status: - logging.debug(result) + LOG.debug(result) return False return True @@ -2852,7 +2850,7 @@ def get_job_type(self): if key.count("type"): return value.strip() else: - logging.error(jobresult) + LOG.error(jobresult) return False def get_pci_devices(self, device_str=None): @@ -2919,7 +2917,7 @@ def get_interface_mac(self, interface): mac = session.cmd_output(cmd) except Exception as detail: session.close() - logging.error(str(detail)) + LOG.error(str(detail)) return None session.close() return mac.strip() @@ -2938,8 +2936,8 @@ def install_package(self, name, ignore_status=False, timeout=300): name) except Exception as exception_detail: 
if ignore_status: - logging.error("When install: %s\nError happened: %s\n", - name, exception_detail) + LOG.error("When install: %s\nError happened: %s\n", + name, exception_detail) else: raise exception_detail finally: @@ -2957,7 +2955,7 @@ def remove_package(self, name, ignore_status=False): if not ignore_status: session.close() raise virt_vm.VMError("Removal of package %s failed" % name) - logging.error("Removal of package %s failed", name) + LOG.error("Removal of package %s failed", name) session.close() def prepare_guest_agent(self, prepare_xml=True, channel=True, start=True, @@ -3092,7 +3090,7 @@ def setenforce(self, mode): selinux_force = self.params.get("selinux_force", "no") == "yes" vm_distro = self.get_distro() if vm_distro.lower() == 'ubuntu' and not selinux_force: - logging.warning("Ubuntu doesn't support selinux by default") + LOG.warning("Ubuntu doesn't support selinux by default") return self.install_package('selinux-policy') self.install_package('selinux-policy-targeted') @@ -3109,7 +3107,7 @@ def setenforce(self, mode): try: current_mode = self.getenforce() if current_mode == 'Disabled': - logging.warning("VM SELinux disabled. Can't set mode.") + LOG.warning("VM SELinux disabled. Can't set mode.") return elif current_mode != target_mode: cmd = "setenforce %s" % mode @@ -3118,6 +3116,6 @@ def setenforce(self, mode): raise virt_vm.VMError( "Set SELinux mode failed:\n%s" % output) else: - logging.debug("VM SELinux mode don't need change.") + LOG.debug("VM SELinux mode don't need change.") finally: session.close() diff --git a/virttest/libvirt_xml/domcapability_xml.py b/virttest/libvirt_xml/domcapability_xml.py index 88cd0adce1..9067b66ba6 100644 --- a/virttest/libvirt_xml/domcapability_xml.py +++ b/virttest/libvirt_xml/domcapability_xml.py @@ -8,6 +8,8 @@ from virttest import xml_utils from virttest.libvirt_xml import base, accessors, xcepts +LOG = logging.getLogger('avocado.' 
+ __name__) + class DomCapabilityXML(base.LibvirtXMLBase): @@ -84,7 +86,7 @@ def get_additional_feature_list(self, cpu_mode_name, continue feature_list.append(item) except AttributeError as elem_attr: - logging.warn("Failed to find attribute %s" % elem_attr) + LOG.warn("Failed to find attribute %s" % elem_attr) feature_list = [] finally: return feature_list @@ -135,7 +137,7 @@ def get_hostmodel_name(self): if mode_node.get('name') == 'host-model': return mode_node.find('model').text except AttributeError as elem_attr: - logging.warn("Failed to find attribute %s" % elem_attr) + LOG.warn("Failed to find attribute %s" % elem_attr) return '' diff --git a/virttest/libvirt_xml/network_xml.py b/virttest/libvirt_xml/network_xml.py index 57c194b87f..340ce07b35 100644 --- a/virttest/libvirt_xml/network_xml.py +++ b/virttest/libvirt_xml/network_xml.py @@ -10,6 +10,8 @@ from virttest.libvirt_xml import base, xcepts, accessors from virttest.libvirt_xml.devices import librarian +LOG = logging.getLogger('avocado.' 
+ __name__) + class RangeList(list): @@ -653,7 +655,7 @@ def del_element(self, element='', index=0): del_elem = xmltreefile.findall(element)[index] except IndexError as detail: del_elem = None - logging.warning(detail) + LOG.warning(detail) if del_elem is not None: xmltreefile.remove(del_elem) xmltreefile.write() @@ -811,7 +813,7 @@ def debug_xml(self): """ xml = str(self) # LibvirtXMLBase.__str__ returns XML content for debug_line in str(xml).splitlines(): - logging.debug("Network XML: %s", debug_line) + LOG.debug("Network XML: %s", debug_line) def state_dict(self): """ @@ -839,12 +841,12 @@ def orbital_nuclear_strike(self): self['active'] = False # deactivate (stop) network if active except xcepts.LibvirtXMLError as detail: # inconsequential, network will be removed - logging.warning(detail) + LOG.warning(detail) try: self['defined'] = False # undefine (delete) network if persistent except xcepts.LibvirtXMLError as detail: # network already gone - logging.warning(detail) + LOG.warning(detail) def exists(self): """ diff --git a/virttest/libvirt_xml/pool_xml.py b/virttest/libvirt_xml/pool_xml.py index 3846aca083..81d665f1e6 100644 --- a/virttest/libvirt_xml/pool_xml.py +++ b/virttest/libvirt_xml/pool_xml.py @@ -13,6 +13,8 @@ from ..libvirt_xml import base, xcepts, accessors from virttest import element_tree as ET +LOG = logging.getLogger('avocado.' 
+ __name__) + class SourceXML(base.LibvirtXMLBase): @@ -391,7 +393,7 @@ def pool_undefine(self): try: self.virsh.pool_undefine(self.name, ignore_status=False) except process.CmdError: - logging.error("Undefine pool '%s' failed.", self.name) + LOG.error("Undefine pool '%s' failed.", self.name) return False def pool_define(self): @@ -400,9 +402,9 @@ def pool_define(self): """ result = self.virsh.pool_define(self.xml) if result.exit_status: - logging.error("Define %s failed.\n" - "Detail: %s.", self.name, - result.stderr_text) + LOG.error("Define %s failed.\n" + "Detail: %s.", self.name, + result.stderr_text) return False return True @@ -417,7 +419,7 @@ def pool_rename(name, new_name, uuid=None, virsh_instance=base.virsh): """ pool_ins = libvirt_storage.StoragePool() if not pool_ins.is_pool_persistent(name): - logging.error("Cannot rename for transient pool") + LOG.error("Cannot rename for transient pool") return False start_pool = False if pool_ins.is_pool_active(name): @@ -441,7 +443,7 @@ def _cleanup(details=""): else: poolxml.uuid = uuid # Re-define XML to libvirt - logging.debug("Rename pool: %s to %s.", name, new_name) + LOG.debug("Rename pool: %s to %s.", name, new_name) # error message for failed define error_msg = "Error reported while defining pool:\n" try: @@ -454,7 +456,7 @@ def _cleanup(details=""): backup.pool_define() raise xcepts.LibvirtXMLError(error_msg + "%s" % detail) if not poolxml.pool_define(): - logging.info("Pool xml: %s" % poolxml.get('xml')) + LOG.info("Pool xml: %s" % poolxml.get('xml')) _cleanup(details="Define pool %s failed" % new_name) if start_pool: pool_ins.start_pool(new_name) @@ -472,7 +474,7 @@ def backup_xml(name, virsh_instance=base.virsh): except Exception as detail: if os.path.exists(xml_file): os.remove(xml_file) - logging.error("Failed to backup xml file:\n%s", detail) + LOG.error("Failed to backup xml file:\n%s", detail) return "" def debug_xml(self): @@ -481,4 +483,4 @@ def debug_xml(self): """ xml = str(self) for 
debug_line in str(xml).splitlines(): - logging.debug("Pool XML: %s", debug_line) + LOG.debug("Pool XML: %s", debug_line) diff --git a/virttest/libvirt_xml/vm_xml.py b/virttest/libvirt_xml/vm_xml.py index 51792a72b8..10a5f9f107 100755 --- a/virttest/libvirt_xml/vm_xml.py +++ b/virttest/libvirt_xml/vm_xml.py @@ -12,6 +12,8 @@ from ..libvirt_xml import base, accessors, xcepts from ..libvirt_xml.devices import librarian +LOG = logging.getLogger('avocado.' + __name__) + class VMXMLDevices(list): @@ -601,8 +603,8 @@ def del_controller(self, controller_type=None): # no seclabel tag found in xml. del_controllers = self.get_controllers(controller_type=controller_type) if del_controllers == []: - logging.debug("Controller %s for this domain does not " - "exist" % controller_type) + LOG.debug("Controller %s for this domain does not " + "exist" % controller_type) for controller in del_controllers: self.xmltreefile.remove(controller) @@ -697,9 +699,9 @@ def define(self, virsh_instance=base.virsh): """Define VM with virsh from this instance""" result = virsh_instance.define(self.xml) if result.exit_status: - logging.error("Define %s failed.\n" - "Detail: %s.", self.vm_name, - result.stderr_text) + LOG.error("Define %s failed.\n" + "Detail: %s.", self.vm_name, + result.stderr_text) return False return True @@ -709,7 +711,7 @@ def sync(self, options=None, virsh_instance=base.virsh): try: backup = self.new_from_dumpxml(self.vm_name, virsh_instance=virsh_instance) except IOError: - logging.debug("Failed to backup %s.", self.vm_name) + LOG.debug("Failed to backup %s.", self.vm_name) backup = None if not self.undefine(options, virsh_instance=virsh_instance): @@ -720,8 +722,8 @@ def sync(self, options=None, virsh_instance=base.virsh): if result_define.exit_status: if backup: backup.define(virsh_instance=virsh_instance) - logging.error("Failed to define %s from xml:\n%s" - % (self.vm_name, self.xmltreefile)) + LOG.error("Failed to define %s from xml:\n%s" + % (self.vm_name, 
self.xmltreefile)) raise xcepts.LibvirtXMLError("Failed to define %s for reason:\n%s" % (self.vm_name, result_define.stderr_text)) @@ -764,7 +766,7 @@ def _cleanup(details=""): del vmxml.uuid else: vmxml.uuid = uuid - logging.debug("Rename %s to %s.", vm.name, new_name) + LOG.debug("Rename %s to %s.", vm.name, new_name) if not vmxml.define(): _cleanup(details="Define VM %s failed" % new_name) # Update the name and uuid property for VM object @@ -842,7 +844,7 @@ def set_vm_vcpus(vm_name, vcpus, current=None, sockets=None, cores=None, try: vmcpu_xml = vmxml['cpu'] except xcepts.LibvirtXMLNotFoundError: - logging.debug("Can not find any cpu tag, now create one.") + LOG.debug("Can not find any cpu tag, now create one.") vmcpu_xml = VMCPUXML() if topology_correction and ((int(sockets) * int(cores) * int(threads)) != vcpus): @@ -902,8 +904,8 @@ def set_vm_vcpus(vm_name, vcpus, current=None, sockets=None, cores=None, else: vmcpu_xml.set_numa_cell(vmcpu_xml.dicts_to_cells(nodexml_list)) else: - logging.warning("Guest numa could not be updated, expect " - "failures if guest numa is checked") + LOG.warning("Guest numa could not be updated, expect " + "failures if guest numa is checked") vmxml['cpu'] = vmcpu_xml except xcepts.LibvirtXMLNotFoundError: pass @@ -936,7 +938,7 @@ def get_cpu_topology(self): try: topology = self.cpu.topology except Exception: - logging.debug("/ xml element not found") + LOG.debug("/ xml element not found") return topology def get_disk_all(self): @@ -988,7 +990,7 @@ def get_disk_all_by_expr(self, *args): for expr in args: attr_expr = re.search(EXPR_PARSER, expr) if not attr_expr: - logging.error("invalid expression: %s", expr) + LOG.error("invalid expression: %s", expr) return disks attr_name, operator, attr_val = [ attr_expr.group(i) for i in range(1, 4)] @@ -1090,7 +1092,7 @@ def get_disk_attr(vm_name, target, tag, attr, virsh_instance=base.virsh): if tag in ["driver", "boot", "address", "alias", "source"]: attr_value = disk.find(tag).get(attr) 
except AttributeError: - logging.error("No %s/%s found.", tag, attr) + LOG.error("No %s/%s found.", tag, attr) return attr_value @@ -1115,7 +1117,7 @@ def check_disk_exist(vm_name, disk_src, virsh_instance=base.virsh): for disk in disk_list: file_list.append(disk.find('source').get('file')) except AttributeError: - logging.debug("No 'file' type disk.") + LOG.debug("No 'file' type disk.") if disk_src in file_list + blk_list: found = True return found @@ -1146,7 +1148,7 @@ def check_disk_type(vm_name, disk_src, disk_type, virsh_instance=base.virsh): if disk_src == disk_dev: found = True except AttributeError as detail: - logging.debug(str(detail)) + LOG.debug(str(detail)) continue return found @@ -1170,7 +1172,7 @@ def get_disk_serial(vm_name, disk_target, virsh_instance=base.virsh): try: serial = disk.find("serial").text except AttributeError: - logging.debug("No serial assigned.") + LOG.debug("No serial assigned.") return serial @@ -1195,7 +1197,7 @@ def get_disk_address(vm_name, disk_target, virsh_instance=base.virsh): disk_bus = disk.find("target").get("bus") address = disk.find("address") add_type = address.get("type") - logging.info("add_type %s", add_type) + LOG.info("add_type %s", add_type) if add_type == "ccw": cssid = address.get("cssid") ssid = address.get("ssid") @@ -1266,7 +1268,7 @@ def set_primary_serial(vm_name, dev_type, port, path=None, try: serial = vmxml.get_primary_serial()['serial'] except AttributeError: - logging.debug("Can not find any serial, now create one.") + LOG.debug("Can not find any serial, now create one.") # Create serial tree, default is pty serial = xml_utils.ElementTree.SubElement( xmltreefile.find('devices'), @@ -1318,7 +1320,7 @@ def set_agent_channel(self, src_path=None, :param ignore_exist: Whether add a channel even if another already exists. 
""" if not ignore_exist and self.get_agent_channels(): - logging.debug("Guest agent channel already exists") + LOG.debug("Guest agent channel already exists") return if not src_path: @@ -1442,9 +1444,9 @@ def get_iftune_params(vm_name, options="", virsh_instance=base.virsh): iftune_params['inbound'] = bandwidth.find('inbound') iftune_params['outbound'] = bandwidth.find('outbound') except AttributeError: - logging.error("Can't find or element") + LOG.error("Can't find or element") except AttributeError: - logging.error("Can't find element") + LOG.error("Can't find element") return iftune_params @@ -1517,7 +1519,7 @@ def set_cpu_mode(vm_name, mode='host-model', model='', try: cpuxml = vmxml['cpu'] except xcepts.LibvirtXMLNotFoundError: - logging.debug("Can not find any cpu tag, now create one.") + LOG.debug("Can not find any cpu tag, now create one.") cpuxml = VMCPUXML() cpuxml['mode'] = mode if model: @@ -1542,8 +1544,8 @@ def add_device(self, value, allow_dup=False): if not allow_dup: for device in devices: if device == value: - logging.debug("Device %s is already in VM %s.", - value, self.vm_name) + LOG.debug("Device %s is already in VM %s.", + value, self.vm_name) return devices.append(value) self.set_devices(devices) @@ -1566,8 +1568,7 @@ def del_device(self, value, by_tag=False): devices.remove(device) break if not_found: - logging.debug("Device %s does not exist in VM %s.", - value, self.vm_name) + LOG.debug("Device %s does not exist in VM %s.", value, self.vm_name) return self.set_devices(devices) @@ -1603,7 +1604,7 @@ def set_graphics_attr(vm_name, attr, index=0, virsh_instance=base.virsh): vm_name, virsh_instance=virsh_instance) graphic = vmxml.xmltreefile.find('devices').findall('graphics') for key in attr: - logging.debug("Set %s='%s'" % (key, attr[key])) + LOG.debug("Set %s='%s'" % (key, attr[key])) graphic[index].set(key, attr[key]) vmxml.sync(virsh_instance=virsh_instance) @@ -1677,9 +1678,9 @@ def get_blkio_params(vm_name, options="", 
virsh_instance=base.virsh): try: blkio_params['weight'] = blkio.find('weight').text except AttributeError: - logging.error("Can't find element") + LOG.error("Can't find element") except AttributeError: - logging.error("Can't find element") + LOG.error("Can't find element") if blkio and blkio.find('device'): blkio_params['device_weights_path'] = \ @@ -1833,7 +1834,7 @@ def set_vm_features(vm_name, **attrs): try: for attr_key, value in attrs.items(): setattr(features_xml, attr_key, value) - logging.debug('New features_xml: %s', features_xml) + LOG.debug('New features_xml: %s', features_xml) vmxml.features = features_xml vmxml.sync() except (AttributeError, TypeError, ValueError) as detail: @@ -2187,7 +2188,7 @@ def remove_elem_by_xpath(self, xpath_to_remove, remove_all=True): try: self.xmltreefile.remove_by_xpath(xpath_to_remove, remove_all) except (AttributeError, TypeError): - logging.info("Element '%s' already doesn't exist", xpath_to_remove) + LOG.info("Element '%s' already doesn't exist", xpath_to_remove) self.xmltreefile.write() @staticmethod @@ -3311,7 +3312,7 @@ def add_feature(self, name, attr_name='', attr_value=''): :params name: Feature name """ if self.has_feature(name): - logging.debug("Feature %s already exist, so remove it", name) + LOG.debug("Feature %s already exist, so remove it", name) self.remove_feature(name) root = self.__dict_get__('xml').getroot() new_attr = {} @@ -3328,7 +3329,7 @@ def remove_feature(self, name): root = self.__dict_get__('xml').getroot() remove_feature = root.find(name) if remove_feature is None: - logging.error("Feature %s doesn't exist", name) + LOG.error("Feature %s doesn't exist", name) else: root.remove(remove_feature) diff --git a/virttest/libvirtd_decorator.py b/virttest/libvirtd_decorator.py index fa7f63e44e..9d5ae09486 100644 --- a/virttest/libvirtd_decorator.py +++ b/virttest/libvirtd_decorator.py @@ -23,6 +23,8 @@ except path.CmdNotFoundError: LIBVIRTD = None +LOG = logging.getLogger('avocado.' 
+ __name__) + def get_libvirtd_split_enable_bit(): base_cfg_path = os.path.join(data_dir.get_shared_dir(), 'cfg', 'base.cfg') @@ -32,7 +34,7 @@ def get_libvirtd_split_enable_bit(): if 'enable_split_libvirtd_feature' in line and 'yes' in line and '#' not in line: return True else: - logging.info("CAN NOT find base.cfg file") + LOG.info("CAN NOT find base.cfg file") return False @@ -61,22 +63,22 @@ def get_libvirt_version_compare(major, minor, update, session=None): func = session.cmd_output if LIBVIRTD is None: - logging.warn("Can not find command to dertermin libvirt version") + LOG.warn("Can not find command to dertermin libvirt version") return False libvirt_ver_cmd = "%s -V" % LIBVIRTD - logging.warn(libvirt_ver_cmd) + LOG.warn(libvirt_ver_cmd) try: regex = r'%s\s*.*[Ll]ibvirt.*\s*' % LIBVIRTD regex += r'(\d+)\.(\d+)\.(\d+)' lines = astring.to_text(func(libvirt_ver_cmd)).splitlines() - logging.warn("libvirt version value by libvirtd or virtqemud command: %s" % lines) + LOG.warn("libvirt version value by libvirtd or virtqemud command: %s" % lines) for line in lines: match = re.search(regex, line.strip()) if match: LIBVIRT_LIB_VERSION = int(match.group(1)) * 1000000 + int(match.group(2)) * 1000 + int(match.group(3)) break except (ValueError, TypeError, AttributeError): - logging.warn("Error determining libvirt version") + LOG.warn("Error determining libvirt version") return False compare_version = major * 1000000 + minor * 1000 + update @@ -113,10 +115,10 @@ def new_fn(*args, **kwargs): """ check_libvirt_version() if not IS_LIBVIRTD_SPLIT_VERSION or not LIBVIRTD_SPLIT_ENABLE_BIT: - logging.warn("legacy start libvirtd daemon NORMALLY with function name: %s" % fn.__name__) + LOG.warn("legacy start libvirtd daemon NORMALLY with function name: %s" % fn.__name__) return fn(*args, **kwargs) else: - logging.warn("legacy start libvirtd daemon IGNORED with function name: %s" % fn.__name__) + LOG.warn("legacy start libvirtd daemon IGNORED with function name: %s" % 
fn.__name__) return None return new_fn @@ -136,9 +138,9 @@ def new_fn(*args, **kwargs): """ check_libvirt_version() if IS_LIBVIRTD_SPLIT_VERSION and LIBVIRTD_SPLIT_ENABLE_BIT: - logging.warn("Split start libvirtd daemon NORMALLY with function name: %s" % fn.__name__) + LOG.warn("Split start libvirtd daemon NORMALLY with function name: %s" % fn.__name__) return fn(*args, **kwargs) else: - logging.warn("Split start libvirtd daemon IGNORED with function name: %s" % fn.__name__) + LOG.warn("Split start libvirtd daemon IGNORED with function name: %s" % fn.__name__) return None return new_fn diff --git a/virttest/logging_manager.py b/virttest/logging_manager.py index dc3f1137f7..240f0b8550 100644 --- a/virttest/logging_manager.py +++ b/virttest/logging_manager.py @@ -3,7 +3,7 @@ # implementation follows -logger = logging.getLogger() +logger = logging.getLogger('avocado.vt') _caller_code_to_skip_in_logging_stack = set() @@ -25,7 +25,7 @@ class LoggingFile(object): """ def __init__(self, prefix='', level=logging.DEBUG, - logger=logging.getLogger()): + logger=logging.getLogger('avocado.vt')): """ :param prefix - The prefix for each line logged by this object. """ diff --git a/virttest/lvm.py b/virttest/lvm.py index c58d9e1ccd..0d2c251352 100644 --- a/virttest/lvm.py +++ b/virttest/lvm.py @@ -35,6 +35,8 @@ from virttest import utils_misc from virttest import data_dir +LOG = logging.getLogger('avocado.' 
+ __name__) + UNIT = "B" COMMON_OPTS = "--noheading --nosuffix --unit=%s" % UNIT @@ -49,7 +51,7 @@ def normalize_data_size(size): def cmd_output(cmd, res="[\w/]+"): result = process.run(cmd, ignore_status=True) if result.exit_status != 0: - logging.warn(result) + LOG.warn(result) return None output = result.stdout_text for line in output.splitlines(): @@ -119,7 +121,7 @@ def create(self, extra_args="-ff --yes"): self.umount() cmd = "pvcreate %s %s" % (extra_args, self.name) process.system(cmd) - logging.info("Create physical volume: %s", self.name) + LOG.info("Create physical volume: %s", self.name) return self.path def remove(self, extra_args=" -ff --yes"): @@ -131,7 +133,7 @@ def remove(self, extra_args=" -ff --yes"): """ cmd = "lvm pvremove %s %s" % (extra_args, self.name) process.system(cmd) - logging.info("logical physical volume (%s) removed", self.name) + LOG.info("logical physical volume (%s) removed", self.name) def resize(self, size, extra_args="-ff --yes"): """ @@ -147,7 +149,7 @@ def resize(self, size, extra_args="-ff --yes"): self.name) process.system(cmd) self.size = size - logging.info("resize volume %s to %s B" % (self.name, self.size)) + LOG.info("resize volume %s to %s B" % (self.name, self.size)) def display(self): """ @@ -203,7 +205,7 @@ def create(self, extra_args="-ff --yes"): pv.vg.remove() cmd += " %s" % pv.name process.system(cmd) - logging.info("Create new volumegroup %s", self.name) + LOG.info("Create new volumegroup %s", self.name) return self.name def remove(self, extra_args="-ff --yes"): @@ -214,7 +216,7 @@ def remove(self, extra_args="-ff --yes"): """ cmd = "lvm vgremove %s %s" % (extra_args, self.name) process.system(cmd) - logging.info("logical volume-group(%s) removed", self.name) + LOG.info("logical volume-group(%s) removed", self.name) def get_attr(self, attr): """ @@ -248,8 +250,7 @@ def reduce_pv(self, pv, extra_args="-ff --yes"): cmd = "lvm vgreduce %s %s %s" % (extra_args, self.name, pv.name) process.system(cmd) 
self.pvs.remove(pv) - logging.info("reduce volume %s from volume group %s" % (pv.name, - self.name)) + LOG.info("reduce volume %s from volume group %s" % (pv.name, self.name)) def extend_pv(self, pv, extra_args=""): """ @@ -263,7 +264,7 @@ def extend_pv(self, pv, extra_args=""): cmd = "lvm vgextend %s %s" % (self.name, pv.name) process.system(cmd) self.pvs.append(pv) - logging.info("add volume %s to volumegroup %s" % (pv.name, self.name)) + LOG.info("add volume %s to volumegroup %s" % (pv.name, self.name)) def exists(self): """ @@ -297,7 +298,7 @@ def create(self): if self.lv_extra_options: cmd += " %s" % self.lv_extra_options process.system(cmd) - logging.info("create logical volume %s", self.path) + LOG.info("create logical volume %s", self.path) return self.get_attr("lv_path") def remove(self, extra_args="-ff --yes", timeout=300): @@ -314,7 +315,7 @@ def remove(self, extra_args="-ff --yes", timeout=300): self.vg.name, self.name) status = process.system(cmd, ignore_status=True) if status == 0: - logging.info("logical volume(%s) removed", self.name) + LOG.info("logical volume(%s) removed", self.name) break time.sleep(0.5) @@ -337,8 +338,7 @@ def resize(self, size, extra_args="-ff"): cmd = "lvm lvresize -n -L %s%s %s %s" % (size, UNIT, path, extra_args) process.system(cmd) self.size = size - logging.info("resize logical volume %s size to %s" % (self.path, - self.size)) + LOG.info("resize logical volume %s size to %s" % (self.path, self.size)) return size def display(self, extra_args=""): @@ -418,7 +418,7 @@ def register(self, vol): """ if isinstance(vol, Volume) or isinstance(vol, VolumeGroup): self.trash.append(vol) - logging.info("Install new volume %s", vol.name) + LOG.info("Install new volume %s", vol.name) def unregister(self, vol): """ @@ -428,7 +428,7 @@ def unregister(self, vol): """ if vol in self.trash: self.trash.remove(vol) - logging.info("Uninstall volume %s", vol.name) + LOG.info("Uninstall volume %s", vol.name) def __reload_lvs(self): """ @@ -533,8 
+533,8 @@ def setup_vg(self, lv): pv.set_vg(vg) self.vgs.append(vg) else: - logging.info("VolumeGroup(%s) really exists" % vg_name + - "skip to create it") + LOG.info("VolumeGroup(%s) really exists" % vg_name + + "skip to create it") pv_name = self.params["pv_name"].split()[0] pv = self.get_vol(pv_name, "pvs") if pv and pv.vg is vg: @@ -569,12 +569,12 @@ def setup_lv(self): self.register(lv) self.lvs.append(lv) else: - logging.info("LogicalVolume(%s) really exists " % lv_name + - "skip to create it") + LOG.info("LogicalVolume(%s) really exists " % lv_name + + "skip to create it") if lv.size != lv_size: lv.display() - logging.warn("lv size(%s) mismath," % lv.size + - "required size %s;" % lv_size) + LOG.warn("lv size(%s) mismath," % lv.size + + "required size %s;" % lv_size) lv.resize(lv_size) return lv @@ -617,7 +617,7 @@ def rescan(self): lvm_reload_cmd = self.params.get("lvm_reload_cmd") if lvm_reload_cmd: process.system(lvm_reload_cmd, ignore_status=True) - logging.info("reload lvm monitor service") + LOG.info("reload lvm monitor service") class EmulatedLVM(LVM): @@ -642,7 +642,7 @@ def make_emulate_image(self): img_path = self.get_emulate_image_name() bs_size = normalize_data_size("8M") count = int(math.ceil(img_size / bs_size)) + 8 - logging.info("create emulated image file(%s)" % img_path) + LOG.info("create emulated image file(%s)" % img_path) cmd = "dd if=/dev/zero of=%s bs=8M count=%s" % (img_path, count) process.system(cmd) self.params["pv_size"] = count * bs_size @@ -682,8 +682,8 @@ def setup_pv(self, vg): self.register(pv) self.pvs.append(pv) else: - logging.warn("PhysicalVolume(%s) really exists" % pv_name + - "skip to create it") + LOG.warn("PhysicalVolume(%s) really exists" % pv_name + + "skip to create it") pv.set_vg(vg) pvs.append(pv) return pvs @@ -714,9 +714,9 @@ def cleanup(self): devices = re.findall("(/dev/loop\d+)", output, re.M | re.I) for dev in devices: cmd = "losetup -d %s" % dev - logging.info("disconnect %s", dev) + LOG.info("disconnect 
%s", dev) process.system(cmd, ignore_status=True) emulate_image_file = self.get_emulate_image_name() cmd = "rm -f %s" % emulate_image_file process.system(cmd, ignore_status=True) - logging.info("remove emulate image file %s", emulate_image_file) + LOG.info("remove emulate image file %s", emulate_image_file) diff --git a/virttest/lvsb.py b/virttest/lvsb.py index 79c3799563..babef57199 100644 --- a/virttest/lvsb.py +++ b/virttest/lvsb.py @@ -11,6 +11,8 @@ from virttest import lvsb_base +LOG = logging.getLogger('avocado.' + __name__) + # This utility function lets test-modules quickly create a list of all # sandbox aggregate types, themselves containing a list of individual # sandboxes. @@ -110,8 +112,8 @@ def results(self, each_timeout=5): total_timeout_seconds)) # Kill off all sandboxes, just to be safe self.for_each(lambda sb: sb.stop()) - logging.info("%d sandboxe(s) finished in %s", self.count, - end - start) + LOG.info("%d sandboxe(s) finished in %s", self.count, + end - start) # Return a list of stdout contents from each return self.for_each(lambda sb: sb.recv()) diff --git a/virttest/lvsb_base.py b/virttest/lvsb_base.py index b0b0896c52..627812351f 100644 --- a/virttest/lvsb_base.py +++ b/virttest/lvsb_base.py @@ -12,6 +12,9 @@ from six.moves import xrange +LOG = logging.getLogger('avocado.' 
+ __name__) + + class SandboxException(Exception): """ @@ -88,7 +91,7 @@ def close_session(self, warn_if_nonexist=True): self.session.close() else: if warn_if_nonexist: - logging.warning("Closing nonexisting sandbox session") + LOG.warning("Closing nonexisting sandbox session") def kill_session(self, sig=signal.SIGTERM): """ @@ -204,7 +207,7 @@ def run(self, extra=None): :param extra: String of extra command-line to use but not store """ sandbox_cmdline = self.make_sandbox_command_line(extra) - logging.debug("Launching %s", sandbox_cmdline) + LOG.debug("Launching %s", sandbox_cmdline) self._session.new_session(sandbox_cmdline) def stop(self): @@ -469,7 +472,7 @@ def __init__(self, params, env): if key_gen in params and not option: self.flag.append(value) - logging.debug("All of options(%s) and flags(%s)", self.opts, self.flag) + LOG.debug("All of options(%s) and flags(%s)", self.opts, self.flag) def init_sandboxes(self): """ diff --git a/virttest/migration.py b/virttest/migration.py index 6313212cb7..04a259a051 100644 --- a/virttest/migration.py +++ b/virttest/migration.py @@ -22,6 +22,9 @@ from virttest.utils_test import libvirt +LOG = logging.getLogger('avocado.' 
+ __name__) + + # Migration Relative functions############## class MigrationTest(object): @@ -64,13 +67,12 @@ def post_migration_check(self, vms, params, uptime=None, uri=None): if not libvirt.check_vm_state(vm.name, vm_state, uri=uri): raise exceptions.TestFail("Migrated VMs failed to be in %s " "state at destination" % vm_state) - logging.info("Guest state is '%s' at destination is as expected", - vm_state) + LOG.info("Guest state is '%s' at destination is as expected", + vm_state) if "offline" not in params.get("migrate_options", params.get("virsh_migrate_options", "")): if uptime: vm_uptime = vm.uptime(connect_uri=uri) - logging.info("uptime of migrated VM %s: %s", vm.name, - vm_uptime) + LOG.info("uptime of migrated VM %s: %s", vm.name, vm_uptime) if vm_uptime < uptime[vm.name]: raise exceptions.TestFail("vm went for a reboot during " "migration") @@ -123,9 +125,9 @@ def ping_vm(self, vm, params, uri=None, ping_count=10, # and raise if VM fails to respond vm_ip[vm.name] = vm.get_address(session=server_session, timeout=240) - logging.info("Check VM network connectivity after migrating") + LOG.info("Check VM network connectivity after migrating") else: - logging.info("Check VM network connectivity before migration") + LOG.info("Check VM network connectivity before migration") if not vm.is_alive(): vm.start() vm.wait_for_login() @@ -133,9 +135,9 @@ def ping_vm(self, vm, params, uri=None, ping_count=10, params["vm_ip_dict"] = vm_ip s_ping, o_ping = utils_net.ping(vm_ip[vm.name], count=ping_count, timeout=ping_timeout, - output_func=logging.debug, + output_func=LOG.debug, session=server_session) - logging.info(o_ping) + LOG.info(o_ping) if uri and uri != 'qemu:///system': vm.connect_uri = uri_backup if server_session: @@ -143,8 +145,8 @@ def ping_vm(self, vm, params, uri=None, ping_count=10, if s_ping != 0: if uri: if "offline" in params.get("migrate_options", ""): - logging.info("Offline Migration: %s will not responded to " - "ping as expected", vm.name) + 
LOG.info("Offline Migration: %s will not responded to " + "ping as expected", vm.name) return func("%s did not respond after %d sec." % (vm.name, ping_timeout)) @@ -176,11 +178,11 @@ def thread_func_migration(self, vm, desturi, options=None, etime = int(time.time()) self.mig_time[vm.name] = etime - stime if self.ret.exit_status != 0: - logging.debug("Migration to %s returns failed exit status %d", - desturi, self.ret.exit_status) + LOG.debug("Migration to %s returns failed exit status %d", + desturi, self.ret.exit_status) is_error = True except process.CmdError as detail: - logging.error("Migration to %s failed:\n%s", desturi, detail) + LOG.error("Migration to %s failed:\n%s", desturi, detail) is_error = True finally: if is_error is True: @@ -206,7 +208,7 @@ def migrate_pre_setup(self, desturi, params, try: utils_path.find_command("firewall-cmd") except utils_path.CmdNotFoundError: - logging.debug("Using iptables for replacement") + LOG.debug("Using iptables for replacement") use_firewall_cmd = False if use_firewall_cmd: @@ -312,7 +314,7 @@ def _run_collect_event_cmd(): auto_close=True, uri=srcuri) virsh_event_session.sendline(cmd) - logging.debug("Begin to collect domain events...") + LOG.debug("Begin to collect domain events...") return virsh_event_session def _need_collect_events(funcs_to_run): @@ -367,10 +369,10 @@ def _run_simple_func(vm, one_func): one_func(vm.name, uri=srcuri, debug=True) else: if 'func_params' in args: - logging.debug("Run function {} with parameters".format(one_func)) + LOG.debug("Run function {} with parameters".format(one_func)) one_func(args['func_params']) else: - logging.debug("Run function {}".format(one_func)) + LOG.debug("Run function {}".format(one_func)) one_func() def _run_complex_func(vm, one_func, virsh_event_session=None): @@ -383,7 +385,7 @@ def _run_complex_func(vm, one_func, virsh_event_session=None): :raises: exceptions.TestError if any error happens """ - logging.debug("Handle function invoking:%s", one_func) + 
LOG.debug("Handle function invoking:%s", one_func) before_vm_pause = 'yes' == one_func.get('before_pause', 'no') after_event = one_func.get('after_event') before_event = one_func.get('before_event') @@ -393,14 +395,14 @@ def _run_complex_func(vm, one_func, virsh_event_session=None): "events is not provided") if after_event: - logging.debug("Below events are received:" - "%s", virsh_event_session.get_stripped_output()) + LOG.debug("Below events are received:" + "%s", virsh_event_session.get_stripped_output()) if not utils_misc.wait_for( lambda: re.findall(after_event, virsh_event_session.get_stripped_output()), 30): raise exceptions.TestError("Unable to find " "event {}".format(after_event)) - logging.debug("Receive the event '{}'".format(after_event)) + LOG.debug("Receive the event '{}'".format(after_event)) # If 'before_event' is provided, then 'after_event' must be provided if before_event and re.findall(before_event, virsh_event_session.get_stripped_output()): @@ -421,11 +423,11 @@ def _run_complex_func(vm, one_func, virsh_event_session=None): func_param = one_func.get("func_param") if func_param: #one_param_dict = args['multi_func_params'][func] - logging.debug("Run function {} with " - "parameters '{}'".format(func, func_param)) + LOG.debug("Run function {} with " + "parameters '{}'".format(func, func_param)) self.func_ret.update({func: func(func_param)}) else: - logging.debug("Run function {}".format(func)) + LOG.debug("Run function {}".format(func)) self.func_ret.update({func: func()}) def _run_funcs(vm, funcs_to_run, before_pause, virsh_event_session=None): @@ -444,8 +446,8 @@ def _run_funcs(vm, funcs_to_run, before_pause, virsh_event_session=None): if not before_pause: _run_simple_func(vm, one_func) else: - logging.error("Only support to run the function " - "after guest is paused") + LOG.error("Only support to run the function " + "after guest is paused") elif isinstance(one_func, dict): before_vm_pause = 'yes' == one_func.get('before_pause', 'no') if 
before_vm_pause == before_pause: @@ -494,18 +496,18 @@ def _do_orderly_migration(vm_name, vm, srcuri, desturi, options=None, migrate_options=migrate_options.strip()) if migration_started: - logging.info("Migration started for %s", vm.name) + LOG.info("Migration started for %s", vm.name) time.sleep(3) # To avoid executing the command lines before starting migration _run_funcs(vm, funcs_to_run, before_pause=False, virsh_event_session=virsh_event_session) else: - logging.error("Migration failed to start for %s", vm.name) + LOG.error("Migration failed to start for %s", vm.name) eclipse_time = int(time.time()) - stime - logging.debug("start_time:%d, eclipse_time:%d", stime, eclipse_time) + LOG.debug("start_time:%d, eclipse_time:%d", stime, eclipse_time) if eclipse_time < thread_timeout: migration_thread.join(thread_timeout - eclipse_time) if migration_thread.is_alive(): - logging.error("Migrate %s timeout.", migration_thread) + LOG.error("Migrate %s timeout.", migration_thread) self.RET_LOCK.acquire() self.RET_MIGRATION = False self.RET_LOCK.release() @@ -543,7 +545,7 @@ def _do_orderly_migration(vm_name, vm, srcuri, desturi, options=None, thread2.join(thread_timeout) vm_remote = vm if thread1.is_alive() or thread1.is_alive(): - logging.error("Cross migrate timeout.") + LOG.error("Cross migrate timeout.") self.RET_LOCK.acquire() self.RET_MIGRATION = False self.RET_LOCK.release() @@ -563,14 +565,14 @@ def _do_orderly_migration(vm_name, vm, srcuri, desturi, options=None, for thread in migration_threads: thread.join(thread_timeout) if thread.is_alive(): - logging.error("Migrate %s timeout.", thread) + LOG.error("Migrate %s timeout.", thread) self.RET_LOCK.acquire() self.RET_MIGRATION = False self.RET_LOCK.release() if not self.RET_MIGRATION and not ignore_status: raise exceptions.TestFail() - logging.info("Checking migration result...") + LOG.info("Checking migration result...") self.check_result(self.ret, args) def cleanup_dest_vm(self, vm, srcuri, desturi): @@ -598,7 
+600,7 @@ def cleanup_vm(self, vm, desturi): try: self.cleanup_dest_vm(vm, vm.connect_uri, desturi) except Exception as err: - logging.error(err) + LOG.error(err) if vm.is_alive(): vm.destroy(gracefully=False) @@ -696,8 +698,8 @@ def check_result(self, result, params): if not result: raise exceptions.TestError("No migration result is returned.") - logging.info("Migration out: %s", result.stdout_text.strip()) - logging.info("Migration error: %s", result.stderr_text.strip()) + LOG.info("Migration out: %s", result.stdout_text.strip()) + LOG.info("Migration error: %s", result.stderr_text.strip()) if status_error: # Migration should fail if err_msg: # Special error messages are expected @@ -707,10 +709,10 @@ def check_result(self, result, params): % (err_msg, result.stderr_text.strip())) else: - logging.debug("It is the expected error message") + LOG.debug("It is the expected error message") else: if int(result.exit_status) != 0: - logging.debug("Migration failure is expected result") + LOG.debug("Migration failure is expected result") else: raise exceptions.TestFail("Migration success is unexpected result") else: @@ -731,7 +733,7 @@ def _get_pid(): pid = utils_misc.wait_for(_get_pid, 30) if utils_misc.safe_kill(pid, sig): - logging.info("Succeed to cancel migration: [%s].", pid.strip()) + LOG.info("Succeed to cancel migration: [%s].", pid.strip()) else: raise exceptions.TestError("Fail to cancel migration: [%s]" % pid.strip()) @@ -780,10 +782,10 @@ def _set_speed(extra_option=''): old_speed = virsh.migrate_getspeed(vm_name, extra=extra_option, **virsh_args) - logging.debug("Current %s migration speed is %s " - "MiB/s\n", extra_option, old_speed.stdout_text.strip()) - logging.debug("Set %s migration speed to %d " - "MiB/s\n", extra_option, to_speed) + LOG.debug("Current %s migration speed is %s " + "MiB/s\n", extra_option, old_speed.stdout_text.strip()) + LOG.debug("Set %s migration speed to %d " + "MiB/s\n", extra_option, to_speed) virsh.migrate_setspeed(vm_name, 
to_speed, extra=extra_option, **virsh_args) @@ -796,10 +798,10 @@ def _set_speed(extra_option=''): if not libvirt_version.version_compare(5, 0, 0): if mode == 'both': - logging.warning("%s Only precopy speed is set.", warning_msg) + LOG.warning("%s Only precopy speed is set.", warning_msg) mode = 'precopy' if mode == 'postcopy': - logging.warning("%s Skipping", warning_msg) + LOG.warning("%s Skipping", warning_msg) return if mode == 'both': _set_speed() diff --git a/virttest/migration_template.py b/virttest/migration_template.py index 995e17731a..be8d409477 100644 --- a/virttest/migration_template.py +++ b/virttest/migration_template.py @@ -43,6 +43,8 @@ VIR_MIGRATE_PERSIST_DEST_XML = (1 << 14) VIR_MIGRATE_DEST_XML = (1 << 15) +LOG = logging.getLogger('avocado.' + __name__) + # Phase of the migration test class phase(Enum): @@ -66,7 +68,7 @@ def set_phase(phase): :param phase: Enum value, phase to be set """ global CURRENT_PHASE - logging.info("Entering phase: %s", phase.name) + LOG.info("Entering phase: %s", phase.name) CURRENT_PHASE = phase @@ -365,7 +367,7 @@ def _setup_common(self): self._setup_for_modular_daemon() # Back up vm xml for recovery - logging.debug("Backup vm xml before migration") + LOG.debug("Backup vm xml before migration") for vm in self.vms: backup = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) if not backup: @@ -374,23 +376,23 @@ def _setup_common(self): self.vm_xml_backup.append(backup) # Destroy vm on src host if it's alive - logging.debug("Destroy vm on src host") + LOG.debug("Destroy vm on src host") for vm in self.vms: if vm.is_alive(): vm.destroy() # Do migration pre-setup - logging.debug("Do migration pre-setup") + LOG.debug("Do migration pre-setup") self.obj_migration.migrate_pre_setup(self.dest_uri, self.params) if self.migrate_vm_back == 'yes': - logging.debug("Do migration pre-setup for migrate back") + LOG.debug("Do migration pre-setup for migrate back") self.obj_migration.migrate_pre_setup(self.src_uri, self.params) # Setup 
libvirtd remote access env self._setup_libvirtd_remote_access() # Clean up vm on dest host - logging.debug("Clean up vm on dest host before migration") + LOG.debug("Clean up vm on dest host before migration") for vm in self.vms: self.obj_migration.cleanup_dest_vm(vm, self.src_uri, self.dest_uri) @@ -415,7 +417,7 @@ def _start_vm(self): """ for vm in self.vms: - logging.debug("Start vm %s.", vm.name) + LOG.debug("Start vm %s.", vm.name) vm.start() # Make sure vm fully boots up @@ -453,12 +455,12 @@ def _migrate(self): # Set selinux state before migration # NOTE: if selinux state is set too early, it may be changed # in other methods unexpectedly, so set it just before migration - logging.debug("Set selinux to enforcing before migration") + LOG.debug("Set selinux to enforcing before migration") utils_selinux.set_status(self.selinux_state) # TODO: Set selinux on migrate_dest_host # Check vm uptime before migration - logging.debug("Check vm uptime before migration") + LOG.debug("Check vm uptime before migration") self.uptime = {} for vm in self.vms: self.uptime[vm.name] = vm.uptime(connect_uri=vm.connect_uri) @@ -471,7 +473,7 @@ def _migrate(self): virsh.migrate_setspeed(vm.name, 1, uri=vm.connect_uri) # Monitor event "Suspended Post-copy" for postcopy migration - logging.debug("Monitor the event for postcopy migration") + LOG.debug("Monitor the event for postcopy migration") virsh_session = virsh.VirshSession(virsh_exec=virsh.VIRSH_EXEC, auto_close=True) self.objs_list.append(virsh_session) @@ -485,7 +487,7 @@ def _migrate(self): func = None # Start to do migration - logging.debug("Start to do migration") + LOG.debug("Start to do migration") thread_timeout = self.migrate_thread_timeout self.obj_migration.do_migration(self.vms, self.src_uri, self.dest_uri, "orderly", @@ -495,21 +497,20 @@ def _migrate(self): virsh_uri=self.src_uri, func=func, shell=True) - logging.info("Check migration result: succeed or" - " fail with expected error") + LOG.info("Check migration 
result: succeed or fail with expected error") self.obj_migration.check_result(self.obj_migration.ret, self.params) # Check "suspended post-copy" event after postcopy migration if self.migrate_flags & VIR_MIGRATE_POSTCOPY: - logging.debug("Check event after postcopy migration") + LOG.debug("Check event after postcopy migration") virsh_session.send_ctrl("^c") events_output = virsh_session.get_stripped_output() - logging.debug("Events_output are %s", events_output) + LOG.debug("Events_output are %s", events_output) pattern = "Suspended Post-copy" if pattern not in events_output: self.test.error("Migration didn't switch to postcopy mode") - logging.debug("Do post migration check after migrate to dest") + LOG.debug("Do post migration check after migrate to dest") self.params["migrate_options"] = self.virsh_migrate_options self.obj_migration.post_migration_check(self.vms, self.params, self.uptime, uri=self.dest_uri) @@ -529,13 +530,13 @@ def _migrate_back(self): 4.Do post migration check: check vm state, uptime, network """ # Check vm uptime before migration back - logging.debug("Check vm uptime before migrate back") + LOG.debug("Check vm uptime before migrate back") self.uptime = {} for vm in self.vms: self.uptime[vm.name] = vm.uptime(connect_uri=vm.connect_uri) # Migrate vm back to src host - logging.debug("Start to migrate vm back to src host") + LOG.debug("Start to migrate vm back to src host") self.obj_migration.do_migration(self.vms, self.dest_uri, self.src_uri_full, "orderly", options=self.virsh_migrate_options, @@ -544,8 +545,7 @@ def _migrate_back(self): virsh_uri=self.dest_uri, shell=True) - logging.info("Check migration result: succeed or" - " fail with expected error") + LOG.info("Check migration result: succeed or fail with expected error") self.obj_migration.check_result(self.obj_migration.ret, self.params) # Set vm connect_uri to self.src_uri if migration back succeeds @@ -553,7 +553,7 @@ def _migrate_back(self): for vm in self.vms: vm.connect_uri = 
self.src_uri - logging.debug("Do post migration check after migrate back to src") + LOG.debug("Do post migration check after migrate back to src") self.obj_migration.post_migration_check(self.vms, self.params, self.uptime, uri=self.src_uri) @@ -571,7 +571,7 @@ def _extra_migrate_options(self): :return extra migrate options, string type """ - logging.info("Generate extra virsh migrate options") + LOG.info("Generate extra virsh migrate options") options = self.virsh_migrate_options extra_options = "" @@ -623,7 +623,7 @@ def _extra_migrate_options(self): if self.dest_persist_xml and "--persistent-xml" not in options: extra_options += " --persistent-xml DEST_PERSIST_XML" - logging.debug("Extra migrate options is: %s", extra_options) + LOG.debug("Extra migrate options is: %s", extra_options) return extra_options def _update_xmlfile_path_in_migrate_options(self): @@ -631,7 +631,7 @@ def _update_xmlfile_path_in_migrate_options(self): Generate and replace the xml file path for --xml and/or --persistent-xml """ - logging.info("Generate and replace xml file path for --xml and/or --persistent-xml") + LOG.info("Generate and replace xml file path for --xml and/or --persistent-xml") new_options = self.virsh_migrate_options @@ -654,7 +654,7 @@ def _migrate_flags(self): :return migrate flag """ - logging.info("Generate migrate flags") + LOG.info("Generate migrate flags") flags = 0 @@ -689,7 +689,7 @@ def _migrate_flags(self): if self.dest_persist_xml: flags |= VIR_MIGRATE_PERSIST_DEST_XML - logging.debug("Migrate flags is: %s", flags) + LOG.debug("Migrate flags is: %s", flags) return flags def _setup_for_modular_daemon(self): @@ -697,7 +697,7 @@ def _setup_for_modular_daemon(self): Setup env for modular daemon """ - logging.info("Setup env for modular daemon") + LOG.info("Setup env for modular daemon") self._set_libvirt_conf_for_modular_daemon() def _set_libvirt_conf_for_modular_daemon(self): @@ -706,14 +706,14 @@ def _set_libvirt_conf_for_modular_daemon(self): """ if 
self.migrate_desturi_proto == "ssh": - logging.info("Set libvirt.conf for modular daemon if \ + LOG.info("Set libvirt.conf for modular daemon if \ migrate_desturi_proto is ssh") params = {} - logging.info("Setup src libvirt.conf for modular daemon") + LOG.info("Setup src libvirt.conf for modular daemon") conf_obj = remove_key_for_modular_daemon(params) self.local_conf_objs.append(conf_obj) if self.migrate_vm_back == "yes": - logging.info("Setup dest libvirt.conf for modular daemon") + LOG.info("Setup dest libvirt.conf for modular daemon") remote_dict = dict(self.remote_dict) remote_dict.update(file_path="/etc/libvirt/libvirt.conf") conf_obj = remove_key_for_modular_daemon(params, remote_dict) @@ -725,9 +725,9 @@ def _set_vm_disk(self, cache="none"): :param cache: vm disk cache mode """ - logging.debug("Prepare shared disk in vm xml for live migration") + LOG.debug("Prepare shared disk in vm xml for live migration") if self.storage_type == 'nfs': - logging.debug("Prepare nfs backed disk in vm xml") + LOG.debug("Prepare nfs backed disk in vm xml") for vm in self.vms: libvirt.update_vm_disk_source(vm.name, self.nfs_mount_dir) @@ -747,7 +747,7 @@ def _create_disk_image_on_dest(self): the disk image backing chain by yourself if --copy-storage-inc is used """ - logging.debug("Create disk image on dest host before migration") + LOG.debug("Create disk image on dest host before migration") all_vm_disks = self.main_vm.get_blk_devices() for disk in list(itervalues(all_vm_disks)): disk_type = disk.get("type") @@ -765,11 +765,11 @@ def _setup_libvirtd_remote_access(self): """ Setup libvirtd remote access env for migration """ - logging.debug("Setup libvirtd remote access env") + LOG.debug("Setup libvirtd remote access env") protocol = self.migrate_desturi_proto self._setup_remote_connection_base(protocol, reverse=False) if self.migrate_vm_back == 'yes': - logging.debug("Setup libvirtd remote access env\ + LOG.debug("Setup libvirtd remote access env\ for reverse migration") 
tls_args = {} if protocol == 'tls': @@ -787,12 +787,12 @@ def _setup_qemu_tls(self): """ Set up native encryption migration env """ - logging.debug("Setup qemu tls env") + LOG.debug("Setup qemu tls env") tls_args = {'custom_pki_path': '/etc/pki/qemu', 'qemu_tls': 'yes'} self._setup_remote_connection_base(protocol='tls', add_args=tls_args) if self.migrate_vm_back == 'yes': - logging.debug("Setup qemu tls env for reverse migration") + LOG.debug("Setup qemu tls env for reverse migration") tls_args.update(ca_cakey_path=tls_args.get('custom_pki_path')) tls_args.update(scp_new_cacert='no') self._setup_remote_connection_base(protocol='tls', @@ -829,7 +829,7 @@ def _setup_remote_connection_base(self, protocol='ssh', reverse=False, 'tcp': TCPConnection, 'ssh': SSHConnection} - logging.debug("Setup remote connection env") + LOG.debug("Setup remote connection env") conn_obj = protocol_to_class[protocol](conn_args) conn_obj.conn_setup() conn_obj.auto_recover = True @@ -857,10 +857,10 @@ def _open_libvirtd_port_in_iptables(server_ip, cleanup): server_dict=server_dict, session=session, cleanup=cleanup) - logging.debug("Enable libvirtd remote port in firewalld on dst host") + LOG.debug("Enable libvirtd remote port in firewalld on dst host") _open_libvirtd_port_in_iptables(self.migrate_dest_host, cleanup) if self.migrate_vm_back == 'yes': - logging.debug("Enable libvirtd remote port in firewalld\ + LOG.debug("Enable libvirtd remote port in firewalld\ on src host") _open_libvirtd_port_in_iptables(self.migrate_source_host, cleanup) @@ -883,7 +883,7 @@ def open_port_in_iptables(self, port, protocol='tcp', server_dict=None, try: utils_path.find_command("firewall-cmd") except utils_path.CmdNotFoundError: - logging.debug("Using iptables for replacement") + LOG.debug("Using iptables for replacement") use_firewall_cmd = False if use_firewall_cmd: @@ -917,34 +917,34 @@ def cleanup(self): """ Cleanup env """ - logging.debug("Start to clean up env") + LOG.debug("Start to clean up env") # 
Shutdown vms for vm in self.vms: vm.destroy() # Recover source vm definition (just in case). - logging.info("Recover vm definition on source") + LOG.info("Recover vm definition on source") for backup in self.vm_xml_backup: backup.define() # Clean up ssh, tcp, tls test env if self.objs_list and len(self.objs_list) > 0: - logging.debug("Clean up test env: ssh, tcp, tls, etc") + LOG.debug("Clean up test env: ssh, tcp, tls, etc") self.objs_list.reverse() for obj in self.objs_list: obj.__del__() # Cleanup migrate_pre_setup - logging.debug("Clean up migration setup on dest host") + LOG.debug("Clean up migration setup on dest host") self.obj_migration.migrate_pre_setup(self.dest_uri, self.params, cleanup=True) if self.migrate_vm_back == 'yes': - logging.debug("Clean up migration setup on src host") + LOG.debug("Clean up migration setup on src host") self.obj_migration.migrate_pre_setup(self.src_uri, self.params, cleanup=True) # Restore conf files - logging.debug("Restore conf files") + LOG.debug("Restore conf files") for conf_obj in self.local_conf_objs: conf_obj.restore() for conf_obj in self.remote_conf_objs: @@ -952,10 +952,10 @@ def cleanup(self): # Disable opened ports in firewalld for port in self.opened_ports_local: - logging.debug("Disable port %s in firewalld on local host", port) + LOG.debug("Disable port %s in firewalld on local host", port) self.open_port_in_iptables(port, cleanup=True) for port in self.opened_ports_remote: - logging.debug("Disable port %s in firewalld on remote host", port) + LOG.debug("Disable port %s in firewalld on remote host", port) self.open_port_in_iptables(port, server_dict=self.remote_dict, session=self.remote_session, @@ -975,7 +975,7 @@ def manage_session(vm, *args, **kwargs): :param vm: VM object """ - logging.debug("vm's connect_uri is: %s", vm.connect_uri) + LOG.debug("vm's connect_uri is: %s", vm.connect_uri) try: if vm.connect_uri == "qemu:///system": vm.session = vm.wait_for_login(serial=True) diff --git a/virttest/nfs.py 
b/virttest/nfs.py index 7614056268..dfcf6b6446 100644 --- a/virttest/nfs.py +++ b/virttest/nfs.py @@ -18,6 +18,8 @@ from virttest.utils_conn import SSHConnection from virttest.staging import service +LOG = logging.getLogger('avocado.' + __name__) + def nfs_exported(session=None): """ @@ -108,8 +110,8 @@ def unexport(self): unexport_cmd = "exportfs -u %s:%s" % (self.client, self.path) self.func(unexport_cmd) else: - logging.warn("Target %s %s is not exported yet." - "Can not unexport it." % (self.client, self.path)) + LOG.warn("Target %s %s is not exported yet." + "Can not unexport it." % (self.client, self.path)) def reset_export(self): """ @@ -134,8 +136,7 @@ def export(self): self.unexport() else: self.already_exported = True - logging.warn("Already exported target." - " Don't need export it again") + LOG.warn("Already exported target. Don't need export it again") return True export_cmd = "exportfs" if self.options: @@ -144,7 +145,7 @@ def export(self): try: self.func(export_cmd) except (process.CmdError, aexpect.ShellTimeoutError) as export_failed_err: - logging.error("Can not export target: %s" % export_failed_err) + LOG.error("Can not export target: %s" % export_failed_err) return False return True @@ -183,7 +184,7 @@ def __init__(self, params): self.session = test_setup.remote_session(nfs_server_params) distro_details = utils_misc.get_distro(self.session) if self.session.cmd_status("exportfs -h") != 0: - logging.error("exportfs cmd not available in remote host") + LOG.error("exportfs cmd not available in remote host") elif params.get("setup_local_nfs") == "yes": self.nfs_setup = True @@ -235,7 +236,7 @@ def setup(self): """ if self.nfs_setup: if not self.nfs_service.status(): - logging.debug("Restart NFS service.") + LOG.debug("Restart NFS service.") self.rpcbind_service.restart() self.nfs_service.restart() @@ -245,7 +246,7 @@ def setup(self): self.exportfs.export() self.unexportfs_in_clean = not self.exportfs.already_exported - logging.debug("Mount %s to %s" 
% (self.mount_src, self.mount_dir)) + LOG.debug("Mount %s to %s" % (self.mount_src, self.mount_dir)) if(utils_misc.check_exists(self.mount_dir, session=self.session) and not utils_misc.check_isdir(self.mount_dir, session=self.session)): raise OSError( @@ -318,10 +319,10 @@ def is_mounted(self): find_mountpoint_cmd = "mount | grep -E '.*%s.*%s.*'" % (self.mount_src, self.mount_dir) cmd = self.ssh_cmd + "'%s'" % find_mountpoint_cmd - logging.debug("The command: %s", cmd) + LOG.debug("The command: %s", cmd) status, output = process.getstatusoutput(cmd) if status: - logging.debug("The command result: <%s:%s>", status, output) + LOG.debug("The command result: <%s:%s>", status, output) return False return True @@ -344,8 +345,7 @@ def umount(self): """ Unmount the mount directory in remote host """ - logging.debug("Umount %s from %s" % - (self.mount_dir, self.nfs_client_ip)) + LOG.debug("Umount %s from %s" % (self.mount_dir, self.nfs_client_ip)) umount_cmd = self.ssh_cmd + "'umount -l %s'" % self.mount_dir try: process.system(umount_cmd, verbose=True) @@ -392,17 +392,17 @@ def firewall_to_permit_nfs(self): service_cmd += "--add-service=nfs" ret = process.run(service_cmd, shell=True) if ret.exit_status: - logging.error("nfs service not added in firewall: " - "%s", ret.stdout_text) + LOG.error("nfs service not added in firewall: " + "%s", ret.stdout_text) else: - logging.debug("nfs service added to firewall " - "sucessfully") + LOG.debug("nfs service added to firewall " + "sucessfully") firewalld.restart() else: - logging.debug("nfs service already permitted by firewall") + LOG.debug("nfs service already permitted by firewall") except process.CmdError: # For RHEL 6 based system firewall-cmd is not available - logging.debug("Using iptables to permit NFS service") + LOG.debug("Using iptables to permit NFS service") nfs_ports = [] rule_list = [] nfsd = service.Factory.create_service("nfs") @@ -436,19 +436,18 @@ def firewall_to_permit_nfs(self): 
Iptables.setup_or_cleanup_iptables_rules(rule_list) iptables.restart() except Exception as info: - logging.error("Firewall setting to add nfs service " - "failed: %s", info) + LOG.error("Firewall setting to add nfs service failed: %s", info) def setup_remote(self): """ Mount sharing directory to remote host. """ check_mount_dir_cmd = self.ssh_cmd + "'ls -d %s'" % self.mount_dir - logging.debug("To check if the %s exists", self.mount_dir) + LOG.debug("To check if the %s exists", self.mount_dir) output = process.getoutput(check_mount_dir_cmd) if re.findall("No such file or directory", output, re.M): mkdir_cmd = self.ssh_cmd + "'mkdir -p %s'" % self.mount_dir - logging.debug("Prepare to create %s", self.mount_dir) + LOG.debug("Prepare to create %s", self.mount_dir) s, o = process.getstatusoutput(mkdir_cmd) if s != 0: raise exceptions.TestFail("Failed to run %s: %s" % @@ -460,7 +459,7 @@ def setup_remote(self): self.firewall_to_permit_nfs() self.mount_src = "%s:%s" % (self.nfs_server_ip, self.mount_src) - logging.debug("Mount %s to %s" % (self.mount_src, self.mount_dir)) + LOG.debug("Mount %s to %s" % (self.mount_src, self.mount_dir)) mount_cmd = "mount -t nfs %s %s" % (self.mount_src, self.mount_dir) if self.mount_options: mount_cmd += " -o %s" % self.mount_options diff --git a/virttest/openvswitch.py b/virttest/openvswitch.py index 970dc1700e..4b16a14783 100644 --- a/virttest/openvswitch.py +++ b/virttest/openvswitch.py @@ -10,6 +10,7 @@ from .versionable_class import VersionableClass, Manager, factory from . import utils_misc +LOG = logging.getLogger('avocado.' + __name__) # Register to class manager. 
man = Manager(__name__) @@ -174,7 +175,7 @@ def get_version(cls): version = re.search(pattern, result.stdout_text).group(1) except process.CmdError: - logging.debug("OpenVSwitch is not available in system.") + LOG.debug("OpenVSwitch is not available in system.") return version def status(self): @@ -277,7 +278,7 @@ def del_br(self, br_name): try: self.ovs_vsctl(["del-br", br_name]) except process.CmdError as e: - logging.debug(e.result) + LOG.debug(e.result) raise def br_exist(self, br_name): @@ -379,8 +380,8 @@ def check_db_daemon(self): working = utils_misc.program_is_alive( "ovsdb-server", self.pid_files_path) if not working: - logging.error("OpenVSwitch database daemon with PID in file %s" - " not working.", self.db_pidfile) + LOG.error("OpenVSwitch database daemon with PID in file %s" + " not working.", self.db_pidfile) return working def check_switch_daemon(self): @@ -390,8 +391,8 @@ def check_switch_daemon(self): working = utils_misc.program_is_alive( "ovs-vswitchd", self.pid_files_path) if not working: - logging.error("OpenVSwitch switch daemon with PID in file %s" - " not working.", self.ovs_pidfile) + LOG.error("OpenVSwitch switch daemon with PID in file %s" + " not working.", self.ovs_pidfile) return working def check_db_file(self): @@ -400,8 +401,8 @@ def check_db_file(self): """ exists = os.path.exists(self.db_path) if not exists: - logging.error("OpenVSwitch database file %s not exists.", - self.db_path) + LOG.error("OpenVSwitch database file %s not exists.", + self.db_path) return exists def check_db_socket(self): @@ -410,8 +411,8 @@ def check_db_socket(self): """ exists = os.path.exists(self.db_socket) if not exists: - logging.error("OpenVSwitch database socket file %s not exists.", - self.db_socket) + LOG.error("OpenVSwitch database socket file %s not exists.", + self.db_socket) return exists def check(self): @@ -427,8 +428,8 @@ def init_system(self): if linux_modules.load_module("openvswitch"): sm.restart("openvswitch") except process.CmdError: - 
logging.error("Service OpenVSwitch is probably not" - " installed in system.") + LOG.error("Service OpenVSwitch is probably not" + " installed in system.") raise self.pid_files_path = "/var/run/openvswitch/" @@ -560,11 +561,11 @@ def init_new(self): self.start_ovs_vswitchd() def clean(self): - logging.debug("Killall ovsdb-server") + LOG.debug("Killall ovsdb-server") utils_misc.signal_program("ovsdb-server") if utils_misc.program_is_alive("ovsdb-server"): utils_misc.signal_program("ovsdb-server", signal.SIGKILL) - logging.debug("Killall ovs-vswitchd") + LOG.debug("Killall ovs-vswitchd") utils_misc.signal_program("ovs-vswitchd") if utils_misc.program_is_alive("ovs-vswitchd"): utils_misc.signal_program("ovs-vswitchd", signal.SIGKILL) diff --git a/virttest/ovirt.py b/virttest/ovirt.py index f6afa720b6..7517984dbb 100644 --- a/virttest/ovirt.py +++ b/virttest/ovirt.py @@ -18,6 +18,8 @@ _api = None _connected = False +LOG = logging.getLogger('avocado.' + __name__) + class WaitStateTimeoutError(Exception): @@ -52,7 +54,7 @@ def connect(params): password = params.get('ovirt_engine_password') if not all([url, username, password]): - logging.error('ovirt_engine[url|user|password] are necessary!!') + LOG.error('ovirt_engine[url|user|password] are necessary!!') global connection, _connected, version @@ -72,9 +74,9 @@ def connect(params): else: return connection, version except Exception as e: - logging.error('Failed to connect: %s\n' % str(e)) + LOG.error('Failed to connect: %s\n' % str(e)) else: - logging.info('Succeed to connect oVirt/Rhevm manager\n') + LOG.info('Succeed to connect oVirt/Rhevm manager\n') def disconnect(): @@ -156,7 +158,7 @@ def list(self): vm_list.append(vms[i].name) return vm_list except Exception as e: - logging.error('Failed to get vms:\n%s' % str(e)) + LOG.error('Failed to get vms:\n%s' % str(e)) def state(self): """ @@ -166,7 +168,7 @@ def state(self): self.update_instance() return self.instance.get().status except Exception as e: - 
logging.error('Failed to get %s status:\n%s' % (self.name, str(e))) + LOG.error('Failed to get %s status:\n%s' % (self.name, str(e))) def get_mac_address(self, net_name='*'): """ @@ -180,7 +182,7 @@ def get_mac_address(self, net_name='*'): try: return [vnet.mac.address for vnet in vnet_list if vnet.mac.address] except Exception as e: - logging.error('Failed to get %s status:\n%s' % (self.name, str(e))) + LOG.error('Failed to get %s status:\n%s' % (self.name, str(e))) def lookup_by_storagedomains(self, storage_name): """ @@ -193,15 +195,15 @@ def lookup_by_storagedomains(self, storage_name): target_vm = [vm for vm in export_vms_service.list() if vm.name == self.name][0] return target_vm except Exception as e: - logging.error('Failed to get %s from %s:\n%s' % (self.name, - storage_name, str(e))) + LOG.error('Failed to get %s from %s:\n%s' % (self.name, + storage_name, str(e))) def is_dead(self): """ Judge if a VM is dead. """ if self.state() == types.VmStatus.DOWN: - logging.info('VM %s status is ' % self.name) + LOG.info('VM %s status is ' % self.name) return True else: return False @@ -219,7 +221,7 @@ def is_paused(self): if self.state() == types.VmStatus.SUSPENDED: return True else: - logging.debug('VM %s status is %s ' % (self.name, self.state())) + LOG.debug('VM %s status is %s ' % (self.name, self.state())) return False def start(self, wait_for_up=True, timeout=300): @@ -228,7 +230,7 @@ def start(self, wait_for_up=True, timeout=300): """ end_time = time.time() + timeout if self.is_dead(): - logging.info('Starting VM %s' % self.name) + LOG.info('Starting VM %s' % self.name) self.instance.start() vm_powering_up = False vm_up = False @@ -236,7 +238,7 @@ def start(self, wait_for_up=True, timeout=300): if self.state() == types.VmStatus.POWERING_UP: vm_powering_up = True if wait_for_up: - logging.info('Waiting for VM to reach status') + LOG.info('Waiting for VM to reach status') if self.state() == types.VmStatus.UP: vm_up = True break @@ -249,7 +251,7 @@ def 
start(self, wait_for_up=True, timeout=300): if not vm_powering_up and not vm_up: raise WaitVMStateTimeoutError("START", self.state()) else: - logging.debug('VM is alive') + LOG.debug('VM is alive') def suspend(self, timeout): """ @@ -259,17 +261,17 @@ def suspend(self, timeout): vm_suspend = False while time.time() < end_time: try: - logging.info('Suspend VM %s' % self.name) + LOG.info('Suspend VM %s' % self.name) self.instance.suspend() - logging.info('Waiting for VM to reach status') + LOG.info('Waiting for VM to reach status') if self.is_paused(): vm_suspend = True break except Exception as e: if e.reason == 'Bad Request' \ and 'asynchronous running tasks' in e.detail: - logging.warning("VM has asynchronous running tasks, " - "trying again") + LOG.warning("VM has asynchronous running tasks, " + "trying again") time.sleep(1) else: raise e @@ -284,9 +286,9 @@ def resume(self, timeout): end_time = time.time() + timeout try: if self.state() != 'up': - logging.info('Resume VM %s' % self.name) + LOG.info('Resume VM %s' % self.name) self.instance.start() - logging.info('Waiting for VM to status') + LOG.info('Waiting for VM to status') vm_resume = False while time.time() < end_time: if self.state() == types.VmStatus.UP: @@ -296,9 +298,9 @@ def resume(self, timeout): if not vm_resume: raise WaitVMStateTimeoutError("RESUME", self.state()) else: - logging.debug('VM already up') + LOG.debug('VM already up') except Exception as e: - logging.error('Failed to resume VM:\n%s' % str(e)) + LOG.error('Failed to resume VM:\n%s' % str(e)) def shutdown(self, gracefully=True, timeout=300): """ @@ -306,12 +308,12 @@ def shutdown(self, gracefully=True, timeout=300): """ end_time = time.time() + timeout if self.is_alive(): - logging.info('Shutdown VM %s' % self.name) + LOG.info('Shutdown VM %s' % self.name) if gracefully: self.instance.shutdown() else: self.instance.stop() - logging.info('Waiting for VM to reach status') + LOG.info('Waiting for VM to reach status') vm_down = False while 
time.time() < end_time: if self.is_dead(): @@ -321,7 +323,7 @@ def shutdown(self, gracefully=True, timeout=300): if not vm_down: raise WaitVMStateTimeoutError("DOWN", self.state()) else: - logging.debug('VM already down') + LOG.debug('VM already down') def delete(self, timeout=300): """ @@ -329,9 +331,9 @@ def delete(self, timeout=300): """ end_time = time.time() + timeout if self.name in self.list(): - logging.info('Delete VM %s' % self.name) + LOG.info('Delete VM %s' % self.name) self.instance.remove() - logging.info('Waiting for VM to be ') + LOG.info('Waiting for VM to be ') vm_delete = False while time.time() < end_time: if self.name not in self.list(): @@ -340,9 +342,9 @@ def delete(self, timeout=300): time.sleep(1) if not vm_delete: raise WaitVMStateTimeoutError("DELETE", self.state()) - logging.info('VM was removed successfully') + LOG.info('VM was removed successfully') else: - logging.debug('VM not exist') + LOG.debug('VM not exist') def destroy(self, gracefully=False): """ @@ -362,11 +364,11 @@ def delete_from_export_domain(self, export_name): try: sds_service = self.connection.system_service().storage_domains_service() export_sd = sds_service.list(search=export_name)[0] - logging.info('Remove VM %s from export storage' % self.name) + LOG.info('Remove VM %s from export storage' % self.name) export_vms_service = sds_service.storage_domain_service(export_sd.id).vms_service() export_vms_service.vm_service(vm.id).remove() except Exception as e: - logging.error('Failed to remove VM:\n%s' % str(e)) + LOG.error('Failed to remove VM:\n%s' % str(e)) def import_from_export_domain(self, export_name, storage_name, cluster_name, timeout=300): @@ -386,7 +388,7 @@ def import_from_export_domain(self, export_name, storage_name, storage_domains = sds_service.list(search=storage_name)[0] clusters_service = self.connection.system_service().clusters_service() cluster = clusters_service.list(search=cluster_name)[0] - logging.info('Import VM %s' % self.name) + LOG.info('Import 
VM %s' % self.name) export_vms_service = sds_service.storage_domain_service(export_sd.id).vms_service() export_vms_service.vm_service(vm.id).import_( storage_domain=types.StorageDomain(id=storage_domains.id), @@ -394,7 +396,7 @@ def import_from_export_domain(self, export_name, storage_name, vm=types.Vm(id=vm.id), exclusive=False ) - logging.info('Waiting for VM to reach status') + LOG.info('Waiting for VM to reach status') vm_down = False while time.time() < end_time: if self.name in self.list(): @@ -404,8 +406,8 @@ def import_from_export_domain(self, export_name, storage_name, time.sleep(1) if not vm_down: raise WaitVMStateTimeoutError("DOWN", self.state()) - logging.info('Import %s successfully(time lapse %ds)', - self.name, time.time() - begin_time) + LOG.info('Import %s successfully(time lapse %ds)', + self.name, time.time() - begin_time) def export_from_export_domain(self, export_name, timeout=300): """ @@ -415,9 +417,9 @@ def export_from_export_domain(self, export_name, timeout=300): """ end_time = time.time() + timeout storage_domains = self.connection.storagedomains.get(export_name) - logging.info('Export VM %s' % self.name) + LOG.info('Export VM %s' % self.name) self.instance.export(types.Action(storage_domain=storage_domains)) - logging.info('Waiting for VM to reach status') + LOG.info('Waiting for VM to reach status') vm_down = False while time.time() < end_time: if self.is_dead(): @@ -426,7 +428,7 @@ def export_from_export_domain(self, export_name, timeout=300): time.sleep(1) if not vm_down: raise WaitVMStateTimeoutError("DOWN", self.state()) - logging.info('Export %s successfully', self.name) + LOG.info('Export %s successfully', self.name) def snapshot(self, snapshot_name='my_snapshot', timeout=300): """ @@ -438,10 +440,10 @@ def snapshot(self, snapshot_name='my_snapshot', timeout=300): end_time = time.time() + timeout snap_params = types.Snapshot(description=snapshot_name, vm=self.instance) - logging.info('Creating a snapshot %s for VM %s' - % 
(snapshot_name, self.name)) + LOG.info('Creating a snapshot %s for VM %s' + % (snapshot_name, self.name)) self.instance.snapshots.add(snap_params) - logging.info('Waiting for snapshot creation to finish') + LOG.info('Waiting for snapshot creation to finish') vm_snapsnop = False while time.time() < end_time: if self.state() != 'image_locked': @@ -450,7 +452,7 @@ def snapshot(self, snapshot_name='my_snapshot', timeout=300): time.sleep(1) if not vm_snapsnop: raise WaitVMStateTimeoutError("SNAPSHOT", self.state()) - logging.info('Snapshot was created successfully') + LOG.info('Snapshot was created successfully') def create_template(self, cluster_name, template_name='my_template', timeout=300): """ @@ -467,10 +469,10 @@ def create_template(self, cluster_name, template_name='my_template', timeout=300 vm=self.instance, cluster=cluster) try: - logging.info('Creating a template %s from VM %s' - % (template_name, self.name)) + LOG.info('Creating a template %s from VM %s' + % (template_name, self.name)) self.connection.templates.add(tmpl_params) - logging.info('Waiting for VM to reach status') + LOG.info('Waiting for VM to reach status') vm_down = False while time.time() < end_time: if self.is_dead(): @@ -480,7 +482,7 @@ def create_template(self, cluster_name, template_name='my_template', timeout=300 if not vm_down: raise WaitVMStateTimeoutError("DOWN", self.state()) except Exception as e: - logging.error('Failed to create a template from VM:\n%s' % str(e)) + LOG.error('Failed to create a template from VM:\n%s' % str(e)) def add(self, memory, disk_size, cluster_name, storage_name, nic_name='eth0', network_interface='virtio', @@ -525,16 +527,16 @@ def add(self, memory, disk_size, cluster_name, storage_name, bootable=True) try: - logging.info('Creating a VM %s' % self.name) + LOG.info('Creating a VM %s' % self.name) self.connection.vms.add(vm_params) - logging.info('NIC is added to VM %s' % self.name) + LOG.info('NIC is added to VM %s' % self.name) 
self.instance.nics.add(nic_params) - logging.info('Disk is added to VM %s' % self.name) + LOG.info('Disk is added to VM %s' % self.name) self.instance.disks.add(disk_params) - logging.info('Waiting for VM to reach status') + LOG.info('Waiting for VM to reach status') vm_down = False while time.time() < end_time: if self.is_dead(): @@ -544,7 +546,7 @@ def add(self, memory, disk_size, cluster_name, storage_name, if not vm_down: raise WaitVMStateTimeoutError("DOWN", self.state()) except Exception as e: - logging.error('Failed to create VM with disk and NIC\n%s' % str(e)) + LOG.error('Failed to create VM with disk and NIC\n%s' % str(e)) def add_vm_from_template(self, cluster_name, template_name='Blank', new_name='my_new_vm', timeout=300): @@ -561,10 +563,10 @@ def add_vm_from_template(self, cluster_name, template_name='Blank', cluster=self.connection.clusters.get(cluster_name), template=self.connection.templates.get(template_name)) try: - logging.info('Creating a VM %s from template %s' - % (new_name, template_name)) + LOG.info('Creating a VM %s from template %s' + % (new_name, template_name)) self.connection.vms.add(vm_params) - logging.info('Waiting for VM to reach status') + LOG.info('Waiting for VM to reach status') vm_down = False while time.time() < end_time: if self.is_dead(): @@ -573,9 +575,9 @@ def add_vm_from_template(self, cluster_name, template_name='Blank', time.sleep(1) if not vm_down: raise WaitVMStateTimeoutError("DOWN", self.state()) - logging.info('VM was created from template successfully') + LOG.info('VM was created from template successfully') except Exception as e: - logging.error('Failed to create VM from template:\n%s' % str(e)) + LOG.error('Failed to create VM from template:\n%s' % str(e)) def get_address(self, index=0, *args): """ @@ -626,13 +628,13 @@ def list(self): """ dc_list = [] try: - logging.info('List Data centers') + LOG.info('List Data centers') dcs = self.dcs_service.list(search='name=%s' % self.name) for i in range(len(dcs)): 
dc_list.append(dcs[i].name) return dc_list except Exception as e: - logging.error('Failed to get data centers:\n%s' % str(e)) + LOG.error('Failed to get data centers:\n%s' % str(e)) def add(self, storage_type): """ @@ -641,12 +643,12 @@ def add(self, storage_type): if not self.name: self.name = "my_datacenter" try: - logging.info('Creating a %s type datacenter %s' - % (storage_type, self.name)) + LOG.info('Creating a %s type datacenter %s' + % (storage_type, self.name)) if self.dcs_service.add(types.DataCenter(name=self.name, storage_type=storage_type, version=self.version)): - logging.info('Data center was created successfully') + LOG.info('Data center was created successfully') except Exception as e: - logging.error('Failed to create data center:\n%s' % str(e)) + LOG.error('Failed to create data center:\n%s' % str(e)) class ClusterManager(object): @@ -670,13 +672,13 @@ def list(self): """ cluster_list = [] try: - logging.info('List clusters') + LOG.info('List clusters') clusters = self.clusters_service.list() for i in range(len(clusters)): cluster_list.append(clusters[i].name) return cluster_list except Exception as e: - logging.error('Failed to get clusters:\n%s' % str(e)) + LOG.error('Failed to get clusters:\n%s' % str(e)) def add(self, dc_name, cpu_type='Intel Nehalem Family'): """ @@ -687,15 +689,15 @@ def add(self, dc_name, cpu_type='Intel Nehalem Family'): dc = self.connection.system_service().data_centers_service().list(search='name=%s' % dc_name)[0] try: - logging.info('Creating a cluster %s in datacenter %s' - % (self.name, dc_name)) + LOG.info('Creating a cluster %s in datacenter %s' + % (self.name, dc_name)) if self.clusters_service.add(types.Cluster(name=self.name, cpu=types.CPU(id=cpu_type), data_center=dc, version=self.version)): - logging.info('Cluster was created successfully') + LOG.info('Cluster was created successfully') except Exception as e: - logging.error('Failed to create cluster:\n%s' % str(e)) + LOG.error('Failed to create cluster:\n%s' 
% str(e)) class HostManager(object): @@ -719,13 +721,13 @@ def list(self): """ host_list = [] try: - logging.info('List hosts') + LOG.info('List hosts') hosts = self.hosts_service.list() for i in range(len(hosts)): host_list.append(hosts[i].name) return host_list except Exception as e: - logging.error('Failed to get hosts:\n%s' % str(e)) + LOG.error('Failed to get hosts:\n%s' % str(e)) def state(self): """ @@ -734,7 +736,7 @@ def state(self): try: return self.instance.status.state except Exception as e: - logging.error('Failed to get %s status:\n%s' % (self.name, str(e))) + LOG.error('Failed to get %s status:\n%s' % (self.name, str(e))) def add(self, host_address, host_password, cluster_name, timeout=300): """ @@ -749,10 +751,10 @@ def add(self, host_address, host_password, cluster_name, timeout=300): host_params = types.Host(name=self.name, address=host_address, cluster=clusters, root_password=host_password) try: - logging.info('Registing a host %s into cluster %s' - % (self.name, cluster_name)) + LOG.info('Registing a host %s into cluster %s' + % (self.name, cluster_name)) if self.hosts_service.add(host_params): - logging.info('Waiting for host to reach the status ...') + LOG.info('Waiting for host to reach the status ...') host_up = False while time.time() < end_time: if self.state() == types.VmStatus.UP: @@ -761,20 +763,20 @@ def add(self, host_address, host_password, cluster_name, timeout=300): time.sleep(1) if not host_up: raise WaitHostStateTimeoutError("UP", self.state()) - logging.info('Host was installed successfully') + LOG.info('Host was installed successfully') except Exception as e: - logging.error('Failed to install host:\n%s' % str(e)) + LOG.error('Failed to install host:\n%s' % str(e)) def get_address(self): """ Return host IP address. 
""" try: - logging.info('Get host %s IP' % self.name) + LOG.info('Get host %s IP' % self.name) return self.instance.get_address() except Exception as e: - logging.error('Failed to get host %s IP address:\n%s' % - (self.name, str(e))) + LOG.error('Failed to get host %s IP address:\n%s' % + (self.name, str(e))) class StorageDomainManager(object): @@ -798,13 +800,13 @@ def list(self): """ storage_list = [] try: - logging.info('List storage domains') + LOG.info('List storage domains') storages = self.sds_service.list() for i in range(len(storages)): storage_list.append(storages[i].name) return storage_list except Exception as e: - logging.error('Failed to get storage domains:\n%s' % str(e)) + LOG.error('Failed to get storage domains:\n%s' % str(e)) def attach_iso_export_domain_into_datacenter(self, address, path, dc_name, host_name, @@ -835,22 +837,19 @@ def attach_iso_export_domain_into_datacenter(self, address, path, storage=storage_params) try: - logging.info('Create/import ISO storage domain %s' % name) + LOG.info('Create/import ISO storage domain %s' % name) if self.api.storagedomains.add(storage_domain__params): - logging.info('%s domain was created/imported successfully' - % domain_type) + LOG.info('%s domain was created/imported successfully' + % domain_type) - logging.info('Attach ISO storage domain %s' % name) + LOG.info('Attach ISO storage domain %s' % name) if self.api.datacenters.get(dc_name).storagedomains.add( self.api.storagedomains.get(name)): - logging.info('%s domain was attached successfully' - % domain_type) + LOG.info('%s domain was attached successfully' % domain_type) - logging.info('Activate ISO storage domain %s' % name) + LOG.info('Activate ISO storage domain %s' % name) if self.api.datacenters.get(dc_name).storagedomains.get( name).activate(): - logging.info('%s domain was activated successfully' - % domain_type) + LOG.info('%s domain was activated successfully' % domain_type) except Exception as e: - logging.error('Failed to add %s 
domain:\n%s' - % (domain_type, str(e))) + LOG.error('Failed to add %s domain:\n%s' % (domain_type, str(e))) diff --git a/virttest/ovs_utils.py b/virttest/ovs_utils.py index d25edb06c8..828463e97d 100644 --- a/virttest/ovs_utils.py +++ b/virttest/ovs_utils.py @@ -9,6 +9,9 @@ from . import utils_net +LOG = logging.getLogger('avocado.' + __name__) + + class Machine(object): def __init__(self, vm=None, src=None): @@ -181,7 +184,7 @@ def compile_autotools_app_tar(self, path, package_name): :param path: Path where shoule be program compiled. :param dst_dir: Installation path. """ - logging.debug("Install %s to %s.", package_name, self.src) + LOG.debug("Install %s to %s.", package_name, self.src) self.prepare_directory(self.src) pack_dir = None diff --git a/virttest/postprocess_iozone.py b/virttest/postprocess_iozone.py index 10c6d57a0e..7fac79c924 100755 --- a/virttest/postprocess_iozone.py +++ b/virttest/postprocess_iozone.py @@ -22,6 +22,8 @@ from . import utils_misc +LOG = logging.getLogger('avocado.' 
+ __name__) + _LABELS = ['file_size', 'record_size', 'write', 'rewrite', 'read', 'reread', 'randread', 'randwrite', 'bkwdread', 'recordrewrite', 'strideread', 'fwrite', 'frewrite', 'fread', 'freread'] @@ -101,7 +103,7 @@ def __init__(self, list_files, output_dir): if not os.path.isdir(output_dir): os.makedirs(output_dir) self.output_dir = output_dir - logging.info("Results will be stored in %s", output_dir) + LOG.info("Results will be stored in %s", output_dir) def average_performance(self, results, size=None): """ @@ -319,7 +321,7 @@ def analyze(self): file_size = [] for file_path in self.list_files: fileobj = open(file_path, 'r') - logging.info('FILE: %s', file_path) + LOG.info('FILE: %s', file_path) results = self.parse_file(fileobj) @@ -354,8 +356,7 @@ def __init__(self, results_file, output_dir): try: self.gnuplot = path.find_command("gnuplot") except path.CmdNotFoundError: - logging.error("Command gnuplot not found, disabling graph " - "generation") + LOG.error("Command gnuplot not found, disabling graph generation") self.active = False if not os.path.isdir(output_dir): @@ -363,8 +364,8 @@ def __init__(self, results_file, output_dir): self.output_dir = output_dir if not os.path.isfile(results_file): - logging.error("Invalid file %s provided, disabling graph " - "generation", results_file) + LOG.error("Invalid file %s provided, disabling graph generation", + results_file) self.active = False self.results_file = None else: @@ -415,8 +416,8 @@ def plot_2d_graphs(self): try: process.system("%s %s" % (self.gnuplot, commands_path)) except process.CmdError: - logging.error("Problem plotting from commands file %s", - commands_path) + LOG.error("Problem plotting from commands file %s", + commands_path) def plot_3d_graphs(self): """ @@ -453,8 +454,8 @@ def plot_3d_graphs(self): try: process.system("%s %s" % (self.gnuplot, commands_path)) except process.CmdError: - logging.error("Problem plotting from commands file %s", - commands_path) + LOG.error("Problem plotting 
from commands file %s", + commands_path) def plot_all(self): """ diff --git a/virttest/ppm_utils.py b/virttest/ppm_utils.py index 85c38d6e3c..771bf13a93 100644 --- a/virttest/ppm_utils.py +++ b/virttest/ppm_utils.py @@ -41,6 +41,8 @@ # Some directory/filename utils, for consistency +LOG = logging.getLogger('avocado.' + __name__) + def _md5eval(data): """ @@ -382,7 +384,7 @@ def image_crop_save(image, new_image, box=None): try: img.crop(box).save(new_image) except (KeyError, SystemError) as e: - logging.error("Fail to crop image: %s", e) + LOG.error("Fail to crop image: %s", e) return False return True diff --git a/virttest/qemu_devices/qcontainer.py b/virttest/qemu_devices/qcontainer.py index 74244a42c2..1c2807f041 100644 --- a/virttest/qemu_devices/qcontainer.py +++ b/virttest/qemu_devices/qcontainer.py @@ -46,6 +46,8 @@ from virttest.qemu_capabilities import Flags, Capabilities, MigrationParams from virttest.utils_version import VersionInterval +LOG = logging.getLogger('avocado.' + __name__) + # # Device container (device representation of VM) # This class represents VM by storing all devices and their connections (buses) @@ -1137,8 +1139,8 @@ def machine_q35(cmd=False): :param cmd: If set uses "-M $cmd" to force this machine type :return: List of added devices (including default buses) """ - logging.warn('Using Q35 machine which is not yet fully tested on ' - 'avocado-vt. False errors might occur.') + LOG.warn('Using Q35 machine which is not yet fully tested on ' + 'avocado-vt. 
False errors might occur.') devices = [] bus = (qdevices.QPCIEBus('pcie.0', 'PCIE', root_port_type, 'pci.0', pcie_root_port_params), @@ -1291,7 +1293,7 @@ def machine_arm64_mmio(cmd=False): :param cmd: If set uses "-M $cmd" to force this machine type :return: List of added devices (including default buses) """ - logging.warn('Support for aarch64 is highly experimental!') + LOG.warn('Support for aarch64 is highly experimental!') devices = [] # Add virtio-bus # TODO: Currently this uses QNoAddrCustomBus and does not @@ -1314,7 +1316,7 @@ def machine_arm64_pci(cmd=False): :param cmd: If set uses "-M $cmd" to force this machine type :return: List of added devices (including default buses) """ - logging.warn('Support for aarch64 is highly experimental!') + LOG.warn('Support for aarch64 is highly experimental!') devices = [] bus = (qdevices.QPCIEBus('pcie.0', 'PCIE', root_port_type, @@ -1367,7 +1369,7 @@ def machine_s390_virtio(cmd=False): # set the device's properties. This means that the qemu qtree # and autotest's representations are completely different and # can't be used. - logging.warn('Support for s390x is highly experimental!') + LOG.warn('Support for s390x is highly experimental!') bus = (qdevices.QNoAddrCustomBus('bus', [['addr'], [64]], 'virtio-blk-ccw', 'virtio-bus', 'virtio-blk-ccw'), @@ -1387,10 +1389,10 @@ def machine_riscv64_mmio(cmd=False): :param cmd: If set uses "-M $cmd" to force this machine type :return: List of added devices (including default buses) """ - logging.warn("Support for riscv64 is highly experimental. See " - "https://avocado-vt.readthedocs.io" - "/en/latest/Experimental.html#riscv64 for " - "setup information.") + LOG.warn("Support for riscv64 is highly experimental. 
See " + "https://avocado-vt.readthedocs.io" + "/en/latest/Experimental.html#riscv64 for " + "setup information.") devices = [] # Add virtio-bus # TODO: Currently this uses QNoAddrCustomBus and does not @@ -1412,8 +1414,8 @@ def machine_other(cmd=False): :param cmd: If set uses "-M $cmd" to force this machine type :return: List of added devices (including default buses) """ - logging.warn('Machine type isa/unknown is not supported by ' - 'avocado-vt. False errors might occur') + LOG.warn('Machine type isa/unknown is not supported by ' + 'avocado-vt. False errors might occur') devices = [] devices.append(qdevices.QStringDevice('machine', cmdline=cmd)) return devices @@ -1467,8 +1469,8 @@ def machine_other(cmd=False): # similar to i440fx one (1 PCI bus, ..) devices = machine_i440FX("-M %s" % machine_type) else: - raise exceptions.TestSkipError("Unsupported machine type %s." % - (machine_type)) + raise exceptions.TestSkipError("Unsupported machine type %s." + % (machine_type)) else: devices = None machine_opts = [] @@ -1484,12 +1486,12 @@ def machine_other(cmd=False): elif 'isapc' not in machine_type: # i440FX devices = machine_i440FX(cmd) else: # isapc (or other) - logging.warn('Machine isa/unknown is not supported by ' - 'avocado-vt. False errors might occur') + LOG.warn('Machine isa/unknown is not supported by ' + 'avocado-vt. 
False errors might occur') devices = machine_other(cmd) if not devices: - logging.warn('Unable to find the default machine type, using ' - 'i440FX') + LOG.warn('Unable to find the default machine type, using ' + 'i440FX') devices = machine_i440FX(cmd) if params.get("vm_pci_hole64_fix"): @@ -1627,8 +1629,8 @@ def usb_by_variables(self, usb_name, usb_type, controller_type, bus=None, cmdline='-usbdevice %s' % usb_name) else: device = qdevices.QStringDevice('missing-usb-%s' % usb_name) - logging.error("This qemu supports only tablet device; ignoring" - " %s", usb_name) + LOG.error("This qemu supports only tablet device; ignoring" + " %s", usb_name) return device def usb_by_params(self, usb_name, params): @@ -1928,8 +1930,8 @@ def define_hbas(qtype, atype, bus, unit, port, qbus, pci_bus, iothread, use_device = self.has_option("device") if fmt == "scsi": # fmt=scsi force the old version of devices - logging.warn("'scsi' drive_format is deprecated, please use the " - "new lsi_scsi type for disk %s", name) + LOG.warn("'scsi' drive_format is deprecated, please use the " + "new lsi_scsi type for disk %s", name) use_device = False if not fmt: use_device = False @@ -1961,16 +1963,16 @@ def define_hbas(qtype, atype, bus, unit, port, qbus, pci_bus, iothread, port = none_or_int(port) # Third level # Compatibility with old params - scsiid, lun if scsiid is not None: - logging.warn("drive_scsiid param is obsolete, use drive_unit " - "instead (disk %s)", name) + LOG.warn("drive_scsiid param is obsolete, use drive_unit " + "instead (disk %s)", name) unit = none_or_int(scsiid) if lun is not None: - logging.warn("drive_lun param is obsolete, use drive_port instead " - "(disk %s)", name) + LOG.warn("drive_lun param is obsolete, use drive_port instead " + "(disk %s)", name) port = none_or_int(lun) if pci_addr is not None and fmt == 'virtio': - logging.warn("drive_pci_addr is obsolete, use drive_bus instead " - "(disk %s)", name) + LOG.warn("drive_pci_addr is obsolete, use drive_bus 
instead " + "(disk %s)", name) bus = none_or_int(pci_addr) # @@ -1984,8 +1986,8 @@ def define_hbas(qtype, atype, bus, unit, port, qbus, pci_bus, iothread, and (scsi_hba == 'lsi53c895a' or scsi_hba == 'spapr-vscsi'))): if not (bus is None and unit is None and port is None): - logging.warn("Using scsi interface without -device " - "support; ignoring bus/unit/port. (%s)", name) + LOG.warn("Using scsi interface without -device " + "support; ignoring bus/unit/port. (%s)", name) bus, unit, port = None, None, None # In case we hotplug, lsi wasn't added during the startup hook if arch.ARCH in ('ppc64', 'ppc64le'): @@ -1999,8 +2001,8 @@ def define_hbas(qtype, atype, bus, unit, port, qbus, pci_bus, iothread, devices.extend(_[0]) elif fmt == "ide": if bus: - logging.warn('ide supports only 1 hba, use drive_unit to set' - 'ide.* for disk %s', name) + LOG.warn('ide supports only 1 hba, use drive_unit to set' + 'ide.* for disk %s', name) bus = unit dev_parent = {'type': 'IDE', 'atype': 'ide'} elif fmt == "ahci": @@ -2033,8 +2035,8 @@ def define_hbas(qtype, atype, bus, unit, port, qbus, pci_bus, iothread, devices.extend(_) elif fmt in ('usb1', 'usb2', 'usb3'): if bus: - logging.warn('Manual setting of drive_bus is not yet supported' - ' for usb disk %s', name) + LOG.warn('Manual setting of drive_bus is not yet supported' + ' for usb disk %s', name) bus = None if fmt == 'usb1': dev_parent = {'type': 'uhci'} @@ -2126,8 +2128,8 @@ def define_hbas(qtype, atype, bus, unit, port, qbus, pci_bus, iothread, if Flags.BLOCKDEV in self.caps: for opt, val in zip(('serial', 'boot'), (serial, boot)): if val is not None: - logging.warn("The command line option %s is not supported " - "on %s by -blockdev." % (opt, name)) + LOG.warn("The command line option %s is not supported " + "on %s by -blockdev." 
% (opt, name)) if media == 'cdrom': readonly = 'on' format_node.set_param('read-only', readonly, bool) @@ -2177,7 +2179,7 @@ def define_hbas(qtype, atype, bus, unit, port, qbus, pci_bus, iothread, if 'aio' in self.get_help_text(): if aio == 'native' and snapshot == 'yes': - logging.warn('snapshot is on, fallback aio to threads.') + LOG.warn('snapshot is on, fallback aio to threads.') aio = 'threads' if Flags.BLOCKDEV in self.caps: if isinstance(protocol_node, (qdevices.QBlockdevProtocolFile, @@ -2323,8 +2325,8 @@ def define_hbas(qtype, atype, bus, unit, port, qbus, pci_bus, iothread, devices[-1].set_param('addr', pci_addr) devices[-1].parent_bus = (pci_bus,) if not media == 'cdrom': - logging.warn("Using -drive fmt=xxx for %s is unsupported " - "method, false errors might occur.", name) + LOG.warn("Using -drive fmt=xxx for %s is unsupported " + "method, false errors might occur.", name) return devices # @@ -2370,10 +2372,10 @@ def define_hbas(qtype, atype, bus, unit, port, qbus, pci_bus, iothread, devices[-1] = qdevices.QFloppy(unit, 'drive_%s' % name, name, ({'busid': 'drive_%s' % name}, {'type': fmt})) else: - logging.warn('Using default device handling (disk %s)', name) + LOG.warn('Using default device handling (disk %s)', name) devices[-1].set_param('driver', fmt) if force_fmt: - logging.info("Force to use %s for the device" % force_fmt) + LOG.info("Force to use %s for the device" % force_fmt) devices[-1].set_param('driver', force_fmt) # Get the supported options options = self.execute_qemu("-device %s,?" 
% devices[-1]['driver']) @@ -2455,7 +2457,7 @@ def images_define_by_params(self, name, image_params, media=None, sn_img = qemu_storage.QemuImg(sn_params, data_dir.get_data_dir(), sn) image_filename = sn_img.create(sn_params)[0] os.chmod(image_filename, stat.S_IRUSR | stat.S_IWUSR) - logging.info( + LOG.info( "'snapshot=on' is not supported by '-blockdev' but " "requested from the image '%s', imitating the behavior " "of '-drive' to keep compatibility", name) @@ -2574,7 +2576,7 @@ def serials_define_by_variables(self, serial_id, serial_type, chardev_id, bus = self.idx_of_next_named_bus(_hba) bus = self.list_missing_named_buses( _hba, 'SERIAL', bus + 1)[-1] - logging.debug("list missing named bus: %s", bus) + LOG.debug("list missing named bus: %s", bus) bus_params["id"] = bus devices.append( qdevices.QDevice(bus_type, @@ -2724,8 +2726,7 @@ def cdroms_define_by_params(self, name, image_params, media=None, scsi_hba = "virtio-scsi-ccw" if cd_format in (None, "ide"): if not self.get_buses({'atype': 'ide'}): - logging.warn("cd_format IDE not available, using AHCI " - "instead.") + LOG.warn("cd_format IDE not available, using AHCI instead.") cd_format = 'ahci' if scsi_hba == "virtio-scsi-pci": if "mmio" in image_params.get("machine_type"): @@ -2903,7 +2904,7 @@ def memory_define_by_params(self, params, name): params = params.object_params(name) devices = [] if not self.has_device("pc-dimm"): - logging.warn("'PC-DIMM' does not support by your qemu") + LOG.warn("'PC-DIMM' does not support by your qemu") return devices mem = self.memory_object_define_by_params(params, name) if mem: @@ -2959,7 +2960,7 @@ def input_define_by_params(self, params, name, bus=None): dev.set_param("id", "input_%s" % name) devices.append(dev) else: - logging.warn("'%s' is not supported by your qemu", driver) + LOG.warn("'%s' is not supported by your qemu", driver) return devices @@ -3083,7 +3084,7 @@ def _handle_log(line): try: utils_misc.log_line('%s_%s_swtpm_setup.log' % (self.vmname, name), line) 
except Exception as e: - logging.warn("Can't log %s_%s_swtpm_setup output: %s.", self.vmname, name, e) + LOG.warn("Can't log %s_%s_swtpm_setup output: %s.", self.vmname, name, e) def _emulator_setup(binary, extra_options=None): setup_cmd = binary @@ -3103,7 +3104,7 @@ def _emulator_setup(binary, extra_options=None): if extra_options: setup_cmd += extra_options - logging.info('Running TPM emulator setup command: %s.', setup_cmd) + LOG.info('Running TPM emulator setup command: %s.', setup_cmd) _process = aexpect.run_bg(setup_cmd, None, _handle_log, auto_close=False) status_ending = 'Ending vTPM manufacturing' _process.read_until_any_line_matches(status_ending, timeout=5) diff --git a/virttest/qemu_devices/qdevices.py b/virttest/qemu_devices/qdevices.py index 88f4065000..fac364c377 100644 --- a/virttest/qemu_devices/qdevices.py +++ b/virttest/qemu_devices/qdevices.py @@ -24,6 +24,8 @@ import six from six.moves import xrange +LOG = logging.getLogger('avocado.' + __name__) + def _convert_args(arg_dict): """ @@ -183,7 +185,7 @@ def __eq__(self, dev2, dynamic=True): if _ != getattr(dev2, check_attr)(): return False except Exception: - logging.error(traceback.format_exc()) + LOG.error(traceback.format_exc()) return False return True @@ -499,9 +501,9 @@ def set_param(self, option, value, option_type=None): Ignore addr parameters as they are not supported by old qemus """ if option == 'addr': - logging.warn("Ignoring 'addr=%s' parameter of %s due of old qemu" - ", PCI addresses might be messed up.", value, - self.str_short()) + LOG.warn("Ignoring 'addr=%s' parameter of %s due of old qemu" + ", PCI addresses might be messed up.", value, + self.str_short()) return return super(QOldDrive, self).set_param(option, value, option_type) @@ -1392,7 +1394,7 @@ def _query(self, monitor): return True except qemu_monitor.MonitorError as err: if "DeviceNotFound" in str(err): - logging.warning(err) + LOG.warning(err) return False raise err @@ -1791,21 +1793,21 @@ def start_daemon(self): 
start_until_timeout = self.get_param('start_until_timeout', 1) if cmd is None: - logging.warn('No provided command to start %s daemon.', name) + LOG.warn('No provided command to start %s daemon.', name) self._daemon_process = None if self.is_daemon_alive(): return - logging.info('Running %s daemon command %s.', name, cmd) + LOG.info('Running %s daemon command %s.', name, cmd) self._daemon_process = aexpect.run_bg(cmd, **run_bg_kwargs) if status_active: self._daemon_process.read_until_any_line_matches( status_active, timeout=read_until_timeout) else: time.sleep(start_until_timeout) - logging.info("Created %s daemon process with parent PID %d.", - name, self._daemon_process.get_pid()) + LOG.info("Created %s daemon process with parent PID %d.", + name, self._daemon_process.get_pid()) def stop_daemon(self): """Stop daemon.""" @@ -1895,7 +1897,7 @@ def _handle_log(self, line): try: utils_misc.log_line('%s-%s.log' % (self.get_qid(), name), line) except Exception as e: - logging.warn("Can't log %s-%s, output: '%s'.", self.get_qid(), name, e) + LOG.warn("Can't log %s-%s, output: '%s'.", self.get_qid(), name, e) def start_daemon(self): """Start the virtiofs daemon in background.""" diff --git a/virttest/qemu_installer.py b/virttest/qemu_installer.py index b104f0339f..634974374f 100644 --- a/virttest/qemu_installer.py +++ b/virttest/qemu_installer.py @@ -15,6 +15,8 @@ __all__ = ['GitRepoInstaller', 'LocalSourceDirInstaller', 'LocalSourceTarInstaller', 'RemoteSourceTarInstaller'] +LOG = logging.getLogger('avocado.' 
+ __name__) + class QEMUBaseInstaller(base_installer.BaseInstaller): @@ -44,7 +46,7 @@ def _kill_qemu_processes(self): :return: None """ - logging.debug("Killing any qemu processes that might be left behind") + LOG.debug("Killing any qemu processes that might be left behind") process.system("pkill qemu", ignore_status=True) # Let's double check to see if some other process is holding /dev/kvm if os.path.isfile("/dev/kvm"): @@ -90,10 +92,10 @@ def _create_symlink_unittest(self): unittest_dst = os.path.join(self.test_builddir, "unittests") if os.path.lexists(unittest_dst): - logging.debug("Unlinking unittest dir") + LOG.debug("Unlinking unittest dir") os.unlink(unittest_dst) - logging.debug("Linking unittest dir") + LOG.debug("Linking unittest dir") os.symlink(unittest_src, unittest_dst) def _qemu_bin_exists_at_prefix(self): @@ -111,10 +113,10 @@ def _qemu_bin_exists_at_prefix(self): break if result is not None: - logging.debug('Found QEMU binary at %s', result) + LOG.debug('Found QEMU binary at %s', result) else: - logging.debug('Could not find QEMU binary at prefix %s', - self.install_prefix) + LOG.debug('Could not find QEMU binary at prefix %s', + self.install_prefix) return result @@ -127,11 +129,11 @@ def _qemu_img_bin_exists_at_prefix(self): qemu_img_bin_name = os.path.join(self.install_prefix, 'bin', self.QEMU_IMG_BIN) if os.path.isfile(qemu_img_bin_name): - logging.debug('Found qemu-img binary at %s', qemu_img_bin_name) + LOG.debug('Found qemu-img binary at %s', qemu_img_bin_name) return qemu_img_bin_name else: - logging.debug('Could not find qemu-img binary at prefix %s', - self.install_prefix) + LOG.debug('Could not find qemu-img binary at prefix %s', + self.install_prefix) return None def _qemu_io_bin_exists_at_prefix(self): @@ -143,11 +145,11 @@ def _qemu_io_bin_exists_at_prefix(self): qemu_io_bin_name = os.path.join(self.install_prefix, 'bin', self.QEMU_IO_BIN) if os.path.isfile(qemu_io_bin_name): - logging.debug('Found qemu-io binary at %s', 
qemu_io_bin_name) + LOG.debug('Found qemu-io binary at %s', qemu_io_bin_name) return qemu_io_bin_name else: - logging.debug('Could not find qemu-io binary at prefix %s', - self.install_prefix) + LOG.debug('Could not find qemu-io binary at prefix %s', + self.install_prefix) return None def _qemu_fs_proxy_bin_exists_at_prefix(self): @@ -159,12 +161,11 @@ def _qemu_fs_proxy_bin_exists_at_prefix(self): qemu_fs_proxy_bin_name = os.path.join(self.install_prefix, 'bin', self.QEMU_FS_PROXY_BIN) if os.path.isfile(qemu_fs_proxy_bin_name): - logging.debug('Found qemu fs proxy binary at %s', - qemu_fs_proxy_bin_name) + LOG.debug('Found qemu fs proxy binary at %s', qemu_fs_proxy_bin_name) return qemu_fs_proxy_bin_name else: - logging.debug('Could not find qemu fs proxy binary at prefix %s', - self.install_prefix) + LOG.debug('Could not find qemu fs proxy binary at prefix %s', + self.install_prefix) return None def _create_symlink_qemu(self): @@ -173,7 +174,7 @@ def _create_symlink_qemu(self): :return: None """ - logging.debug("Linking QEMU binaries") + LOG.debug("Linking QEMU binaries") qemu_dst = os.path.join(self.test_builddir, self.QEMU_BIN) qemu_img_dst = os.path.join(self.test_builddir, self.QEMU_IMG_BIN) @@ -203,7 +204,7 @@ def _create_symlink_qemu(self): if qemu_fs_proxy_bin is not None: os.symlink(qemu_fs_proxy_bin, qemu_fs_proxy_dst) else: - logging.warning('Qemu fs proxy path %s not found on source dir') + LOG.warning('Qemu fs proxy path %s not found on source dir', self.QEMU_FS_PROXY_BIN) def _install_phase_init(self): """ diff --git a/virttest/qemu_qtree.py b/virttest/qemu_qtree.py index fc9b7576b9..2d38e792d0 100644 --- a/virttest/qemu_qtree.py +++ b/virttest/qemu_qtree.py @@ -18,6 +18,7 @@ import six +LOG = logging.getLogger('avocado.' 
+ __name__) OFFSET_PER_LEVEL = 2 @@ -414,7 +415,7 @@ def parse_info_block(self, info): name = disk.get_qname() if name not in info: error_msg = "disk %s is in block but not in qtree" % name - logging.error(error_msg) + LOG.error(error_msg) self.errors.append(error_msg) missing += 1 continue @@ -423,7 +424,7 @@ def parse_info_block(self, info): for disk in self.disks: if disk.get_block() == {}: error_msg = "disk in qtree but not in info block\n%s" % disk - logging.error(error_msg) + LOG.error(error_msg) self.errors.append(error_msg) additional += 1 return (additional, missing) @@ -440,7 +441,7 @@ def generate_params(self): disk.generate_params() except ValueError: error_msg = "generate_params error: %s" % disk - logging.error(error_msg) + LOG.error(error_msg) self.errors.append(error_msg) err += 1 return err @@ -494,12 +495,12 @@ def check_guests_proc_scsi(self, info): proc_not_scsi += 1 for disk in disks.difference(scsis): error_msg = 'Disk %s is in qtree but not in /proc/scsi/scsi' % disk - logging.error(error_msg) + LOG.error(error_msg) self.errors.append(error_msg) additional += 1 for disk in scsis.difference(disks): error_msg = 'Disk %s is in /proc/scsi/scsi but not in qtree' % disk - logging.error(error_msg) + LOG.error(error_msg) self.errors.append(error_msg) missing += 1 return (additional, missing, qtree_not_scsi, proc_not_scsi) @@ -530,7 +531,7 @@ def check_drive_format(node, params): except AttributeError: error_msg = "Failed to check drive format," error_msg += " can't get parent of:\n%s" % node - logging.error(error_msg) + LOG.error(error_msg) self.errors.append(error_msg) if actual == 'virtio-scsi-device': # new name for virtio-scsi actual = 'virtio-scsi-pci' @@ -575,7 +576,7 @@ def check_drive_format(node, params): break if not current: error_msg = "Disk %s is not in qtree but is in params." 
% name - logging.error(error_msg) + LOG.error(error_msg) self.errors.append(error_msg) err += 1 continue @@ -585,7 +586,7 @@ def check_drive_format(node, params): out = check_drive_format(current_node, image_params) if out: error_msg = "Disk %s %s" % (qname, out) - logging.error(error_msg) + LOG.error(error_msg) self.errors.append(error_msg) err += 1 handled = True @@ -597,14 +598,14 @@ def check_drive_format(node, params): error_msg += "%s=%s doesn't match" % ( prop, current.get(prop)) error_msg += " params %s" % image_params.get(prop) - logging.error(error_msg) + LOG.error(error_msg) self.errors.append(error_msg) err += 1 disks.pop(qname) if disks: error_msg = "Some disks were in qtree but not in" error_msg += " autotest params: %s" % disks - logging.error(error_msg) + LOG.error(error_msg) self.errors.append(error_msg) err += 1 return err diff --git a/virttest/qemu_storage.py b/virttest/qemu_storage.py index a6897a8dd2..96bf0e2efa 100755 --- a/virttest/qemu_storage.py +++ b/virttest/qemu_storage.py @@ -23,6 +23,8 @@ from virttest import data_dir from virttest import error_context +LOG = logging.getLogger('avocado.' 
+ __name__) + def filename_to_file_opts(filename): """Convert filename into file opts, used by both qemu-img and qemu-kvm""" @@ -777,22 +779,22 @@ def create(self, params, ignore_errors=False): if image_dirname and not os.path.isdir(image_dirname): e_msg = ("Parent directory of the image file %s does " "not exist" % self.image_filename) - logging.error(e_msg) - logging.error("This usually means a serious setup exceptions.") - logging.error("Please verify if your data dir contains the " - "expected directory structure") - logging.error("Backing data dir: %s", - data_dir.get_backing_data_dir()) - logging.error("Directory structure:") + LOG.error(e_msg) + LOG.error("This usually means a serious setup exceptions.") + LOG.error("Please verify if your data dir contains the " + "expected directory structure") + LOG.error("Backing data dir: %s", + data_dir.get_backing_data_dir()) + LOG.error("Directory structure:") for root, _, _ in os.walk(data_dir.get_backing_data_dir()): - logging.error(root) + LOG.error(root) - logging.warning("We'll try to proceed by creating the dir. " - "Other errors may ensue") + LOG.warning("We'll try to proceed by creating the dir. 
" + "Other errors may ensue") os.makedirs(image_dirname) msg = "Create image by command: %s" % qemu_img_cmd - error_context.context(msg, logging.info) + error_context.context(msg, LOG.info) cmd_result = process.run( qemu_img_cmd, shell=True, verbose=False, ignore_status=True) if cmd_result.exit_status != 0 and not ignore_errors: @@ -928,8 +930,8 @@ def convert(self, params, root_dir, cache_mode=None, convert_cmd = self.image_cmd + " " + \ self._cmd_formatter.format(self.convert_cmd, **cmd_dict) - logging.info("Convert image %s from %s to %s", self.image_filename, - self.image_format, convert_image.image_format) + LOG.info("Convert image %s from %s to %s", self.image_filename, + self.image_format, convert_image.image_format) process.run(convert_cmd) if convert_image.encryption_config.key_secret: convert_image.encryption_config.key_secret.save_to_file() @@ -979,8 +981,8 @@ def rebase(self, params, cache_mode=None, source_cache_mode=None): raise exceptions.TestError("Can not find the image parameters need" " for rebase.") - logging.info("Rebase snapshot %s to %s..." % (self.image_filename, - self.base_image_filename)) + LOG.info("Rebase snapshot %s to %s..." % (self.image_filename, + self.base_image_filename)) rebase_cmd = self.image_cmd + " " + \ self._cmd_formatter.format(self.rebase_cmd, **cmd_dict) process.run(rebase_cmd) @@ -1020,7 +1022,7 @@ def commit(self, params={}, cache_mode=None, base=None, drop=False): cmd_dict.pop("image_format") commit_cmd = self.image_cmd + " " + \ self._cmd_formatter.format(self.commit_cmd, **cmd_dict) - logging.info("Commit image %s" % self.image_filename) + LOG.info("Commit image %s" % self.image_filename) process.run(commit_cmd) return self.image_filename @@ -1098,12 +1100,12 @@ def remove(self): """ Remove an image file. 
""" - logging.debug("Removing image file %s", self.image_filename) + LOG.debug("Removing image file %s", self.image_filename) storage.file_remove(self.params, self.image_filename) if self.data_file: - logging.debug("Removing external data file of image %s", - self.data_file.image_filename) + LOG.debug("Removing external data file of image %s", + self.data_file.image_filename) storage.file_remove(self.data_file.params, self.data_file.image_filename) @@ -1135,7 +1137,7 @@ def info(self, force_share=False, output="human"): :param output: string of output format(`human`, `json`) """ - logging.debug("Run qemu-img info command on %s", self.image_filename) + LOG.debug("Run qemu-img info command on %s", self.image_filename) backing_chain = self.params.get("backing_chain") force_share &= self.cap_force_share cmd = self.image_cmd @@ -1161,7 +1163,7 @@ def info(self, force_share=False, output="human"): if "--backing-chain" in self.help_text: cmd += " --backing-chain" else: - logging.warn("'--backing-chain' option is not supported") + LOG.warn("'--backing-chain' option is not supported") if force_share: cmd += " -U" @@ -1175,7 +1177,7 @@ def info(self, force_share=False, output="human"): cmd += " %s --output=%s" % (image_filename, output) output = process.run(cmd, verbose=True).stdout_text else: - logging.debug("Image file %s not found", image_filename) + LOG.debug("Image file %s not found", image_filename) output = None return output @@ -1199,8 +1201,7 @@ def support_cmd(self, cmd): supports_cmd = True if cmd not in self.help_text: - logging.error("%s does not support command '%s'", self.image_cmd, - cmd) + LOG.error("%s does not support command '%s'", self.image_cmd, cmd) supports_cmd = False return supports_cmd @@ -1221,10 +1222,10 @@ def compare_images(self, image1, image2, strict_mode=False, compare_images = self.support_cmd("compare") force_share &= self.cap_force_share if not compare_images: - logging.warn("sub-command compare not supported by qemu-img") + 
LOG.warn("sub-command compare not supported by qemu-img") return None else: - logging.info("Comparing images %s and %s", image1, image2) + LOG.info("Comparing images %s and %s", image1, image2) compare_cmd = "%s compare" % self.image_cmd if force_share: compare_cmd += " -U" @@ -1235,11 +1236,10 @@ def compare_images(self, image1, image2, strict_mode=False, shell=True) if verbose: - logging.debug("Output from command: %s", - cmd_result.stdout_text) + LOG.debug("Output from command: %s", cmd_result.stdout_text) if cmd_result.exit_status == 0: - logging.info("Compared images are equal") + LOG.info("Compared images are equal") elif cmd_result.exit_status == 1: raise exceptions.TestFail("Compared images differ") else: @@ -1259,11 +1259,11 @@ def compare_to(self, target_image, source_cache_mode=None, :return: compare result [process.CmdResult] """ if not self.support_cmd("compare"): - logging.warn("qemu-img subcommand compare not supported") + LOG.warn("qemu-img subcommand compare not supported") return force_share &= self.cap_force_share - logging.info("compare image %s to image %s", - self.image_filename, target_image.image_filename) + LOG.info("compare image %s to image %s", + self.image_filename, target_image.image_filename) cmd_dict = { "image_format": self.image_format, @@ -1334,7 +1334,7 @@ def compare_to(self, target_image, source_cache_mode=None, result = process.run(compare_cmd, ignore_status=True, shell=True) if verbose: - logging.debug("compare output:\n%s", result.stdout_text) + LOG.debug("compare output:\n%s", result.stdout_text) return result @@ -1353,7 +1353,7 @@ def check(self, params, root_dir, force_share=False, output=None): :return: The output of check result if the image exists, or None. 
""" image_filename = self.image_filename - logging.debug("Checking image file %s", image_filename) + LOG.debug("Checking image file %s", image_filename) force_share &= self.cap_force_share cmd_dict = {"image_filename": image_filename, @@ -1410,7 +1410,7 @@ def check_image(self, params, root_dir, force_share=False): :raise VMImageCheckError: In case qemu-img check fails on the image. """ image_filename = self.image_filename - logging.debug("Checking image file %s", image_filename) + LOG.debug("Checking image file %s", image_filename) image_is_checkable = self.image_format in ['qcow2', 'qed'] force_share &= self.cap_force_share @@ -1420,18 +1420,17 @@ def check_image(self, params, root_dir, force_share=False): # FIXME: do we really need it? self.info(force_share) except process.CmdError: - logging.error("Error getting info from image %s", - image_filename) + LOG.error("Error getting info from image %s", image_filename) cmd_result = self.check(params, root_dir, force_share) # Error check, large chances of a non-fatal problem. 
# There are chances that bad data was skipped though if cmd_result.exit_status == 1: stdout = cmd_result.stdout_text for e_line in stdout.splitlines(): - logging.error("[stdout] %s", e_line) + LOG.error("[stdout] %s", e_line) stderr = cmd_result.stderr_text for e_line in stderr.splitlines(): - logging.error("[stderr] %s", e_line) + LOG.error("[stderr] %s", e_line) chk = params.get("backup_image_on_check_error", "no") if chk == "yes": self.backup_image(params, root_dir, "backup", False) @@ -1444,10 +1443,10 @@ def check_image(self, params, root_dir, force_share=False): elif cmd_result.exit_status == 2: stdout = cmd_result.stdout_text for e_line in stdout.splitlines(): - logging.error("[stdout] %s", e_line) + LOG.error("[stdout] %s", e_line) stderr = cmd_result.stderr_text for e_line in stderr.splitlines(): - logging.error("[stderr] %s", e_line) + LOG.error("[stderr] %s", e_line) chk = params.get("backup_image_on_check_error", "no") if chk == "yes": self.backup_image(params, root_dir, "backup", False) @@ -1461,10 +1460,10 @@ def check_image(self, params, root_dir, force_share=False): "though. (%s)" % image_filename) else: if not storage.file_exists(params, image_filename): - logging.debug("Image file %s not found, skipping check", - image_filename) + LOG.debug("Image file %s not found, skipping check", + image_filename) elif not image_is_checkable: - logging.debug( + LOG.debug( "Image format %s is not checkable, skipping check", self.image_format) @@ -1545,7 +1544,7 @@ def amend(self, params, cache_mode=None, ignore_status=False): self.params, self.root_dir)) else: cmd_list.append("-f %s %s" % (self.image_format, self.image_filename)) - logging.info("Amend image %s" % self.image_filename) + LOG.info("Amend image %s" % self.image_filename) cmd_result = process.run(" ".join(cmd_list), ignore_status=ignore_status) return cmd_result @@ -1725,8 +1724,7 @@ def setup(self): Access the iscsi target. And return the local raw device name. 
""" if self.iscsidevice.logged_in(): - logging.warn("Session already present. Don't need to" - " login again") + LOG.warn("Session already present. Don't need to login again") else: self.iscsidevice.login() @@ -1748,11 +1746,11 @@ def cleanup(self): if self.exec_cleanup: self.iscsidevice.cleanup() if self.emulated_file_remove: - logging.debug("Removing file %s", self.emulated_image) + LOG.debug("Removing file %s", self.emulated_image) if os.path.exists(self.emulated_image): os.unlink(self.emulated_image) else: - logging.debug("File %s not found", self.emulated_image) + LOG.debug("File %s not found", self.emulated_image) class LVMdev(storage.LVMdev): diff --git a/virttest/qemu_virtio_port.py b/virttest/qemu_virtio_port.py index add63262ea..42069ed045 100644 --- a/virttest/qemu_virtio_port.py +++ b/virttest/qemu_virtio_port.py @@ -25,6 +25,8 @@ SOCKET_SIZE = 2048 +LOG = logging.getLogger('avocado.' + __name__) + class VirtioPortException(Exception): @@ -71,8 +73,8 @@ def __getstate__(self): """ # TODO: add port cleanup into qemu_vm.py if self.is_open(): - logging.warn("Force closing virtio_port socket, FIX the code to " - " close the socket prior this to avoid possible err.") + LOG.warn("Force closing virtio_port socket, FIX the code to " + " close the socket prior this to avoid possible err.") self.close() return self.__dict__.copy() @@ -128,14 +130,14 @@ def clean_port(self): elif not self.port_was_opened: # BUG: Don't even try opening port which was never used. It # hangs for ever... 
(virtio_console bug) - logging.debug("No need to clean port %s", self) + LOG.debug("No need to clean port %s", self) return - logging.debug("Cleaning port %s", self) + LOG.debug("Cleaning port %s", self) self.open() ret = select.select([self.sock], [], [], 1.0) if ret[0]: buf = self.sock.recv(1024) - logging.debug("Rest in socket: " + repr(buf)) + LOG.debug("Rest in socket: " + repr(buf)) def close(self): """ @@ -244,22 +246,21 @@ def __init__(self, vm): # set echo off (self.cmd() mustn't contain C:) self.session.sendline("echo off") # Compile worker - logging.debug("Compile %s on guest %s", guest_script_py, - self.vm.name) + LOG.debug("Compile %s on guest %s", guest_script_py, self.vm.name) try: self.cmd(cmd_compile, timeout) except VirtioPortException: if not self.os_linux: - logging.error("Script execution failed, do you have python" - " and pywin32 installed? Currently this " - "needs to be done manually!") + LOG.error("Script execution failed, do you have python" + " and pywin32 installed? Currently this " + "needs to be done manually!") raise self.session.sendline() # set echo off (self.cmd() mustn't contain C:) self.session.sendline("echo off") - logging.debug("Starting %so on guest %s", guest_script_py, - self.vm.name) + LOG.debug("Starting %so on guest %s", guest_script_py, + self.vm.name) self._execute_worker(timeout) self._init_guest(timeout) @@ -269,9 +270,9 @@ def _execute_worker(self, timeout=10): self.cmd(self.__cmd_execute_worker, timeout) except VirtioPortException: if not self.os_linux: - logging.error("Script execution failed, do you have python" - " and pywin32 installed? Currently this " - "needs to be done manually!") + LOG.error("Script execution failed, do you have python" + " and pywin32 installed? Currently this " + "needs to be done manually!") raise # Let the system rest # FIXME: Is this always necessarily? 
@@ -320,8 +321,8 @@ def _cmd(self, cmd, timeout=10, patterns=None): """ if not patterns: patterns = ("^PASS:", "^FAIL:") - logging.debug("Executing '%s' on virtio_console_guest.py," - " vm: %s, timeout: %s", cmd, self.vm.name, timeout) + LOG.debug("Executing '%s' on virtio_console_guest.py," + " vm: %s, timeout: %s", cmd, self.vm.name, timeout) self.session.sendline(cmd) try: (match, data) = self.session.read_until_any_line_matches(patterns, @@ -380,7 +381,7 @@ def safe_exit_loopback_threads(self, send_pts, recv_pts): match, tmp = self._cmd("virt.exit_threads()", 3, ("^PASS: All threads" " finished",)) if match is None: - logging.warn("Workaround the stuck thread on guest") + LOG.warn("Workaround the stuck thread on guest") # Thread is stuck in read/write for send_pt in send_pts: timeout = None @@ -405,7 +406,7 @@ def safe_exit_loopback_threads(self, send_pts, recv_pts): match, tmp = self._cmd("print('PASS: nothing')", 10, ('^PASS: nothing', '^FAIL:')) if match != 0: - logging.error("Python is stuck/FAILed after read-out:\n%s", tmp) + LOG.error("Python is stuck/FAILed after read-out:\n%s", tmp) try: self.session.close() self.session = self.vm.wait_for_login() @@ -416,7 +417,7 @@ def safe_exit_loopback_threads(self, send_pts, recv_pts): self._execute_worker() self._init_guest() except Exception as inst: - logging.error(inst) + LOG.error(inst) raise VirtioPortFatalException("virtio-console driver is " "irreparably blocked, further tests might FAIL.") @@ -429,8 +430,8 @@ def cleanup_ports(self): # Check if python is still alive match, tmp = self._cmd("is_alive()", 10) if match != 0: - logging.error("Python died/is stuck/have remaining threads") - logging.debug(tmp) + LOG.error("Python died/is stuck/have remaining threads") + LOG.debug(tmp) try: self.vm.verify_kernel_crash() @@ -440,7 +441,7 @@ def cleanup_ports(self): self.session = self.vm.wait_for_login() # On windows it dies with the connection if match != 0 and self.os_linux: - logging.debug(tmp) + LOG.debug(tmp) 
self.cmd("killall -9 `command -v python python3 | head -1` " "&& echo -n PASS: python killed" "|| echo -n PASS: python was already dead", 10) @@ -450,7 +451,7 @@ def cleanup_ports(self): self._cleanup_ports() except Exception as inst: - logging.error(inst) + LOG.error(inst) raise VirtioPortFatalException("virtio-console driver is " "irreparably blocked, further tests might FAIL.") @@ -467,8 +468,8 @@ def cleanup(self): self.session.close() # On windows it dies with the connection if match != 0 and self.os_linux: - logging.warn('guest_worker stuck during cleanup:\n%s\n,' - ' killing python...', tmp) + LOG.warn('guest_worker stuck during cleanup:\n%s\n,' + ' killing python...', tmp) self.session = self.vm.wait_for_login() self.cmd("killall -9 `command -v python python3 | head -1` " "&& echo -n PASS: python killed" @@ -496,8 +497,7 @@ def __init__(self, port, data, exit_event, quiet=False): # FIXME: socket.send(data>>127998) without read blocks thread if len(data) > 102400: data = data[0:102400] - logging.error("Data is too long, using only first %d bytes", - len(data)) + LOG.error("Data is too long, using only first %d bytes", len(data)) self.data = data self.exitevent = exit_event self.idx = 0 @@ -505,16 +505,15 @@ def __init__(self, port, data, exit_event, quiet=False): self.ret_code = 1 # sets to 0 when finish properly def run(self): - logging.debug("ThSend %s: run", self.getName()) + LOG.debug("ThSend %s: run", self.getName()) try: while not self.exitevent.isSet(): self.idx += self.port.send(self.data) - logging.debug("ThSend %s: exit(%d)", self.getName(), - self.idx) + LOG.debug("ThSend %s: exit(%d)", self.getName(), self.idx) except Exception as ints: if not self.quiet: raise ints - logging.debug(ints) + LOG.debug(ints) self.ret_code = 0 @@ -540,8 +539,7 @@ def __init__(self, port, exit_event, queues, blocklen=1024, # FIXME: socket.send(data>>127998) without read blocks thread if blocklen > 102400: blocklen = 102400 - logging.error("Data is too long, using 
blocklen = %d", - blocklen) + LOG.error("Data is too long, using blocklen = %d", blocklen) self.blocklen = blocklen self.exitevent = exit_event self.migrate_event = migrate_event @@ -550,7 +548,7 @@ def __init__(self, port, exit_event, queues, blocklen=1024, self.reduced_set = reduced_set def run(self): - logging.debug("ThSendCheck %s: run", self.getName()) + LOG.debug("ThSendCheck %s: run", self.getName()) _err_msg_exception = ('ThSendCheck ' + str(self.getName()) + ': Got ' 'exception %s, continuing') _err_msg_disconnect = ('ThSendCheck ' + str(self.getName()) + ': Port ' @@ -580,14 +578,14 @@ def run(self): " is expected behavior set migrate_event " "to support reconnection." % self.getName()) if self.port.sock is None: - logging.debug(_err_msg_disconnect) + LOG.debug(_err_msg_disconnect) while self.port.sock is None: if self.exitevent.isSet(): break time.sleep(0.1) - logging.debug(_err_msg_reconnect) + LOG.debug(_err_msg_reconnect) else: - logging.debug(_err_msg_exception, inst) + LOG.debug(_err_msg_exception, inst) continue if ret[1]: # Generate blocklen of random data add them to the FIFO @@ -614,8 +612,8 @@ def run(self): "pipe. If this is expected behavior " "set migrate_event to support " "reconnection." % self.getName()) - logging.debug("ThSendCheck %s: Broken pipe " - ", reconnecting. ", self.getName()) + LOG.debug("ThSendCheck %s: Broken pipe " + ", reconnecting. 
", self.getName()) attempt = 10 while (attempt > 1 and not self.exitevent.isSet()): @@ -625,8 +623,8 @@ def run(self): pass if self.exitevent.isSet(): break - logging.debug("ThSendCheck %s: Broken pipe resumed" - ", reconnecting...", self.getName()) + LOG.debug("ThSendCheck %s: Broken pipe resumed" + ", reconnecting...", self.getName()) self.port.sock = False self.port.open() try: @@ -638,11 +636,9 @@ def run(self): attempt = 0 buf = buf[idx:] self.idx += idx - logging.debug("ThSendCheck %s: exit(%d)", self.getName(), - self.idx) + LOG.debug("ThSendCheck %s: exit(%d)", self.getName(), self.idx) if too_much_data: - logging.error("ThSendCheck: working around the 'too_much_data'" - "bug") + LOG.error("ThSendCheck: working around the 'too_much_data' bug") self.ret_code = 0 @@ -670,7 +666,7 @@ def __init__(self, port, event, blocklen=1024, quiet=False): self.ret_code = 1 # sets to 0 when finish properly def run(self): - logging.debug("ThRecv %s: run", self.getName()) + LOG.debug("ThRecv %s: run", self.getName()) try: while not self.exitevent.isSet(): # TODO: Workaround, it didn't work with select :-/ @@ -679,11 +675,11 @@ def run(self): except socket.timeout: pass self.port.settimeout(self._port_timeout) - logging.debug("ThRecv %s: exit(%d)", self.getName(), self.idx) + LOG.debug("ThRecv %s: exit(%d)", self.getName(), self.idx) except Exception as ints: if not self.quiet: raise ints - logging.debug(ints) + LOG.debug(ints) self.ret_code = 0 @@ -730,8 +726,8 @@ def reload_loss_idx(self): """ if self.sendidx >= 0: self.minsendidx = min(self.minsendidx, self.sendidx) - logging.debug("ThRecvCheck %s: Previous data loss was %d.", - self.getName(), (self.sendlen - self.sendidx)) + LOG.debug("ThRecvCheck %s: Previous data loss was %d.", + self.getName(), (self.sendlen - self.sendidx)) self.sendidx = self.sendlen def run(self): @@ -741,8 +737,8 @@ def run(self): elif self.debug == 'normal' or not self.debug: self.run_normal() else: - logging.error('ThRecvCheck %s: Unsupported 
debug mode, using ' - 'normal mode.', self.getName()) + LOG.error('ThRecvCheck %s: Unsupported debug mode, using ' + 'normal mode.', self.getName()) self.run_normal() def run_normal(self): @@ -752,7 +748,7 @@ def run_normal(self): after host socket reconnection or you can overwrite this value from other thread. """ - logging.debug("ThRecvCheck %s: run", self.getName()) + LOG.debug("ThRecvCheck %s: run", self.getName()) _err_msg_missing_migrate_ev = ("ThRecvCheck %s: Broken pipe. If " "this is expected behavior set migrate_event to " "support reconnection." % self.getName()) @@ -769,14 +765,14 @@ def run_normal(self): except Exception as inst: # self.port is not yet set while reconnecting if self.port.sock is None: - logging.debug(_err_msg_disconnect) + LOG.debug(_err_msg_disconnect) while self.port.sock is None: if self.exitevent.isSet(): break time.sleep(0.1) - logging.debug(_err_msg_reconnect) + LOG.debug(_err_msg_reconnect) else: - logging.debug(_err_msg_exception, inst) + LOG.debug(_err_msg_exception, inst) continue if ret[0] and (not self.exitevent.isSet()): try: @@ -784,14 +780,14 @@ def run_normal(self): except Exception as inst: # self.port is not yet set while reconnecting if self.port.sock is None: - logging.debug(_err_msg_disconnect) + LOG.debug(_err_msg_disconnect) while self.port.sock is None: if self.exitevent.isSet(): break time.sleep(0.1) - logging.debug(_err_msg_reconnect) + LOG.debug(_err_msg_reconnect) else: - logging.debug(_err_msg_exception, inst) + LOG.debug(_err_msg_exception, inst) continue if buf: # Compare the received data with the control data @@ -809,30 +805,30 @@ def run_normal(self): _char = self.buff.popleft() else: self.exitevent.set() - logging.error("ThRecvCheck %s: " - "Failed to recv %dth " - "character", - self.getName(), self.idx) - logging.error("ThRecvCheck %s: " - "%s != %s", - self.getName(), - repr(char), repr(_char)) - logging.error("ThRecvCheck %s: " - "Recv = %s", - self.getName(), repr(buf)) + LOG.error("ThRecvCheck 
%s: " + "Failed to recv %dth " + "character", + self.getName(), self.idx) + LOG.error("ThRecvCheck %s: " + "%s != %s", + self.getName(), + repr(char), repr(_char)) + LOG.error("ThRecvCheck %s: " + "Recv = %s", + self.getName(), repr(buf)) # sender might change the buff :-( time.sleep(1) _char = b"" for buf in self.buff: _char += buf _char += b' ' - logging.error("ThRecvCheck %s: " - "Queue = %s", - self.getName(), repr(_char)) - logging.info("ThRecvCheck %s: " - "MaxSendIDX = %d", - self.getName(), - (self.sendlen - self.sendidx)) + LOG.error("ThRecvCheck %s: " + "Queue = %s", + self.getName(), repr(_char)) + LOG.info("ThRecvCheck %s: " + "MaxSendIDX = %d", + self.getName(), + (self.sendlen - self.sendidx)) raise exceptions.TestFail("ThRecvCheck %s: " "incorrect data" % self.getName()) @@ -845,8 +841,8 @@ def run_normal(self): self.exitevent.set() raise exceptions.TestFail( _err_msg_missing_migrate_ev) - logging.debug("ThRecvCheck %s: Broken pipe " - ", reconnecting. ", self.getName()) + LOG.debug("ThRecvCheck %s: Broken pipe " + ", reconnecting. ", self.getName()) self.reload_loss_idx() # Wait until main thread sets the new self.port while not (self.exitevent.isSet() or @@ -854,20 +850,19 @@ def run_normal(self): pass if self.exitevent.isSet(): break - logging.debug("ThRecvCheck %s: Broken pipe resumed, " - "reconnecting...", self.getName()) + LOG.debug("ThRecvCheck %s: Broken pipe resumed, " + "reconnecting...", self.getName()) self.port.sock = False self.port.open() if self.sendidx >= 0: self.minsendidx = min(self.minsendidx, self.sendidx) if (self.sendlen - self.minsendidx): - logging.error("ThRecvCheck %s: Data loss occurred during socket" - "reconnection. Maximal loss was %d per one " - "migration.", self.getName(), - (self.sendlen - self.minsendidx)) - logging.debug("ThRecvCheck %s: exit(%d)", self.getName(), - self.idx) + LOG.error("ThRecvCheck %s: Data loss occurred during socket" + "reconnection. 
Maximal loss was %d per one " + "migration.", self.getName(), + (self.sendlen - self.minsendidx)) + LOG.debug("ThRecvCheck %s: exit(%d)", self.getName(), self.idx) self.ret_code = 0 def run_debug(self): @@ -880,7 +875,7 @@ def run_debug(self): Unlike normal run this one supports booth - loss and duplications. It's not friendly to data corruption. """ - logging.debug("ThRecvCheck %s: run", self.getName()) + LOG.debug("ThRecvCheck %s: run", self.getName()) attempt = 10 max_loss = 0 sum_loss = 0 @@ -899,9 +894,9 @@ def run_debug(self): verif_buf.append(_char) else: # Detect the duplicated/lost characters. - logging.debug("ThRecvCheck %s: fail to receive " - "%dth character.", self.getName(), - self.idx) + LOG.debug("ThRecvCheck %s: fail to receive " + "%dth character.", self.getName(), + self.idx) buf = buf[idx_char:] for i in xrange(100): if len(self.buff) < self.sendidx: @@ -910,10 +905,10 @@ def run_debug(self): break sendidx = min(self.sendidx, len(self.buff)) if sendidx < self.sendidx: - logging.debug("ThRecvCheck %s: sendidx was " - "lowered as there is not enough " - "data after 1s. Using sendidx=" - "%s.", self.getName(), sendidx) + LOG.debug("ThRecvCheck %s: sendidx was " + "lowered as there is not enough " + "data after 1s. 
Using sendidx=" + "%s.", self.getName(), sendidx) for _ in xrange(sendidx // self.blocklen): if self.exitevent.isSet(): break @@ -939,9 +934,9 @@ def run_debug(self): self.sendidx -= offset_a max_loss = max(max_loss, offset_a) sum_loss += offset_a - logging.debug("ThRecvCheck %s: DUP %s (out of " - "%s)", self.getName(), offset_a, - sendidx) + LOG.debug("ThRecvCheck %s: DUP %s (out of " + "%s)", self.getName(), offset_a, + sendidx) buf = buf[offset_a + 1:] for _ in xrange(len(buf)): self.buff.popleft() @@ -950,9 +945,9 @@ def run_debug(self): elif offset_b: # Data loss max_loss = max(max_loss, offset_b) sum_loss += offset_b - logging.debug("ThRecvCheck %s: LOST %s (out of" - " %s)", self.getName(), offset_b, - sendidx) + LOG.debug("ThRecvCheck %s: LOST %s (out of" + " %s)", self.getName(), offset_b, + sendidx) # Pop-out the lost characters from verif_queue # (first one is already out) self.sendidx -= offset_b @@ -967,11 +962,11 @@ def run_debug(self): for _ in xrange(-min(sendidx, len(verif_buf)), 0): verif += verif_buf[_] - logging.error("ThRecvCheck %s: mismatched data" - ":\nverified: ..%s\nreceived: " - "%s\nsent: %s", - self.getName(), repr(verif), - repr(buf), repr(queue)) + LOG.error("ThRecvCheck %s: mismatched data" + ":\nverified: ..%s\nreceived: " + "%s\nsent: %s", + self.getName(), repr(verif), + repr(buf), repr(queue)) raise exceptions.TestFail("Recv and sendqueue " "don't match with any offset.") # buf was changed, break from this loop @@ -988,8 +983,8 @@ def run_debug(self): " If this is expected behavior set migrate" "_event to support reconnection." % self.getName()) - logging.debug("ThRecvCheck %s: Broken pipe " - ", reconnecting. ", self.getName()) + LOG.debug("ThRecvCheck %s: Broken pipe " + ", reconnecting. 
", self.getName()) self.reload_loss_idx() # Wait until main thread sets the new self.port while not (self.exitevent.isSet() or @@ -997,22 +992,21 @@ def run_debug(self): pass if self.exitevent.isSet(): break - logging.debug("ThRecvCheck %s: Broken pipe resumed, " - "reconnecting...", self.getName()) + LOG.debug("ThRecvCheck %s: Broken pipe resumed, " + "reconnecting...", self.getName()) self.port.sock = False self.port.open() if self.sendidx >= 0: self.minsendidx = min(self.minsendidx, self.sendidx) if (self.sendlen - self.minsendidx): - logging.debug("ThRecvCheck %s: Data loss occurred during socket" - "reconnection. Maximal loss was %d per one " - "migration.", self.getName(), - (self.sendlen - self.minsendidx)) + LOG.debug("ThRecvCheck %s: Data loss occurred during socket" + "reconnection. Maximal loss was %d per one " + "migration.", self.getName(), + (self.sendlen - self.minsendidx)) if sum_loss > 0: - logging.debug("ThRecvCheck %s: Data offset detected, cumulative " - "err: %d, max err: %d(%d)", self.getName(), sum_loss, - max_loss, float(max_loss) / self.blocklen) - logging.debug("ThRecvCheck %s: exit(%d)", self.getName(), - self.idx) + LOG.debug("ThRecvCheck %s: Data offset detected, cumulative " + "err: %d, max err: %d(%d)", self.getName(), sum_loss, + max_loss, float(max_loss) / self.blocklen) + LOG.debug("ThRecvCheck %s: exit(%d)", self.getName(), self.idx) self.ret_code = 0 diff --git a/virttest/qemu_vm.py b/virttest/qemu_vm.py index 52bf074b7e..be2c16847a 100644 --- a/virttest/qemu_vm.py +++ b/virttest/qemu_vm.py @@ -7,7 +7,7 @@ from __future__ import division import time import os -import logging as log +import logging import fcntl import re import random @@ -52,7 +52,7 @@ # Using as lower capital is not the best way to do, but this is just a # workaround to avoid changing the entire file. -logging = log.getLogger('avocado.' + __name__) +LOG = logging.getLogger('avocado.' 
+ __name__) class QemuSegFaultError(virt_vm.VMError): @@ -335,7 +335,7 @@ def verify_disk_image_bootable(self): try: seabios_log = self.logsessions['seabios'].get_output() if re.search(pattern, seabios_log, re.S): - logging.error("Can't boot guest from image.") + LOG.error("Can't boot guest from image.") # Set 'shutdown_command' to None to force autotest # shuts down guest with monitor. self.params["shutdown_command"] = None @@ -512,8 +512,8 @@ def add_human_monitor(devices, monitor_name, filename): def add_qmp_monitor(devices, monitor_name, filename): if not devices.has_option("qmp"): - logging.warn("Fallback to human monitor since qmp is" - " unsupported") + LOG.warn("Fallback to human monitor since qmp is" + " unsupported") return add_human_monitor(devices, monitor_name, filename) if not devices.has_option("chardev"): @@ -674,7 +674,7 @@ def add_net(devices, vlan, nettype, ifname=None, tftp=None, elif nettype == 'user': mode = 'user' else: - logging.warning("Unknown/unsupported nettype %s" % nettype) + LOG.warning("Unknown/unsupported nettype %s" % nettype) return '' if devices.has_option("netdev"): @@ -702,7 +702,7 @@ def add_net(devices, vlan, nettype, ifname=None, tftp=None, else: txt += " qemu do not support vhostfd." if txt: - logging.warn(txt) + LOG.warn(txt) # For negative test if add_vhostfd: cmd += ",vhostfd=%(vhostfd)s" @@ -860,7 +860,7 @@ def add_qemu_option(devices, name, optsinfo): device.set_param(key, val, vtype, False) devices.insert(device) else: - logging.warn("option '-%s' not supportted" % name) + LOG.warn("option '-%s' not supportted" % name) def add_pcidevice(devices, host, params, device_driver="pci-assign", pci_bus='pci.0'): @@ -886,7 +886,7 @@ def add_pcidevice(devices, host, params, device_driver="pci-assign", msg = ("parameter %s is not support in device pci-assign." 
" It only support following parameter:\n %s" % (", ".join(fail_param), pcidevice_help)) - logging.warn(msg) + LOG.warn(msg) devices.insert(dev) def add_virtio_rng(devices, rng_params, parent_bus="pci.0"): @@ -961,8 +961,8 @@ def add_memorys(devices, params): if not usable_mem_m: raise exceptions.TestError("Insufficient memory to" " start a VM.") - logging.info("Auto set guest memory size to %s MB" % - usable_mem_m) + LOG.info("Auto set guest memory size to %s MB" % + usable_mem_m) mem_size_m = usable_mem_m # vm_mem_limit(max) and vm_mem_minimum(min) take control here @@ -970,8 +970,8 @@ def add_memorys(devices, params): max_mem_size_m = params.get("vm_mem_limit") max_mem_size_m = float(normalize_data_size(max_mem_size_m)) if mem_size_m >= max_mem_size_m: - logging.info("Guest max memory is limited to %s" - % max_mem_size_m) + LOG.info("Guest max memory is limited to %s" + % max_mem_size_m) mem_size_m = max_mem_size_m if mem_params.get("vm_mem_minimum"): @@ -1277,11 +1277,11 @@ def add_boot(devices, opts): machine_type = params.get('machine_type', "") if (machine_type.startswith("arm") or machine_type.startswith('riscv')): - logging.warn("-boot on %s is usually not supported, use " - "bootindex instead.", machine_type) + LOG.warn("-boot on %s is usually not supported, use " + "bootindex instead.", machine_type) return "" if machine_type.startswith("s390"): - logging.warn("-boot on s390x only support boot strict=on") + LOG.warn("-boot on s390x only support boot strict=on") return "-boot strict=on" cmd = " -boot" options = [] @@ -1490,7 +1490,7 @@ def sort_key(dev): if params.get("numa_node"): numa_node = int(params.get("numa_node")) if len(utils_misc.get_node_cpus()) < int(params.get("smp", 1)): - logging.info("Skip pinning, no enough nodes") + LOG.info("Skip pinning, no enough nodes") elif numa_node < 0: n = utils_misc.NumaNode(numa_node) cmd += "numactl -m %s " % n.node_id @@ -1614,8 +1614,8 @@ def sort_key(dev): break else: cpu_model = model - logging.error("Non 
existing CPU model %s will be passed " - "to qemu (wrong config or negative test)", model) + LOG.error("Non existing CPU model %s will be passed " + "to qemu (wrong config or negative test)", model) if use_default_cpu_model: cpu_model = params.get("default_cpu_model", "") @@ -1649,7 +1649,7 @@ def sort_key(dev): not cpu_model.startswith('EPYC')): vcpu_threads = 1 txt = "Set vcpu_threads to 1 for AMD non-EPYC cpu." - logging.warn(txt) + LOG.warn(txt) smp_err = "" if vcpu_maxcpus != 0: @@ -1764,11 +1764,11 @@ def sort_key(dev): if params.get("numa_consistency_check_cpu_mem", "no") == "yes": if (numa_total_cpus > vcpu_maxcpus or numa_total_mem > int(mem) or len(params.objects("guest_numa_nodes")) > vcpu_maxcpus): - logging.debug("-numa need %s vcpu and %s memory. It is not " - "matched the -smp and -mem. The vcpu number " - "from -smp is %s, and memory size from -mem is" - " %s" % (numa_total_cpus, numa_total_mem, - vcpu_maxcpus, mem)) + LOG.debug("-numa need %s vcpu and %s memory. It is not " + "matched the -smp and -mem. 
The vcpu number " + "from -smp is %s, and memory size from -mem is" + " %s" % (numa_total_cpus, numa_total_mem, + vcpu_maxcpus, mem)) raise virt_vm.VMDeviceError("The numa node cfg can not fit" " smp and memory cfg.") @@ -1804,8 +1804,8 @@ def sort_key(dev): ctype = cpu_driver_items[cpu_driver_items.index("cpu") - 1] self.cpuinfo.qemu_type = ctype except ValueError: - logging.warning("Can not assign cpuinfo.type, assign as" - " 'unknown'") + LOG.warning("Can not assign cpuinfo.type, assign as" + " 'unknown'") self.cpuinfo.qemu_type = "unknown" cmd = add_cpu_flags(devices, cpu_model, flags, vendor, family) devices.insert(StrDev('cpu', cmdline=cmd)) @@ -1872,7 +1872,7 @@ def sort_key(dev): else: pvpanic = 'pvpanic' if not devices.has_device(pvpanic): - logging.warn("%s device is not supported", pvpanic) + LOG.warn("%s device is not supported", pvpanic) else: if pvpanic == 'pvpanic-pci': pvpanic_dev = qdevices.QDevice(pvpanic, @@ -1896,7 +1896,7 @@ def sort_key(dev): # Add vmcoreinfo device if params.get("vmcoreinfo") == "yes": if not devices.has_device("vmcoreinfo"): - logging.warn("vmcoreinfo device is not supported") + LOG.warn("vmcoreinfo device is not supported") else: vmcoreinfo_dev = qdevices.QDevice("vmcoreinfo") devices.insert(vmcoreinfo_dev) @@ -2423,10 +2423,10 @@ def sort_key(dev): if not params.get("vm_accelerator"): if (params.get("enable_kvm", "yes") == "no"): devices.insert(StrDev('nokvm', cmdline=disable_kvm_option)) - logging.debug("qemu will run in TCG mode") + LOG.debug("qemu will run in TCG mode") else: devices.insert(StrDev('kvm', cmdline=enable_kvm_option)) - logging.debug("qemu will run in KVM mode") + LOG.debug("qemu will run in KVM mode") compat = params.get("qemu_compat") if compat and devices.has_option("compat"): @@ -2545,9 +2545,9 @@ def sort_key(dev): root_port.set_param('addr', port_addr) devices.insert(root_port) except DeviceError: - logging.warning("No sufficient free slot for extra" - " root port, discarding %d of them" - % 
(extra_port_num - num)) + LOG.warning("No sufficient free slot for extra" + " root port, discarding %d of them" + % (extra_port_num - num)) break return devices, spice_options @@ -2567,8 +2567,8 @@ def _nic_tap_add_helper(self, nic): else: nic.tapfds = utils_net.open_tap("/dev/net/tun", nic.ifname, queues=nic.queues, vnet_hdr=True) - logging.debug("Adding VM %s NIC ifname %s to bridge %s", - self.name, nic.ifname, nic.netdst) + LOG.debug("Adding VM %s NIC ifname %s to bridge %s", + self.name, nic.ifname, nic.netdst) if nic.nettype == 'bridge': utils_net.add_to_bridge(nic.ifname, nic.netdst) utils_net.bring_up_ifname(nic.ifname) @@ -2576,12 +2576,12 @@ def _nic_tap_add_helper(self, nic): def _nic_tap_remove_helper(self, nic): try: if nic.nettype == 'macvtap': - logging.info("Remove macvtap ifname %s", nic.ifname) + LOG.info("Remove macvtap ifname %s", nic.ifname) tap = utils_net.Macvtap(nic.ifname) tap.delete() else: - logging.debug("Removing VM %s NIC ifname %s from bridge %s", - self.name, nic.ifname, nic.netdst) + LOG.debug("Removing VM %s NIC ifname %s from bridge %s", + self.name, nic.ifname, nic.netdst) if nic.tapfds: for i in nic.tapfds.split(':'): os.close(int(i)) @@ -2604,7 +2604,7 @@ def _create_serial_console(self): Note: requires a version of netcat that supports -U """ if self.serial_session_device is None: - logging.warning("No serial ports defined!") + LOG.warning("No serial ports defined!") return log_name = "serial-%s-%s.log" % ( self.serial_session_device, self.name) @@ -2678,9 +2678,9 @@ def update_system_dependent_devs(self): devs = devices.get_by_params({'netdev_id': netdev_id}) # TODO: Is every NIC a PCI device? if len(devs) > 1: - logging.error("There are %d devices with netdev_id %s." - " This shouldn't happens." % (len(devs), - netdev_id)) + LOG.error("There are %d devices with netdev_id %s." + " This shouldn't happens." 
% (len(devs), + netdev_id)) devs[0].params.update(net_params) def update_vga_global_default(self, params, migrate=None): @@ -2726,16 +2726,16 @@ def qmp_monitors(self): @property def spice_port(self): - logging.warning("'VM.spice_port' will be removed by the end of " - "the year 2017, please use 'self.spice_options." - "get(\"spice_port\")' instead") + LOG.warning("'VM.spice_port' will be removed by the end of " + "the year 2017, please use 'self.spice_options." + "get(\"spice_port\")' instead") return self.spice_options.get("spice_port") @property def spice_tls_port(self): - logging.warning("'VM.spice_tls_port' will be removed by the end of " - "the year 2017, please use 'self.spice_options." - "get(\"spice_tls_port\")' instead") + LOG.warning("'VM.spice_tls_port' will be removed by the end of " + "the year 2017, please use 'self.spice_options." + "get(\"spice_tls_port\")' instead") return self.spice_options.get("spice_tls_port") @error_context.context_aware @@ -2817,29 +2817,29 @@ def create(self, name=None, params=None, root_dir=None, compare = False if cdrom_params.get("skip_hash", "no") == "yes": - logging.debug("Skipping hash comparison") + LOG.debug("Skipping hash comparison") elif cdrom_params.get("md5sum_1m"): - logging.debug("Comparing expected MD5 sum with MD5 sum of " - "first MB of ISO file...") + LOG.debug("Comparing expected MD5 sum with MD5 sum of " + "first MB of ISO file...") actual_hash = crypto.hash_file(iso, 1048576, algorithm="md5") expected_hash = cdrom_params.get("md5sum_1m") compare = True elif cdrom_params.get("md5sum"): - logging.debug("Comparing expected MD5 sum with MD5 sum of " - "ISO file...") + LOG.debug("Comparing expected MD5 sum with MD5 sum of " + "ISO file...") actual_hash = crypto.hash_file(iso, algorithm="md5") expected_hash = cdrom_params.get("md5sum") compare = True elif cdrom_params.get("sha1sum"): - logging.debug("Comparing expected SHA1 sum with SHA1 sum " - "of ISO file...") + LOG.debug("Comparing expected SHA1 sum with 
SHA1 sum " + "of ISO file...") actual_hash = crypto.hash_file(iso, algorithm="sha1") expected_hash = cdrom_params.get("sha1sum") compare = True if compare: if actual_hash == expected_hash: - logging.debug("Hashes match") + LOG.debug("Hashes match") else: raise virt_vm.VMHashMismatchError(actual_hash, expected_hash) @@ -2929,8 +2929,8 @@ def create(self, name=None, params=None, root_dir=None, if mac_source: # Will raise exception if source doesn't # have corresponding nic - logging.debug("Copying mac for nic %s from VM %s" - % (nic.nic_name, mac_source.name)) + LOG.debug("Copying mac for nic %s from VM %s" + % (nic.nic_name, mac_source.name)) nic.mac = mac_source.get_mac_address(nic.nic_name) if nic.ifname in utils_net.get_net_if(): @@ -2956,9 +2956,9 @@ def create(self, name=None, params=None, root_dir=None, for fd in vhostfds: pass_fds.append(int(fd)) elif nic.nettype == 'user': - logging.info("Assuming dependencies met for " - "user mode nic %s, and ready to go" - % nic.nic_name) + LOG.info("Assuming dependencies met for " + "user mode nic %s, and ready to go" + % nic.nic_name) # Update the fd and vhostfd for nic devices if self.devices is not None: for device in self.devices: @@ -2989,8 +2989,8 @@ def create(self, name=None, params=None, root_dir=None, self.pa_pci_ids = self.pci_assignable.request_devs() if self.pa_pci_ids: - logging.debug("Successfully assigned devices: %s", - self.pa_pci_ids) + LOG.debug("Successfully assigned devices: %s", + self.pa_pci_ids) else: raise virt_vm.VMPAError(pa_type) @@ -3008,8 +3008,8 @@ def create(self, name=None, params=None, root_dir=None, try: self.devices, self.spice_options = self.make_create_command() self.update_vga_global_default(params, migration_mode) - logging.debug(self.devices.str_short()) - logging.debug(self.devices.str_bus_short()) + LOG.debug(self.devices.str_short()) + LOG.debug(self.devices.str_bus_short()) qemu_command = self.devices.cmdline() except (exceptions.TestSkipError, exceptions.TestCancel): # 
TestSkipErrors should be kept as-is so we generate SKIP @@ -3080,15 +3080,15 @@ def create(self, name=None, params=None, root_dir=None, proxy_helper_cmd += " -s " + p9_socket_name proxy_helper_cmd += " -n" - logging.info("Running Proxy Helper:\n%s", proxy_helper_cmd) + LOG.info("Running Proxy Helper:\n%s", proxy_helper_cmd) self.process = aexpect.run_tail(proxy_helper_cmd, None, - logging.info, + LOG.info, "[9p proxy helper]", auto_close=False) else: - logging.info("Running qemu command (reformatted):\n%s", - qemu_command.replace(" -", " \\\n -")) + LOG.info("Running qemu command (reformatted):\n%s", + qemu_command.replace(" -", " \\\n -")) self.qemu_command = qemu_command monitor_exit_status = \ params.get("vm_monitor_exit_status", "yes") == "yes" @@ -3096,11 +3096,11 @@ def create(self, name=None, params=None, root_dir=None, qemu_command, partial(qemu_proc_term_handler, self, monitor_exit_status), - logging.info, "[qemu output] ", + LOG.info, "[qemu output] ", auto_close=False, pass_fds=pass_fds) - logging.info("Created qemu process with parent PID %d", - self.process.get_pid()) + LOG.info("Created qemu process with parent PID %d", + self.process.get_pid()) self.start_time = time.time() self.start_monotonic_time = utils_misc.monotonic_time() @@ -3128,7 +3128,7 @@ def create(self, name=None, params=None, root_dir=None, # Make sure qemu is not defunct if self.process.is_defunct(): - logging.error("Bad things happened, qemu process is defunct") + LOG.error("Bad things happened, qemu process is defunct") err = ("Qemu is defunct.\nQemu output:\n%s" % self.process.get_output()) self.destroy() @@ -3159,7 +3159,7 @@ def create(self, name=None, params=None, root_dir=None, m_params, timeout) except qemu_monitor.MonitorConnectError as detail: - logging.error(detail) + LOG.error(detail) self.destroy() raise @@ -3182,7 +3182,7 @@ def create(self, name=None, params=None, root_dir=None, self.destroy() raise e - logging.debug("VM appears to be alive with PID %s", self.get_pid()) 
+ LOG.debug("VM appears to be alive with PID %s", self.get_pid()) # Record vcpu infos in debug log is_preconfig = params.get_boolean("qemu_preconfig") if not is_preconfig: @@ -3299,7 +3299,7 @@ def _shutdown_by_sendline(): if self.params.get("shutdown_command"): # Try to destroy with shell command - logging.debug("Shutting down VM %s (shell)", self.name) + LOG.debug("Shutting down VM %s (shell)", self.name) try: if len(self.virtnet) > 0: session = self.login() @@ -3309,12 +3309,12 @@ def _shutdown_by_sendline(): try: session = self.serial_login() except (remote.LoginError, virt_vm.VMError) as e: - logging.debug(e) + LOG.debug(e) else: # Successfully get session by serial_login() _shutdown_by_sendline() except (remote.LoginError, virt_vm.VMError) as e: - logging.debug(e) + LOG.debug(e) else: # There is no exception occurs _shutdown_by_sendline() @@ -3384,7 +3384,7 @@ def _cleanup(self, free_mac_addresses): nic = port_mapping.pop(inactive_port) self._del_port_from_bridge(nic) for active_port in port_mapping.keys(): - logging.warning("Deleting %s failed during tap cleanup" % active_port) + LOG.warning("Deleting %s failed during tap cleanup" % active_port) def destroy(self, gracefully=True, free_mac_addresses=True): """ @@ -3405,50 +3405,50 @@ def destroy(self, gracefully=True, free_mac_addresses=True): if self.is_dead(): return - logging.debug("Destroying VM %s (PID %s)", self.name, - self.get_pid()) + LOG.debug("Destroying VM %s (PID %s)", self.name, + self.get_pid()) kill_timeout = int(self.params.get("kill_timeout", "60")) if gracefully: self.graceful_shutdown(kill_timeout) if self.is_dead(): - logging.debug("VM %s down (shell)", self.name) + LOG.debug("VM %s down (shell)", self.name) return else: - logging.debug("VM %s failed to go down (shell)", self.name) + LOG.debug("VM %s failed to go down (shell)", self.name) if self.monitor: # Try to finish process with a monitor command - logging.debug("Ending VM %s process (monitor)", self.name) + LOG.debug("Ending VM %s 
process (monitor)", self.name) try: self.monitor.quit() except Exception as e: - logging.warn(e) + LOG.warn(e) if self.is_dead(): - logging.warn("VM %s down during try to kill it " - "by monitor", self.name) + LOG.warn("VM %s down during try to kill it " + "by monitor", self.name) return else: # Wait for the VM to be really dead if self.wait_until_dead(5, 0.5, 0.5): - logging.debug("VM %s down (monitor)", self.name) + LOG.debug("VM %s down (monitor)", self.name) return else: - logging.debug("VM %s failed to go down (monitor)", - self.name) + LOG.debug("VM %s failed to go down (monitor)", + self.name) # If the VM isn't dead yet... pid = self.process.get_pid() - logging.debug("Ending VM %s process (killing PID %s)", - self.name, pid) + LOG.debug("Ending VM %s process (killing PID %s)", + self.name, pid) try: utils_misc.kill_process_tree(pid, 9, timeout=60) - logging.debug("VM %s down (process killed)", self.name) + LOG.debug("VM %s down (process killed)", self.name) except RuntimeError: # If all else fails, we've got a zombie... 
- logging.error("VM %s (PID %s) is a zombie!", self.name, - self.process.get_pid()) + LOG.error("VM %s (PID %s) is a zombie!", self.name, + self.process.get_pid()) finally: self._cleanup(free_mac_addresses) @@ -3505,9 +3505,9 @@ def get_peer(self, netid): netdev_peer_re = self.params.get("netdev_peer_re") if not netdev_peer_re: default_netdev_peer_re = "\s{2,}(.*?): .*?\\\s(.*?):" - logging.warning("Missing config netdev_peer_re for VM %s, " - "using default %s", self.name, - default_netdev_peer_re) + LOG.warning("Missing config netdev_peer_re for VM %s, " + "using default %s", self.name, + default_netdev_peer_re) netdev_peer_re = default_netdev_peer_re pairs = re.findall(netdev_peer_re, network_info, re.S) @@ -3625,7 +3625,7 @@ def get_shared_meminfo(self): :return: Shared memory used by VM (MB) """ if self.is_dead(): - logging.error("Could not get shared memory info from dead VM.") + LOG.error("Could not get shared memory info from dead VM.") return None filename = "/proc/%d/statm" % self.get_pid() @@ -3925,7 +3925,7 @@ def activate_netdev(self, nic_index_or_name): if nic.nettype in ['bridge', 'macvtap']: net_backend = "tap" error_context.context("Opening tap device node for %s " % nic.ifname, - logging.debug) + LOG.debug) if nic.nettype == "bridge": tun_tap_dev = "/dev/net/tun" python_tapfds = utils_net.open_tap(tun_tap_dev, nic.ifname, @@ -3942,7 +3942,7 @@ def activate_netdev(self, nic_index_or_name): openfd_list = os.listdir(qemu_fds) for i in range(int(nic.queues)): error_context.context("Assigning tap %s to qemu by fd" % - nic.tapfd_ids[i], logging.info) + nic.tapfd_ids[i], LOG.info) self.monitor.getfd(int(python_tapfds.split(':')[i]), nic.tapfd_ids[i]) n_openfd_list = os.listdir(qemu_fds) @@ -3967,12 +3967,12 @@ def activate_netdev(self, nic_index_or_name): else: netdev_args["fd"] = nic.tapfds error_context.context("Raising interface for " + msg_sfx, - logging.debug) + LOG.debug) utils_net.bring_up_ifname(nic.ifname) # assume this will puke if netdst unset 
if nic.netdst is not None and nic.nettype == "bridge": error_context.context("Raising bridge for " + msg_sfx, - logging.debug) + LOG.debug) utils_net.add_to_bridge(nic.ifname, nic.netdst) elif nic.nettype == 'user': net_backend = "user" @@ -3988,12 +3988,12 @@ def activate_netdev(self, nic_index_or_name): elif arg_k in ["sndbuf", "queues", "poll-us", "ipv6-prefixlen"]: arg_v = int(arg_v) netdev_args.update({arg_k: arg_v}) - error_context.context("Hotplugging " + msg_sfx, logging.debug) + error_context.context("Hotplugging " + msg_sfx, LOG.debug) self.monitor.netdev_add(net_backend, netdev_id, **netdev_args) network_info = self.monitor.info("network", debug=False) if not re.search(r'{}:'.format(netdev_id), network_info): - logging.error(network_info) + LOG.error(network_info) # Don't leave resources dangling self.deactivate_netdev(nic_index_or_name) raise virt_vm.VMAddNetDevError(("Failed to add netdev: %s for " % @@ -4084,7 +4084,7 @@ def deactivate_netdev(self, nic_index_or_name): network_info = self.monitor.info("network", debug=False) if re.search(r'{}:'.format(netdev_id), network_info): - logging.error(network_info) + LOG.error(network_info) raise virt_vm.VMDelNetDevError("Fail to remove netdev %s" % netdev_id) if nic.nettype == 'macvtap': @@ -4113,7 +4113,7 @@ def send_fd(self, fd, fd_name="migfd"): "Send fd %d like %s to VM %s" % (fd, fd_name, self.name)) - logging.debug("Send file descriptor %s to source VM.", fd_name) + LOG.debug("Send file descriptor %s to source VM.", fd_name) if self.monitor.protocol == 'human': self.monitor.cmd("getfd %s" % (fd_name), fd=fd) elif self.monitor.protocol == 'qmp': @@ -4315,7 +4315,7 @@ def _set_migrate_capability(vm, capability, value, is_src_vm=True): else: dest_tls_port = "" cert_subj = "" - logging.debug("Informing migration to spice client") + LOG.debug("Informing migration to spice client") commands = ["__com.redhat_spice_migrate_info", "spice_migrate_info", "client_migrate_info"] @@ -4365,7 +4365,7 @@ def 
_set_migrate_capability(vm, capability, value, is_src_vm=True): if (local and not (migration_exec_cmd_src and "gzip" in migration_exec_cmd_src)): - error_context.context("Set migrate capabilities.", logging.info) + error_context.context("Set migrate capabilities.", LOG.info) # XXX: Sync with migration workflow of libvirt by the latest # version, since almost no longer use the older version, but # will fix it if there are requirements testing still need @@ -4375,15 +4375,15 @@ def _set_migrate_capability(vm, capability, value, is_src_vm=True): if migrate_capabilities: error_context.context( - "Set migrate capabilities.", logging.info) + "Set migrate capabilities.", LOG.info) for key, value in list(migrate_capabilities.items()): _set_migrate_capability(self, key, value, True) _set_migrate_capability(clone, key, value, False) # source qemu migration parameters dict if migrate_parameters[0]: - logging.info("Set source migrate parameters before migration: " - "%s", str(migrate_parameters[0])) + LOG.info("Set source migrate parameters before migration: " + "%s", str(migrate_parameters[0])) for parameter, value in migrate_parameters[0].items(): if (parameter == "x-multifd-page-count" and not self.DISABLE_AUTO_X_MIG_OPTS): @@ -4394,9 +4394,9 @@ def _set_migrate_capability(vm, capability, value, is_src_vm=True): except qemu_monitor.MonitorNotSupportedError: # x-multifd-page-count was dropped without # replacement, ignore this param - logging.warn("Parameter x-multifd-page-count " - "not supported on src, probably " - "newer qemu, not setting it.") + LOG.warn("Parameter x-multifd-page-count " + "not supported on src, probably " + "newer qemu, not setting it.") continue else: self.monitor.set_migrate_parameter(parameter, value, @@ -4412,8 +4412,8 @@ def _set_migrate_capability(vm, capability, value, is_src_vm=True): # target qemu migration parameters dict if migrate_parameters[1]: - logging.info("Set target migrate parameters before migration: " - "%s", 
str(migrate_parameters[1])) + LOG.info("Set target migrate parameters before migration: " + "%s", str(migrate_parameters[1])) # target qemu migration parameters configuration for parameter, value in migrate_parameters[1].items(): if (parameter == "x-multifd-page-count" and @@ -4423,9 +4423,9 @@ def _set_migrate_capability(vm, capability, value, is_src_vm=True): value, True, False) except qemu_monitor.MonitorNotSupportedError: - logging.warn("Parameter x-multifd-page-count " - "not supported on dst, probably " - "newer qemu, not setting it.") + LOG.warn("Parameter x-multifd-page-count " + "not supported on dst, probably " + "newer qemu, not setting it.") # x-multifd-page-count was dropped without # replacement, ignore this param continue @@ -4441,7 +4441,7 @@ def _set_migrate_capability(vm, capability, value, is_src_vm=True): % (parameter, value, s)) raise exceptions.TestError(msg) - logging.info("Migrating to %s", uri) + LOG.info("Migrating to %s", uri) if clone.deferral_incoming: _uri = uri if protocol == 'tcp': @@ -4475,7 +4475,7 @@ def _set_migrate_capability(vm, capability, value, is_src_vm=True): if cancel_delay: error_context.context("Do migrate_cancel after %d seconds" % - cancel_delay, logging.info) + cancel_delay, LOG.info) time.sleep(cancel_delay) self.monitor.cmd("migrate_cancel") if not utils_misc.wait_for(self.mig_cancelled, 60, 2, 2, @@ -4502,7 +4502,7 @@ def _set_migrate_capability(vm, capability, value, is_src_vm=True): # Report migration status if self.mig_succeeded(): - logging.info("Migration completed successfully") + LOG.info("Migration completed successfully") elif self.mig_failed(): raise virt_vm.VMMigrateFailedError("Migration failed") else: @@ -4611,8 +4611,8 @@ def _go_down_qmp(): try: return bool(self.monitor.get_event("RESET")) except (qemu_monitor.MonitorSocketError, AttributeError): - logging.warn("MonitorSocketError while querying for RESET QMP " - "event, it might get lost.") + LOG.warn("MonitorSocketError while querying for RESET 
QMP " + "event, it might get lost.") return False def _shell_reboot(session, timeout): @@ -4623,10 +4623,10 @@ def _shell_reboot(session, timeout): else: session = self.wait_for_serial_login(timeout=timeout) reboot_cmd = self.params.get("reboot_command") - logging.debug("Send command: %s" % reboot_cmd) + LOG.debug("Send command: %s" % reboot_cmd) session.cmd(reboot_cmd, ignore_all_errors=True) - error_context.base_context("rebooting '%s'" % self.name, logging.info) + error_context.base_context("rebooting '%s'" % self.name, LOG.info) error_context.context("before reboot") error_context.context() @@ -4648,14 +4648,14 @@ def _shell_reboot(session, timeout): _check_go_down = _go_down_qmp self.monitor.clear_event("RESET") else: - logging.warning("No suitable way to check for reboot, assuming" - " it already rebooted") + LOG.warning("No suitable way to check for reboot, assuming" + " it already rebooted") _check_go_down = partial(bool, True) try: # TODO detect and handle guest crash? _reboot() - error_context.context("waiting for guest to go down", logging.info) + error_context.context("waiting for guest to go down", LOG.info) if not utils_misc.wait_for(_check_go_down, timeout=timeout): raise virt_vm.VMRebootError("Guest refuses to go down") finally: @@ -4665,7 +4665,7 @@ def _shell_reboot(session, timeout): self.monitor.clear_event("RESET") shutdown_dur = int(time.time() - start_time) - error_context.context("logging in after reboot", logging.info) + error_context.context("logging in after reboot", LOG.info) if self.params.get("mac_changeable") == "yes": utils_net.update_mac_ip_address(self) @@ -4700,7 +4700,7 @@ def screendump(self, filename, debug=True): if self.catch_monitor: self.catch_monitor.screendump(filename=filename, debug=debug) except qemu_monitor.MonitorError as e: - logging.warn(e) + LOG.warn(e) def save_to_file(self, path): """ @@ -4710,7 +4710,7 @@ def save_to_file(self, path): # Set high speed 1TB/S qemu_migration.set_speed(self, str(2 << 39)) 
qemu_migration.set_downtime(self, self.MIGRATE_TIMEOUT) - logging.debug("Saving VM %s to %s" % (self.name, path)) + LOG.debug("Saving VM %s to %s" % (self.name, path)) # Can only check status if background migration self.monitor.migrate("exec:cat>%s" % path, wait=False) utils_misc.wait_for( @@ -4732,7 +4732,7 @@ def restore_from_file(self, path): Override BaseVM restore_from_file method """ self.verify_status('paused') # Throws exception if not - logging.debug("Restoring VM %s from %s" % (self.name, path)) + LOG.debug("Restoring VM %s from %s" % (self.name, path)) # Rely on create() in incoming migration mode to do the 'right thing' self.create(name=self.name, params=self.params, root_dir=self.root_dir, timeout=self.MIGRATE_TIMEOUT, migration_mode="exec", @@ -4744,7 +4744,7 @@ def savevm(self, tag_name): Override BaseVM savevm method """ self.verify_status('paused') # Throws exception if not - logging.debug("Saving VM %s to %s" % (self.name, tag_name)) + LOG.debug("Saving VM %s to %s" % (self.name, tag_name)) self.monitor.send_args_cmd("savevm id=%s" % tag_name) self.monitor.cmd("system_reset") self.verify_status('paused') # Throws exception if not @@ -4754,7 +4754,7 @@ def loadvm(self, tag_name): Override BaseVM loadvm method """ self.verify_status('paused') # Throws exception if not - logging.debug("Loading VM %s from %s" % (self.name, tag_name)) + LOG.debug("Loading VM %s from %s" % (self.name, tag_name)) self.monitor.send_args_cmd("loadvm id=%s" % tag_name) self.verify_status('paused') # Throws exception if not @@ -4910,7 +4910,7 @@ def get_block(self, p_dict={}): # for new qemu we just deal with key = [removable, # file,backing_file], for other types key, we should # fixup later - logging.info("block = %s" % block) + LOG.info("block = %s" % block) if key == 'removable': if value is False: if 'Removable device' not in block: @@ -4979,7 +4979,7 @@ def live_snapshot(self, base_file, snapshot_file, output = self.monitor.live_snapshot(device, snapshot_file, 
format=snapshot_format) - logging.debug(output) + LOG.debug(output) device = self.get_block({"file": snapshot_file}) if device: current_file = device diff --git a/virttest/remote.py b/virttest/remote.py index c9dd43691a..0d8101c357 100644 --- a/virttest/remote.py +++ b/virttest/remote.py @@ -21,6 +21,8 @@ from virttest.remote_commander import remote_master from virttest.remote_commander import messenger +LOG = logging.getLogger('avocado.' + __name__) + def ssh_login_to_migrate(client, host, port, username, password, prompt, linesep="\n", log_filename=None, log_function=None, timeout=10, @@ -82,7 +84,7 @@ def ssh_login_to_migrate(client, host, port, username, password, prompt, linesep cmd += " %s@%s" % (username, host) if verbose: - logging.debug("Login command: '%s'", cmd) + LOG.debug("Login command: '%s'", cmd) session = aexpect.ShellSession(cmd, linesep=linesep, prompt=prompt, status_test_command=status_test_command) try: @@ -115,8 +117,8 @@ def wait_for_ssh_login_to_migrate(client, host, port, username, password, prompt :raise: Whatever remote_login() raises :return: A RemoteSession object. 
""" - logging.debug("Attempting to log into %s:%s using %s (timeout %ds)", - host, port, client, timeout) + LOG.debug("Attempting to log into %s:%s using %s (timeout %ds)", + host, port, client, timeout) end_time = time.time() + timeout verbose = False while time.time() < end_time: @@ -126,7 +128,7 @@ def wait_for_ssh_login_to_migrate(client, host, port, username, password, prompt internal_timeout, interface, verbose=verbose, preferred_authenticaton=preferred_authenticaton) except LoginError as error: - logging.debug(error) + LOG.debug(error) verbose = True time.sleep(2) # Timeout expired; try one more time but don't catch exceptions @@ -197,7 +199,7 @@ def remote_commander(client, host, port, username, password, prompt, else: raise LoginBadClientError(client) - logging.debug("Login command: '%s'", cmd) + LOG.debug("Login command: '%s'", cmd) session = aexpect.Expect(cmd, linesep=linesep) try: handle_prompts(session, username, password, prompt, timeout) @@ -243,17 +245,17 @@ def run_remote_cmd(cmd, params, remote_runner=None, ignore_status=True): password=remote_pwd) cmdresult = remote_runner.run(cmd, ignore_status=ignore_status) - logging.debug("Remote runner run result:\n%s", cmdresult) + LOG.debug("Remote runner run result:\n%s", cmdresult) if cmdresult.exit_status and not ignore_status: raise exceptions.TestFail("Failed to run '%s' on remote: %s" % (cmd, cmdresult)) return cmdresult except (LoginError, LoginTimeoutError, LoginAuthenticationError, LoginProcessTerminatedError) as e: - logging.error(e) + LOG.error(e) raise exceptions.TestError(e) except process.CmdError as cmderr: - logging.error("Remote runner run failed:\n%s", cmderr) + LOG.error("Remote runner run failed:\n%s", cmderr) raise exceptions.TestFail("Failed to run '%s' on remote: %s" % (cmd, cmderr)) @@ -291,8 +293,8 @@ def pull_file(self, local_path, timeout=600): """ Copy file from remote to local. """ - logging.debug("Pull remote: '%s' to local: '%s'." 
% (self.remote_path, - local_path)) + LOG.debug("Pull remote: '%s' to local: '%s'." % (self.remote_path, + local_path)) copy_files_from(self.address, self.cp_client, self.username, self.password, self.cp_port, self.remote_path, local_path, timeout=timeout) @@ -301,8 +303,8 @@ def push_file(self, local_path, timeout=600): """ Copy file from local to remote. """ - logging.debug("Push local: '%s' to remote: '%s'." % (local_path, - self.remote_path)) + LOG.debug("Push local: '%s' to remote: '%s'." % (local_path, + self.remote_path)) copy_files_to(self.address, self.cp_client, self.username, self.password, self.cp_port, local_path, self.remote_path, timeout=timeout) @@ -633,10 +635,10 @@ def setup_ssh_auth(self): cmd = "ls %s %s" % (pri_key, pub_key) result = self.runner.run(cmd, ignore_status=True) if result.exit_status: - logging.debug("Create new SSH key pair") + LOG.debug("Create new SSH key pair") self.runner.run("ssh-keygen -t rsa -q -N '' -f %s" % pri_key) else: - logging.info("SSH key pair already exist") + LOG.info("SSH key pair already exist") session = self.runner.session # To avoid the host key checking ssh_options = "%s %s" % ("-o UserKnownHostsFile=/dev/null", @@ -654,7 +656,7 @@ def check_network(self, count=5, timeout=60): :param count: counter to ping :param timeout: seconds to wait for """ - logging.debug("Check VM network connectivity...") + LOG.debug("Check VM network connectivity...") vm_net_connectivity = False sleep_time = 5 result = "" @@ -667,7 +669,7 @@ def check_network(self, count=5, timeout=60): continue else: vm_net_connectivity = True - logging.info(result.stdout_text) + LOG.info(result.stdout_text) break if not vm_net_connectivity: @@ -694,7 +696,7 @@ def run_command(self, command, runner=None, ignore_status=False, timeout=CMD_TIM try: ret = self.runner.run(cmd, timeout=timeout, ignore_status=ignore_status) except process.CmdError as detail: - logging.debug("Failed to run '%s' in the VM: %s", cmd, detail) + LOG.debug("Failed to run '%s' 
in the VM: %s", cmd, detail) raise exceptions.TestFail("Failed to run '%s' in the VM: %s", cmd, detail) return ret diff --git a/virttest/remote_build.py b/virttest/remote_build.py index 75fd310fd6..f4f23201fa 100644 --- a/virttest/remote_build.py +++ b/virttest/remote_build.py @@ -8,6 +8,9 @@ from virttest import data_dir +LOG = logging.getLogger('avocado.' + __name__) + + class BuildError(Exception): def __init__(self, error_info): @@ -200,8 +203,8 @@ def visit(arg, dir_name, file_names): need_build = False if to_transfer: - logging.info("Need to copy files to %s on target" % - self.full_build_path) + LOG.info("Need to copy files to %s on target" % + self.full_build_path) need_build = True # Create all directories @@ -228,8 +231,8 @@ def visit(arg, dir_name, file_names): remote_path) else: - logging.info("Directory %s on target already up-to-date" % - self.full_build_path) + LOG.info("Directory %s on target already up-to-date" % + self.full_build_path) return need_build @@ -237,7 +240,7 @@ def make(self): """ Execute make on the remote system """ - logging.info("Building in %s on target" % self.full_build_path) + LOG.info("Building in %s on target" % self.full_build_path) cmd = 'make -C %s %s' % (self.full_build_path, self.make_flags) status, output = self.session.cmd_status_output(cmd) if not status == 0: diff --git a/virttest/shared/deps/run_autotest/boottool.py b/virttest/shared/deps/run_autotest/boottool.py index 1d688a5f5c..5db114399f 100755 --- a/virttest/shared/deps/run_autotest/boottool.py +++ b/virttest/shared/deps/run_autotest/boottool.py @@ -86,7 +86,7 @@ # # Default log object # -log = logging.getLogger('boottool') +LOG = logging.getLogger('avocado.vt.boottool') def find_header(hdr): @@ -218,7 +218,7 @@ class EfiToolSys(object): def __init__(self): if not os.path.exists(self.BASE_PATH): sys.exit(-1) - self.log = logging.getLogger(self.__class__.__name__) + self.log = logging.getLogger('avocado.' 
+ self.__class__.__name__) def create_variable(self, name, data, guid=None, attributes=None): ''' @@ -682,7 +682,7 @@ def install_grubby_if_necessary(path=None): executable = find_executable(path) if executable is None: - log.info('Installing grubby because it was not found on this system') + LOG.info('Installing grubby because it was not found on this system') grubby = Grubby() path = grubby.grubby_install() installed_grubby = True @@ -690,13 +690,13 @@ def install_grubby_if_necessary(path=None): grubby = Grubby(executable) current_version = grubby.get_grubby_version() if current_version is None: - log.error('Could not find version for grubby executable "%s"', + LOG.error('Could not find version for grubby executable "%s"', executable) path = grubby.grubby_install() installed_grubby = True elif current_version < GRUBBY_REQ_VERSION: - log.info('Installing grubby because currently installed ' + LOG.info('Installing grubby because currently installed ' 'version (%s.%s) is not recent enough', current_version[0], current_version[1]) path = grubby.grubby_install() @@ -705,7 +705,7 @@ def install_grubby_if_necessary(path=None): if installed_grubby: grubby = Grubby(path) installed_version = grubby.get_grubby_version_raw() - log.debug('Installed: %s', installed_version) + LOG.debug('Installed: %s', installed_version) class GrubbyInstallException(Exception): @@ -733,7 +733,7 @@ def __init__(self, path=None, opts=None): self._set_path(path) self.bootloader = None self.opts = opts - self.log = logging.getLogger(self.__class__.__name__) + self.log = logging.getLogger('avocado.' 
+ self.__class__.__name__) if 'BOOTTOOL_DEBUG_RUN' in os.environ: self.debug_run = True @@ -801,7 +801,7 @@ def _run_get_output(self, arguments): if result is not None: result = result.strip() if self.debug_run: - logging.debug('previous command output: "%s"', result) + self.log.debug('previous command output: "%s"', result) else: self.log.error('_run_get_output error while running: "%s"', ' '.join(arguments)) @@ -828,7 +828,7 @@ def _run_get_output_err(self, arguments): if result is not None: result = result.strip() if self.debug_run: - logging.debug('previous command output/error: "%s"', result) + self.log.debug('previous command output/error: "%s"', result) else: self.log.error('_run_get_output_err error while running: "%s"', ' '.join(arguments)) @@ -845,7 +845,7 @@ def _run_get_return(self, arguments): try: result = subprocess.call(arguments) if self.debug_run: - logging.debug('previous command result: %s', result) + self.log.debug('previous command result: %s', result) except OSError: result = -1 self.log.error('caught OSError, returning %s', result) @@ -1954,7 +1954,7 @@ def __init__(self): self.args = None self.option_parser = OptionParser() self.grubby = None - self.log = logging.getLogger(self.__class__.__name__) + self.log = logging.getLogger('avocado.' + self.__class__.__name__) def _parse_command_line(self): ''' diff --git a/virttest/shared/deps/run_autotest/kernel_install/kernelinstall.py b/virttest/shared/deps/run_autotest/kernel_install/kernelinstall.py index d3e7c61200..2841b5ebda 100644 --- a/virttest/shared/deps/run_autotest/kernel_install/kernelinstall.py +++ b/virttest/shared/deps/run_autotest/kernel_install/kernelinstall.py @@ -6,6 +6,8 @@ from autotest.client import utils from autotest.client.shared import git, error, software_manager +LOG = logging.getLogger('avocado.' 
+ __name__) + class kernelinstall(test.test): version = 1 @@ -19,7 +21,7 @@ def _kernel_install_rpm(self, rpm_file, kernel_deps_rpms=None, directory (client/test/kernelinstall) """ if kernel_deps_rpms: - logging.info("Installing kernel dependencies.") + LOG.info("Installing kernel dependencies.") if isinstance(kernel_deps_rpms, list): kernel_deps_rpms = " ".join(kernel_deps_rpms) self.sm.install(kernel_deps_rpms) @@ -27,7 +29,7 @@ def _kernel_install_rpm(self, rpm_file, kernel_deps_rpms=None, dst = os.path.join("/tmp", os.path.basename(rpm_file)) knl = utils.get_file(rpm_file, dst) kernel = self.job.kernel(knl) - logging.info("Installing kernel %s", rpm_file) + LOG.info("Installing kernel %s", rpm_file) kernel.install(install_vmlinux=False) if need_reboot: @@ -41,7 +43,7 @@ def _kernel_install_koji(self, kernel_koji_spec, kernel_deps_koji_spec, # we avoid lookup errors due to SSL problems, so let's go with that. for koji_package in ['koji', 'brewkoji']: if not self.sm.check_installed(koji_package): - logging.debug("%s missing - trying to install", koji_package) + LOG.debug("%s missing - trying to install", koji_package) self.sm.install(koji_package) sys.path.append(self.bindir) @@ -54,7 +56,7 @@ def _kernel_install_koji(self, kernel_koji_spec, kernel_deps_koji_spec, deps_rpms = [] k_dep = utils_koji.KojiPkgSpec(text=kernel_deps_koji_spec) - logging.info('Fetching kernel dependencies: %s', kernel_deps_koji_spec) + LOG.info('Fetching kernel dependencies: %s', kernel_deps_koji_spec) c.get_pkgs(k_dep, self.bindir) rpm_file_name_list = c.get_pkg_rpm_file_names(k_dep) if len(rpm_file_name_list) == 0: @@ -64,7 +66,7 @@ def _kernel_install_koji(self, kernel_koji_spec, kernel_deps_koji_spec, deps_rpms.append(os.path.join(self.bindir, dep_rpm_basename)) k = utils_koji.KojiPkgSpec(text=kernel_koji_spec) - logging.info('Fetching kernel: %s', kernel_koji_spec) + LOG.info('Fetching kernel: %s', kernel_koji_spec) c.get_pkgs(k, self.bindir) rpm_file_name_list = 
c.get_pkg_rpm_file_names(k) if len(rpm_file_name_list) == 0: @@ -121,8 +123,8 @@ def _kernel_install_git(self, repo, config, repo_base=None, def execute(self, install_type="koji", params=None): need_reboot = params.get("need_reboot") == "yes" - logging.info("Chose to install kernel through '%s', proceeding", - install_type) + LOG.info("Chose to install kernel through '%s', proceeding", + install_type) if install_type == "rpm": rpm_url = params.get("kernel_rpm_path") @@ -156,5 +158,5 @@ def execute(self, install_type="koji", params=None): self._kernel_install_src(src_pkg, config, None, patch_list, need_reboot) else: - logging.error("Could not find '%s' method, " - "keep the current kernel.", install_type) + LOG.error("Could not find '%s' method, " + "keep the current kernel.", install_type) diff --git a/virttest/ssh_key.py b/virttest/ssh_key.py index d54219ac4d..f029729614 100644 --- a/virttest/ssh_key.py +++ b/virttest/ssh_key.py @@ -9,6 +9,8 @@ from virttest import remote as remote_old +LOG = logging.getLogger('avocado.' 
+ __name__) + def get_public_key(client_user=None): """ @@ -47,15 +49,15 @@ def get_public_key(client_user=None): os.path.isfile(rsa_private_key_path)) if has_rsa_keypair: - logging.info('RSA keypair found, using it') + LOG.info('RSA keypair found, using it') public_key_path = rsa_public_key_path elif has_dsa_keypair: - logging.info('DSA keypair found, using it') + LOG.info('DSA keypair found, using it') public_key_path = dsa_public_key_path else: - logging.info('Neither RSA nor DSA keypair found, creating RSA ssh key pair') + LOG.info('Neither RSA nor DSA keypair found, creating RSA ssh key pair') if os.environ.get('USER') != 'root': process.system('ssh-keygen -t rsa -q -N \"\" -f %s' % rsa_private_key_path, shell=True) @@ -100,16 +102,16 @@ def get_remote_public_key(session, public_key="rsa"): has_rsa_keypair = rsa_public_s == 0 and rsa_private_s == 0 if has_dsa_keypair and public_key == "dsa": - logging.info('DSA keypair found on %s, using it', session) + LOG.info('DSA keypair found on %s, using it', session) public_key_path = dsa_public_key_path elif has_rsa_keypair and public_key == "rsa": - logging.info('RSA keypair found on %s, using it', session) + LOG.info('RSA keypair found on %s, using it', session) public_key_path = rsa_public_key_path else: - logging.info('Neither RSA nor DSA keypair found, ' - 'creating %s ssh key pair' % public_key) + LOG.info('Neither RSA nor DSA keypair found, ' + 'creating %s ssh key pair' % public_key) key_path = rsa_private_key_path public_key_path = rsa_public_key_path if public_key == "dsa": @@ -137,8 +139,8 @@ def setup_ssh_key(hostname, user, password, port=22, client_user=None): user to login into the server :type client_user: str """ - logging.debug('Performing SSH key setup on %s:%d as %s.' % - (hostname, port, user)) + LOG.debug('Performing SSH key setup on %s:%d as %s.' 
% + (hostname, port, user)) try: session = remote.remote_login(client='ssh', host=hostname, @@ -151,10 +153,10 @@ def setup_ssh_key(hostname, user, password, port=22, client_user=None): session.cmd("echo '%s' >> ~/.ssh/authorized_keys; " % public_key) session.cmd('chmod 600 ~/.ssh/authorized_keys') - logging.debug('SSH key setup complete.') + LOG.debug('SSH key setup complete.') except Exception: - logging.debug('SSH key setup has failed.') + LOG.debug('SSH key setup has failed.') finally: try: @@ -182,8 +184,8 @@ def setup_remote_ssh_key(hostname1, user1, password1, :param config_options: list of options eg: ["StrictHostKeyChecking=no"] :type config_options: list of str """ - logging.debug('Performing SSH key setup on %s:%d as %s.' % - (hostname1, port, user1)) + LOG.debug('Performing SSH key setup on %s:%d as %s.' % + (hostname1, port, user1)) try: session1 = remote.remote_login(client='ssh', host=hostname1, port=port, @@ -214,9 +216,9 @@ def setup_remote_ssh_key(hostname1, user1, password1, session2.cmd_output("echo '%s' >> ~/.ssh/authorized_keys; " % public_key) session2.cmd_output('chmod 600 ~/.ssh/authorized_keys') - logging.debug('SSH key setup on %s complete.', session2) + LOG.debug('SSH key setup on %s complete.', session2) except Exception as err: - logging.debug('SSH key setup has failed: %s', err) + LOG.debug('SSH key setup has failed: %s', err) try: session1.close() session2.close() @@ -242,13 +244,13 @@ def setup_remote_known_hosts_file(client_ip, server_ip, :rtype: remote_old.RemoteFile :return: None if required command is not found """ - logging.debug('Performing known_hosts file setup on %s from %s.' % - (server_ip, client_ip)) + LOG.debug('Performing known_hosts file setup on %s from %s.' 
% + (server_ip, client_ip)) abs_path = "" try: abs_path = path.find_command("ssh-keyscan") except path.CmdNotFoundError as err: - logging.debug("Failed to find the command: %s", err) + LOG.debug("Failed to find the command: %s", err) return None cmd = "%s %s" % (abs_path, client_ip) diff --git a/virttest/staging/lv_utils.py b/virttest/staging/lv_utils.py index b351f4b008..02988dbb03 100644 --- a/virttest/staging/lv_utils.py +++ b/virttest/staging/lv_utils.py @@ -49,6 +49,8 @@ from virttest import error_context +LOG = logging.getLogger('avocado.' + __name__) + @error_context.context_aware def vg_ramdisk(vg_name, ramdisk_vg_size, @@ -57,7 +59,7 @@ def vg_ramdisk(vg_name, ramdisk_vg_size, Create vg on top of ram memory to speed up lv performance. """ error_context.context("Creating virtual group on top of ram memory", - logging.info) + LOG.info) vg_size = ramdisk_vg_size vg_ramdisk_dir = os.path.join(ramdisk_basedir, vg_name) ramdisk_filename = os.path.join(vg_ramdisk_dir, @@ -69,18 +71,18 @@ def vg_ramdisk(vg_name, ramdisk_vg_size, if not os.path.exists(vg_ramdisk_dir): os.mkdir(vg_ramdisk_dir) try: - logging.info("Mounting tmpfs") + LOG.info("Mounting tmpfs") result = process.run("mount -t tmpfs tmpfs " + vg_ramdisk_dir) - logging.info("Converting and copying /dev/zero") + LOG.info("Converting and copying /dev/zero") cmd = ("dd if=/dev/zero of=" + ramdisk_filename + " bs=1M count=1 seek=" + vg_size) result = process.run(cmd, verbose=True) - logging.info("Finding free loop device") + LOG.info("Finding free loop device") result = process.run("losetup --find", verbose=True) except process.CmdError as ex: - logging.error(ex) + LOG.error(ex) vg_ramdisk_cleanup(ramdisk_filename, vg_ramdisk_dir, vg_name, "") raise ex @@ -88,19 +90,19 @@ def vg_ramdisk(vg_name, ramdisk_vg_size, loop_device = result.stdout_text.rstrip() try: - logging.info("Creating loop device") + LOG.info("Creating loop device") result = process.run("losetup " + loop_device + " " + ramdisk_filename) - 
logging.info("Creating physical volume %s", loop_device) + LOG.info("Creating physical volume %s", loop_device) result = process.run("pvcreate " + loop_device) - logging.info("Creating volume group %s", vg_name) + LOG.info("Creating volume group %s", vg_name) result = process.run("vgcreate " + vg_name + " " + loop_device) except process.CmdError as ex: - logging.error(ex) + LOG.error(ex) vg_ramdisk_cleanup(ramdisk_filename, vg_ramdisk_dir, vg_name, loop_device) raise ex - logging.info(result.stdout_text.rstrip()) + LOG.info(result.stdout_text.rstrip()) def vg_ramdisk_cleanup(ramdisk_filename, vg_ramdisk_dir, @@ -110,41 +112,41 @@ def vg_ramdisk_cleanup(ramdisk_filename, vg_ramdisk_dir, """ result = process.run("vgremove " + vg_name, ignore_status=True) if result.exit_status == 0: - logging.info(result.stdout_text.rstrip()) + LOG.info(result.stdout_text.rstrip()) else: - logging.debug("%s -> %s", result.command, result.stderr_text) + LOG.debug("%s -> %s", result.command, result.stderr_text) result = process.run("pvremove " + loop_device, ignore_status=True) if result.exit_status == 0: - logging.info(result.stdout_text.rstrip()) + LOG.info(result.stdout_text.rstrip()) else: - logging.debug("%s -> %s", result.command, result.stderr_text) + LOG.debug("%s -> %s", result.command, result.stderr_text) for _ in range(10): time.sleep(0.1) result = process.run("losetup -d " + loop_device, ignore_status=True) if b"resource busy" not in result.stderr: if result.exit_status != 0: - logging.debug("%s -> %s", result.command, result.stderr_text) + LOG.debug("%s -> %s", result.command, result.stderr_text) else: - logging.info("Loop device %s deleted", loop_device) + LOG.info("Loop device %s deleted", loop_device) break if os.path.exists(ramdisk_filename): os.unlink(ramdisk_filename) - logging.info("Ramdisk filename %s deleted", ramdisk_filename) + LOG.info("Ramdisk filename %s deleted", ramdisk_filename) process.run("umount " + vg_ramdisk_dir, ignore_status=True) if 
result.exit_status == 0: if loop_device != "": - logging.info("Loop device %s unmounted", loop_device) + LOG.info("Loop device %s unmounted", loop_device) else: - logging.debug("%s -> %s", result.command, result.stderr_text) + LOG.debug("%s -> %s", result.command, result.stderr_text) if os.path.exists(vg_ramdisk_dir): try: shutil.rmtree(vg_ramdisk_dir) - logging.info("Ramdisk directory %s deleted", vg_ramdisk_dir) + LOG.info("Ramdisk directory %s deleted", vg_ramdisk_dir) except OSError: pass @@ -156,7 +158,7 @@ def vg_check(vg_name): cmd = "vgdisplay " + vg_name try: process.run(cmd) - logging.debug("Provided volume group exists: " + vg_name) + LOG.debug("Provided volume group exists: " + vg_name) return True except process.CmdError: return False @@ -198,7 +200,7 @@ def vg_create(vg_name, pv_list): """ error_context.context( "Creating volume group '%s' by using '%s'" % - (vg_name, pv_list), logging.info) + (vg_name, pv_list), LOG.info) if vg_check(vg_name): raise exceptions.TestError("Volume group '%s' already exist" % vg_name) @@ -207,7 +209,7 @@ def vg_create(vg_name, pv_list): result = process.run(cmd, ignore_status=True) cmd = "vgcreate %s %s" % (vg_name, pv_list) result = process.run(cmd) - logging.info(result.stdout_text.rstrip()) + LOG.info(result.stdout_text.rstrip()) @error_context.context_aware @@ -215,14 +217,14 @@ def vg_remove(vg_name): """ Remove a volume group. 
""" - error_context.context("Removing volume '%s'" % vg_name, logging.info) + error_context.context("Removing volume '%s'" % vg_name, LOG.info) if not vg_check(vg_name): raise exceptions.TestError( "Volume group '%s' could not be found" % vg_name) cmd = "vgremove -f %s" % vg_name result = process.run(cmd) - logging.info(result.stdout_text.rstrip()) + LOG.info(result.stdout_text.rstrip()) # Update cached state after remove VG cmd = "vgscan --cache" result = process.run(cmd, ignore_status=True) @@ -239,8 +241,8 @@ def lv_check(vg_name, lv_name): lvpattern = r"LV Path\s+/dev/" + vg_name + r"/" + lv_name + "\s+" match = re.search(lvpattern, result.stdout_text.rstrip()) if match: - logging.debug("Provided logical volume exists: /dev/" + - vg_name + "/" + lv_name) + LOG.debug("Provided logical volume exists: /dev/" + + vg_name + "/" + lv_name) return True else: return False @@ -252,7 +254,7 @@ def lv_remove(vg_name, lv_name): Remove a logical volume. """ error_context.context("Removing volume /dev/%s/%s" % - (vg_name, lv_name), logging.info) + (vg_name, lv_name), LOG.info) if not vg_check(vg_name): raise exceptions.TestError("Volume group could not be found") @@ -261,7 +263,7 @@ def lv_remove(vg_name, lv_name): cmd = "lvremove -f " + vg_name + "/" + lv_name result = process.run(cmd) - logging.info(result.stdout_text.rstrip()) + LOG.info(result.stdout_text.rstrip()) @error_context.context_aware @@ -272,7 +274,7 @@ def lv_create(vg_name, lv_name, lv_size, force_flag=True): The volume group must already exist. 
""" error_context.context("Creating original lv to take a snapshot from", - logging.info) + LOG.info) if not vg_check(vg_name): raise exceptions.TestError("Volume group could not be found") @@ -283,7 +285,7 @@ def lv_create(vg_name, lv_name, lv_size, force_flag=True): cmd = ("lvcreate --size " + lv_size + " --name " + lv_name + " " + vg_name) result = process.run(cmd) - logging.info(result.stdout_text.rstrip()) + LOG.info(result.stdout_text.rstrip()) @error_context.context_aware @@ -293,7 +295,7 @@ def lv_take_snapshot(vg_name, lv_name, Take a snapshot of the original logical volume. """ error_context.context("Taking snapshot from original logical volume", - logging.info) + LOG.info) if not vg_check(vg_name): raise exceptions.TestError("Volume group could not be found") @@ -312,13 +314,13 @@ def lv_take_snapshot(vg_name, lv_name, re.search(re.escape(lv_snapshot_name + " [active]"), process.run("lvdisplay").stdout_text)): # the above conditions detect if merge of snapshot was postponed - logging.warning(("Logical volume %s is still active! " + - "Attempting to deactivate..."), lv_name) + LOG.warning(("Logical volume %s is still active! " + + "Attempting to deactivate..."), lv_name) lv_reactivate(vg_name, lv_name) result = process.run(cmd) else: raise ex - logging.info(result.stdout_text.rstrip()) + LOG.info(result.stdout_text.rstrip()) @error_context.context_aware @@ -327,7 +329,7 @@ def lv_revert(vg_name, lv_name, lv_snapshot_name): Revert the origin to a snapshot. """ error_context.context("Reverting original logical volume to snapshot", - logging.info) + LOG.info) try: if not vg_check(vg_name): raise exceptions.TestError("Volume group could not be found") @@ -357,16 +359,16 @@ def lv_revert(vg_name, lv_name, lv_snapshot_name): re.search(re.escape(lv_snapshot_name + " [active]"), process.run("lvdisplay").stdout_text)) or ("The logical volume %s is still active" % lv_name) in ex_str): - logging.warning(("Logical volume %s is still active! 
" + - "Attempting to deactivate..."), lv_name) + LOG.warning(("Logical volume %s is still active! " + + "Attempting to deactivate..."), lv_name) lv_reactivate(vg_name, lv_name) result = "Continuing after reactivation" elif 'Snapshot could not be found' in ex_str: - logging.error(ex) + LOG.error(ex) result = "Could not revert to snapshot" else: raise ex - logging.info(result) + LOG.info(result) @error_context.context_aware @@ -377,7 +379,7 @@ def lv_revert_with_snapshot(vg_name, lv_name, """ error_context.context("Reverting to snapshot and taking a new one", - logging.info) + LOG.info) lv_revert(vg_name, lv_name, lv_snapshot_name) lv_take_snapshot(vg_name, lv_name, lv_snapshot_name, lv_snapshot_size) @@ -396,8 +398,8 @@ def lv_reactivate(vg_name, lv_name, timeout=10): process.run("lvchange -ay /dev/%s/%s" % (vg_name, lv_name)) time.sleep(timeout) except process.CmdError: - logging.error(("Failed to reactivate %s - please, " + - "nuke the process that uses it first."), lv_name) + LOG.error(("Failed to reactivate %s - please, " + + "nuke the process that uses it first."), lv_name) raise exceptions.TestError( "The logical volume %s is still active" % lv_name) diff --git a/virttest/staging/service.py b/virttest/staging/service.py index c1a4d670fb..9563770ca5 100644 --- a/virttest/staging/service.py +++ b/virttest/staging/service.py @@ -86,6 +86,8 @@ """ +LOG = logging.getLogger('avocado.' + __name__) + def sysvinit_status_parser(cmdResult=None): """ @@ -394,7 +396,7 @@ def default_method(cmdResult): return True if command was executed successfully. """ if cmdResult.exit_status: - logging.debug(cmdResult) + LOG.debug(cmdResult) return False else: return True @@ -469,7 +471,7 @@ def run(**kwargs): We will not let the CmdError out. :return: result of parse_func. 
""" - logging.debug("Setting ignore_status to True.") + LOG.debug("Setting ignore_status to True.") kwargs["ignore_status"] = True result = run_func(" ".join(command(service_name)), **kwargs) result.stdout = result.stdout_text @@ -531,7 +533,7 @@ def run(service="", **kwargs): We will not let the CmdError out. :return: result of parse_func. """ - logging.debug("Setting ignore_status to True.") + LOG.debug("Setting ignore_status to True.") kwargs["ignore_status"] = True result = run_func(" ".join(command(service)), **kwargs) return parse_func(result) @@ -922,7 +924,7 @@ def command(self, operate, output=False): cmd = "systemctl %s %s" % (operate, self.service_name) status, output = self.session.cmd_status_output(cmd, timeout=self.timeout) if status != 0: - logging.error("%s returned unexpected status %s", cmd, status) + LOG.error("%s returned unexpected status %s", cmd, status) if output: return output return status diff --git a/virttest/staging/utils_cgroup.py b/virttest/staging/utils_cgroup.py index a43a94f190..65782db872 100755 --- a/virttest/staging/utils_cgroup.py +++ b/virttest/staging/utils_cgroup.py @@ -22,6 +22,8 @@ from . import service +LOG = logging.getLogger('avocado.' 
+ __name__) + class Cgroup(object): @@ -192,8 +194,8 @@ def rm_cgroup(self, pwd): os.rmdir(pwd) self.cgroups.remove(pwd) except ValueError: - logging.warn("cg.rm_cgroup(): Removed cgroup which wasn't created" - "using this Cgroup") + LOG.warn("cg.rm_cgroup(): Removed cgroup which wasn't created" + "using this Cgroup") except Exception as inst: raise exceptions.TestError("cg.rm_cgroup(): %s" % inst) @@ -293,7 +295,7 @@ def test(self, cmd): :param cmd: command to be executed :return: subprocess.Popen() process """ - logging.debug("cg.test(): executing parallel process '%s'", cmd) + LOG.debug("cg.test(): executing parallel process '%s'", cmd) cmd = self._client + ' ' + cmd process = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, @@ -393,7 +395,7 @@ def set_property_h(self, prop, value, pwd=None, check=True, checkprop=None): if value[-1] in human: value = int(value[:-1]) * human[value[-1]] except Exception: - logging.warn("cg.set_prop() fallback into cg.set_property.") + LOG.warn("cg.set_prop() fallback into cg.set_property.") value = _value self.set_property(prop, value, pwd, check, checkprop) @@ -539,14 +541,14 @@ def __del__(self): try: process.system('umount %s -l' % self.modules[1][i]) except Exception as failure_detail: - logging.warn("CGM: Couldn't unmount %s directory: %s", - self.modules[1][i], failure_detail) + LOG.warn("CGM: Couldn't unmount %s directory: %s", + self.modules[1][i], failure_detail) try: if self.rm_mountdir: # If delete /cgroup/, this action will break cgroup service. shutil.rmtree(self.mountdir) except Exception: - logging.warn( + LOG.warn( "CGM: Couldn't remove the %s directory", self.mountdir) def init(self, _modules): @@ -557,7 +559,7 @@ def init(self, _modules): :param _modules: Desired modules.'memory','cpu,cpuset'... :return: Number of initialized modules. 
""" - logging.debug("Desired cgroup modules: %s", _modules) + LOG.debug("Desired cgroup modules: %s", _modules) mounts = [] with open('/proc/mounts', 'r') as proc_mounts: line = proc_mounts.readline().split() @@ -591,9 +593,9 @@ def init(self, _modules): self.modules[1].append(module_path) self.modules[2].append(True) except process.CmdError: - logging.info("Cgroup module '%s' not available", module) + LOG.info("Cgroup module '%s' not available", module) - logging.debug("Initialized cgroup modules: %s", self.modules[0]) + LOG.debug("Initialized cgroup modules: %s", self.modules[0]) return len(self.modules[0]) def get_pwd(self, module): @@ -605,7 +607,7 @@ def get_pwd(self, module): try: i = self.modules[0].index(module) except Exception as inst: - logging.error("module %s not found: %s", module, inst) + LOG.error("module %s not found: %s", module, inst) return None return self.modules[1][i] diff --git a/virttest/staging/utils_koji.py b/virttest/staging/utils_koji.py index b24fe7e8bb..a97e0f2616 100644 --- a/virttest/staging/utils_koji.py +++ b/virttest/staging/utils_koji.py @@ -25,6 +25,8 @@ DEFAULT_KOJI_TAG = None +LOG = logging.getLogger('avocado.' 
+ __name__) + class KojiDownloadError(IOError): @@ -197,9 +199,8 @@ def _get(self, url, dst): break except Exception as e: last_error = str(e) - logging.error("Download failed: %s", last_error) - logging.error("Retrying after %s seconds...", - self.RETRY_STEP) + LOG.error("Download failed: %s", last_error) + LOG.error("Retrying after %s seconds...", self.RETRY_STEP) if os.path.isfile(dst): os.unlink(dst) time.sleep(self.RETRY_STEP) @@ -250,18 +251,17 @@ def is_command_valid(self): koji_command_ok = True if not os.path.isfile(self.command): - logging.error('Koji command "%s" is not a regular file', - self.command) + LOG.error('Koji command "%s" is not a regular file', self.command) koji_command_ok = False if not os.access(self.command, os.X_OK): - logging.warn('Koji command "%s" is not executable: this is ' - 'not fatal but indicates an unexpected situation', - self.command) + LOG.warn('Koji command "%s" is not executable: this is ' + 'not fatal but indicates an unexpected situation', + self.command) if self.command not in list(self.CONFIG_MAP.keys()): - logging.error('Koji command "%s" does not have a configuration ' - 'file associated to it', self.command) + LOG.error('Koji command "%s" does not have a configuration ' + 'file associated to it', self.command) koji_command_ok = False return koji_command_ok @@ -275,22 +275,21 @@ def is_config_valid(self): koji_config_ok = True if not os.path.isfile(self.config): - logging.error( - 'Koji config "%s" is not a regular file', self.config) + LOG.error('Koji config "%s" is not a regular file', self.config) koji_config_ok = False if not os.access(self.config, os.R_OK): - logging.error('Koji config "%s" is not readable', self.config) + LOG.error('Koji config "%s" is not readable', self.config) koji_config_ok = False config = ConfigParser.ConfigParser() config.read(self.config) basename = os.path.basename(self.command) if not config.has_section(basename): - logging.error('Koji configuration file "%s" does not have a ' - 
'section "%s", named after the base name of the ' - 'currently set koji command "%s"', self.config, - basename, self.command) + LOG.error('Koji configuration file "%s" does not have a ' + 'section "%s", named after the base name of the ' + 'currently set koji command "%s"', self.config, + basename, self.command) koji_config_ok = False return koji_config_ok diff --git a/virttest/staging/utils_memory.py b/virttest/staging/utils_memory.py index ae113b09f6..6b0a12b4e2 100644 --- a/virttest/staging/utils_memory.py +++ b/virttest/staging/utils_memory.py @@ -9,6 +9,8 @@ from virttest import kernel_interface +LOG = logging.getLogger('avocado.' + __name__) + # Returns total memory in kb def read_from_meminfo(key, session=None): @@ -358,7 +360,7 @@ def get_buddy_info(chunk_sizes, nodes="all", zones="all", session=None): re_buddyinfo += "(%s)" % "|".join(nodes.split()) if not re.findall(re_buddyinfo, buddy_info_content): - logging.warn("Can not find Nodes %s" % nodes) + LOG.warn("Can not find Nodes %s" % nodes) return None re_buddyinfo += ".*?zone\s+" if zones == "all": @@ -366,7 +368,7 @@ def get_buddy_info(chunk_sizes, nodes="all", zones="all", session=None): else: re_buddyinfo += "(%s)" % "|".join(zones.split()) if not re.findall(re_buddyinfo, buddy_info_content): - logging.warn("Can not find zones %s" % zones) + LOG.warn("Can not find zones %s" % zones) return None re_buddyinfo += "\s+([\s\d]+)" diff --git a/virttest/step_editor.py b/virttest/step_editor.py index e5d9e9fa03..9ec4d4f8b6 100755 --- a/virttest/step_editor.py +++ b/virttest/step_editor.py @@ -24,8 +24,10 @@ from virttest import ppm_utils -# General utilities +LOG = logging.getLogger('avocado.' 
+ __name__) + +# General utilities def corner_and_size_clipped(startpoint, endpoint, width, height): limits = width, height c0 = startpoint[:] @@ -551,8 +553,8 @@ def set_image(self, w, h, data): def set_image_from_file(self, filename): if not ppm_utils.image_verify_ppm_file(filename): - logging.warning("set_image_from_file: Warning: received invalid" - "screendump file") + LOG.warning("set_image_from_file: Warning: received invalid" + "screendump file") return self.clear_image() (w, h, data) = ppm_utils.image_read_from_ppm_file(filename) self.set_image(w, h, data) diff --git a/virttest/storage.py b/virttest/storage.py index 16e12adefd..d1af2710f8 100644 --- a/virttest/storage.py +++ b/virttest/storage.py @@ -32,6 +32,8 @@ from virttest import nvme from virttest import data_dir +LOG = logging.getLogger('avocado.' + __name__) + def preprocess_images(bindir, params, env): # Clone master image form vms. @@ -233,7 +235,7 @@ def get_image_filename(params, root_dir, basename=False): user, port, host_key_check) return get_image_filename_filesytem(params, root_dir, basename=basename) else: - logging.warn("image_name parameter not set.") + LOG.warn("image_name parameter not set.") def get_image_filename_filesytem(params, root_dir, basename=False): @@ -722,13 +724,12 @@ def copy_nfs_image(params, root_dir, basename=False): if(not os.path.isfile(dst) or utils_misc.get_image_info(dst)['lcounts'].lower() == "true"): source = get_image_filename(params, root_dir) - logging.debug("Checking for image available in image data " - "path - %s", source) + LOG.debug("Checking for image available in image data " + "path - %s", source) # check for image availability in images data directory if(os.path.isfile(source) and not utils_misc.get_image_info(source)['lcounts'].lower() == "true"): - logging.debug("Copying guest image from %s to %s", source, - dst) + LOG.debug("Copying guest image from %s to %s", source, dst) shutil.copy(source, dst) else: raise exceptions.TestSetupFail("Guest image 
is unavailable" @@ -874,8 +875,8 @@ def get_backup_set(filename, backup_dir, action, good): basename = os.path.basename(filename) bkp_set = [] if action not in ('backup', 'restore'): - logging.error("No backup sets for action: %s, state: %s", - action, good) + LOG.error("No backup sets for action: %s, state: %s", + action, good) return bkp_set if good: src = filename @@ -927,8 +928,8 @@ def get_backup_set(filename, backup_dir, action, good): s = os.statvfs(backup_dir) image_dir_free_disk_size = s.f_bavail * s.f_bsize - logging.info("backup image size: %d, available size: %d.", - backup_size, image_dir_free_disk_size) + LOG.info("backup image size: %d, available size: %d.", + backup_size, image_dir_free_disk_size) if not self.is_disk_size_enough(backup_size, image_dir_free_disk_size): return @@ -947,8 +948,7 @@ def get_backup_set(filename, backup_dir, action, good): for src, dst in backup_set: if action == 'backup' and skip_existing and os.path.exists(dst): - logging.debug("Image backup %s already exists, skipping...", - dst) + LOG.debug("Image backup %s already exists, skipping...", dst) continue backup_func(src, dst) @@ -964,11 +964,11 @@ def rm_backup_image(self): self.params.get("backup_dir", "")) image_name = os.path.join(backup_dir, "%s.backup" % os.path.basename(self.image_filename)) - logging.debug("Removing image file %s as requested", image_name) + LOG.debug("Removing image file %s as requested", image_name) if os.path.exists(image_name): os.unlink(image_name) else: - logging.warning("Image file %s not found", image_name) + LOG.warning("Image file %s not found", image_name) def save_image(self, params, filename, root_dir=None): """ @@ -1000,7 +1000,7 @@ def save_image(self, params, filename, root_dir=None): ) s = os.statvfs(root_dir) image_dir_free_disk_size = s.f_bavail * s.f_bsize - logging.info("Checking disk size on %s.", root_dir) + LOG.info("Checking disk size on %s.", root_dir) if not self.is_disk_size_enough(backup_size, image_dir_free_disk_size): 
return @@ -1012,11 +1012,11 @@ def is_disk_size_enough(required, available): """Check if available disk size is enough for the data copy.""" minimum_disk_free = 1.2 * required if available < minimum_disk_free: - logging.error("Free space: %s MB", (available / 1048576.)) - logging.error("Backup size: %s MB", (required / 1048576.)) - logging.error("Minimum free space acceptable: %s MB", - (minimum_disk_free / 1048576.)) - logging.error("Available disk space is not enough. Skipping...") + LOG.error("Free space: %s MB", (available / 1048576.)) + LOG.error("Backup size: %s MB", (required / 1048576.)) + LOG.error("Minimum free space acceptable: %s MB", + (minimum_disk_free / 1048576.)) + LOG.error("Available disk space is not enough. Skipping...") return False return True @@ -1029,18 +1029,18 @@ def copy_data_raw(src, dst): if os.path.exists(src): process.system("dd if=%s of=%s bs=4k conv=sync" % (src, dst)) else: - logging.info("No source %s, skipping dd...", src) + LOG.info("No source %s, skipping dd...", src) @staticmethod def copy_data_file(src, dst): """Copy for files.""" if os.path.isfile(src): - logging.debug("Copying %s -> %s", src, dst) + LOG.debug("Copying %s -> %s", src, dst) _dst = dst + '.part' shutil.copy(src, _dst) os.rename(_dst, dst) else: - logging.info("No source file %s, skipping copy...", src) + LOG.info("No source file %s, skipping copy...", src) @staticmethod def clone_image(params, vm_name, image_name, root_dir): @@ -1069,7 +1069,7 @@ def clone_image(params, vm_name, image_name, root_dir): image_fn = get_image_filename(image_params, root_dir) force_clone = params.get("force_image_clone", "no") if not os.path.exists(image_fn) or force_clone == "yes": - logging.info("Clone master image for vms.") + LOG.info("Clone master image for vms.") process.run(params.get("image_clone_command") % (m_image_fn, image_fn)) params["image_name_%s" % vm_name] = vm_image_name @@ -1094,11 +1094,11 @@ def rm_cloned_image(params, vm_name, image_name, root_dir): image_fn 
= get_image_filename(image_params, root_dir) - logging.debug("Removing vm specific image file %s", image_fn) + LOG.debug("Removing vm specific image file %s", image_fn) if os.path.exists(image_fn): process.run(params.get("image_remove_command") % (image_fn)) else: - logging.debug("Image file %s not found", image_fn) + LOG.debug("Image file %s not found", image_fn) class Rawdev(object): diff --git a/virttest/syslog_server.py b/virttest/syslog_server.py index 493a3a5489..478b3a6fd9 100644 --- a/virttest/syslog_server.py +++ b/virttest/syslog_server.py @@ -10,6 +10,8 @@ SYSLOG_PORT = 514 DEFAULT_FORMAT = '[AutotestSyslog (%s.%s)] %s' +LOG = logging.getLogger('avocado.' + __name__) + def set_default_format(message_format): ''' @@ -128,7 +130,7 @@ def log(self, data, message_format=None): pri = int(match.groups()[0]) msg = match.groups()[1] (facility_name, priority_name) = self.decodeFacilityPriority(pri) - logging.debug(message_format, facility_name, priority_name, msg) + LOG.debug(message_format, facility_name, priority_name, msg) class RequestHandlerTcp(RequestHandler): @@ -182,5 +184,5 @@ def syslog_server(address='', port=SYSLOG_PORT, if __name__ == '__main__': - logging.basicConfig(level=logging.DEBUG) + LOG.setLevel(logging.DEBUG) syslog_server() diff --git a/virttest/test_setup.py b/virttest/test_setup.py index b4c5bbd7f5..9017dc136a 100644 --- a/virttest/test_setup.py +++ b/virttest/test_setup.py @@ -44,6 +44,8 @@ ARCH = platform.machine() +LOG = logging.getLogger('avocado.' 
+ __name__) + class THPError(Exception): @@ -203,7 +205,7 @@ def do_cleanup(self): try: self.__setupers.pop().cleanup() except Exception as err: - logging.error(str(err)) + LOG.error(str(err)) errors.append(str(err)) return errors @@ -244,7 +246,7 @@ def __init__(self, test, params, session=None): self.file_list_str = [] # List of files that contain integer config values self.file_list_num = [] - logging.info("Scanning THP base path and recording base values") + LOG.info("Scanning THP base path and recording base values") for f in os.walk(self.thp_path): base_dir = f[0] if f[2]: @@ -252,8 +254,7 @@ def __init__(self, test, params, session=None): f_dir = kernel_interface.SysFS(os.path.join(base_dir, name), session=self.session) parameter = str(f_dir.sys_fs_value).strip("[]") - logging.debug("Reading path %s: %s", f_dir.sys_fs, - parameter) + LOG.debug("Reading path %s: %s", f_dir.sys_fs, parameter) try: # Verify if the path in question is writable f = open(f_dir.sys_fs, 'w') @@ -274,10 +275,9 @@ def set_env(self): Applies test configuration on the host. """ if self.test_config: - logging.info("Applying custom THP test configuration") + LOG.info("Applying custom THP test configuration") for path in list(self.test_config.keys()): - logging.info("Writing path %s: %s", path, - self.test_config[path]) + LOG.info("Writing path %s: %s", path, self.test_config[path]) cfg_f = kernel_interface.SysFS(path, session=self.session) cfg_f.sys_fs_value = self.test_config[path] @@ -290,8 +290,8 @@ def check_status_with_value(action_list, file_name): Check the status of khugepaged when set value to specify file. 
""" for (act, ret) in action_list: - logging.info("Writing path %s: %s, expected khugepage rc: %s ", - file_name, act, ret) + LOG.info("Writing path %s: %s, expected khugepage rc: %s ", + file_name, act, ret) khugepage = kernel_interface.SysFS(file_name, session=self.session) khugepage.sys_fs_value = act try: @@ -307,7 +307,7 @@ def check_status_with_value(action_list, file_name): raise THPKhugepagedError("Khugepaged still alive when" "transparent huge page is " "disabled") - logging.info("Testing khugepaged") + LOG.info("Testing khugepaged") for file_path in self.file_list_str: action_list = [] thp = kernel_interface.SysFS(file_path, session=self.session) @@ -353,10 +353,9 @@ def cleanup(self): """: Restore the host's original configuration after test """ - logging.info("Restoring host's original THP configuration") + LOG.info("Restoring host's original THP configuration") for path in self.original_config: - logging.info("Writing path %s: %s", path, - self.original_config[path]) + LOG.info("Writing path %s: %s", path, self.original_config[path]) p_file = kernel_interface.SysFS(path, session=self.session) p_file.sys_fs_value = str(self.original_config[path]) @@ -389,8 +388,8 @@ def __init__(self, params): self.expected_hugepage_size = int( params.get("expected_hugepage_size", 0)) except TypeError: - logging.warn("Invalid value 'expected_hugepage_size=%s'", - params.get("expected_hugepage_size")) + LOG.warn("Invalid value 'expected_hugepage_size=%s'", + params.get("expected_hugepage_size")) self.expected_hugepage_size = 0 self.hugepage_cpu_flag = params.get("hugepage_cpu_flag") self.hugepage_match_str = params.get("hugepage_match_str") @@ -515,9 +514,9 @@ def get_target_hugepages(self): available_hugepages = available_hugepages - decreased_pages if target_hugepages > available_hugepages: - logging.warn("This test requires more huge pages than we" - " currently have, we'll try to allocate the" - " biggest number the system can support.") + LOG.warn("This test 
requires more huge pages than we" + " currently have, we'll try to allocate the" + " biggest number the system can support.") target_hugepages = available_hugepages available_mem = available_hugepages * self.hugepage_size self.suggest_mem = int(available_mem // self.vms // 1024 - @@ -547,10 +546,10 @@ def get_multi_supported_hugepage_size(self): hugepage_size = [] if os.path.isdir(self.pool_path): for path_name in os.listdir(self.pool_path): - logging.debug("path name is %s" % path_name) + LOG.debug("path name is %s" % path_name) if os.path.isdir("%s/%s" % (self.pool_path, path_name)): hugepage_size.append(path_name.split('-')[1][:-2]) - logging.debug(path_name.split('-')[1][:-2]) + LOG.debug(path_name.split('-')[1][:-2]) return hugepage_size else: raise ValueError("Root hugepage control sysfs directory %s did not" @@ -646,8 +645,8 @@ def set_hugepages(self): raise ValueError("Cannot set the kernel hugepage setting " "to the target value of %d hugepages." % self.target_hugepages) - logging.debug("Successfully set %s large memory pages on host ", - self.target_hugepages) + LOG.debug("Successfully set %s large memory pages on host ", + self.target_hugepages) @error_context.context_aware def mount_hugepage_fs(self): @@ -665,15 +664,15 @@ def mount_hugepage_fs(self): process.system(cmd) def setup(self): - logging.debug("Number of VMs this test will use: %d", self.vms) - logging.debug("Amount of memory used by each vm: %s", self.mem) - logging.debug("System setting for large memory page size: %s", - self.hugepage_size) + LOG.debug("Number of VMs this test will use: %d", self.vms) + LOG.debug("Amount of memory used by each vm: %s", self.mem) + LOG.debug("System setting for large memory page size: %s", + self.hugepage_size) if self.over_commit.proc_fs_value > 0: - logging.debug("Number of overcommit large memory pages will be set" - " for this test: %s", self.over_commit.proc_fs_value) - logging.debug("Number of large memory pages needed for this test: %s", - 
self.target_hugepages) + LOG.debug("Number of overcommit large memory pages will be set" + " for this test: %s", self.over_commit.proc_fs_value) + LOG.debug("Number of large memory pages needed for this test: %s", + self.target_hugepages) # Drop caches to clean some usable memory with open("/proc/sys/vm/drop_caches", "w") as caches: caches.write('3') @@ -697,7 +696,7 @@ def cleanup(self): process.system("echo 0 > %s" % self.kernel_hp_file, shell=True) self.over_commit.proc_fs_value = 0 self.ext_hugepages_surp = utils_memory.get_num_huge_pages_surp() - logging.debug("Hugepage memory successfully deallocated") + LOG.debug("Hugepage memory successfully deallocated") class KSMConfig(object): @@ -892,8 +891,8 @@ def _start_dhcp_server(self): data_dir.get_tmp_dir(), 'r').read()) except ValueError: raise PrivateBridgeError(self.brname) - logging.debug("Started internal DHCP server with PID %s", - self.dhcp_server_pid) + LOG.debug("Started internal DHCP server with PID %s", + self.dhcp_server_pid) def _verify_bridge(self): if not self._br_exist(): @@ -911,7 +910,7 @@ def setup(self): self._bring_bridge_down() self._remove_bridge() if not self._br_exist(): - logging.info("Configuring KVM test private bridge %s", self.brname) + LOG.info("Configuring KVM test private bridge %s", self.brname) try: self._add_bridge() except Exception: @@ -978,11 +977,11 @@ def _remove_bridge(self): try: self.bridge_manager.del_bridge(self.brname) except: - logging.warning("Failed to delete private bridge") + LOG.warning("Failed to delete private bridge") def cleanup(self): if self._br_exist() and not self._br_in_use(): - logging.debug( + LOG.debug( "Cleaning up KVM test private bridge %s", self.brname) self._stop_dhcp_server() self._disable_nat() @@ -1195,29 +1194,29 @@ def _release_dev(self, pci_id): drv_path = os.path.join(base_dir, "devices/%s/driver" % pci_id) if self.device_driver in os.readlink(drv_path): error_context.context( - "Release device %s to host" % pci_id, logging.info) + 
"Release device %s to host" % pci_id, LOG.info) stub_path = os.path.join(base_dir, "drivers/%s" % self.device_driver) cmd = "echo '%s' > %s/unbind" % (pci_id, stub_path) - logging.info("Run command in host: %s" % cmd) + LOG.info("Run command in host: %s" % cmd) try: output = None output = process.run(cmd, shell=True, timeout=60).stdout_text except Exception: msg = "Command %s fail with output %s" % (cmd, output) - logging.error(msg) + LOG.error(msg) return False drivers_probe = os.path.join(base_dir, "drivers_probe") cmd = "echo '%s' > %s" % (pci_id, drivers_probe) - logging.info("Run command in host: %s" % cmd) + LOG.info("Run command in host: %s" % cmd) try: output = None output = process.run(cmd, shell=True, timeout=60).stdout_text except Exception: msg = "Command %s fail with output %s" % (cmd, output) - logging.error(msg) + LOG.error(msg) return False if self.is_binded_to_stub(pci_id): return False @@ -1328,12 +1327,12 @@ def get_vf_devs(self, devices=None): vf_ids = [] if not devices: devices = self.devices - logging.info("devices = %s", devices) + LOG.info("devices = %s", devices) for device in devices: if device['type'] == 'vf': name = device.get('name', None) vf_id = self._get_vf_pci_id(name) - logging.info("vf_id = %s", vf_id) + LOG.info("vf_id = %s", vf_id) if not vf_id: continue vf_ids.append(vf_id) @@ -1379,9 +1378,9 @@ def get_devs(self, devices=None): if isinstance(devices, dict): devices = [devices] pf_ids = self.get_pf_devs(devices) - logging.info("pf_ids = %s", pf_ids) + LOG.info("pf_ids = %s", pf_ids) vf_ids = self.get_vf_devs(devices) - logging.info("vf_ids = %s", vf_ids) + LOG.info("vf_ids = %s", vf_ids) vf_ids.sort() dev_ids = [] @@ -1402,7 +1401,7 @@ def get_devs(self, devices=None): "devices/%s/driver" % dev_id)) self.dev_unbind_drivers[dev_id] = unbind_driver if len(dev_ids) != len(devices): - logging.error("Did not get enough PCI Device") + LOG.error("Did not get enough PCI Device") return dev_ids def get_vfs_count(self): @@ -1414,7 
+1413,7 @@ def get_vfs_count(self): # that if the host has more than one 82576 card. PCI_ID? cmd = "lspci | grep '%s' | wc -l" % self.vf_filter_re vf_num = int(process.run(cmd, shell=True, verbose=False).stdout_text) - logging.info("Found %s vf in host", vf_num) + LOG.info("Found %s vf in host", vf_num) return vf_num def get_same_group_devs(self, pci_id): @@ -1465,8 +1464,7 @@ def assign_static_ip(self): ifname = utils_misc.get_interface_from_pci_id(PF) ip_assign = "ifconfig %s %s netmask %s up" % ( ifname, ip_addr, self.net_mask) - logging.info("assign IP to PF device %s : %s", PF, - ip_assign) + LOG.info("assign IP to PF device %s : %s", PF, ip_assign) cmd = process.system(ip_assign, shell=True, ignore_status=True) if cmd: raise exceptions.TestSetupFail("Failed to assign IP : %s" @@ -1491,7 +1489,7 @@ def get_controller_type(self): cmd = "lspci | grep '%s'| grep -o '\s[A-Z].*:\s'" % self.pf_filter_re return process.run(cmd, shell=True).stdout_text.split("\n")[-1].strip().strip(':') except IndexError: - logging.debug("Unable to fetch the controller details") + LOG.debug("Unable to fetch the controller details") return None def is_binded_to_stub(self, full_id): @@ -1522,8 +1520,8 @@ def set_linkvf_ib(self): cmd = "ls -R /sys/class/infiniband/*/device/sriov/*" dev = process.run(cmd, shell=True).stdout_text except process.CmdError as detail: - logging.error("No VF's found for set-up, command-failed as: %s", - str(detail)) + LOG.error("No VF's found for set-up, command-failed as: %s", + str(detail)) return False for line in dev.split('\n\n'): key = line.split(':')[0] @@ -1535,7 +1533,7 @@ def set_linkvf_ib(self): value[attr] = self.generate_ib_port_id() pids[key] = value for key in pids.keys(): - logging.info("The key %s corresponds to %s", key, pids[key]) + LOG.info("The key %s corresponds to %s", key, pids[key]) for subkey in pids[key].keys(): status = process.system("echo %s > %s" % (pids[key][subkey], os.path.join(key, subkey)), shell=True) @@ -1553,7 +1551,7 @@ 
def set_vf(self, pci_pf, vf_no="0"): """ cmd = "echo %s > /sys/bus/pci/devices/%s/sriov_numvfs" % (vf_no, pci_pf) if process.system(cmd, shell=True, ignore_status=True): - logging.debug("Failed to set %s vfs in %s", vf_no, pci_pf) + LOG.debug("Failed to set %s vfs in %s", vf_no, pci_pf) return False # When the VFs loaded on a PF are > 10 [I have tested till 63 which is # max VF supported by Mellanox CX4 cards],VFs probe on host takes bit @@ -1578,13 +1576,13 @@ def remove_driver(self, driver=None): cmd = "modprobe -r %s" % driver if ARCH == 'ppc64le' and driver == 'mlx5_core': pf_devices = self.get_pf_ids() - logging.info("Mellanox PF devices '%s'", pf_devices) + LOG.info("Mellanox PF devices '%s'", pf_devices) for PF in pf_devices: if not self.set_vf(PF): return False cmd = "rmmod mlx5_ib;modprobe -r mlx5_core;modprobe mlx5_ib" if process.system(cmd, ignore_status=True, shell=True): - logging.debug("Failed to remove driver: %s", driver) + LOG.debug("Failed to remove driver: %s", driver) return False return True @@ -1599,10 +1597,10 @@ def modprobe_driver(self, driver=None): if not driver: driver = self.driver msg = "Loading the driver '%s'" % driver - error_context.context(msg, logging.info) + error_context.context(msg, LOG.info) cmd = "modprobe %s" % driver if process.system(cmd, ignore_status=True, shell=True): - logging.debug("Failed to modprobe driver: %s", driver) + LOG.debug("Failed to modprobe driver: %s", driver) return False return True @@ -1620,13 +1618,13 @@ def sr_iov_setup(self): # Check if the host support interrupt remapping. 
On PowerPC interrupt # remapping is not required error_context.context("Set up host env for PCI assign test", - logging.info) + LOG.info) if ARCH != 'ppc64le': kvm_re_probe = True dmesg = process.run("dmesg", verbose=False).stdout_text ecap = re.findall("ecap\s+(.\w+)", dmesg) if not ecap: - logging.error("Fail to check host interrupt remapping support") + LOG.error("Fail to check host interrupt remapping support") else: if int(ecap[0], 16) & 8 == 8: # host support interrupt remapping. @@ -1639,11 +1637,11 @@ def sr_iov_setup(self): if kvm_re_probe and self.auai_path: cmd = "echo Y > %s" % self.auai_path error_context.context("enable PCI passthrough with '%s'" % cmd, - logging.info) + LOG.info) try: process.system(cmd) except Exception: - logging.debug( + LOG.debug( "Can not enable the interrupt remapping support") lnk = "/sys/module/vfio_iommu_type1/parameters/allow_unsafe_interrupts" if self.device_driver == "vfio-pci": @@ -1655,7 +1653,7 @@ def sr_iov_setup(self): if not ecap or (int(ecap[0], 16) & 8 != 8): cmd = "echo Y > %s" % lnk error_context.context("enable PCI passthrough with '%s'" % cmd, - logging.info) + LOG.info) process.run(cmd) else: if self.device_driver == "vfio-pci": @@ -1701,8 +1699,8 @@ def sr_iov_setup(self): ignore_status=True, verbose=False).stdout_text file_name = "host_dmesg_after_load_%s.txt" % self.driver - logging.info("Log dmesg after loading '%s' to '%s'.", self.driver, - file_name) + LOG.info("Log dmesg after loading '%s' to '%s'.", self.driver, + file_name) utils_misc.log_line(file_name, dmesg) self.setup = None return True @@ -1720,18 +1718,18 @@ def sr_iov_cleanup(self): # Check if the host support interrupt remapping. 
On PowerPC interrupt # remapping is not required error_context.context( - "Clean up host env after PCI assign test", logging.info) + "Clean up host env after PCI assign test", LOG.info) if ARCH != 'ppc64le': if self.kvm_params is not None: for kvm_param, value in list(self.kvm_params.items()): if open(kvm_param, "r").read().strip() != value: cmd = "echo %s > %s" % (value, kvm_param) - logging.info("Write '%s' to '%s'", value, kvm_param) + LOG.info("Write '%s' to '%s'", value, kvm_param) try: process.system(cmd) except Exception: - logging.error("Failed to write '%s' to '%s'", value, - kvm_param) + LOG.error("Failed to write '%s' to '%s'", value, + kvm_param) re_probe = False # if lsmod lists the driver then remove it to clean up @@ -1763,14 +1761,14 @@ def request_devs(self, devices=None): base_dir = "/sys/bus/pci" stub_path = os.path.join(base_dir, "drivers/%s" % self.device_driver) self.pci_ids = self.get_devs(devices) - logging.info("The following pci_ids were found: %s", self.pci_ids) + LOG.info("The following pci_ids were found: %s", self.pci_ids) requested_pci_ids = [] # Setup all devices specified for assignment to guest for p_id in self.pci_ids: if self.device_driver == "vfio-pci": pci_ids = self.get_same_group_devs(p_id) - logging.info( + LOG.info( "Following devices are in same group: %s", pci_ids) else: pci_ids = [p_id] @@ -1791,7 +1789,7 @@ def request_devs(self, devices=None): # Judge whether the device driver has been binded to stub if not self.is_binded_to_stub(pci_id): error_context.context("Bind device %s to stub" % pci_id, - logging.info) + LOG.info) # On Power architecture using short id would result in # pci device lookup failure while writing vendor id to # stub_new_id/stub_remove_id. 
Instead we should be using @@ -1813,21 +1811,20 @@ def request_devs(self, devices=None): for content, f_name in info_write_to_files: try: - logging.info("Write '%s' to file '%s'", content, - f_name) + LOG.info("Write '%s' to file '%s'", content, f_name) with open(f_name, 'w') as fn: fn.write(content) except IOError: - logging.debug("Failed to write %s to file %s", - content, f_name) + LOG.debug("Failed to write %s to file %s", + content, f_name) continue if not self.is_binded_to_stub(pci_id): - logging.error( + LOG.error( "Binding device %s to stub failed", pci_id) continue else: - logging.debug("Device %s already binded to stub", pci_id) + LOG.debug("Device %s already binded to stub", pci_id) requested_pci_ids.append(p_id) return requested_pci_ids @@ -1840,10 +1837,10 @@ def release_devs(self): try: for pci_id in self.dev_drivers: if not self._release_dev(pci_id): - logging.error( + LOG.error( "Failed to release device %s to host", pci_id) else: - logging.info("Released device %s successfully", pci_id) + LOG.info("Released device %s successfully", pci_id) if self.cleanup: self.sr_iov_cleanup() self.devices = [] @@ -2047,7 +2044,7 @@ def _get_one_rule(action_lookup_list, lookup_oper): # replace 'RULE' with rules in polkit template string self.template = template.replace('RULE', rules) - logging.debug("The polkit config rule is:\n%s" % self.template) + LOG.debug("The polkit config rule is:\n%s" % self.template) # write the config file genio.write_file(self.polkit_rules_path, self.template) @@ -2067,7 +2064,7 @@ def setup(self): cmd = "id %s" % self.user if process.system(cmd, ignore_status=True): self.params['add_polkit_user'] = 'yes' - logging.debug("Create new user '%s' on host." % self.user) + LOG.debug("Create new user '%s' on host." 
% self.user) cmd = "useradd %s" % self.user process.system(cmd, ignore_status=True) self._set_polkit_conf() @@ -2087,7 +2084,7 @@ def cleanup(self): if self.user.count('EXAMPLE'): self.user = 'testacl' if self.params.get('add_polkit_user'): - logging.debug("Delete the created user '%s'." % self.user) + LOG.debug("Delete the created user '%s'." % self.user) cmd = "userdel -r %s" % self.user process.system(cmd, ignore_status=True) del self.params['add_polkit_user'] @@ -2143,7 +2140,7 @@ def startup(self, socket): msg = "Unable to start egd.pl on localhost '%s'" % details raise EGDConfigError(msg) pid = self.get_pid(socket) - logging.info("egd.pl started as pid: %s" % pid) + LOG.info("egd.pl started as pid: %s" % pid) return pid def install(self): @@ -2192,7 +2189,7 @@ def setup(self): def cleanup(self): try: for pid in self.env.data["egd_pids"]: - logging.info("Stop egd.pl(%s)" % pid) + LOG.info("Stop egd.pl(%s)" % pid) utils_misc.signal_pid(pid, 15) def _all_killed(): @@ -2203,7 +2200,7 @@ def _all_killed(): # wait port released by egd.pl wait.wait_for(_all_killed, timeout=60) except OSError: - logging.warn("egd.pl is running") + LOG.warn("egd.pl is running") class StraceQemu(object): @@ -2274,7 +2271,7 @@ def stop(self): while self.env.get("strace_processes"): pid = self.env.get("strace_processes").pop() if process.pid_exists(pid): - logging.info("stop strace process: %s" % pid) + LOG.info("stop strace process: %s" % pid) process.kill_process_tree(pid) self._compress_log() @@ -2338,14 +2335,13 @@ def switch_indep_threads_mode(state="Y", params=None): "to %s: %s" % (state, cmd_output[1])) else: - logging.debug("indep_thread_mode turned %s successfully " - "in remote server", state) + LOG.debug("indep_thread_mode turned %s successfully " + "in remote server", state) else: try: utils_misc.verify_running_as_root() process.run(cmd, verbose=True, shell=True) - logging.debug("indep_thread_mode turned %s successfully", - state) + LOG.debug("indep_thread_mode turned %s 
successfully", state) except process.CmdError as info: raise exceptions.TestSetupFail("Unable to turn " "indep_thread_mode to " @@ -2395,13 +2391,12 @@ def switch_smt(state="off", params=None): raise exceptions.TestSetupFail("Unable to turn %s SMT :%s" % (state, cmd_output[1])) else: - logging.debug("SMT turned %s successfully in remote server", - state) + LOG.debug("SMT turned %s successfully in remote server", state) else: try: utils_misc.verify_running_as_root() process.run(cmd, verbose=True, shell=True) - logging.debug("SMT turned %s successfully", state) + LOG.debug("SMT turned %s successfully", state) except process.CmdError as info: raise exceptions.TestSetupFail("Unable to turn %s SMT :%s" % (state, info)) @@ -2466,7 +2461,7 @@ def enable(self): "libvirtd.log") # param used during libvirtd cleanup self.test.params["libvirtd_debug_file"] = self.log_file - logging.debug("libvirtd debug log stored in: %s", self.log_file) + LOG.debug("libvirtd debug log stored in: %s", self.log_file) for value in self.daemons_dict.values(): if os.path.isfile(value.get("backupfile")): @@ -2515,7 +2510,7 @@ def _set(self): # get default ulimit values in tuple (soft, hard) self.ulimit[key] = resource.getrlimit(self.ulimit_options[key]) - logging.info("Setting ulimit %s to %s." % (key, set_value)) + LOG.info("Setting ulimit %s to %s." % (key, set_value)) if set_value == "ulimited": set_value = resource.RLIM_INFINITY elif set_value.isdigit(): @@ -2531,7 +2526,7 @@ def _set(self): def _restore(self): for key in self.ulimit: - logging.info("Setting ulimit %s back to its default." % key) + LOG.info("Setting ulimit %s back to its default." 
% key) resource.setrlimit(self.ulimit_options[key], self.ulimit[key]) def setup(self): diff --git a/virttest/tests/unattended_install.py b/virttest/tests/unattended_install.py index eb962a4375..420813fa37 100644 --- a/virttest/tests/unattended_install.py +++ b/virttest/tests/unattended_install.py @@ -50,6 +50,8 @@ _syslog_server_thread = None _syslog_server_thread_event = None +LOG = logging.getLogger('avocado.' + __name__) + def start_auto_content_server_thread(port, path): global _url_auto_content_server_thread @@ -129,8 +131,8 @@ def get_answer_file_path(self, filename): def close(self): os.chmod(self.path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) - logging.debug("unattended http server %s successfully created", - self.get_url()) + LOG.debug("unattended http server %s successfully created", + self.get_url()) class UnattendedInstallConfig(object): @@ -326,7 +328,7 @@ def get_driver_hardware_id(self, driver, run_cmd=True): hwid = '^&'.join(hwid.split('&')) return hwid except Exception as e: - logging.error("Fail to get hardware id with exception: %s" % e) + LOG.error("Fail to get hardware id with exception: %s" % e) @error_context.context_aware def update_driver_hardware_id(self, driver): @@ -453,9 +455,9 @@ def answer_kickstart(self, answer_path): if self.params.get("cmd_only_use_disk"): insert_info = self.params.get("cmd_only_use_disk") + '\n' contents += insert_info - logging.debug("Unattended install contents:") + LOG.debug("Unattended install contents:") for line in contents.splitlines(): - logging.debug(line) + LOG.debug(line) with open(answer_path, 'w') as answer_file: answer_file.write(contents) @@ -466,8 +468,8 @@ def answer_windows_ini(self, answer_path): if self.cdkey: parser.set('UserData', 'ProductKey', self.cdkey) else: - logging.error("Param 'cdkey' required but not specified for " - "this unattended installation") + LOG.error("Param 'cdkey' required but not specified for " + "this unattended installation") # Now, 
replacing the virtio network driver path, under double quotes if self.install_virtio == 'yes': @@ -518,9 +520,9 @@ def answer_windows_ini(self, answer_path): fp = open(answer_path, 'r') contents = fp.read() fp.close() - logging.debug("Unattended install contents:") + LOG.debug("Unattended install contents:") for line in contents.splitlines(): - logging.debug(line) + LOG.debug(line) def answer_windows_xml(self, answer_path): doc = xml.dom.minidom.parse(self.unattended_file) @@ -537,8 +539,8 @@ def answer_windows_xml(self, answer_path): assert key_text.nodeType == doc.TEXT_NODE key_text.data = self.cdkey else: - logging.error("Param 'cdkey' required but not specified for " - "this unattended installation") + LOG.error("Param 'cdkey' required but not specified for " + "this unattended installation") # Now, replacing the virtio driver paths or removing the entire # component PnpCustomizationsWinPE Element Node @@ -609,9 +611,9 @@ def answer_windows_xml(self, answer_path): command_line_text.data = t contents = doc.toxml() - logging.debug("Unattended install contents:") + LOG.debug("Unattended install contents:") for line in contents.splitlines(): - logging.debug(line) + LOG.debug(line) fp = open(answer_path, 'w') doc.writexml(fp) @@ -622,9 +624,9 @@ def answer_suse_xml(self, answer_path): doc = xml.dom.minidom.parse(self.unattended_file) contents = doc.toxml() - logging.debug("Unattended install contents:") + LOG.debug("Unattended install contents:") for line in contents.splitlines(): - logging.debug(line) + LOG.debug(line) fp = open(answer_path, 'w') doc.writexml(fp) @@ -638,7 +640,7 @@ def preseed_initrd(self): way to get fully automated setup without resorting to kernel params is to add a preseed.cfg file at the root of the initrd image. 
""" - logging.debug("Remastering initrd.gz file with preseed file") + LOG.debug("Remastering initrd.gz file with preseed file") dest_fname = 'preseed.cfg' remaster_path = os.path.join(self.image_path, "initrd_remaster") if not os.path.isdir(remaster_path): @@ -660,9 +662,9 @@ def preseed_initrd(self): process.run("rm -rf initrd_remaster", verbose=DEBUG) contents = open(self.unattended_file).read() - logging.debug("Unattended install contents:") + LOG.debug("Unattended install contents:") for line in contents.splitlines(): - logging.debug(line) + LOG.debug(line) def set_unattended_param_in_kernel(self, unattended_file_url): ''' @@ -919,7 +921,7 @@ def setup_cdrom(self): utils_disk.cleanup(self.cdrom_cd1_mount) elif ((self.vm.driver_type == 'xen') and (self.params.get('hvm_or_pv') == 'pv')): - logging.debug("starting unattended content web server") + LOG.debug("starting unattended content web server") self.url_auto_content_port = utils_misc.find_free_port(8100, 8199, @@ -1003,8 +1005,7 @@ def setup_url(self): None): if os.path.isfile(self.kernel): os.remove(self.kernel) - logging.info('Downloading %s -> %s', url_kernel, - self.image_path) + LOG.info('Downloading %s -> %s', url_kernel, self.image_path) download.get_file(url_kernel, os.path.join(self.image_path, os.path.basename(self.kernel))) @@ -1012,8 +1013,7 @@ def setup_url(self): None): if os.path.isfile(self.initrd): os.remove(self.initrd) - logging.info('Downloading %s -> %s', url_initrd, - self.image_path) + LOG.info('Downloading %s -> %s', url_initrd, self.image_path) download.get_file(url_initrd, os.path.join(self.image_path, os.path.basename(self.initrd))) @@ -1028,12 +1028,12 @@ def setup_url(self): self.kernel_params + " ip=dhcp install=" + self.url) elif self.vm_type == 'libvirt': - logging.info("Not downloading vmlinuz/initrd.img from %s, " - "letting virt-install do it instead") + LOG.info("Not downloading vmlinuz/initrd.img from %s, " + "letting virt-install do it instead") else: - logging.info("No 
action defined/needed for the current virt " - "type: '%s'" % self.vm_type) + LOG.info("No action defined/needed for the current virt " + "type: '%s'" % self.vm_type) def setup_nfs(self): """ @@ -1076,7 +1076,7 @@ def setup(self): Uses an appropriate strategy according to each install model. """ - logging.info("Starting unattended install setup") + LOG.info("Starting unattended install setup") if DEBUG: utils_misc.display_attributes(self) @@ -1102,8 +1102,8 @@ def setup(self): if self.floppy or self.cdrom_unattended: self.setup_boot_disk() if self.params.get("store_boot_disk") == "yes": - logging.info("Storing the boot disk to result directory " - "for further debug") + LOG.info("Storing the boot disk to result directory " + "for further debug") src_dir = self.floppy or self.cdrom_unattended dst_dir = self.results_dir shutil.copy(src_dir, dst_dir) @@ -1145,8 +1145,8 @@ def terminate_syslog_server_thread(): def copy_file_from_nfs(src, dst, mount_point, image_name): - logging.info("Test failed before the install process start." - " So just copy a good image from nfs for following tests.") + LOG.info("Test failed before the install process start." 
+ " So just copy a good image from nfs for following tests.") utils_misc.mount(src, mount_point, "nfs", perm="ro") image_src = utils_misc.get_path(mount_point, image_name) shutil.copy(image_src, dst) @@ -1169,7 +1169,7 @@ def string_in_serial_log(serial_log_file_path, string): serial_log_msg = serial_log_file.read() if string in serial_log_msg: - logging.debug("Message read from serial console log: %s", string) + LOG.debug("Message read from serial console log: %s", string) return True else: return False @@ -1198,7 +1198,7 @@ def attempt_to_log_useful_files(test, vm): try: console.cmd("true") except Exception as details: - logging.info("Skipping log_useful_files #%s: %s", i, details) + LOG.info("Skipping log_useful_files #%s: %s", i, details) continue failures = False for path_glob in ["/*.log", "/tmp/*.log", "/var/tmp/*.log", "/var/log/messages"]: @@ -1225,12 +1225,12 @@ def attempt_to_log_useful_files(test, vm): with open(dst, 'w') as fd_dst: try: fd_dst.write(console.cmd("cat %s" % path)) - logging.info('Attached "%s" log file from guest ' - 'at "%s"', path, base_dst_dir) + LOG.info('Attached "%s" log file from guest ' + 'at "%s"', path, base_dst_dir) except Exception as details: - logging.warning("Unknown exception while " - "attempt_to_log_useful_files(): " - "%s", details) + LOG.warning("Unknown exception while " + "attempt_to_log_useful_files(): " + "%s", details) fd_dst.write("Unknown exception while getting " "content: %s" % details) failures = True @@ -1240,12 +1240,11 @@ def attempt_to_log_useful_files(test, vm): with open(dst, 'w') as fd_dst: try: fd_dst.write(console.cmd(cmd)) - logging.info('Attached "%s" cmd output at "%s"', - cmd, dst) + LOG.info('Attached "%s" cmd output at "%s"', cmd, dst) except Exception as details: - logging.warning("Unknown exception while " - "attempt_to_log_useful_files(): " - "%s", details) + LOG.warning("Unknown exception while " + "attempt_to_log_useful_files(): " + "%s", details) fd_dst.write("Unknown exception while 
getting " "cmd output: %s" % details) failures = True @@ -1274,13 +1273,13 @@ def copy_images(): "Copy image from NFS after installation failure") image_copy_on_error = params.get("image_copy_on_error", "no") if image_copy_on_error == "yes": - logging.info("Running image_copy to copy pristine image from NFS.") + LOG.info("Running image_copy to copy pristine image from NFS.") try: error_context.context( "Quit qemu-kvm before copying guest image") vm.monitor.quit() except Exception as e: - logging.warn(e) + LOG.warn(e) from virttest import utils_test error_context.context("Copy image from NFS Server") image = params.get("images").split()[0] @@ -1303,7 +1302,7 @@ def copy_images(): dd_cmd = "dd if=/dev/zero of=%s bs=1M count=1" % dst txt = "iscsi used, need destroy data in %s" % dst txt += " by command: %s" % dd_cmd - logging.info(txt) + LOG.info(txt) process.system(dd_cmd) image_name = os.path.basename(dst) mount_point = params.get("dst_dir") @@ -1316,8 +1315,8 @@ def copy_images(): for media in params.get("copy_to_local", "").split(): media_path = params.get(media) if not media_path: - logging.warn("Media '%s' is not available, will not " - "be copied into local directory", media) + LOG.warn("Media '%s' is not available, will not " + "be copied into local directory", media) continue media_name = os.path.basename(media_path) nfs_link = utils_misc.get_path(vt_data_dir, media_path) @@ -1328,7 +1327,7 @@ def copy_images(): if file_hash == expected_hash: continue msg = "Copy %s to %s in local host." % (media_name, local_link) - error_context.context(msg, logging.info) + error_context.context(msg, LOG.info) download.get_file(nfs_link, local_link) params[media] = local_link @@ -1354,8 +1353,8 @@ def copy_images(): mig_timeout = float(params.get("mig_timeout", "3600")) mig_protocol = params.get("migration_protocol", "tcp") - logging.info("Waiting for installation to finish. 
Timeout set to %d s " - "(%d min)", install_timeout, install_timeout // 60) + LOG.info("Waiting for installation to finish. Timeout set to %d s " + "(%d min)", install_timeout, install_timeout // 60) error_context.context("waiting for installation to finish") start_time = time.time() @@ -1364,8 +1363,8 @@ def copy_images(): if log_file is None: raise virt_vm.VMConfigMissingError(vm.name, "serial") - logging.debug("Monitoring serial console log for completion message: %s", - log_file) + LOG.debug("Monitoring serial console log for completion message: %s", + log_file) serial_read_fails = 0 # As the install process start, we may need collect information from @@ -1395,7 +1394,7 @@ def copy_images(): post_finish_str_found = string_in_serial_log( log_file, post_finish_str) except IOError: - logging.warn("Could not read final serial log file") + LOG.warn("Could not read final serial log file") else: if install_error_str_found: raise exceptions.TestFail(install_error_exception_str) @@ -1416,12 +1415,12 @@ def copy_images(): vm.start() break except: - logging.warn("Failed to start unattended install " - "image workaround reboot kickstart " - "parameter bug") + LOG.warn("Failed to start unattended install " + "image workaround reboot kickstart " + "parameter bug") # Print out the original exception before copying images. 
- logging.error(e) + LOG.error(e) copy_images() raise e else: @@ -1446,7 +1445,7 @@ def copy_images(): # Only make noise after several failed reads serial_read_fails += 1 if serial_read_fails > 10: - logging.warn( + LOG.warn( "Cannot read from serial log file after %d tries", serial_read_fails) else: @@ -1473,13 +1472,13 @@ def copy_images(): else: time.sleep(1) else: - logging.warn("Timeout elapsed while waiting for install to finish ") + LOG.warn("Timeout elapsed while waiting for install to finish ") attempt_to_log_useful_files(test, vm) copy_images() raise exceptions.TestFail("Timeout elapsed while waiting for install to " "finish") - logging.debug('cleaning up threads and mounts that may be active') + LOG.debug('cleaning up threads and mounts that may be active') global _url_auto_content_server_thread global _url_auto_content_server_thread_event if _url_auto_content_server_thread is not None: @@ -1503,18 +1502,18 @@ def copy_images(): _syslog_server_thread = None time_elapsed = time.time() - start_time - logging.info("Guest reported successful installation after %d s (%d min)", - time_elapsed, time_elapsed // 60) + LOG.info("Guest reported successful installation after %d s (%d min)", + time_elapsed, time_elapsed // 60) if params.get("shutdown_cleanly", "yes") == "yes": shutdown_cleanly_timeout = int(params.get("shutdown_cleanly_timeout", 120)) - logging.info("Wait for guest to shutdown cleanly") + LOG.info("Wait for guest to shutdown cleanly") if params.get("medium", "cdrom") == "import": vm.shutdown() try: if utils_misc.wait_for(vm.is_dead, shutdown_cleanly_timeout, 1, 1): - logging.info("Guest managed to shutdown cleanly") + LOG.info("Guest managed to shutdown cleanly") except qemu_monitor.MonitorError as e: - logging.warning("Guest apparently shut down, but got a " - "monitor error: %s", e) + LOG.warning("Guest apparently shut down, but got a " + "monitor error: %s", e) diff --git a/virttest/unittests/test_utils_test__init__.py 
b/virttest/unittests/test_utils_test__init__.py index f8f17ad4b8..f846684c3c 100644 --- a/virttest/unittests/test_utils_test__init__.py +++ b/virttest/unittests/test_utils_test__init__.py @@ -12,6 +12,8 @@ check_kernel_cmdline_mock = mock.MagicMock(return_value=["3", None]) +LOG = logging.getLogger('avocado.' + __name__) + @mock.patch('virttest.utils_package.package_install') @mock.patch.object(utils_test, 'check_kernel_cmdline', check_kernel_cmdline_mock) @@ -50,7 +52,7 @@ def test_cmd_fail(self, *mocks): with self.assertRaises(exceptions.TestError) as e: update_boot_option(self.vm, args_added="3", need_reboot=False) self.assertIsNotNone(e.exception.args[0]) - logging.error.assert_called_with(self.some_error_message) + LOG.error.assert_called_with(self.some_error_message) if __name__ == '__main__': diff --git a/virttest/utils_backup.py b/virttest/utils_backup.py index 3a3efd647f..2096f0fa92 100644 --- a/virttest/utils_backup.py +++ b/virttest/utils_backup.py @@ -17,6 +17,9 @@ from virttest.libvirt_xml.checkpoint_xml import CheckpointXML +LOG = logging.getLogger('avocado.' 
+ __name__) + + class BackupError(Exception): """ @@ -195,7 +198,7 @@ def pull_incremental_backup_to_file(nbd_params, target_path, nbd_export = nbd_params.get("nbd_export", "vdb") tls_dir = nbd_params.get("tls_dir") cmd = "qemu-img create -f qcow2 %s %s" % (target_path, file_size) - logging.debug(process.run(cmd, shell=True).stdout_text) + LOG.debug(process.run(cmd, shell=True).stdout_text) map_from = "--image-opts driver=nbd,export=%s,server.type=%s" map_from += ",%s" map_from += ",x-dirty-bitmap=qemu:dirty-bitmap:%s" @@ -424,13 +427,13 @@ def enable_inc_backup_for_vm(vm, libvirt_ver=(7, 0, 0)): """ vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) if libvirt_version.version_compare(*libvirt_ver): - logging.debug("Incremental backup is enabled by default " - "in current libvirt version, no need to " - "update vm xml.") + LOG.debug("Incremental backup is enabled by default " + "in current libvirt version, no need to " + "update vm xml.") return vmxml - logging.debug("We need to redefine and start the vm to enable " - "incremental backup, please confirm if this effects your " - "other verification points.") + LOG.debug("We need to redefine and start the vm to enable " + "incremental backup, please confirm if this effects your " + "other verification points.") tree = ET.parse(vmxml.xml) root = tree.getroot() for elem in root.iter('domain'): diff --git a/virttest/utils_config.py b/virttest/utils_config.py index fd421136f5..534630c83b 100644 --- a/virttest/utils_config.py +++ b/virttest/utils_config.py @@ -10,6 +10,8 @@ from avocado.utils import distro +LOG = logging.getLogger('avocado.' + __name__) + class ConfigError(Exception): @@ -293,8 +295,8 @@ def __setattr__(self, key, value): try: set_func(key, value) except ValueError: - logging.warning("Key %s might not have type %s. Set raw " - "string instead.", key, key_type) + LOG.warning("Key %s might not have type %s. 
Set raw " + "string instead.", key, key_type) self.set_raw(key, value) super(LibvirtConfigCommon, self).__setattr__(key, value) diff --git a/virttest/utils_conn.py b/virttest/utils_conn.py index 7685f28aec..6448b52674 100644 --- a/virttest/utils_conn.py +++ b/virttest/utils_conn.py @@ -22,6 +22,8 @@ from virttest import utils_split_daemons from virttest import utils_iptables +LOG = logging.getLogger('avocado.' + __name__) + class ConnectionError(Exception): @@ -563,9 +565,9 @@ def __init__(self, *args, **dargs): try: tool = path.find_command(toolName) except path.CmdNotFoundError: - logging.debug("%s executable not set or found on path," - "some function of connection will fail.", - toolName) + LOG.debug("%s executable not set or found on path," + "some function of connection will fail.", + toolName) tool = '/bin/true' self.__dict_set__(key, tool) @@ -600,7 +602,7 @@ def conn_check(self): except aexpect.ShellError as detail: client_session.close() raise SSHCheckError(server_ip, detail) - logging.debug("Check the SSH to %s OK.", server_ip) + LOG.debug("Check the SSH to %s OK.", server_ip) def conn_recover(self): """ @@ -627,7 +629,7 @@ def conn_recover(self): server_session.close() raise ConnServerRestartError(detail) - logging.debug("SSH authentication recover successfully.") + LOG.debug("SSH authentication recover successfully.") def conn_setup(self, timeout=10): """ @@ -693,7 +695,7 @@ def conn_setup(self, timeout=10): raise ConnCmdClientError(cmd, detail) client_session.close() - logging.debug("SSH connection setup successfully.") + LOG.debug("SSH connection setup successfully.") class TCPConnection(ConnectionBase): @@ -808,7 +810,7 @@ def conn_recover(self): except (remote.LoginError, aexpect.ShellError) as detail: raise ConnServerRestartError(detail) - logging.debug("TCP connection recover successfully.") + LOG.debug("TCP connection recover successfully.") def conn_setup(self): """ @@ -899,7 +901,7 @@ def conn_setup(self): except (remote.LoginError, 
aexpect.ShellError) as detail: raise ConnServerRestartError(detail) - logging.debug("TCP connection setup successfully.") + LOG.debug("TCP connection setup successfully.") class TLSConnection(ConnectionBase): @@ -986,8 +988,8 @@ def __init__(self, *args, **dargs): try: CERTTOOL = path.find_command("certtool") except path.CmdNotFoundError: - logging.warning("certtool executable not set or found on path, " - "TLS connection will not setup normally") + LOG.warning("certtool executable not set or found on path, " + "TLS connection will not setup normally") CERTTOOL = '/bin/true' self.CERTTOOL = CERTTOOL @@ -1163,7 +1165,7 @@ def conn_recover(self): libvirtd_service.restart() except (remote.LoginError, aexpect.ShellError) as detail: raise ConnServerRestartError(detail) - logging.debug("TLS connection recover successfully.") + LOG.debug("TLS connection recover successfully.") def cert_recover(self): """ @@ -1204,7 +1206,7 @@ def cert_recover(self): raise ConnRmCertError(cert_path, output) server_session.close() - logging.debug("TLS certifications recover successfully.") + LOG.debug("TLS certifications recover successfully.") def conn_setup(self, server_setup=True, client_setup=True): """ @@ -1233,7 +1235,7 @@ def conn_setup(self, server_setup=True, client_setup=True): self.server_setup(on_local=True) self.close_session() - logging.debug("TLS connection setup successfully.") + LOG.debug("TLS connection setup successfully.") def server_setup(self, on_local=False): """ @@ -1823,14 +1825,14 @@ def __init__(self, *args, **dargs): session_user = self.server_user session_pwd = self.server_pwd self.run_on_remote = True - logging.debug('Unix Connection will be setup on remote host: {}.' - .format(session_ip)) + LOG.debug('Unix Connection will be setup on remote host: {}.' + .format(session_ip)) else: session_ip = self.client_ip session_user = self.client_user session_pwd = self.client_pwd - logging.debug('Unix Connection will be setup on local host: {}.' 
- .format(session_ip)) + LOG.debug('Unix Connection will be setup on local host: {}.' + .format(session_ip)) self.run_on_remote = False # Unable to get libvirt version via libvirt_version.version_compare @@ -1914,7 +1916,7 @@ def conn_recover(self): process.CmdError) as detail: raise ConnServerRestartError(detail) - logging.debug("UNIX connection recover successfully.") + LOG.debug("UNIX connection recover successfully.") def session_creator(self): """ @@ -2092,7 +2094,7 @@ def conn_setup(self): process.CmdError) as detail: raise ConnServerRestartError(detail) - logging.debug("UNIX connection setup successfully.") + LOG.debug("UNIX connection setup successfully.") class UNIXSocketConnection(ConnectionBase): @@ -2218,7 +2220,7 @@ def add_firewall_ports(self, session): """ firewall_cmd = utils_iptables.Firewall_cmd(session) for port_to_add in [self.desturi_port, self.migrateuri_port, self.disks_uri_port]: - logging.debug("add port: %s", port_to_add) + LOG.debug("add port: %s", port_to_add) firewall_cmd.add_port(port_to_add, 'tcp', firewalld_reload=False) def del_firewall_ports(self, session): @@ -2242,13 +2244,12 @@ def install_qemu_kvm_pp(self, session, qemu_kvm_pp_path, timeout=240): cmd = "semodule -l|grep qemu-kvm" status, output = session.cmd_status_output(cmd) if status: - logging.debug("Active qemu-kvm policy.") + LOG.debug("Active qemu-kvm policy.") cmd = "semodule -i %s" % qemu_kvm_pp_path status, output = session.cmd_status_output(cmd, timeout=timeout) if status: - logging.error("Unable to active SELinux policy module - " - "qemu-kvm! cmd: {} output: {}" - .format(cmd, output)) + LOG.error("Unable to active SELinux policy module - " + "qemu-kvm! 
cmd: {} output: {}".format(cmd, output)) else: self.remove_qemu_kvm_policy = True @@ -2261,7 +2262,7 @@ def uninstall_qemu_kvm_pp(self, session, timeout=240): :param timeout: Timeout to execute command lines """ if self.remove_qemu_kvm_policy: - logging.debug("Remove qemu-kvm policy.") + LOG.debug("Remove qemu-kvm policy.") cmd = "semodule -r qemu-kvm" status, output = session.cmd_status_output(cmd, timeout=timeout) if status: @@ -2286,7 +2287,7 @@ def conn_recover(self): self.clear_pmsocat(ignore_status=True) self.uninstall_qemu_kvm_pp(unix2tcp_session) - logging.debug("UNIX sockets recover successfully.") + LOG.debug("UNIX sockets recover successfully.") def conn_setup(self): """ @@ -2317,8 +2318,8 @@ def conn_setup(self): destination_ip) # FIXME: Need to modify SELinux context through pmsocat36.py? self.install_qemu_kvm_pp(unix2tcp_session, qemu_kvm_pp_path) - logging.debug("UNIX2TCP setup successfully.") + LOG.debug("UNIX2TCP setup successfully.") self.connect_to_unix_socket(tcp2unix_session, dest_pmsocat_path) self.add_firewall_ports(tcp2unix_session) - logging.debug("TCP2UNIX setup successfully.") + LOG.debug("TCP2UNIX setup successfully.") diff --git a/virttest/utils_disk.py b/virttest/utils_disk.py index 9b76d5d36e..b1418f1cbf 100644 --- a/virttest/utils_disk.py +++ b/virttest/utils_disk.py @@ -39,6 +39,8 @@ # Whether to print all shell commands called DEBUG = False +LOG = logging.getLogger('avocado.' 
+ __name__) + def copytree(src, dst, overwrite=True, ignore=''): """ @@ -89,7 +91,7 @@ def is_mount(src, dst=None, fstype=None, options=None, verbose=False, else: mount_result = process.run(mount_list_cmd, shell=True).stdout_text if verbose: - logging.debug("/proc/mounts contents:\n%s", mount_result) + LOG.debug("/proc/mounts contents:\n%s", mount_result) for result in mount_result.splitlines(): if mount_str in result: @@ -99,14 +101,14 @@ def is_mount(src, dst=None, fstype=None, options=None, verbose=False, for op in options: if op not in options_result: if verbose: - logging.info("%s is not mounted with given" - " option %s", src, op) + LOG.info("%s is not mounted with given" + " option %s", src, op) return False if verbose: - logging.info("%s is mounted", src) + LOG.info("%s is mounted", src) return True if verbose: - logging.info("%s is not mounted", src) + LOG.info("%s is not mounted", src) return False @@ -155,7 +157,7 @@ def umount(src, dst, fstype=None, verbose=False, session=None): package = "psmisc" # check package is available, if not try installing it if not utils_package.package_install(package): - logging.error("%s is not available/installed for fuser", package) + LOG.error("%s is not available/installed for fuser", package) fuser_cmd = "fuser -km %s" % dst umount_cmd = "umount %s" % dst if session: @@ -308,21 +310,21 @@ def update_windows_disk_attributes(session, dids, timeout=120): online_cmd = ' echo online disk' online_cmd = _wrap_windows_cmd(online_cmd) for did in dids: - logging.info("Detail for 'Disk%s'" % did) + LOG.info("Detail for 'Disk%s'" % did) details = session.cmd_output(detail_cmd % did) if re.search("Read.*Yes", details, re.I | re.M): - logging.info("Clear readonly bit on 'Disk%s'" % did) + LOG.info("Clear readonly bit on 'Disk%s'" % did) status, output = session.cmd_status_output(set_rw_cmd % did, timeout=timeout) if status != 0: - logging.error("Can not clear readonly bit: %s" % output) + LOG.error("Can not clear readonly bit: %s" % 
output) return False if re.search("Status.*Offline", details, re.I | re.M): - logging.info("Online 'Disk%s'" % did) + LOG.info("Online 'Disk%s'" % did) status, output = session.cmd_status_output(online_cmd % did, timeout=timeout) if status != 0: - logging.error("Can not online disk: %s" % output) + LOG.error("Can not online disk: %s" % output) return False return True @@ -539,11 +541,11 @@ def clean_partition_linux(session, did, timeout=360): partition_numbers = session.cmd_output(list_partition_number % did) ignore_err_msg = "unrecognised disk label" if ignore_err_msg in partition_numbers: - logging.info("no partition to clean on %s" % did) + LOG.info("no partition to clean on %s" % did) else: partition_numbers = partition_numbers.splitlines() for number in partition_numbers: - logging.info("remove partition %s on %s" % (number, did)) + LOG.info("remove partition %s on %s" % (number, did)) session.cmd(rm_cmd % (did, number)) session.cmd("partprobe /dev/%s" % did, timeout=timeout) regex = r'/block/%s/\S+\s^' % did @@ -1098,7 +1100,7 @@ def get_parts_list(session=None): parts_line = line.rsplit() if len(parts_line) == 4: parts.append(parts_line[3]) - logging.debug("Find parts: %s", parts) + LOG.debug("Find parts: %s", parts) return parts @@ -1112,7 +1114,7 @@ def get_added_parts(session, old_parts): """ new_parts = get_parts_list(session) added_parts = list(set(new_parts).difference(set(old_parts))) - logging.info("Added parts:%s", added_parts) + LOG.info("Added parts:%s", added_parts) return added_parts @@ -1150,7 +1152,7 @@ def get_disk_by_serial(serial_str, session=None): else: status = process.run(cmd, shell=True, ignore_status=True).exit_status if not status: - logging.debug("Disk %s has serial %s", disk, serial_str) + LOG.debug("Disk %s has serial %s", disk, serial_str) return disk @@ -1164,7 +1166,7 @@ def check_remote_vm_disks(params): remote_vm_obj.check_network() remote_vm_obj.setup_ssh_auth() disks = get_linux_disks(remote_vm_obj, False) - 
logging.debug("Get disks in remote VM: %s", disks) + LOG.debug("Get disks in remote VM: %s", disks) for disk in disks.keys(): linux_disk_check(remote_vm_obj, disk) @@ -1183,7 +1185,7 @@ def dd_data_to_vm_disk(session, disk, bs='1M', seek='0', count='100'): dd_cmd = "dd if=/dev/urandom of=%s bs=%s seek=%s count=%s; sync" dd_cmd %= (disk, bs, seek, count) output = session.cmd_output(dd_cmd).strip() - logging.debug("Using dd to generate data to %s: %s", disk, output) + LOG.debug("Using dd to generate data to %s: %s", disk, output) class Disk(object): @@ -1199,7 +1201,7 @@ def get_answer_file_path(self, filename): return os.path.join(self.mount, filename) def copy_to(self, src): - logging.debug("Copying %s to disk image mount", src) + LOG.debug("Copying %s to disk image mount", src) dst = os.path.join(self.mount, os.path.basename(src)) if os.path.isdir(src): shutil.copytree(src, dst) @@ -1210,7 +1212,7 @@ def close(self): os.chmod(self.path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) cleanup(self.mount) - logging.debug("Disk %s successfully set", self.path) + LOG.debug("Disk %s successfully set", self.path) class FloppyDisk(Disk): @@ -1234,7 +1236,7 @@ def __init__(self, path, qemu_img_binary, tmpdir, vfd_size): f_cmd = 'mkfs.msdos -s 1 %s' % path process.run(f_cmd, verbose=DEBUG) except process.CmdError as e: - logging.error("Error during floppy initialization: %s" % e) + LOG.error("Error during floppy initialization: %s" % e) cleanup(self.mount) raise @@ -1254,7 +1256,7 @@ def close(self): cleanup(self.mount) def copy_to(self, src): - logging.debug("Copying %s to floppy image", src) + LOG.debug("Copying %s to floppy image", src) mcopy_cmd = "mcopy -s -o -n -i %s %s ::/" % (self.path, src) process.run(mcopy_cmd, verbose=DEBUG) @@ -1325,8 +1327,7 @@ def setup_virtio_win2008(self, virtio_floppy): if os.path.isfile(virtio_floppy): self._copy_virtio_drivers(virtio_floppy) else: - logging.debug( - "No virtio floppy present, not needed for 
this OS anyway") + LOG.debug("No virtio floppy present, not needed for this OS anyway") class CdromDisk(Disk): @@ -1382,8 +1383,8 @@ def setup_virtio_win2008(self, virtio_floppy, cdrom_virtio): if os.path.isfile(cdrom_virtio) or os.path.isfile(virtio_floppy): self._copy_virtio_drivers(virtio_floppy, cdrom_virtio) else: - logging.debug( - "No virtio floppy/cdrom present, not needed for this OS anyway") + LOG.debug("No virtio floppy/cdrom present, not needed for this OS " + "anyway") @error_context.context_aware def close(self): @@ -1397,8 +1398,8 @@ def close(self): os.chmod(self.path, stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) cleanup(self.mount) - logging.debug("unattended install CD image %s successfully created", - self.path) + LOG.debug("unattended install CD image %s successfully created", + self.path) class CdromInstallDisk(Disk): @@ -1450,8 +1451,8 @@ def close(self): stat.S_IROTH | stat.S_IXOTH) cleanup(self.mount) cleanup(self.source_cdrom) - logging.debug("unattended install CD image %s successfully created", - self.path) + LOG.debug("unattended install CD image %s successfully created", + self.path) class GuestFSModiDisk(object): @@ -1496,7 +1497,7 @@ def __init__(self, disk, backend='direct'): raise exceptions.TestError('libvirtd: service not found') if (not libvirtd_status) and (not libvirtd.start()): raise exceptions.TestError('libvirtd: failed to start') - logging.debug("Launch the disk %s, wait..." % self.disk) + LOG.debug("Launch the disk %s, wait..." 
% self.disk) self.g.launch() def os_inspects(self): @@ -1526,16 +1527,16 @@ def compare(a, b): for mp_dev in mps: try: msg = "Mount dev '%s' partitions '%s' to '%s'" - logging.info(msg % (root, mp_dev[1], mp_dev[0])) + LOG.info(msg % (root, mp_dev[1], mp_dev[0])) self.g.mount(mp_dev[1], mp_dev[0]) except RuntimeError as err_msg: - logging.info("%s (ignored)" % err_msg) + LOG.info("%s (ignored)" % err_msg) else: raise exceptions.TestError( "inspect_vm: no operating systems found") def umount_all(self): - logging.debug("Umount all device partitions") + LOG.debug("Umount all device partitions") if self.mounts(): self.g.umount_all() diff --git a/virttest/utils_env.py b/virttest/utils_env.py index ca0d33a974..3894f958ec 100644 --- a/virttest/utils_env.py +++ b/virttest/utils_env.py @@ -20,6 +20,8 @@ ENV_VERSION = 1 +LOG = logging.getLogger('avocado.' + __name__) + def get_env_version(): return ENV_VERSION @@ -75,22 +77,21 @@ def __init__(self, filename=None, version=0): if env.get("version", 0) >= version: self.data = env else: - logging.warn( - "Incompatible env file found. Not using it.") + LOG.warn("Incompatible env file found. Not using it.") self.data = empty else: # No previous env file found, proceed... 
- logging.warn("Creating new, empty env file") + LOG.warn("Creating new, empty env file") self.data = empty # Almost any exception can be raised during unpickling, so let's # catch them all except Exception as e: - logging.warn("Exception thrown while loading env") - logging.warn(e) - logging.warn("Creating new, empty env file") + LOG.warn("Exception thrown while loading env") + LOG.warn(e) + LOG.warn("Creating new, empty env file") self.data = empty else: - logging.warn("Creating new, empty env file") + LOG.warn("Creating new, empty env file") self.data = empty def save(self, filename=None): diff --git a/virttest/utils_gdb.py b/virttest/utils_gdb.py index 025bd5cea3..09409fb3a8 100644 --- a/virttest/utils_gdb.py +++ b/virttest/utils_gdb.py @@ -12,6 +12,8 @@ from enum import Enum from virttest import utils_misc +LOG = logging.getLogger('avocado.' + __name__) + class GDBError(Exception): @@ -187,7 +189,7 @@ def _parse_notify_async_line(self, line): elif event in ['library-loaded', 'library-unloaded']: pass else: - logging.warning('Unprocessed gdb async notification:\n%s', line) + LOG.warning('Unprocessed gdb async notification:\n%s', line) def _parse_exec_async_line(self, line): """ @@ -212,8 +214,8 @@ def _parse_exec_async_line(self, line): self._callback('termination', info) else: for key in info: - logging.warning('Stopped without reason') - logging.warning('%s: %s', key, info[key]) + LOG.warning('Stopped without reason') + LOG.warning('%s: %s', key, info[key]) else: self._callback('termination', info) if event == '*running': @@ -288,7 +290,7 @@ def _callback(self, callback_type, info): "break" or "signal" """ callback_func, params = self.callbacks[callback_type] - logging.debug('gdb is Calling back %s' % callback_type) + LOG.debug('gdb is Calling back %s' % callback_type) thread = threading.Thread( target=callback_func, args=(self, info, params), @@ -438,8 +440,8 @@ def wait_for_start(self, timeout=60): :param timeout: Max time to wait """ - 
logging.debug("Waiting for gdb inferior %s to start" - % self.inferior_command) + LOG.debug("Waiting for gdb inferior %s to start" + % self.inferior_command) return utils_misc.wait_for( lambda: self.running, timeout, @@ -452,8 +454,8 @@ def wait_for_stop(self, timeout=60): :param timeout: Max time to wait """ - logging.debug("Waiting for gdb inferior %s to stop" - % self.inferior_command) + LOG.debug("Waiting for gdb inferior %s to stop" + % self.inferior_command) res = utils_misc.wait_for( lambda: not self.running, timeout, @@ -467,7 +469,7 @@ def wait_for_termination(self, timeout=60): :param timeout: Max time to wait """ - logging.debug("Waiting for gdb to terminate") + LOG.debug("Waiting for gdb to terminate") return utils_misc.wait_for( lambda: self.terminated, timeout, diff --git a/virttest/utils_hotplug.py b/virttest/utils_hotplug.py index c40a2110c6..e8b059c442 100644 --- a/virttest/utils_hotplug.py +++ b/virttest/utils_hotplug.py @@ -21,6 +21,9 @@ from virttest.libvirt_xml.devices import memory +LOG = logging.getLogger('avocado.' + __name__) + + def create_mem_xml(tg_size, pg_size=None, mem_addr=None, tg_sizeunit="KiB", pg_unit="KiB", tg_node=0, node_mask=0, mem_model="dimm", mem_discard=None, alias=None, lb_size=None, @@ -76,5 +79,5 @@ def create_mem_xml(tg_size, pg_size=None, mem_addr=None, tg_sizeunit="KiB", if uuid: mem_xml.uuid = uuid - logging.debug("Memory device xml: %s", mem_xml) + LOG.debug("Memory device xml: %s", mem_xml) return mem_xml.copy() diff --git a/virttest/utils_iptables.py b/virttest/utils_iptables.py index bbf1fd063c..869254e358 100644 --- a/virttest/utils_iptables.py +++ b/virttest/utils_iptables.py @@ -8,6 +8,8 @@ from avocado.utils import process from avocado.core import exceptions +LOG = logging.getLogger('avocado.' 
+ __name__) + class Iptables(object): """ @@ -54,13 +56,13 @@ def setup_or_cleanup_iptables_rules(cls, rules, params=None, flag = False for exist_rule in exist_rules: if rule in exist_rule: - logging.debug("Rule: %s exist in iptables", rule) + LOG.debug("Rule: %s exist in iptables", rule) flag = True if cleanup: - logging.debug("cleaning rule: %s", rule) + LOG.debug("cleaning rule: %s", rule) commands.append("iptables -D %s" % rule) if not flag and not cleanup: - logging.debug("Adding rule: %s", rule) + LOG.debug("Adding rule: %s", rule) commands.append("iptables -I %s" % rule) # Once rules are filtered, then it is executed in remote or local # machine @@ -72,11 +74,11 @@ def setup_or_cleanup_iptables_rules(cls, rules, params=None, raise exceptions.TestError("iptables command failed " "remotely %s" % command) else: - logging.debug("iptable command success %s", command) + LOG.debug("iptable command success %s", command) else: try: cmd_output = process.run(command, shell=True).stdout_text - logging.debug("iptable command success %s", command) + LOG.debug("iptable command success %s", command) except process.CmdError as info: raise exceptions.TestError("iptables fails for command " "locally %s" % command) @@ -118,7 +120,7 @@ def command(self, cmd, **dargs): self.cmd += " --zone=%s" % zone self.status, self.output = self.func(self.cmd) if self.status != 0: - logging.error("Failed to execute %s: %s", self.cmd, self.output) + LOG.error("Failed to execute %s: %s", self.cmd, self.output) # Reload the configuration to make effect at once if dargs.get("firewalld_reload", True): self.reload() diff --git a/virttest/utils_kernel_module.py b/virttest/utils_kernel_module.py index 09acd8a0b6..29b4523ca5 100644 --- a/virttest/utils_kernel_module.py +++ b/virttest/utils_kernel_module.py @@ -21,6 +21,8 @@ from avocado.utils import process +LOG = logging.getLogger('avocado.' 
+ __name__) + class KernelModuleError(Exception): @@ -96,7 +98,7 @@ def unload_module(self): """ if os.path.exists(self._module_path): unload_cmd = 'rmmod ' + self._module_name - logging.debug("Unloading module: %s", unload_cmd) + LOG.debug("Unloading module: %s", unload_cmd) status, output = process.getstatusoutput(unload_cmd) if status: raise KernelModuleUnloadError(self._module_name, output) @@ -132,15 +134,15 @@ def reload_module(self, force, params=""): do_not_load = False if (current_config and all(x in current_config.split() for x in params.split())): - logging.debug("Not reloading module. Current module config" - " uration for %s already contains all reques" - " ted parameters. Requested: '%s'. Current:" - " '%s'. Use force=True to force loading.", - self._module_name, params, current_config) + LOG.debug("Not reloading module. Current module config" + " uration for %s already contains all reques" + " ted parameters. Requested: '%s'. Current:" + " '%s'. Use force=True to force loading.", + self._module_name, params, current_config) do_not_load = True elif not self._was_loaded: - logging.debug("Module %s isn't loaded. Use force=True to force" - " loading.", self._module_name) + LOG.debug("Module %s isn't loaded. 
Use force=True to force" + " loading.", self._module_name) do_not_load = True if do_not_load: return @@ -151,7 +153,7 @@ def reload_module(self, force, params=""): holder.unload_module() self.unload_module() reload_cmd = 'modprobe %s %s' % (self._module_name, params) - logging.debug("Reloading module: %s", reload_cmd) + LOG.debug("Reloading module: %s", reload_cmd) status, output = process.getstatusoutput(reload_cmd.strip()) if status: raise KernelModuleReloadError(self._module_name, output) @@ -187,7 +189,7 @@ def restore(self): if self._was_loaded: restore_cmd = 'modprobe %s %s' % (self._module_name, self._config_backup) - logging.debug("Restoring module state: %s", restore_cmd) + LOG.debug("Restoring module state: %s", restore_cmd) status, output = process.getstatusoutput(restore_cmd) if status: raise KernelModuleRestoreError(self._module_name, @@ -206,9 +208,9 @@ def _backup_config(self): self._was_loaded = True else: self._was_loaded = False - logging.debug("Backed up %s module state (was_loaded, params)" - "=(%s, %s)", self._module_name, self._was_loaded, - self._config_backup) + LOG.debug("Backed up %s module state (was_loaded, params)" + "=(%s, %s)", self._module_name, self._was_loaded, + self._config_backup) @property def was_loaded(self): diff --git a/virttest/utils_libguestfs.py b/virttest/utils_libguestfs.py index ba991928a7..96227e2d64 100644 --- a/virttest/utils_libguestfs.py +++ b/virttest/utils_libguestfs.py @@ -13,6 +13,8 @@ from . import propcan +LOG = logging.getLogger('avocado.' 
+ __name__) + class LibguestfsCmdError(Exception): @@ -52,7 +54,7 @@ def lgf_cmd_check(cmd): try: return path.find_command(cmd) except path.CmdNotFoundError: - logging.warning("You have not installed %s on this host.", cmd) + LOG.warning("You have not installed %s on this host.", cmd) return None @@ -66,7 +68,7 @@ def lgf_command(cmd, ignore_status=True, debug=False, timeout=60): and ignore_status=False """ if debug: - logging.debug("Running command %s in debug mode.", cmd) + LOG.debug("Running command %s in debug mode.", cmd) # Raise exception if ignore_status is False try: @@ -76,9 +78,9 @@ def lgf_command(cmd, ignore_status=True, debug=False, timeout=60): raise LibguestfsCmdError(detail) if debug: - logging.debug("status: %s", ret.exit_status) - logging.debug("stdout: %s", ret.stdout_text.strip()) - logging.debug("stderr: %s", ret.stderr_text.strip()) + LOG.debug("status: %s", ret.exit_status) + LOG.debug("stdout: %s", ret.stdout_text.strip()) + LOG.debug("stderr: %s", ret.stderr_text.strip()) # Return CmdResult instance when ignore_status is True ret.stdout = ret.stdout_text @@ -124,11 +126,11 @@ def set_debug(self, debug): desired_setting = bool(debug) if not current_setting and desired_setting: self.__dict_set__('debug', True) - logging.debug("Libguestfs debugging enabled") + LOG.debug("Libguestfs debugging enabled") # current and desired could both be True if current_setting and not desired_setting: self.__dict_set__('debug', False) - logging.debug("Libguestfs debugging disabled") + LOG.debug("Libguestfs debugging disabled") def set_timeout(self, timeout): """ @@ -141,7 +143,7 @@ def set_timeout(self, timeout): timeout = int(str(timeout)) self.__dict_set__('timeout', timeout) except ValueError: - logging.debug("Set timeout failed.") + LOG.debug("Set timeout failed.") def get_uri(self): """ @@ -345,8 +347,8 @@ def cmd_status_output(self, cmd, ignore_status=None, verbose=None, timeout=60): (line, cmd, ret.stdout_text.strip())) raise LibguestfsCmdError(e_msg) - 
logging.debug("command: %s", cmd) - logging.debug("stdout: %s", ret.stdout_text.strip()) + LOG.debug("command: %s", cmd) + LOG.debug("stdout: %s", ret.stdout_text.strip()) return 0, ret.stdout_text.strip() @@ -405,8 +407,7 @@ def __init__(self, disk_img=None, ro_mode=False, status, output = guestfs_session.cmd_status_output( 'is-config', timeout=60) if status != 0: - logging.debug( - "Persistent guestfish session is not responding.") + LOG.debug("Persistent guestfish session is not responding.") raise aexpect.ShellStatusError(self.lgf_exec, 'is-config') def close_session(self): @@ -694,11 +695,11 @@ def do_mount(self, mountpoint): vg_name = self.params.get("vg_name", "vol_test") lv_name = self.params.get("lv_name", "vol_file") device = "/dev/%s/%s" % (vg_name, lv_name) - logging.info("mount lvm partition...%s" % device) + LOG.info("mount lvm partition...%s" % device) elif partition_type == "physical": pv_name = self.params.get("pv_name", "/dev/sdb") device = pv_name + "1" - logging.info("mount physical partition...%s" % device) + LOG.info("mount physical partition...%s" % device) self.mount(device, mountpoint) def read_file(self, path): diff --git a/virttest/utils_libvirt/libvirt_ceph_utils.py b/virttest/utils_libvirt/libvirt_ceph_utils.py index 7321b32eb0..2e1760c001 100644 --- a/virttest/utils_libvirt/libvirt_ceph_utils.py +++ b/virttest/utils_libvirt/libvirt_ceph_utils.py @@ -22,6 +22,8 @@ from virttest.libvirt_xml import vm_xml +LOG = logging.getLogger('avocado.' 
+ __name__) + def _create_secret(auth_sec_usage_type, ceph_auth_key): """ @@ -83,7 +85,7 @@ def create_or_cleanup_ceph_backend_vm_disk(vm, params, is_setup=True): :param is_setup: one parameter indicate whether setup or clean up """ vmxml = vm_xml.VMXML.new_from_dumpxml(vm.name) - logging.debug("original xml is: %s", vmxml) + LOG.debug("original xml is: %s", vmxml) # Device related configurations device_format = params.get("virt_disk_device_format", "raw") diff --git a/virttest/utils_libvirt/libvirt_config.py b/virttest/utils_libvirt/libvirt_config.py index c686cb876b..9f3937d27c 100644 --- a/virttest/utils_libvirt/libvirt_config.py +++ b/virttest/utils_libvirt/libvirt_config.py @@ -13,6 +13,8 @@ from virttest import remote from virttest.utils_test import libvirt +LOG = logging.getLogger('avocado.' + __name__) + def remove_key_in_conf(value_list, conf_type="libvirtd", remote_params=None, restart_libvirt=False): @@ -57,7 +59,7 @@ def remove_key_in_conf(value_list, conf_type="libvirtd", try: del target_conf[item] except utils_config.ConfigNoOptionError as err: - logging.error(err) + LOG.error(err) if restart_libvirt: libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() @@ -94,15 +96,15 @@ def remove_key_for_modular_daemon(params, remote_dargs=None): no_search_cond = eval(params.get("no_search", '{}')) for k, v in search_cond.items(): if not re.search(v, k, re.IGNORECASE): - logging.debug("The key '%s' does not contain '%s', " - "no need to remove %s in %s conf file.", - k, v, remove_key, conf_type) + LOG.debug("The key '%s' does not contain '%s', " + "no need to remove %s in %s conf file.", + k, v, remove_key, conf_type) return for k, v in no_search_cond.items(): if re.search(v, k, re.IGNORECASE): - logging.debug("The key '%s' contains '%s', " - "no need to remove %s in %s conf file.", - k, v, remove_key, conf_type) + LOG.debug("The key '%s' contains '%s', " + "no need to remove %s in %s conf file.", + k, v, remove_key, conf_type) return conf_obj = 
remove_key_in_conf(remove_key, conf_type=conf_type, diff --git a/virttest/utils_libvirt/libvirt_cpu.py b/virttest/utils_libvirt/libvirt_cpu.py index 7304d6f0fd..637df7fb2c 100644 --- a/virttest/utils_libvirt/libvirt_cpu.py +++ b/virttest/utils_libvirt/libvirt_cpu.py @@ -9,6 +9,8 @@ from virttest.libvirt_xml import vm_xml from virttest.utils_libvirt import libvirt_vmxml +LOG = logging.getLogger('avocado.' + __name__) + def add_cpu_settings(vmxml, params): """ @@ -82,7 +84,7 @@ def add_cpu_settings(vmxml, params): # otherwise, the vm may fail to define vm_attrs = {k.replace('setvm_', ''): params[k] for k in params if k.startswith('setvm_')} - logging.debug(vm_attrs) + LOG.debug(vm_attrs) libvirt_vmxml.set_vm_attrs(vmxml, vm_attrs) vmxml.xmltreefile.write() vmxml.sync() diff --git a/virttest/utils_libvirt/libvirt_disk.py b/virttest/utils_libvirt/libvirt_disk.py index 90314f6759..d05e847aae 100644 --- a/virttest/utils_libvirt/libvirt_disk.py +++ b/virttest/utils_libvirt/libvirt_disk.py @@ -21,6 +21,8 @@ from virttest.libvirt_xml.devices.disk import Disk +LOG = logging.getLogger('avocado.' 
+ __name__) + def create_disk(disk_type, path=None, size="500M", disk_format="raw", extra='', session=None): @@ -82,13 +84,13 @@ def create_primitive_disk_xml(type_name, disk_device, device_target, device_bus, driver_dict = {"name": "qemu", "type": device_format} disk_xml.driver = driver_dict if disk_src_dict: - logging.debug("disk src dict is: %s" % disk_src_dict) + LOG.debug("disk src dict is: %s" % disk_src_dict) disk_source = disk_xml.new_disk_source(**disk_src_dict) disk_xml.source = disk_source if disk_auth_dict: - logging.debug("disk auth dict is: %s" % disk_auth_dict) + LOG.debug("disk auth dict is: %s" % disk_auth_dict) disk_xml.auth = disk_xml.new_auth(**disk_auth_dict) - logging.debug("new disk xml in create_primitive_disk is: %s", disk_xml) + LOG.debug("new disk xml in create_primitive_disk is: %s", disk_xml) return disk_xml @@ -127,7 +129,7 @@ def create_custom_metadata_disk(disk_path, disk_format, # Attach metadatacache into drivermetadata object new_one_drivermetadata.metadata_cache = custom_disk.DriverMetadata().new_metadatacache(**metadata_cache_dict) custom_disk.drivermetadata = new_one_drivermetadata - logging.debug("disk xml in create_custom_metadata_disk: %s\n", custom_disk) + LOG.debug("disk xml in create_custom_metadata_disk: %s\n", custom_disk) return custom_disk @@ -145,8 +147,8 @@ def get_images_with_xattr(vm): getfattr_result = get_image_xattr(disk_path) if "selinux" in getfattr_result: dirty_images.append(disk_path) - logging.debug("Image '%s' having xattr left: %s", - disk_path, getfattr_result.stdout) + LOG.debug("Image '%s' having xattr left: %s", + disk_path, getfattr_result.stdout) return dirty_images @@ -260,7 +262,7 @@ def create_reuse_external_snapshots(vm, pre_set_root_dir=None, skip_first_one=Fa virsh.snapshot_create_as(vm.name, options, ignore_status=False, debug=True) - logging.debug('reuse external snapshots:%s' % snapshot_external_disks) + LOG.debug('reuse external snapshots:%s' % snapshot_external_disks) return root_dir, 
snapshot_external_disks @@ -400,7 +402,7 @@ def get_chain_backing_files(disk_src_file): if libvirt_storage.check_qemu_image_lock_support(): cmd = "qemu-img info -U %s --backing-chain" % disk_src_file ret = process.run(cmd, shell=True).stdout_text.strip() - logging.debug("The actual qemu-img output:%s\n", ret) + LOG.debug("The actual qemu-img output:%s\n", ret) match = re.findall(r'(backing file: )(.+\n)', ret) qemu_img_info_backing_chain = [] for i in range(len(match)): @@ -425,7 +427,7 @@ def get_mirror_part_in_xml(vm, disk_target): else: disk_xml = disk.xmltreefile break - logging.debug("disk xml in mirror: %s\n", disk_xml) + LOG.debug("disk xml in mirror: %s\n", disk_xml) disk_mirror = disk_xml.find('mirror') job_details = [] if disk_mirror is not None: @@ -446,7 +448,7 @@ def create_mbxml(mb_params): for attr_key in mb_params: setattr(mb_xml, attr_key, mb_params[attr_key]) - logging.debug(mb_xml) + LOG.debug(mb_xml) return mb_xml.copy() @@ -470,12 +472,12 @@ def check_in_vm(vm, target, old_parts, is_equal=False): added_parts = utils_disk.get_added_parts(session, old_parts) if is_equal: if len(added_parts) != 0: - logging.error("new added parts are not equal the old one") + LOG.error("new added parts are not equal the old one") return False else: return True if len(added_parts) != 1: - logging.error("The number of new partitions is invalid in VM") + LOG.error("The number of new partitions is invalid in VM") return False added_part = None @@ -487,7 +489,7 @@ def check_in_vm(vm, target, old_parts, is_equal=False): added_part = added_parts[0] if not added_part: - logging.error("Can't see added partition in VM") + LOG.error("Can't see added partition in VM") return False device_source = os.path.join(os.sep, 'dev', added_part) @@ -500,14 +502,14 @@ def check_in_vm(vm, target, old_parts, is_equal=False): cmd = ("mount /dev/%s1 /mnt && echo '123' > /mnt/testfile" " && cat /mnt/testfile && umount /mnt" % added_part) s, o = session.cmd_status_output(cmd) - 
logging.info("Check disk operation in VM:\n%s", o) + LOG.info("Check disk operation in VM:\n%s", o) session.close() if s != 0: - logging.error("error happened when execute command:\n%s", cmd) + LOG.error("error happened when execute command:\n%s", cmd) return False return True except Exception as e: - logging.error(str(e)) + LOG.error(str(e)) return False @@ -550,7 +552,7 @@ def fill_null_in_vm(vm, target, size_value=500): try: session = vm.wait_for_login() if not utils_package.package_install(["parted"], session, timeout=300): - logging.error("Failed to install the required 'parted' package") + LOG.error("Failed to install the required 'parted' package") device_source = os.path.join(os.sep, 'dev', target) libvirt.mk_label(device_source, session=session) libvirt.mk_part(device_source, size="%sM" % size_value, session=session) @@ -561,7 +563,7 @@ def fill_null_in_vm(vm, target, size_value=500): cmd = ("mount /dev/%s1 /mnt && dd if=/dev/zero of=/mnt/testfile bs=1024 count=1024x%s " " && umount /mnt" % (target, count_number)) s, o = session.cmd_status_output(cmd) - logging.info("Check disk operation in VM:\n%s", o) + LOG.info("Check disk operation in VM:\n%s", o) session.close() if s != 0: raise exceptions.TestError("Error happened when executing command:\n%s" % cmd) diff --git a/virttest/utils_libvirt/libvirt_embedded_qemu.py b/virttest/utils_libvirt/libvirt_embedded_qemu.py index 48eb73bee2..81df4725f0 100644 --- a/virttest/utils_libvirt/libvirt_embedded_qemu.py +++ b/virttest/utils_libvirt/libvirt_embedded_qemu.py @@ -18,6 +18,8 @@ except path.CmdNotFoundError: EMBEDDEDQEMU = None +LOG = logging.getLogger('avocado.' 
+ __name__) + class EmbeddedQemuSession(object): @@ -76,7 +78,7 @@ def wait_for_working(self, timeout=60): :param timeout: Max wait time """ - logging.debug('Waiting for %s to work', self.service_exec) + LOG.debug('Waiting for %s to work', self.service_exec) return utils_misc.wait_for( self.is_working, timeout=timeout, diff --git a/virttest/utils_libvirt/libvirt_keywrap.py b/virttest/utils_libvirt/libvirt_keywrap.py index 07414568f7..a313dea112 100644 --- a/virttest/utils_libvirt/libvirt_keywrap.py +++ b/virttest/utils_libvirt/libvirt_keywrap.py @@ -6,6 +6,8 @@ from avocado.utils import process +LOG = logging.getLogger('avocado.' + __name__) + class ProtectedKeyHelper(object): """ @@ -26,7 +28,7 @@ def load_module(self): error, output = cmd_status_output(cmd="modprobe %s" % self.module_name, session=self.session) if error: - logging.debug("Error loading module 'pkey': %s", output) + LOG.debug("Error loading module 'pkey': %s", output) return False return True @@ -41,7 +43,7 @@ def get_some_aes_key_token(self): error, output = cmd_status_output(cmd="hexdump %s" % attr_path, session=self.session) if error or "No such device" in output: - logging.debug("Error reading from %s: %s", attr_path, output) + LOG.debug("Error reading from %s: %s", attr_path, output) return None return output diff --git a/virttest/utils_libvirt/libvirt_misc.py b/virttest/utils_libvirt/libvirt_misc.py index baf5ba121f..e2eec41258 100644 --- a/virttest/utils_libvirt/libvirt_misc.py +++ b/virttest/utils_libvirt/libvirt_misc.py @@ -7,6 +7,8 @@ import re import logging +LOG = logging.getLogger('avocado.' 
+ __name__) + def convert_to_dict(content, pattern=r'(\d+) +(\S+)'): """ @@ -21,5 +23,5 @@ def convert_to_dict(content, pattern=r'(\d+) +(\S+)'): info_list = re.findall(pattern, content, re.M) for info in info_list: info_dict[info[0]] = info[1] - logging.debug("The dict converted is:\n%s", info_dict) + LOG.debug("The dict converted is:\n%s", info_dict) return info_dict diff --git a/virttest/utils_libvirt/libvirt_nested.py b/virttest/utils_libvirt/libvirt_nested.py index 3f6b9badd2..150a51472e 100644 --- a/virttest/utils_libvirt/libvirt_nested.py +++ b/virttest/utils_libvirt/libvirt_nested.py @@ -7,6 +7,8 @@ from virttest import utils_libvirtd from virttest import utils_package +LOG = logging.getLogger('avocado.' + __name__) + def install_virt_pkgs(vm_session): """ @@ -16,7 +18,7 @@ def install_virt_pkgs(vm_session): :raises: exceptions.TestError if installation fails """ pkg_names = ['libvirt', 'qemu-kvm'] - logging.info("Virt packages will be installed") + LOG.info("Virt packages will be installed") pkg_mgr = utils_package.package_manager(vm_session, pkg_names) if not pkg_mgr.install(): raise exceptions.TestError("Package '%s' installation " @@ -59,14 +61,14 @@ def update_vm_cpu(guest_xml, cpu_mode=None): # Update cpu mode if needed cur_vmcpuxml = guest_xml.cpu if cpu_mode: - logging.info("Update cpu mode from '{}' to '{}'".format(cur_vmcpuxml.mode, cpu_mode)) + LOG.info("Update cpu mode from '{}' to '{}'".format(cur_vmcpuxml.mode, cpu_mode)) cur_vmcpuxml.mode = cpu_mode if cur_vmcpuxml.mode != cpu_mode else cur_vmcpuxml.mode # If the cpu mode is host-passthrough, then there might no cpu feature in the vm xml try: vmx_index = cur_vmcpuxml.get_feature_index('vmx') except Exception as detail: - logging.warning(detail) + LOG.warning(detail) else: cur_vmcpuxml.set_feature(vmx_index, name='vmx', policy='require') diff --git a/virttest/utils_libvirt/libvirt_network.py b/virttest/utils_libvirt/libvirt_network.py index 6fdc9e242c..6fab949f3e 100644 --- 
a/virttest/utils_libvirt/libvirt_network.py +++ b/virttest/utils_libvirt/libvirt_network.py @@ -15,6 +15,8 @@ from virttest.libvirt_xml import NetworkXML from virttest.utils_test import libvirt +LOG = logging.getLogger('avocado.' + __name__) + def create_or_del_network(net_dict, is_del=False, remote_args=None): """ @@ -101,7 +103,7 @@ def check_established(params): pat_str = r'.*%s:(\d*).*ESTABLISHED.*qemu-kvm.*' % server_ip search = re.search(pat_str, cmdRes.stdout_text.strip()) if search: - logging.debug("Get the port used:%s", search.group(1)) + LOG.debug("Get the port used:%s", search.group(1)) return search.group(1) else: raise exceptions.TestFail("Pattern '%s' is not matched in " @@ -153,7 +155,7 @@ def modify_network_xml(net_dict, testnet_xml): if forward: testnet_xml.del_forward() testnet_xml.forward = eval(forward) - logging.debug("current mode is %s" % testnet_xml.forward) + LOG.debug("current mode is %s" % testnet_xml.forward) if interface_dev: testnet_xml.forward_interface = [{'dev': interface_dev}] if virtualport: @@ -192,19 +194,19 @@ def check_tap_connected(tap_name, estate, br_name): """ cmd = "bridge link | grep master | grep %s" % br_name outputs = process.run(cmd, shell=True, ignore_status=True).stdout_text - logging.debug("The interface attached to the bridge is:\n%s", outputs) + LOG.debug("The interface attached to the bridge is:\n%s", outputs) if tap_name in outputs: if estate: - logging.debug("The tap is attached to bridge as expected!") + LOG.debug("The tap is attached to bridge as expected!") else: - logging.error("The tap isn't detached from bridge!") + LOG.error("The tap isn't detached from bridge!") return False else: if estate: - logging.error("The tap is not attached to bridge!") + LOG.error("The tap is not attached to bridge!") return False else: - logging.debug("The tap isn't attached to bridge as expected!") + LOG.debug("The tap isn't attached to bridge as expected!") return True @@ -218,7 +220,7 @@ def 
check_network_connection(net_name, expected_conn=0): """ netxml = NetworkXML(network_name=net_name).new_from_net_dumpxml(net_name) net_conn = int(netxml.xmltreefile.getroot().get('connections', '0')) - logging.debug("Network connection is %d.", net_conn) + LOG.debug("Network connection is %d.", net_conn) if expected_conn != net_conn: raise exceptions.TestFail("Unable to get the expected connection " "number. Expected: %d, Actual: %d." diff --git a/virttest/utils_libvirt/libvirt_numa.py b/virttest/utils_libvirt/libvirt_numa.py index 42ac652e73..b9240e7463 100644 --- a/virttest/utils_libvirt/libvirt_numa.py +++ b/virttest/utils_libvirt/libvirt_numa.py @@ -9,6 +9,8 @@ from virttest.libvirt_xml import vm_xml +LOG = logging.getLogger('avocado.' + __name__) + def create_cell_distances_xml(vmxml, params): """ @@ -23,14 +25,14 @@ def create_cell_distances_xml(vmxml, params): cells = [] for numacell_xml in cpu_xml.numa_cell: - logging.debug("numacell_xml:%s" % numacell_xml) + LOG.debug("numacell_xml:%s" % numacell_xml) cell_distances_xml = numacell_xml.CellDistancesXML() cell_distances_xml.update({'sibling': eval(params.get('sibling%s' % i))}) numacell_xml.distances = cell_distances_xml i = i + 1 cells.append(numacell_xml) cpu_xml.numa_cell = cells - logging.debug("cpu_xml with cell distances added: %s" % cpu_xml) + LOG.debug("cpu_xml with cell distances added: %s" % cpu_xml) vmxml.cpu = cpu_xml vmxml.sync() @@ -50,17 +52,17 @@ def create_hmat_xml(vmxml, params): cells = [] for numacell_xml in cpu_xml.numa_cell: - logging.debug("numacell_xml:%s" % numacell_xml) + LOG.debug("numacell_xml:%s" % numacell_xml) caches = [] cell_caches = params.get("cell_caches%s" % i, "").split() cell_cache_list = [ast.literal_eval(x) for x in cell_caches] for cell_cache in cell_cache_list: cellcache_xml = vm_xml.CellCacheXML() cellcache_xml.update(cell_cache) - logging.debug("cellcach_xml:%s" % cellcache_xml) + LOG.debug("cellcach_xml:%s" % cellcache_xml) caches.append(cellcache_xml) 
numacell_xml.caches = caches - logging.debug("numacell_xml:%s" % numacell_xml) + LOG.debug("numacell_xml:%s" % numacell_xml) i = i + 1 cells.append(numacell_xml) cpu_xml.numa_cell = cells @@ -72,7 +74,7 @@ def create_hmat_xml(vmxml, params): interconnects_xml.bandwidth = bandwidth_list cpu_xml.interconnects = interconnects_xml - logging.debug("cpu_xml with HMAT configuration added: %s" % cpu_xml) + LOG.debug("cpu_xml with HMAT configuration added: %s" % cpu_xml) vmxml.cpu = cpu_xml vmxml.sync() diff --git a/virttest/utils_libvirt/libvirt_nwfilter.py b/virttest/utils_libvirt/libvirt_nwfilter.py index 4b14fb25fb..7f5d887f73 100644 --- a/virttest/utils_libvirt/libvirt_nwfilter.py +++ b/virttest/utils_libvirt/libvirt_nwfilter.py @@ -9,6 +9,8 @@ from virttest import virsh +LOG = logging.getLogger('avocado.' + __name__) + def clean_up_nwfilter_binding(ignore_status=False): """ @@ -26,7 +28,7 @@ def clean_up_nwfilter_binding(ignore_status=False): # Split on whitespace, assume 1 column linesplit = line.split(None, 1) result.append(linesplit[0]) - logging.info("existed nwfilter binding list is: %s", result) + LOG.info("existed nwfilter binding list is: %s", result) for binding_uuid in result: virsh.nwfilter_binding_delete(binding_uuid, ignore_status=ignore_status) diff --git a/virttest/utils_libvirt/libvirt_pcicontr.py b/virttest/utils_libvirt/libvirt_pcicontr.py index 34ffc0f3b4..6637803e70 100644 --- a/virttest/utils_libvirt/libvirt_pcicontr.py +++ b/virttest/utils_libvirt/libvirt_pcicontr.py @@ -9,6 +9,8 @@ from virttest.utils_test import libvirt from virttest.libvirt_xml import vm_xml +LOG = logging.getLogger('avocado.' 
+ __name__) + def get_max_contr_indexes(vm_xml, cntlr_type, cntlr_model, cntl_num=1): """ @@ -26,10 +28,10 @@ def get_max_contr_indexes(vm_xml, cntlr_type, cntlr_model, cntl_num=1): usable_indexes.append(int(elem.index)) usable_indexes = sorted(usable_indexes, reverse=True) - logging.debug("The indexes returned for controller type '{}' and " - "controller model '{}' is '{}'".format(cntlr_type, - cntlr_model, - usable_indexes[:cntl_num])) + LOG.debug("The indexes returned for controller type '{}' and " + "controller model '{}' is '{}'".format(cntlr_type, + cntlr_model, + usable_indexes[:cntl_num])) return usable_indexes[:cntl_num] @@ -48,7 +50,7 @@ def get_free_pci_slot(vm_xml, max_slot=31): address = dev.find('address') if (address is not None and address.get('bus') == '0x00'): used_slot.append(address.get('slot')) - logging.debug("Collect used slot:%s", used_slot) + LOG.debug("Collect used slot:%s", used_slot) for slot_index in range(1, max_slot + 1): slot = "%0#4x" % slot_index if slot not in used_slot: @@ -74,13 +76,13 @@ def reset_pci_num(vm_name, num=15): vmxml, 'pci', 'pcie-to-pci-bridge') cur_pci_num = ret_indexes[0] if not pcie_to_pci_brg_indexes else \ max(ret_indexes[0], pcie_to_pci_brg_indexes[0]) - logging.debug("The current maximum PCI controller index is %d", cur_pci_num) + LOG.debug("The current maximum PCI controller index is %d", cur_pci_num) if cur_pci_num < num: for i in list(range(cur_pci_num + 1, num)): pcie_root_port.update({'controller_index': "%d" % i}) vmxml.add_device(libvirt.create_controller_xml(pcie_root_port)) else: - logging.info("Current pci number is greater than expected") + LOG.info("Current pci number is greater than expected") # synchronize XML vmxml.sync() diff --git a/virttest/utils_libvirt/libvirt_vfio.py b/virttest/utils_libvirt/libvirt_vfio.py index e23b8169be..b3cde6b2f6 100644 --- a/virttest/utils_libvirt/libvirt_vfio.py +++ b/virttest/utils_libvirt/libvirt_vfio.py @@ -8,6 +8,8 @@ from avocado.core import exceptions from 
avocado.utils import process +LOG = logging.getLogger('avocado.' + __name__) + def check_vfio_pci(pci_id, status_error=False, ignore_error=False): """ @@ -27,7 +29,7 @@ def check_vfio_pci(pci_id, status_error=False, ignore_error=False): err_msg = ("Get incorrect driver {}, it should{} be vfio-pci." .format(output, ' not' if status_error else '')) if ignore_error: - logging.error(err_msg) + LOG.error(err_msg) return False else: raise exceptions.TestFail(err_msg) diff --git a/virttest/utils_libvirt/libvirt_vmxml.py b/virttest/utils_libvirt/libvirt_vmxml.py index bf451b6fe4..36c1610514 100644 --- a/virttest/utils_libvirt/libvirt_vmxml.py +++ b/virttest/utils_libvirt/libvirt_vmxml.py @@ -12,6 +12,8 @@ from virttest import virsh from virttest.libvirt_xml import vm_xml +LOG = logging.getLogger('avocado.' + __name__) + def set_vm_attrs(vmxml, vm_attrs): """ @@ -22,7 +24,7 @@ def set_vm_attrs(vmxml, vm_attrs): :return the updated vmxml """ for attr, value in list(vm_attrs.items()): - logging.debug('Set %s = %s', attr, value) + LOG.debug('Set %s = %s', attr, value) setattr(vmxml, attr, int(value) if value.isdigit() else value) vmxml.xmltreefile.write() vmxml.sync() @@ -44,7 +46,7 @@ def check_guest_xml(vm_name, pat_in_dumpxml, option='', status_error=False): prefix_found = '' if found else 'not ' msg = "The pattern '%s' is %sfound in the vm dumpxml" % (pat_in_dumpxml, prefix_found) if found ^ status_error: - logging.debug(msg) + LOG.debug(msg) else: raise exceptions.TestFail(msg) diff --git a/virttest/utils_libvirtd.py b/virttest/utils_libvirtd.py index fbc3bc772c..173d263739 100644 --- a/virttest/utils_libvirtd.py +++ b/virttest/utils_libvirtd.py @@ -23,6 +23,8 @@ except path.CmdNotFoundError: LIBVIRTD = None +LOG = logging.getLogger('avocado.' 
+ __name__) + class Libvirtd(object): @@ -51,8 +53,8 @@ def __init__(self, service_name=None, session=None): self.service_list = [] if LIBVIRTD is None: - logging.warning("Libvirtd service is not available in host, " - "utils_libvirtd module will not function normally") + LOG.warning("Libvirtd service is not available in host, " + "utils_libvirtd module will not function normally") self.service_name = "libvirtd" if not service_name else service_name @@ -217,7 +219,7 @@ def __init__(self, gdb=False, self.libvirtd_service = Libvirtd(service_name=self.service_exec) self.was_running = self.libvirtd_service.is_running() if self.was_running: - logging.debug('Stopping %s service', self.service_exec) + LOG.debug('Stopping %s service', self.service_exec) self.libvirtd_service.stop() self.logging_handler = logging_handler @@ -285,7 +287,7 @@ def set_callback(self, callback_type, callback_func, callback_params=None): self.gdb.set_callback( callback_type, callback_func, callback_params) else: - logging.error("Only gdb session supports setting callback") + LOG.error("Only gdb session supports setting callback") def start(self, arg_str='', wait_for_working=True): """ @@ -315,7 +317,7 @@ def cont(self): if self.gdb: self.gdb.cont() else: - logging.error("Only gdb session supports continue") + LOG.error("Only gdb session supports continue") def kill(self): """ @@ -333,7 +335,7 @@ def restart(self, arg_str='', wait_for_working=True): :param arg_str: Argument passing to the session :param wait_for_working: Whether wait for libvirtd finish loading """ - logging.debug("Restarting %s session", self.service_exec) + LOG.debug("Restarting %s session", self.service_exec) self.kill() self.start(arg_str=arg_str, wait_for_working=wait_for_working) @@ -343,7 +345,7 @@ def wait_for_working(self, timeout=60): :param timeout: Max wait time """ - logging.debug('Waiting for %s to work', self.service_exec) + LOG.debug('Waiting for %s to work', self.service_exec) return utils_misc.wait_for( 
self.is_working, timeout=timeout, @@ -356,7 +358,7 @@ def back_trace(self): if self.gdb: return self.gdb.back_trace() else: - logging.warning('Can not get back trace without gdb') + LOG.warning('Can not get back trace without gdb') def insert_break(self, break_func): """ @@ -367,7 +369,7 @@ def insert_break(self, break_func): if self.gdb: return self.gdb.insert_break(break_func) else: - logging.warning('Can not insert breakpoint without gdb') + LOG.warning('Can not insert breakpoint without gdb') def is_working(self): """ @@ -387,7 +389,7 @@ def wait_for_stop(self, timeout=60, step=0.1): :param timeout: Max wait time :param step: Checking interval """ - logging.debug('Waiting for %s to stop', self.service_exec) + LOG.debug('Waiting for %s to stop', self.service_exec) if self.gdb: return self.gdb.wait_for_stop(timeout=timeout) else: @@ -403,11 +405,11 @@ def wait_for_termination(self, timeout=60): :param timeout: Max wait time """ - logging.debug('Waiting for %s to terminate', self.service_exec) + LOG.debug('Waiting for %s to terminate', self.service_exec) if self.gdb: return self.gdb.wait_for_termination(timeout=timeout) else: - logging.error("Only gdb session supports wait_for_termination.") + LOG.error("Only gdb session supports wait_for_termination.") def exit(self): """ @@ -428,9 +430,9 @@ def deprecation_warning(): As the utils_libvirtd.libvirtd_xxx interfaces are deprecated, this function are printing the warning to user. """ - logging.warning("This function was deprecated, Please use " - "class utils_libvirtd.Libvirtd to manage " - "libvirtd service.") + LOG.warning("This function was deprecated, Please use " + "class utils_libvirtd.Libvirtd to manage " + "libvirtd service.") def libvirtd_start(): diff --git a/virttest/utils_misc.py b/virttest/utils_misc.py index aefd4d15b0..322387df18 100644 --- a/virttest/utils_misc.py +++ b/virttest/utils_misc.py @@ -87,6 +87,7 @@ import six from six.moves import xrange +LOG = logging.getLogger('avocado.' 
+ __name__) ARCH = platform.machine() @@ -245,7 +246,7 @@ def get_usable_memory_size(align=None): return usable_mem -def log_last_traceback(msg=None, log=logging.error): +def log_last_traceback(msg=None, log=LOG.error): """ Writes last traceback into specified log. @@ -256,7 +257,7 @@ def log_last_traceback(msg=None, log=logging.error): :param log: Where to log the traceback [logging.error] """ if not log: - log = logging.error + log = LOG.error if msg: log(msg) exc_type, exc_value, exc_traceback = sys.exc_info() @@ -297,15 +298,15 @@ def find_substring(string, pattern1, pattern2=None): :return: Match substring or None """ if not pattern1: - logging.debug("pattern1: get empty string.") + LOG.debug("pattern1: get empty string.") return None pattern = pattern1 if pattern2: pattern += "|%s" % pattern2 ret = re.findall(pattern, string) if not ret: - logging.debug("Could not find matched string with pattern: %s", - pattern) + LOG.debug("Could not find matched string with pattern: %s", + pattern) return None return ret[0] @@ -351,8 +352,8 @@ def find_command(cmd): :param cmd: Command to be found. :raise: ValueError in case the command was not found. """ - logging.warning("Function utils_misc.find_command is deprecated. " - "Please use avocado.utils.path.find_command instead.") + LOG.warning("Function utils_misc.find_command is deprecated. " + "Please use avocado.utils.path.find_command instead.") return utils_path.find_command(cmd) @@ -371,9 +372,9 @@ def kill_process_tree(pid, sig=signal.SIGKILL, send_sigcont=True, timeout=0): try: return _kill_process_tree(pid, sig, send_sigcont, timeout) except TypeError: - logging.warning("Trying to kill_process_tree with timeout but running" - " old Avocado without it's support. Sleeping for 10s " - "instead.") + LOG.warning("Trying to kill_process_tree with timeout but running" + " old Avocado without it's support. 
Sleeping for 10s " + "instead.") # Depending on the Avocado version this can either return None or # list of killed pids. ret = _kill_process_tree(pid, sig, send_sigcont) # pylint: disable=E1128 @@ -601,7 +602,7 @@ def wait_for(func, timeout, first=0.0, step=1.0, text=None, ignore_errors=False) while time.time() < end_time: if text: - logging.debug("%s (%f secs)", text, (time.time() - start_time)) + LOG.debug("%s (%f secs)", text, (time.time() - start_time)) try: output = func() @@ -609,7 +610,7 @@ def wait_for(func, timeout, first=0.0, step=1.0, text=None, ignore_errors=False) if not ignore_errors: raise else: - logging.debug("Ignoring error '%s'", sys.exc_info()) + LOG.debug("Ignoring error '%s'", sys.exc_info()) output = None if output: return output @@ -639,12 +640,12 @@ def display_attributes(instance): Inspects a given class instance attributes and displays them, convenient for debugging. """ - logging.debug("Attributes set:") + LOG.debug("Attributes set:") for member in inspect.getmembers(instance): name, value = member attribute = getattr(instance, name) if not (name.startswith("__") or callable(attribute) or not value): - logging.debug(" %s: %s", name, value) + LOG.debug(" %s: %s", name, value) def get_full_pci_id(pci_id): @@ -789,8 +790,7 @@ def archive_as_tarball(source_dir, dest_dir, tarball_name=None, tarball_path = tarball_name if verbose: - logging.debug('Archiving %s as %s' % (source_dir, - tarball_path)) + LOG.debug('Archiving %s as %s' % (source_dir, tarball_path)) os.chdir(os.path.dirname(source_dir)) tarball = tarfile.open(name=tarball_path, mode=mode_str) @@ -998,7 +998,7 @@ def install_host_kernel(job, params): install_type = params.get('host_kernel_install_type') if install_type == 'rpm': - logging.info('Installing host kernel through rpm') + LOG.info('Installing host kernel through rpm') rpm_url = params.get('host_kernel_rpm_url') k_basename = os.path.basename(rpm_url) @@ -1011,7 +1011,7 @@ def install_host_kernel(job, params): 
host_kernel.boot() elif install_type in ['koji', 'brew']: - logging.info('Installing host kernel through koji/brew') + LOG.info('Installing host kernel through koji/brew') koji_cmd = params.get('host_kernel_koji_cmd') koji_build = params.get('host_kernel_koji_build') @@ -1024,10 +1024,10 @@ def install_host_kernel(job, params): package='kernel', subpackages=['kernel']) c = utils_koji.KojiClient(koji_cmd) - logging.info('Fetching kernel dependencies (-devel, -firmware)') + LOG.info('Fetching kernel dependencies (-devel, -firmware)') c.get_pkgs(k_deps, job.tmpdir) - logging.info('Installing kernel dependencies (-devel, -firmware) ' - 'through %s', install_type) + LOG.info('Installing kernel dependencies (-devel, -firmware) ' + 'through %s', install_type) k_deps_rpm_file_names = [os.path.join(job.tmpdir, rpm_file_name) for rpm_file_name in c.get_pkg_rpm_file_names(k_deps)] process.run('rpm -U --force %s' % " ".join(k_deps_rpm_file_names)) @@ -1043,7 +1043,7 @@ def install_host_kernel(job, params): host_kernel.boot() elif install_type == 'git': - logging.info('Chose to install host kernel through git, proceeding') + LOG.info('Chose to install host kernel through git, proceeding') repo = params.get('host_kernel_git_repo') repo_base = params.get('host_kernel_git_repo_base', None) @@ -1071,8 +1071,8 @@ def install_host_kernel(job, params): host_kernel.boot() else: - logging.info('Chose %s, using the current kernel for the host', - install_type) + LOG.info('Chose %s, using the current kernel for the host', + install_type) k_version = process.run('uname -r', ignore_status=True).stdout_text write_keyval(job.resultdir, {'software_version_kernel': k_version}) @@ -1266,7 +1266,7 @@ def create_x509_dir(path, cacert_subj, server_subj, passphrase, for cmd in cmd_set: process.run(cmd) - logging.info(cmd) + LOG.info(cmd) def convert_ipv4_to_ipv6(ipv4): @@ -1323,23 +1323,23 @@ def add_identities_into_ssh_agent(): Adds RSA or DSA identities to the authentication agent """ ssh_env = 
subprocess.check_output(["ssh-agent"]).decode("utf-8") - logging.info("The current SSH ENV: %s", ssh_env) + LOG.info("The current SSH ENV: %s", ssh_env) re_auth_sock = re.compile('SSH_AUTH_SOCK=(?P[^;]*);') ssh_auth_sock = re.search(re_auth_sock, ssh_env).group("SSH_AUTH_SOCK") - logging.debug("The SSH_AUTH_SOCK: %s", ssh_auth_sock) + LOG.debug("The SSH_AUTH_SOCK: %s", ssh_auth_sock) re_agent_pid = re.compile('SSH_AGENT_PID=(?P[^;]*);') ssh_agent_pid = re.search(re_agent_pid, ssh_env).group("SSH_AGENT_PID") - logging.debug("SSH_AGENT_PID: %s", ssh_agent_pid) + LOG.debug("SSH_AGENT_PID: %s", ssh_agent_pid) - logging.debug("Update SSH envrionment variables") + LOG.debug("Update SSH envrionment variables") os.environ['SSH_AUTH_SOCK'] = ssh_auth_sock os.system("set SSH_AUTH_SOCK " + ssh_auth_sock) os.environ['SSH_AGENT_PID'] = ssh_agent_pid process.run("set SSH_AGENT_PID " + ssh_agent_pid, shell=True) - logging.info("Adds RSA or DSA identities to the authentication agent") + LOG.info("Adds RSA or DSA identities to the authentication agent") process.run("ssh-add") @@ -1389,8 +1389,8 @@ def cpu_str_to_list(origin_str): cpu_id = int(cpu) cpu_list.append(cpu_id) except ValueError: - logging.error("Illegimate string in cpu " - "information: %s" % cpu) + LOG.error("Illegimate string in cpu " + "information: %s" % cpu) cpu_list = [] break cpu_list.sort() @@ -1501,7 +1501,7 @@ def __init__(self, all_nodes_path=None, online_nodes_path=None, session=None): # ensure numactl package is available if not utils_package.package_install('numactl', session=self.session): - logging.error("Numactl package is not installed") + LOG.error("Numactl package is not installed") for node_id in self.online_nodes: self.nodes[node_id] = NumaNode(node_id + 1, session=self.session) @@ -1550,19 +1550,19 @@ def get_node_distance(self, node_id): status, output = cmd_status_output(cmd, shell=True, session=self.session) if status != 0: - logging.error("Failed to get information from %s", cmd) + 
LOG.error("Failed to get information from %s", cmd) try: node_distances = output.split("node distances:")[-1].strip() node_distance = re.findall("%s:.*" % node_id, node_distances)[0] node_distance = node_distance.split(":")[-1] except Exception: - logging.warn("Get unexpect information from numctl") + LOG.warn("Get unexpect information from numctl") numa_sys_path = self.numa_sys_path distance_path = get_path(numa_sys_path, "node%s/distance" % node_id) if not check_isfile(distance_path, session=self.session): - logging.error("Can not get distance information for" - " node %s" % node_id) + LOG.error("Can not get distance information for" + " node %s" % node_id) return [] numa_sys = kernel_interface.SysFS(distance_path, session=self.session, regex="\d+%s") @@ -1640,8 +1640,8 @@ def get_online_nodes_withmem(self): regex="\d+%s") nodes_info = str(numa_sys.sys_fs_value) else: - logging.warning("sys numa node with memory file not" - "present, fallback to online nodes") + LOG.warning("sys numa node with memory file not" + "present, fallback to online nodes") return self.online_nodes return cpu_str_to_list(nodes_info) @@ -1658,8 +1658,8 @@ def get_online_nodes_withcpu(self): regex="\d+%s") nodes_info = str(numa_sys.sys_fs_value) else: - logging.warning("sys numa node with cpu file not" - "present, fallback to online nodes") + LOG.warning("sys numa node with cpu file not" + "present, fallback to online nodes") return self.online_nodes return cpu_str_to_list(nodes_info) @@ -1712,7 +1712,7 @@ def get_node_cpus(self, i): cmd = "numactl --hardware" status, output = cmd_status_output(cmd, session=self.session) if status != 0: - logging.error("Failed to get the information of %s", cmd) + LOG.error("Failed to get the information of %s", cmd) cpus = re.findall("node %s cpus: (.*)" % i, output) if cpus: cpus = cpus[0] @@ -1725,9 +1725,9 @@ def get_node_cpus(self, i): regex="\d+%s") cpus = str(numa_sys.sys_fs_value) except Exception as info: - logging.warn("Can not find the cpu list 
information from both" - " numactl and sysfs. Please check your system.\n" - " Error: %s", info) + LOG.warn("Can not find the cpu list information from both" + " numactl and sysfs. Please check your system.\n" + " Error: %s", info) break_flag = True if not break_flag: # Try to expand the numbers with '-' to a string of numbers @@ -1745,8 +1745,8 @@ def get_node_cpus(self, i): _ += "%s " % str(n) cpus = re.sub(cstr, _, cpus) except (IndexError, ValueError): - logging.warn("The format of cpu list is not the same as" - " expected.") + LOG.warn("The format of cpu list is not the same as" + " expected.") break_flag = False if break_flag: cpus = "" @@ -1776,8 +1776,8 @@ def get_cpu_topology(self, cpu_id): key_val = str(numa_sys.sys_fs_value).rstrip('\n') cpu_topo[key] = key_val except IOError: - logging.warn("Can not find file %s from sysfs. Please check " - "your system." % key_path) + LOG.warn("Can not find file %s from sysfs. Please check " + "your system." % key_path) cpu_topo[key] = None return cpu_topo @@ -1801,7 +1801,7 @@ def _flush_pin(self): cmd = "ps -eLf | awk '{print $4}'" status, all_pids = cmd_status_output(cmd, shell=True, session=self.session) if status != 0: - logging.error("Failed to get information of %s", cmd) + LOG.error("Failed to get information of %s", cmd) for i in self.cpus: for j in self.dict[i]: if str(j) not in all_pids: @@ -1831,7 +1831,7 @@ def pin_cpu(self, pid, cpu=None, extra=False): if (cpu is not None and cpu == i) or (cpu is None and not self.dict[i]): self.dict[i].append(pid) cmd = "taskset -cp %s %s" % (int(i), pid) - logging.debug("NumaNode (%s): " % i + cmd) + LOG.debug("NumaNode (%s): " % i + cmd) cmd_status_output(cmd, shell=True, session=self.session) return i @@ -1839,9 +1839,9 @@ def show(self): """ Display the record dict in a convenient way. 
""" - logging.info("Numa Node record dict:") + LOG.info("Numa Node record dict:") for i in self.cpus: - logging.info(" %s: %s" % (i, self.dict[i])) + LOG.info(" %s: %s" % (i, self.dict[i])) def get_dev_major_minor(dev): @@ -1900,15 +1900,15 @@ def get_qemu_binary(params): params.get("qemu_binary", "qemu")) if not os.path.isfile(qemu_binary_path): - logging.debug('Could not find params qemu in %s, searching the ' - 'host PATH for one to use', qemu_binary_path) + LOG.debug('Could not find params qemu in %s, searching the ' + 'host PATH for one to use', qemu_binary_path) QEMU_BIN_NAMES = ['qemu-kvm', 'qemu-system-%s' % (ARCH), 'qemu-system-ppc64', 'qemu-system-x86', 'qemu_system', 'kvm'] for qemu_bin in QEMU_BIN_NAMES: try: qemu_binary = utils_path.find_command(qemu_bin) - logging.debug('Found %s', qemu_binary) + LOG.debug('Found %s', qemu_binary) break except utils_path.CmdNotFoundError: continue @@ -1959,12 +1959,12 @@ def get_binary(binary_name, params): binary_path = get_path(_get_backend_dir(params), params.get(key_in_params, binary_name)) if not os.path.isfile(binary_path): - logging.debug('Could not find params %s in %s, searching the ' - 'host PATH for one to use', - binary_name, - binary_path) + LOG.debug('Could not find params %s in %s, searching the ' + 'host PATH for one to use', + binary_name, + binary_path) binary_path = utils_path.find_command(binary_name) - logging.debug('Found %s', binary_path) + LOG.debug('Found %s', binary_path) return binary_path @@ -2018,8 +2018,8 @@ def get_qemu_version(params=None): if "module" in line_l or "scrmod" in line_l: version['module'] = True if None in list(version.values()): - logging.error("Local install qemu version cannot be detected, " - "the version info is: %s" % version_raw) + LOG.error("Local install qemu version cannot be detected, " + "the version info is: %s" % version_raw) return None return version @@ -2038,7 +2038,7 @@ def compare_qemu_version(major, minor, update, is_rhev=True, params={}): """ 
installed_version = get_qemu_version(params) if installed_version is None: - logging.error("Cannot get local qemu version, return False directly.") + LOG.error("Cannot get local qemu version, return False directly.") return False if installed_version['module']: is_rhev = False @@ -2403,8 +2403,8 @@ def selinux_enforcing(): Alias to utils_selinux.is_enforcing() """ - logging.warning("This function was deprecated, Please use " - "utils_selinux.is_enforcing().") + LOG.warning("This function was deprecated, Please use " + "utils_selinux.is_enforcing().") return utils_selinux.is_enforcing() @@ -2501,7 +2501,7 @@ def get_all_disks_did(session, partition=False): kname, serial and wwn. """ disks = list_linux_guest_disks(session, partition) - logging.debug("Disks detail: %s" % disks) + LOG.debug("Disks detail: %s" % disks) all_disks_did = {} for line in disks: kname = line.split('/')[2] @@ -2546,24 +2546,24 @@ def format_windows_disk(session, did, mountpoint=None, size=None, fstype="ntfs", cmd_footer += '&& del /f disk' detail_cmd = 'echo detail disk >> disk' detail_cmd = ' '.join([cmd_header, detail_cmd, cmd_footer]) - logging.debug("Detail for 'Disk%s'" % did) + LOG.debug("Detail for 'Disk%s'" % did) details = session.cmd_output(detail_cmd) pattern = "DISK %s.*Offline" % did if re.search(pattern, details, re.I | re.M): online_cmd = 'echo online disk>> disk' online_cmd = ' '.join([cmd_header, online_cmd, cmd_footer]) - logging.info("Online 'Disk%s'" % did) + LOG.info("Online 'Disk%s'" % did) session.cmd(online_cmd) if re.search("Read.*Yes", details, re.I | re.M): set_rw_cmd = 'echo attributes disk clear readonly>> disk' set_rw_cmd = ' '.join([cmd_header, set_rw_cmd, cmd_footer]) - logging.info("Clear readonly bit on 'Disk%s'" % did) + LOG.info("Clear readonly bit on 'Disk%s'" % did) session.cmd(set_rw_cmd) if re.search(r"Volume.*%s" % fstype, details, re.I | re.M) and not force: - logging.info("Disk%s has been formatted, cancel format" % did) + LOG.info("Disk%s has been 
formatted, cancel format" % did) continue if not size: @@ -2574,10 +2574,10 @@ def format_windows_disk(session, did, mountpoint=None, size=None, fstype="ntfs", mkpart_cmd = mkpart_cmd % size list_cmd = ' && echo list partition >> disk ' cmds = ' '.join([cmd_header, mkpart_cmd, list_cmd, cmd_footer]) - logging.info("Create partition on 'Disk%s'" % did) + LOG.info("Create partition on 'Disk%s'" % did) partition_index = re.search( r'\*\s+Partition\s(\d+)\s+', session.cmd(cmds), re.M).group(1) - logging.info("Format the 'Disk%s' to %s" % (did, fstype)) + LOG.info("Format the 'Disk%s' to %s" % (did, fstype)) format_cmd = 'echo list partition >> disk && ' format_cmd += 'echo select partition %s >> disk && ' % partition_index if not mountpoint: @@ -2610,7 +2610,7 @@ def format_linux_disk(session, did, all_disks_did, partition=False, :return Boolean: disk usable or not. """ disks = list_linux_guest_disks(session, partition) - logging.debug("Disks detail: %s" % disks) + LOG.debug("Disks detail: %s" % disks) for line in disks: kname = line.split('/')[2] did_list = all_disks_did[kname] @@ -2623,7 +2623,7 @@ def format_linux_disk(session, did, all_disks_did, partition=False, size = size_output.splitlines()[0].split()[1] all_disks_before = list_linux_guest_disks(session, True) devname = line - logging.info("Create partition on disk '%s'" % devname) + LOG.info("Create partition on disk '%s'" % devname) mkpart_cmd = "parted -s %s mklabel gpt mkpart " mkpart_cmd += "primary 0 %s" mkpart_cmd = mkpart_cmd % (devname, size) @@ -2631,13 +2631,13 @@ def format_linux_disk(session, did, all_disks_did, partition=False, session.cmd_output_safe("partprobe %s" % devname) all_disks_after = list_linux_guest_disks(session, True) partname = (all_disks_after - all_disks_before).pop() - logging.info("Format partition to '%s'" % fstype) + LOG.info("Format partition to '%s'" % fstype) format_cmd = "yes|mkfs -t %s %s" % (fstype, partname) session.cmd_output_safe(format_cmd) if not mountpoint: 
session.cmd_output_safe("mkdir /mnt/%s" % kname) mountpoint = os.path.join("/mnt", kname) - logging.info("Mount the disk to '%s'" % mountpoint) + LOG.info("Mount the disk to '%s'" % mountpoint) mount_cmd = "mount -t %s %s %s" % (fstype, partname, mountpoint) session.cmd_output_safe(mount_cmd) return True @@ -2683,14 +2683,14 @@ def get_linux_drive_path(session, did, timeout=120): cmd += 'echo `udevadm info -q property -p $dev_path`; done' status, output = session.cmd_status_output(cmd, timeout=timeout) if status != 0: - logging.error("Can not get drive information:\n%s" % output) + LOG.error("Can not get drive information:\n%s" % output) return "" p = r"DEVNAME=([^\s]+)\s.*(?:ID_SERIAL|ID_SERIAL_SHORT|ID_WWN)=%s" % did dev = re.search(p, output, re.M) if dev: return dev.groups()[0] - logging.error("Can not get drive path by id '%s', " - "command output:\n%s" % (did, output)) + LOG.error("Can not get drive path by id '%s', " + "command output:\n%s" % (did, output)) return "" @@ -2722,7 +2722,7 @@ def valued_option_dict(options, split_pattern, start_count=0, dict_split=None): if options.strip() is not None: pat = re.compile(split_pattern) option_list = pat.split(options.lstrip(split_pattern)) - logging.debug("option_list is %s", option_list) + LOG.debug("option_list is %s", option_list) for match in option_list[start_count:]: match_list = match.split(dict_split) @@ -2732,7 +2732,7 @@ def valued_option_dict(options, split_pattern, start_count=0, dict_split=None): if key not in option_dict: option_dict[key] = value else: - logging.debug("key %s in option_dict", key) + LOG.debug("key %s in option_dict", key) option_dict[key] = option_dict[key].split() option_dict[key].append(value) @@ -2879,9 +2879,9 @@ def is_qemu_capability_supported(capability): for elem in xmltree.getroot().findall('flag'): name = elem.attrib.get('name') if name == capability: - logging.info("The qemu capability '%s' is supported", capability) + LOG.info("The qemu capability '%s' is supported", 
capability) return True - logging.info("The qemu capability '%s' is not supported.", capability) + LOG.info("The qemu capability '%s' is not supported.", capability) return False @@ -3080,7 +3080,7 @@ def set_ksm_feature(self, feature_args): """ for key in list(feature_args.keys()): if key not in self.get_writable_features(): - logging.error("Do not support setting of '%s'.", key) + LOG.error("Do not support setting of '%s'.", key) raise KSMError if self.interface == "sysfs": # Get writable parameters @@ -3196,7 +3196,7 @@ def verify_dmesg(dmesg_log_file=None, ignore_result=False, level_check=3, err += " Please check %s dmesg log %s." % (environ, dmesg_log_file) else: err += " Please check %s dmesg log in debug log." % environ - logging.debug(d_log) + LOG.debug(d_log) if session: session.cmd("dmesg -C") else: @@ -3226,7 +3226,7 @@ def add_ker_cmd(kernel_cmdline, kernel_param, remove_similar=False): kernel_cmdline_cmp = " %s " % kernel_cmdline need_update = True if " %s " % kernel_param in kernel_cmdline_cmp: - logging.debug("Parameter already in kernel command line.") + LOG.debug("Parameter already in kernel command line.") need_update = False elif "=" in kernel_param and remove_similar: kernel_param_key = kernel_param.split("=")[0] @@ -3272,18 +3272,18 @@ def check_module(module_name, submodules=[]): Check whether module and its submodules work. """ module_info = linux_modules.loaded_module_info(module_name) - logging.debug(module_info) + LOG.debug(module_info) # Return if module is not loaded. 
if not len(module_info): - logging.debug("Module %s was not loaded.", module_name) + LOG.debug("Module %s was not loaded.", module_name) return False module_work = True l_sub = module_info.get('submodules') for submodule in submodules: if submodule not in l_sub: - logging.debug("Submodule %s of %s is not loaded.", - submodule, module_name) + LOG.debug("Submodule %s of %s is not loaded.", + submodule, module_name) module_work = False return module_work @@ -3323,7 +3323,7 @@ def get_pci_group_by_id(pci_id, device_type=""): like 'Ethernet', 'Fibre' """ if len(pci_id.split(':')) < 2: - logging.error("Please provide formal pci id.") + LOG.error("Please provide formal pci id.") # Informal pci_id, no matched list return [] devices = get_pci_devices_in_group(device_type) @@ -3377,7 +3377,7 @@ def bind_device_driver(pci_id, driver_type): """ vd_list = get_pci_vendor_device(pci_id) if len(vd_list) == 0: - logging.error("Can't find device matched.") + LOG.error("Can't find device matched.") return False bind_file = "/sys/bus/pci/drivers/%s/new_id" % driver_type vendor = vd_list[0].split(':')[0] @@ -3393,7 +3393,7 @@ def unbind_device_driver(pci_id): """ vd_list = get_pci_vendor_device(pci_id) if len(vd_list) == 0: - logging.error("Can't find device matched.") + LOG.error("Can't find device matched.") return False unbind_file = "/sys/bus/pci/devices/%s/driver/unbind" % pci_id unbind_cmd = "echo %s > %s" % (pci_id, unbind_file) @@ -3407,12 +3407,12 @@ def check_device_driver(pci_id, driver_type): """ device_driver = "/sys/bus/pci/devices/%s/driver" % pci_id if not check_isdir(device_driver): - logging.debug("Make sure %s has binded driver.") + LOG.debug("Make sure %s has binded driver.") return False driver = process.run("readlink %s" % device_driver, ignore_status=True).stdout_text.strip() driver = os.path.basename(driver) - logging.debug("% is %s, expect %s", pci_id, driver, driver_type) + LOG.debug("% is %s, expect %s", pci_id, driver, driver_type) return driver == 
driver_type @@ -3442,8 +3442,8 @@ def get_bootloader_cfg(session=None): cfg_path = path break if not cfg_path: - logging.error("Failed to locate bootloader config file " - "in %s." % bootloader_cfg) + LOG.error("Failed to locate bootloader config file " + "in %s." % bootloader_cfg) return cfg_path @@ -3552,10 +3552,10 @@ def add_device_to_iommu_group(self, pci_id): """ unbind_device_driver(pci_id) if not self.bind_device_to_iommu_group(pci_id): - logging.debug('Bind vfio driver for %s failed.', pci_id) + LOG.debug('Bind vfio driver for %s failed.', pci_id) return False if not check_device_driver(pci_id, "vfio-pci"): - logging.debug("Awesome, driver does not match after binding.") + LOG.debug("Awesome, driver does not match after binding.") return False return True @@ -3614,7 +3614,7 @@ def get_sebool_local(self): """ get_sebool_cmd = "getsebool %s | awk -F'-->' '{print $2}'" % ( self.local_bool_var) - logging.debug("The command: %s", get_sebool_cmd) + LOG.debug("The command: %s", get_sebool_cmd) result = process.run(get_sebool_cmd, shell=True) return result.stdout_text.strip() @@ -3625,7 +3625,7 @@ def get_sebool_remote(self): get_sebool_cmd = "getsebool %s" % self.remote_bool_var cmd = (self.ssh_cmd + "'%s'" % (get_sebool_cmd + "'| awk -F'-->' '{print $2}''")) - logging.debug("The command: %s", cmd) + LOG.debug("The command: %s", cmd) result = process.run(cmd, shell=True) return result.stdout_text.strip() @@ -3688,7 +3688,7 @@ def setup_local(self): raise exceptions.TestSkipError(result.stderr_text.strip()) boolean_curr = self.get_sebool_local() - logging.debug("To check local boolean value: %s", boolean_curr) + LOG.debug("To check local boolean value: %s", boolean_curr) if boolean_curr != self.local_bool_value: raise exceptions.TestFail(result.stderr_text.strip()) @@ -3711,7 +3711,7 @@ def setup_remote(self): raise exceptions.TestSkipError(result.stderr_text.strip()) boolean_curr = self.get_sebool_remote() - logging.debug("To check remote boolean value: %s", 
boolean_curr) + LOG.debug("To check remote boolean value: %s", boolean_curr) if boolean_curr != self.remote_bool_value: raise exceptions.TestFail(result.stderr_text.strip()) @@ -3765,7 +3765,7 @@ def __init__(self, command, stdout_tee=None, stderr_tee=None, verbose=True, self.string_stdin = None if verbose: - logging.debug("Running '%s'" % command) + LOG.debug("Running '%s'" % command) # Ok, bash is nice and everything, but we might face occasions where # it is not available. Just do the right thing and point to /bin/sh. shell = '/bin/bash' @@ -4146,8 +4146,8 @@ def _wait_for_commands(bg_jobs, start_time, timeout): if bg_job.result.exit_status is not None: continue - logging.warn('run process timeout (%s) fired on: %s', timeout, - bg_job.command) + LOG.warn('run process timeout (%s) fired on: %s', timeout, + bg_job.command) nuke_subprocess(bg_job.sp) bg_job.result.exit_status = bg_job.sp.poll() bg_job.result.duration = time.time() - start_time @@ -4181,7 +4181,7 @@ def start_rsyslogd(): exceptions.TestError("No rsyslogd command found.") rsyslogd = service.Factory.create_service('rsyslog') if not rsyslogd.status(): - logging.info("Need to start rsyslog service") + LOG.info("Need to start rsyslog service") return rsyslogd.start() return True @@ -4201,7 +4201,7 @@ def get_distro(session=None): try: status, output = session.cmd_status_output(cmd, timeout=300) if status: - logging.debug("Unable to get the distro name: %s" % output) + LOG.debug("Unable to get the distro name: %s" % output) else: distro_name = output.split('=')[1].strip() finally: @@ -4234,7 +4234,7 @@ def get_sosreport(path=None, session=None, remote_ip=None, remote_pwd=None, sosreport_pkg = "sosreport" if not utils_package.package_install(sosreport_pkg, session=session): - logging.error("Failed to install sos package") + LOG.error("Failed to install sos package") return None cmd = "sosreport --batch --all-logs" func = process.getstatusoutput @@ -4261,16 +4261,16 @@ def get_sosreport(path=None, 
session=None, remote_ip=None, remote_pwd=None, try: status, output = func(cmd, timeout=timeout) if status != 0: - logging.error(output) + LOG.error(output) return None if session: - logging.info("copying sosreport from remote host/guest path: %s " - "to host path: %s", path, host_path) + LOG.info("copying sosreport from remote host/guest path: %s " + "to host path: %s", path, host_path) remote.copy_files_from(remote_ip, 'scp', remote_user, remote_pwd, "22", path, host_path, directory=True) except Exception as info: if ignore_status: - logging.error(info) + LOG.error(info) else: raise exceptions.TestError(info) finally: diff --git a/virttest/utils_nbd.py b/virttest/utils_nbd.py index 8cc80805fa..779108ab43 100644 --- a/virttest/utils_nbd.py +++ b/virttest/utils_nbd.py @@ -22,6 +22,8 @@ from virttest.utils_conn import build_server_key, build_CA, build_client_key +LOG = logging.getLogger('avocado.' + __name__) + class NbdExport(object): """qemu-nbd export disk images. @@ -64,12 +66,12 @@ def __init__(self, image, image_format="raw", image_size="1G", port="10001", exp def _create_img(self): """Create a image file with specified format""" if os.path.exists(self.image): - logging.info('image already existed') + LOG.info('image already existed') if self.deleteExisted: os.remove(self.image) else: return - logging.debug("create one image .... if not existed") + LOG.debug("create one image .... 
if not existed") process.run("qemu-img create" + ' -f %s %s %s' % (self.image_format, self.image, self.image_size), ignore_status=True, shell=True, verbose=True) @@ -178,9 +180,9 @@ def start_nbd_server(self): qemu_nbd_cmd += "-x %s " % self.export_name qemu_nbd_cmd += "&" process.run(qemu_nbd_cmd, ignore_status=False, shell=True, verbose=True, ignore_bg_processes=True) - logging.info("nbd server start at port: %s", self.port) + LOG.info("nbd server start at port: %s", self.port) except Exception as info: - logging.debug("nbd server fail to start") + LOG.debug("nbd server fail to start") raise def stop_nbd_server(self): @@ -190,7 +192,7 @@ def stop_nbd_server(self): def cleanTLS(self): """clean TLS""" - logging.debug("enter cleanup TLS now...") + LOG.debug("enter cleanup TLS now...") if self.tls: ca_folder = '/etc/pki/CA' if os.path.exists(ca_folder): diff --git a/virttest/utils_net.py b/virttest/utils_net.py index d0c259f237..a7e1a3046c 100644 --- a/virttest/utils_net.py +++ b/virttest/utils_net.py @@ -64,6 +64,8 @@ sock = None sockfd = None +LOG = logging.getLogger('avocado.' + __name__) + class NetError(Exception): @@ -837,7 +839,7 @@ def raw_ping(command, timeout, session, output_func): :param session: Local executon hint or session to execute the ping command. """ if session is None: - logging.info("The command of Ping is: %s", command) + LOG.info("The command of Ping is: %s", command) process = aexpect.run_bg(command, output_func=output_func, timeout=timeout) @@ -885,7 +887,7 @@ def raw_ping(command, timeout, session, output_func): def ping(dest=None, count=None, interval=None, interface=None, packetsize=None, ttl=None, hint=None, adaptive=False, broadcast=False, flood=False, timeout=0, - output_func=logging.debug, session=None, force_ipv4=False): + output_func=LOG.debug, session=None, force_ipv4=False): """ Wrapper of ping. 
@@ -981,7 +983,7 @@ def get_macvtap_base_iface(base_interface=None): if base_interface: warn_msg = "Can not use '%s' as macvtap base interface, " warn_msg += "will choice automatically" - logging.warn(warn_msg % base_interface) + LOG.warn(warn_msg % base_interface) for interface in dev_int: base_inter = Interface(interface) if base_inter.is_brport(): @@ -1174,7 +1176,7 @@ def get_stp_status(self, brname): try: bridge_stp = self.get_structure()[brname]['stp'] except KeyError: - logging.error("Not find bridge %s", brname) + LOG.error("Not find bridge %s", brname) return bridge_stp @@ -1196,7 +1198,7 @@ def wrap_init(*args, **kargs): if (not __ovs.check()): raise Exception("Check of OpenVSwitch failed.") except Exception as e: - logging.debug("Host does not support OpenVSwitch: %s", e) + LOG.debug("Host does not support OpenVSwitch: %s", e) return func(*args, **kargs) return wrap_init @@ -1294,7 +1296,7 @@ def vnet_mq_probe(tapfd): try: r = fcntl.ioctl(tapfd, arch.TUNGETFEATURES, u) except OverflowError: - logging.debug("Fail to get tun features!") + LOG.debug("Fail to get tun features!") return False flags = struct.unpack("I", r)[0] if flags & arch.IFF_MULTI_QUEUE: @@ -1313,7 +1315,7 @@ def vnet_hdr_probe(tapfd): try: r = fcntl.ioctl(tapfd, arch.TUNGETFEATURES, u) except OverflowError: - logging.debug("Fail to get tun features!") + LOG.debug("Fail to get tun features!") return False flags = struct.unpack("I", r)[0] if flags & arch.IFF_VNET_HDR: @@ -1558,7 +1560,7 @@ def get_guest_ip_addr(session, mac_addr, os_type="linux", ip_version="ipv4", except IndexError: time.sleep(1) except Exception as err: - logging.debug(session.cmd_output(info_cmd)) + LOG.debug(session.cmd_output(info_cmd)) raise IPAddrGetError(mac_addr, err) return None @@ -1618,7 +1620,7 @@ def set_guest_ip_addr(session, mac, ip_addr, info_cmd = "" raise IPAddrSetError(mac, ip_addr, "Unknown os type") except Exception as err: - logging.debug(session.cmd_output(info_cmd)) + 
LOG.debug(session.cmd_output(info_cmd)) raise IPAddrSetError(mac, ip_addr, err) @@ -1633,9 +1635,9 @@ def get_guest_nameserver(session): output = None try: output = session.cmd_output(cmd).strip() - logging.debug("Guest name server is %s" % output) + LOG.debug("Guest name server is %s" % output) except (aexpect.ShellError, aexpect.ShellTimeoutError): - logging.error("Failed to get the guest's nameserver") + LOG.error("Failed to get the guest's nameserver") return output @@ -1787,25 +1789,25 @@ def create_network_script(iface_name, mac_addr, boot_proto, net_mask, status, output = session.cmd_status_output(cmd) if "ubuntu" in distro: if iface_name in output.strip(): - logging.error("network script file for %s already exists in " - "guest %s", iface_name, script_file) + LOG.error("network script file for %s already exists in " + "guest %s", iface_name, script_file) return else: if not status: - logging.error("network script file for %s already exists in " - "guest %s", iface_name, script_file) + LOG.error("network script file for %s already exists in " + "guest %s", iface_name, script_file) return else: distro = platform.platform().lower() if "ubuntu" in distro: if iface_name in process.run(cmd).stdout_text.strip(): - logging.error("network script file for %s already exists in " - "host %s", iface_name, script_file) + LOG.error("network script file for %s already exists in " + "host %s", iface_name, script_file) return else: if os.path.isfile(script_file): - logging.error("network script file for %s already exists in " - "host %s", iface_name, script_file) + LOG.error("network script file for %s already exists in " + "host %s", iface_name, script_file) return if "ubuntu" in distro: network_param_list = ['auto %s' % iface_name, 'iface %s inet %s' % @@ -1835,7 +1837,7 @@ def create_network_script(iface_name, mac_addr, boot_proto, net_mask, else: if process.system(command, shell=True): raise exceptions.TestError("Failed to create network script file") - 
logging.debug("Network script file created in %s:", script_file) + LOG.debug("Network script file created in %s:", script_file) def ipv6_from_mac_addr(mac_addr): @@ -1960,8 +1962,8 @@ def check_add_dnsmasq_to_br(br_name, tmpdir): leases = ("%s.leases") % (br_ips[0]) if not (set(br_ips) & set(dnsmasq_listen)): - logging.debug("There is no dnsmasq on br %s." - "Starting new one." % (br_name)) + LOG.debug("There is no dnsmasq on br %s." + "Starting new one." % (br_name)) process.run("/usr/sbin/dnsmasq --strict-order --bind-interfaces" " --pid-file=%s --conf-file= --except-interface lo" " --listen-address %s --dhcp-range %s,%s --dhcp-leasefile=%s" @@ -2230,7 +2232,7 @@ def if_set_macaddress(ifname, mac): try: fcntl.ioctl(ctrl_sock, arch.SIOCSIFHWADDR, ifr) except IOError as e: - logging.info(e) + LOG.info(e) raise HwAddrSetError(ifname, mac) ctrl_sock.close() @@ -2337,11 +2339,11 @@ def get_addr_list(self, runner=None): if not runner: ipv6_addr_list = get_net_if_addrs(self.client_ifname).get("ipv6") - logging.debug("Local IPv6 address list: %s", ipv6_addr_list) + LOG.debug("Local IPv6 address list: %s", ipv6_addr_list) else: ipv6_addr_list = get_net_if_addrs(self.server_ifname, runner).get("ipv6") - logging.debug("remote IPv6 address list: %s", ipv6_addr_list) + LOG.debug("remote IPv6 address list: %s", ipv6_addr_list) return ipv6_addr_list @@ -2364,7 +2366,7 @@ def check_connectivity(client_ifname, server_ipv6, count=5): "unreachable: %s", server_ipv6, result.stderr_text) else: - logging.info("The '%s' destination is connectivity!", server_ipv6) + LOG.info("The '%s' destination is connectivity!", server_ipv6) def flush_ip6tables(self): """ @@ -2387,7 +2389,7 @@ def flush_ip6tables(self): (test_fail_err, result.stderr_text)) else: - logging.info("%s on the local host", flush_cmd_pass) + LOG.info("%s on the local host", flush_cmd_pass) # check if ip6tables command exists on the remote if self.session.cmd_status(find_ip6tables_cmd): @@ -2396,7 +2398,7 @@ def 
flush_ip6tables(self): if self.session.cmd_status(flush_cmd): raise exceptions.TestFail("%s on the remote host" % test_fail_err) else: - logging.info("%s on the remote host", flush_cmd_pass) + LOG.info("%s on the remote host", flush_cmd_pass) def setup(self): """ @@ -2406,7 +2408,7 @@ def setup(self): runner = self.session.cmd_output try: - logging.info("Prepare to configure IPv6 test environment...") + LOG.info("Prepare to configure IPv6 test environment...") local_ipv6_addr_list = self.get_addr_list() # the ipv6 address looks like this '3efe::101/64' @@ -2418,7 +2420,7 @@ def setup(self): set_net_if_ip(self.client_ifname, self.client_ipv6_addr) self.client_ipv6_added = True else: - logging.debug( + LOG.debug( "Skip to add the existing ipv6 address %s", ipv6_addr_src) @@ -2434,7 +2436,7 @@ def setup(self): runner) self.server_ipv6_added = True else: - logging.debug( + LOG.debug( "Skip to add the existing ipv6 address %s", ipv6_addr_des) @@ -2454,7 +2456,7 @@ def cleanup(self): """ Cleanup IPv6 network environment. """ - logging.info("Prepare to clean up IPv6 test environment...") + LOG.info("Prepare to clean up IPv6 test environment...") local_ipv6_addr_list = self.get_addr_list() # the ipv6 address looks like this '3efe::101/64' @@ -2774,11 +2776,11 @@ def process_mac(self, value): del value['mac'] # don't store invalid macs # Notify user about these, but don't go crazy if self.__class__.DISCARD_WARNINGS >= 0: - logging.warning('Discarded invalid mac "%s" for nic "%s" ' - 'from input, %d warnings remaining.' - % (original_mac, - value.get('nic_name'), - self.__class__.DISCARD_WARNINGS)) + LOG.warning('Discarded invalid mac "%s" for nic "%s" ' + 'from input, %d warnings remaining.' 
+ % (original_mac, + value.get('nic_name'), + self.__class__.DISCARD_WARNINGS)) self.__class__.DISCARD_WARNINGS -= 1 def mac_list(self): @@ -3221,8 +3223,8 @@ def generate_mac_address(self, nic_index_or_name, attempts=1024): """ nic = self[nic_index_or_name] if 'mac' in nic: - logging.warning("Overwriting mac %s for nic %s with random" - % (nic.mac, str(nic_index_or_name))) + LOG.warning("Overwriting mac %s for nic %s with random" + % (nic.mac, str(nic_index_or_name))) self.free_mac_address(nic_index_or_name) attempts_remaining = attempts while attempts_remaining > 0: @@ -3267,8 +3269,8 @@ def set_mac_address(self, nic_index_or_name, mac): """ nic = self[nic_index_or_name] if 'mac' in nic: - logging.warning("Overwriting mac %s for nic %s with %s" - % (nic.mac, str(nic_index_or_name), mac)) + LOG.warning("Overwriting mac %s for nic %s with %s" + % (nic.mac, str(nic_index_or_name), mac)) nic.mac = mac.lower() self.update_db() @@ -3403,7 +3405,7 @@ def __verify_neigh(ip, macs, dev, timeout, session=None, **dargs): output = func(ip_cmd, timeout=timeout, **dargs) devs = set(re.findall(r"dev\s+(\S+)", output, re.I)) if not devs: - logging.debug("No path to %s in route table: %s" % (ip, output)) + LOG.debug("No path to %s in route table: %s" % (ip, output)) return False # TODO: use same verification function for both ipv4 and ipv6 @@ -3476,12 +3478,12 @@ def get_ip_address_by_interface(ifname, ip_ver="ipv4", linklocal=False): return [a['addr'] for a in addr if not a['addr'].lower().startswith(linklocal_prefix)][0] except IndexError: - logging.warning("No IP address configured for " - "the network interface %s !", ifname) + LOG.warning("No IP address configured for " + "the network interface %s !", ifname) return None else: - logging.warning("No IP address configured for the network interface" - "%s !", ifname) + LOG.warning("No IP address configured for the network interface" + "%s !", ifname) return None @@ -3501,13 +3503,13 @@ def get_host_ip_address(params=None, 
ip_ver="ipv4", linklocal=False): if params: host_ip = params.get('host_ip_addr', None) if host_ip: - logging.debug("Use IP address at config %s=%s", 'host_ip_addr', host_ip) + LOG.debug("Use IP address at config %s=%s", 'host_ip_addr', host_ip) return host_ip net_dev = params.get("netdst") if not net_dev: net_dev = get_default_gateway(iface_name=True) - logging.warning("No IP address of host was provided, using IP address" - " on %s interface", net_dev) + LOG.warning("No IP address of host was provided, using IP address" + " on %s interface", net_dev) return get_ip_address_by_interface(net_dev, ip_ver, linklocal) @@ -3582,7 +3584,7 @@ def get_linux_mac(session, nic): try: return str(re.search(pattern, out, re.M | re.I).group(mac_index)) except Exception: - logging.error("No HWaddr/ether found for nic %s: %s" % (nic, out)) + LOG.error("No HWaddr/ether found for nic %s: %s" % (nic, out)) def get_linux_ipaddr(session, nic): @@ -3725,10 +3727,9 @@ def get_linux_iface_info(mac, session=None): else: ip_output_str = process.run(ip_cmd).stdout_text.strip() ip_info = json.loads(ip_output_str) - logging.debug('interfaces inside vm:\n %s', ip_info) + LOG.debug('interfaces inside vm:\n %s', ip_info) except Exception as why: - logging.error('Failed to get interfaces inside vm. Reason: %s', - str(why)) + LOG.error('Failed to get interfaces inside vm. 
Reason: %s', str(why)) return None for iface in ip_info: @@ -3750,11 +3751,11 @@ def update_mac_ip_address(vm, timeout=240): addr_map = get_guest_address_map(session) session.close() if not addr_map: - logging.warn("No VM's NIC got IP address") + LOG.warn("No VM's NIC got IP address") return vm.address_cache.update(addr_map) except Exception as e: - logging.warn("Error occur when update VM address cache: %s", str(e)) + LOG.warn("Error occur when update VM address cache: %s", str(e)) def get_windows_nic_attribute(session, key, value, target, timeout=240, @@ -3890,11 +3891,11 @@ def get_default_gateway(iface_name=False, session=None): try: if session: output = session.cmd_output(cmd).strip() - logging.debug("Guest default gateway is %s" % output) + LOG.debug("Guest default gateway is %s" % output) else: output = process.run(cmd, shell=True).stdout_text.rstrip() except (aexpect.ShellError, aexpect.ShellTimeoutError, process.CmdError): - logging.error("Failed to get the default GateWay") + LOG.error("Failed to get the default GateWay") return None return output @@ -3922,13 +3923,13 @@ def check_listening_port_by_service(service, port, listen_addr='0.0.0.0', "remote") output = runner(cmd) except process.CmdError: - logging.error("Failed to run command '%s'", cmd) + LOG.error("Failed to run command '%s'", cmd) if not re.search(find_str, output, re.M): raise exceptions.TestFail( "Failed to listen %s: %s" % (find_str, output)) - logging.info("The listening is active: %s", output) + LOG.info("The listening is active: %s", output) def check_listening_port_remote_by_service(server_ip, server_user, server_pwd, @@ -3970,17 +3971,16 @@ def block_specific_ip_by_time(ip_addr, block_time="1 seconds", runner=None): except utils_path.CmdNotFoundError as details: raise exceptions.TestSkipError(details) output = local_runner(cmd, shell=True) - logging.debug("List current iptables rules:\n%s", - local_runner(list_rules)) + LOG.debug("List current iptables rules:\n%s", + 
local_runner(list_rules)) else: if not runner(find_iptables): raise exceptions.TestSkipError("Missing 'iptables' command on " "remote") output = runner(cmd) - logging.debug("List current iptables rules:\n%s", - runner(list_rules)) + LOG.debug("List current iptables rules:\n%s", runner(list_rules)) except process.CmdError: - logging.error("Failed to run command '%s'", cmd) + LOG.error("Failed to run command '%s'", cmd) def map_hostname_ipaddress(hostname_ip_dict, session=None): @@ -4001,16 +4001,16 @@ def map_hostname_ipaddress(hostname_ip_dict, session=None): for hostname, ipaddress in six.iteritems(hostname_ip_dict): status, output = func(check_cmd) if status != 0: - logging.error(output) + LOG.error(output) return False pattern = "%s(\s+)%s$" % (ipaddress, hostname) if not re.search(pattern, output): cmd = "echo '%s %s' >> %s" % (ipaddress, hostname, hosts_file) status, output = func(cmd) if status != 0: - logging.error(output) + LOG.error(output) return False - logging.info("All the hostnames and IPs are mapped in %s", hosts_file) + LOG.info("All the hostnames and IPs are mapped in %s", hosts_file) return True @@ -4060,7 +4060,7 @@ def _get_pdb_path(session, driver_name): pdb_find_cmd = 'dir /b /s %s\\%s.pdb | findstr "\\%s\\\\"' pdb_find_cmd %= (viowin_ltr, driver_name, pdb_middle_path) pdb_path = session.cmd(pdb_find_cmd).strip() - logging.info("Found %s.pdb file at %s" % (driver_name, pdb_path)) + LOG.info("Found %s.pdb file at %s" % (driver_name, pdb_path)) return pdb_path @@ -4077,14 +4077,13 @@ def _prepare_traceview_windows(params, session, timeout=360): copy_cmd = "xcopy %s %s /y" dst_folder = "c:\\" # copy traceview.exe - logging.info("Copy traceview.exe to drive %s" % dst_folder) + LOG.info("Copy traceview.exe to drive %s" % dst_folder) traceview_path = _get_traceview_path(session, params) session.cmd(copy_cmd % (traceview_path, dst_folder)) # copy Netkvm.pdb driver_name = params.get("driver_name", "netkvm") - logging.info("Locate %s.pdb and copy to 
drive %s" % - (driver_name, dst_folder)) + LOG.info("Locate %s.pdb and copy to drive %s" % (driver_name, dst_folder)) pdb_path = _get_pdb_path(session, driver_name) session.cmd(copy_cmd % (pdb_path, dst_folder)) @@ -4102,7 +4101,7 @@ def _get_msis_queues_from_traceview_output(output): :return: a tuple of (msis, queues) """ info_str = "Start checking dump content for MSIs&queues info" - logging.info(info_str) + LOG.info(info_str) search_exp = r'No MSIX, using (\d+) queue' # special case for vectors = 0 queue_when_no_msi = re.search(search_exp, output) @@ -4180,26 +4179,26 @@ def dump_traceview_log_windows(params, vm, timeout=360): dump_cmd = "%s -process %s -pdb %s -o %s" % ( traceview_local_path, log_path, pdb_local_path, dump_file) # start traceview - logging.info("Start trace view with pdb file") + LOG.info("Start trace view with pdb file") session_serial = vm.wait_for_serial_login(timeout=timeout) try: session_serial.cmd(clean_cmd + log_path) session_serial.cmd(start_traceview_cmd, timeout=timeout) # restart nic - logging.info("Restart guest nic") + LOG.info("Restart guest nic") mac = vm.get_mac_address(0) connection_id = get_windows_nic_attribute( session_serial, "macaddress", mac, "netconnectionid") restart_windows_guest_network(session_serial, connection_id) # stop traceview - logging.info("Stop traceview") + LOG.info("Stop traceview") session_serial.cmd(stop_traceview_cmd, timeout=timeout) # checkout traceview output - logging.info("Check etl file generated by traceview") + LOG.info("Check etl file generated by traceview") session_serial.cmd(clean_cmd + dump_file) status, output = session_serial.cmd_status_output(dump_cmd) if status: - logging.error("Cann't dump log file %s: %s" % (log_path, output)) + LOG.error("Cann't dump log file %s: %s" % (log_path, output)) _wait_for_traceview_dump_finished(session_serial, dump_file) status, output = session_serial.cmd_status_output( "type %s" % dump_file) @@ -4240,7 +4239,7 @@ def set_netkvm_param_value(vm, param, 
value): session = vm.wait_for_serial_login(timeout=360) try: - logging.info("Set %s to %s" % (param, value)) + LOG.info("Set %s to %s" % (param, value)) cmd = 'netsh netkvm setparam 0 param=%s value=%s' cmd = cmd % (param, value) status, output = session.cmd_status_output(cmd) @@ -4249,7 +4248,7 @@ def set_netkvm_param_value(vm, param, value): err += "With status=%s, output=%s" % (status, output) raise exceptions.TestError(err) - logging.info("Restart nic to apply changes") + LOG.info("Restart nic to apply changes") dev_mac = vm.virtnet[0].mac connection_id = get_windows_nic_attribute( session, "macaddress", dev_mac, "netconnectionid") @@ -4270,7 +4269,7 @@ def get_netkvm_param_value(vm, param): session = vm.wait_for_serial_login(timeout=360) try: - logging.info("Get the value of %s" % param) + LOG.info("Get the value of %s" % param) cmd = 'netsh netkvm getparam 0 param=%s' % param status, output = session.cmd_status_output(cmd) if status: @@ -4368,7 +4367,7 @@ def get_channel_info(session, interface): cmd = "ethtool -l %s" % interface s, o = session.cmd_status_output(cmd) if s: - logging.error("Get channel parameters for vm failed:%s" % o) + LOG.error("Get channel parameters for vm failed:%s" % o) return {}, {} maximum = {} current = {} @@ -4396,20 +4395,20 @@ def set_channel(session, interface, parameter, value): :return: False or True to indicate if the the parameter is set successfully """ cmd = "ethtool -L %s %s %s" % (interface, parameter, value) - logging.debug("The cmd to set the channel info '%s'" % cmd) + LOG.debug("The cmd to set the channel info '%s'" % cmd) s, o = session.cmd_status_output(cmd) if s: - logging.error("Setting %s to %s failed:%s", parameter, value, o) + LOG.error("Setting %s to %s failed:%s", parameter, value, o) return False _, current = get_channel_info(session, interface) - logging.debug("After set, the current value is %s" % current) + LOG.debug("After set, the current value is %s" % current) try: if int(current['Combined']) == 
int(value): return True else: - logging.error("Setting passed, but checking failed:%s", current) + LOG.error("Setting passed, but checking failed:%s", current) except KeyError: - logging.error("No 'Combined' found in the channel info") + LOG.error("No 'Combined' found in the channel info") return False @@ -4461,7 +4460,7 @@ def delete_linux_bridge_tmux(linux_bridge_name, iface_name=None, ignore_status=F if not utils_package.package_install(['tmux', 'dhcp-client', 'procps-ng', 'net-tools']): raise exceptions.TestError("Failed to install the required packages.") if not os.path.exists(br_path): - logging.info("There is no bridge named '%s' on the host" % linux_bridge_name) + LOG.info("There is no bridge named '%s' on the host" % linux_bridge_name) return if iface_name: cmd = 'tmux -c "ip link set {1} nomaster; ip link delete {0}; pkill dhclient; ' \ @@ -4483,19 +4482,19 @@ def check_filter_rules(ifname, bandwidth, expect_none=False): """ cmd = "tc -d filter show dev %s parent ffff:" % ifname filter_output = process.run(cmd, shell=True).stdout_text - logging.debug("Bandwidth filter output: %s", filter_output) + LOG.debug("Bandwidth filter output: %s", filter_output) if expect_none: return not filter_output.strip() if not filter_output.count("filter protocol all pref"): - logging.error("Can't find 'protocol all' settings in filter rules") + LOG.error("Can't find 'protocol all' settings in filter rules") return False filter_pattern = r".*police.*rate (\d+)(K?M?)bit burst (\d+)(K?M?)b.*" tc_police = re.search(r"%s" % filter_pattern, filter_output, re.M) if not tc_police: - logging.error("Can't find any filter policy") + LOG.error("Can't find any filter policy") return False - logging.debug("bandwidth from tc output:%s" % str(tc_police.groups())) - logging.debug("bandwidth from setting:%s" % str(bandwidth)) + LOG.debug("bandwidth from tc output:%s" % str(tc_police.groups())) + LOG.debug("bandwidth from setting:%s" % str(bandwidth)) try: if "average" in bandwidth: if 
tc_police.group(2) == 'M': @@ -4526,14 +4525,14 @@ def check_class_rules(ifname, rule_id, bandwidth): """ cmd = "tc class show dev %s" % ifname class_output = process.run(cmd, shell=True).stdout_text - logging.debug("Bandwidth class output: %s", class_output) + LOG.debug("Bandwidth class output: %s", class_output) class_pattern = (r"class htb %s.*rate (\d+)(K?M?)bit ceil (\d+)(K?M?)bit burst (\d+)(K?M?)b.*" % rule_id) tc_htb = re.search(class_pattern, class_output, re.M) if not tc_htb: - logging.error("Can't find outbound setting for htb %s", rule_id) + LOG.error("Can't find outbound setting for htb %s", rule_id) return False - logging.debug("bandwidth from tc output:%s" % str(tc_htb.groups())) - logging.debug("bandwidth from seting: %s" % str(bandwidth)) + LOG.debug("bandwidth from tc output:%s" % str(tc_htb.groups())) + LOG.debug("bandwidth from seting: %s" % str(bandwidth)) rate = None if "floor" in bandwidth: rate = int(bandwidth["floor"]) * 8 diff --git a/virttest/utils_netperf.py b/virttest/utils_netperf.py index 022554fd23..6685b6f693 100644 --- a/virttest/utils_netperf.py +++ b/virttest/utils_netperf.py @@ -14,6 +14,8 @@ from . import data_dir from . import remote as remote_old +LOG = logging.getLogger('avocado.' 
+ __name__) + class NetperfError(Exception): pass @@ -102,7 +104,7 @@ def __init__(self, address, netperf_path, md5sum="", netperf_source="", self.netperf_path = os.path.join(self.netperf_base_dir, self.netperf_file) - logging.debug("Create remote session") + LOG.debug("Create remote session") self.session = remote.remote_login(self.client, self.address, self.port, self.username, self.password, prompt, @@ -143,7 +145,7 @@ def pull_file(self, netperf_source=None): """ if aurl.is_url(netperf_source): - logging.debug("Download URL file to local path") + LOG.debug("Download URL file to local path") tmp_dir = data_dir.get_download_dir() dst = os.path.join(tmp_dir, os.path.basename(netperf_source)) self.netperf_source = download.get_file(src=netperf_source, @@ -169,12 +171,12 @@ def install(self, install, compile_option): self.pull_file(self.netperf_source) self.push_file(self.netperf_source) if self.pack_suffix: - logging.debug("Compiling netserver from source") + LOG.debug("Compiling netserver from source") self.pack_compile(compile_option) msg = "Using local netperf: %s and %s" % (self.netperf_path, self.netserver_path) - logging.debug(msg) + LOG.debug(msg) return (self.netserver_path, self.netperf_path) @@ -206,7 +208,7 @@ def __init__(self, address, netperf_path, md5sum="", netperf_source="", password, prompt, linesep, status_test_command) self.netserver_path, self.netperf_path = self.package.install(install, compile_option) - logging.debug("Create remote session") + LOG.debug("Create remote session") self.session = remote.remote_login(client, address, port, username, password, prompt, linesep, timeout=360, @@ -221,7 +223,7 @@ def is_target_running(self, target): check_reg = re.compile(r"%s" % target, re.I | re.M) return bool(check_reg.findall(output)) except Exception as err: - logging.debug("Check process error: %s" % str(err)) + LOG.debug("Check process error: %s" % str(err)) return False def stop(self, target): @@ -233,7 +235,7 @@ def stop(self, target): 
self.session.cmd(stop_cmd, ignore_all_errors=True) if self.is_target_running(target): raise NetserverError("Cannot stop %s" % target) - logging.info("Stop %s successfully" % target) + LOG.info("Stop %s successfully" % target) def cleanup(self, clean_all=True): """ @@ -278,7 +280,7 @@ def start(self, restart=False): :param restart: if restart=True, will restart the netserver """ - logging.info("Start netserver ...") + LOG.info("Start netserver ...") server_cmd = "" if self.client == "nc": server_cmd += "start /b %s > null" % self.netserver_path @@ -288,12 +290,12 @@ def start(self, restart=False): if restart: self.stop() if not self.is_server_running(): - logging.info("Start netserver with cmd: '%s'" % server_cmd) + LOG.info("Start netserver with cmd: '%s'" % server_cmd) self.session.cmd_output_safe(server_cmd) if not wait.wait_for(self.is_server_running, 5): raise NetserverError("Can not start netperf server!") - logging.info("Netserver start successfully") + LOG.info("Netserver start successfully") def is_server_running(self): return self.is_target_running(os.path.basename(self.netserver_path)) @@ -347,11 +349,11 @@ def start(self, server_address, test_option="", timeout=1200, if package_sizes: for p_size in package_sizes.split(): cmd = netperf_cmd + " -- -m %s" % p_size - logging.info("Start netperf with cmd: '%s'" % cmd) + LOG.info("Start netperf with cmd: '%s'" % cmd) output += self.session.cmd_output_safe(cmd, timeout=timeout) else: - logging.info("Start netperf with cmd: '%s'" % netperf_cmd) + LOG.info("Start netperf with cmd: '%s'" % netperf_cmd) output = self.session.cmd_output_safe(netperf_cmd, timeout=timeout) except aexpect.ShellError as err: @@ -390,7 +392,7 @@ def bg_start(self, server_address, test_option="", session_num=1, cmd = "%s > /dev/null" % cmd txt = "Start %s sessions netperf background" % session_num txt += " with cmd: '%s' " % cmd - logging.info(txt) + LOG.info(txt) for num in xrange(int(session_num)): self.session.cmd_output_safe("%s &" % 
cmd) else: @@ -400,7 +402,7 @@ def bg_start(self, server_address, test_option="", session_num=1, netperf_cmd = "%s > /dev/null " % netperf_cmd txt = "Start %s sessions netperf background" % session_num txt += " with cmd: '%s' " % netperf_cmd - logging.info(txt) + LOG.info(txt) for num in xrange(int(session_num)): self.session.cmd_output_safe("%s &" % netperf_cmd) diff --git a/virttest/utils_npiv.py b/virttest/utils_npiv.py index 376214e616..c4baf81003 100644 --- a/virttest/utils_npiv.py +++ b/virttest/utils_npiv.py @@ -12,6 +12,7 @@ from virttest.libvirt_xml.nodedev_xml import NodedevXML from virttest.libvirt_xml.devices import hostdev +LOG = logging.getLogger('avocado.' + __name__) _FC_HOST_PATH = "/sys/class/fc_host" _TIMEOUT = 5 @@ -30,18 +31,18 @@ def check_nodedev(dev_name, dev_parent=None): # Check if the /sys/class/fc_host/host$NUM exists if not os.access(fc_host_path, os.R_OK): - logging.error("Can't access %s", fc_host_path) + LOG.error("Can't access %s", fc_host_path) return False dev_xml = NodedevXML.new_from_dumpxml(dev_name) if not dev_xml: - logging.error("Can't dumpxml %s XML", dev_name) + LOG.error("Can't dumpxml %s XML", dev_name) return False # Check device parent name if dev_parent != dev_xml.parent: - logging.error("The parent name is different: %s is not %s", - dev_parent, dev_xml.parent) + LOG.error("The parent name is different: %s is not %s", + dev_parent, dev_xml.parent) return False wwnn_from_xml = dev_xml.wwnn @@ -57,22 +58,22 @@ def check_nodedev(dev_name, dev_parent=None): # Check wwnn, wwpn and fabric_wwn if len(wwnn_from_xml) != 16: - logging.error("The wwnn is not valid: %s", wwnn_from_xml) + LOG.error("The wwnn is not valid: %s", wwnn_from_xml) return False if len(wwpn_from_xml) != 16: - logging.error("The wwpn is not valid: %s", wwpn_from_xml) + LOG.error("The wwpn is not valid: %s", wwpn_from_xml) return False if fc_dict["node_name"] != wwnn_from_xml: - logging.error("The node name is differnet: %s is not %s", - 
fc_dict["node_name"], wwnn_from_xml) + LOG.error("The node name is differnet: %s is not %s", + fc_dict["node_name"], wwnn_from_xml) return False if fc_dict["port_name"] != wwpn_from_xml: - logging.error("The port name is different: %s is not %s", - fc_dict["port_name"], wwpn_from_xml) + LOG.error("The port name is different: %s is not %s", + fc_dict["port_name"], wwpn_from_xml) return False if fc_dict["fabric_name"] != fabric_wwn_from_xml: - logging.error("The fabric wwpn is differnt: %s is not %s", - fc_dict["fabric_name"], fabric_wwn_from_xml) + LOG.error("The fabric wwpn is differnt: %s is not %s", + fc_dict["fabric_name"], fabric_wwn_from_xml) return False fc_type_from_xml = dev_xml.fc_type @@ -80,7 +81,7 @@ def check_nodedev(dev_name, dev_parent=None): # Check capability type if (cap_type_from_xml != "scsi_host") or (fc_type_from_xml != "fc_host"): - logging.error("The capability type isn't 'scsi_host' or 'fc_host'") + LOG.error("The capability type isn't 'scsi_host' or 'fc_host'") return False return True @@ -167,7 +168,7 @@ def nodedev_create_from_xml(params): vhba_xml.parent = nodedev_parent vhba_xml.wwnn = scsi_wwnn vhba_xml.wwpn = scsi_wwpn - logging.debug("Prepare the nodedev XML: %s", vhba_xml) + LOG.debug("Prepare the nodedev XML: %s", vhba_xml) vhba_file = mktemp() with open(vhba_file, 'w') as xml_object: xml_object.write(str(vhba_xml)) @@ -179,7 +180,7 @@ def nodedev_create_from_xml(params): os.unlink(vhba_file) libvirt.check_exit_status(result, status_error) output = result.stdout_text - logging.info(output) + LOG.info(output) for scsi in output.split(): if scsi.startswith('scsi_host'): # Check node device @@ -202,12 +203,12 @@ def nodedev_destroy(scsi_host, params={}): """ status_error = "yes" == params.get("status_error", "no") result = virsh.nodedev_destroy(scsi_host) - logging.info("destroying scsi:%s", scsi_host) + LOG.info("destroying scsi:%s", scsi_host) # Check status_error libvirt.check_exit_status(result, status_error) # Check nodedev 
value if not check_nodedev(scsi_host): - logging.info(result.stdout_text) + LOG.info(result.stdout_text) else: raise exceptions.TestFail("The relevant directory still exists" " or mismatch with result") @@ -234,9 +235,9 @@ def vhbas_cleanup(vhba_list): nodedev_destroy(scsi_host) left_vhbas = find_hbas("vhba") if left_vhbas: - logging.error("old vhbas are: %s", left_vhbas) + LOG.error("old vhbas are: %s", left_vhbas) else: - logging.debug("scsi_hosts destroyed: %s", vhba_list) + LOG.debug("scsi_hosts destroyed: %s", vhba_list) def create_hostdev_xml(adapter_name="", **kwargs): @@ -265,7 +266,7 @@ def create_hostdev_xml(adapter_name="", **kwargs): source_args['target'] = addr_target source_args['unit'] = addr_unit hostdev_xml.source = hostdev_xml.new_source(**source_args) - logging.info(hostdev_xml) + LOG.info(hostdev_xml) return hostdev_xml @@ -360,18 +361,18 @@ def prepare_multipath_conf(conf_path="/etc/multipath.conf", conf_content="", if os.path.exists(conf_path): with open(conf_path, 'r+') as conf_file: old_conf_content = conf_file.read() - logging.info("Old multipath conf is: %s" % old_conf_content) + LOG.info("Old multipath conf is: %s" % old_conf_content) if replace_existing: conf_file.seek(0) conf_file.truncate() conf_file.write(new_conf_content) - logging.info("Replace multipath conf to: %s" % new_conf_content) + LOG.info("Replace multipath conf to: %s" % new_conf_content) else: - logging.info("Multipath conf exsits, skip preparation.") + LOG.info("Multipath conf exsits, skip preparation.") else: with open(conf_path, 'w') as conf_file: conf_file.write(new_conf_content) - logging.info("Create multipath conf: %s" % new_conf_content) + LOG.info("Create multipath conf: %s" % new_conf_content) if restart_multipath: restart_multipathd() return old_conf_content diff --git a/virttest/utils_package.py b/virttest/utils_package.py index dd0838efe4..41fd91ba82 100644 --- a/virttest/utils_package.py +++ b/virttest/utils_package.py @@ -11,6 +11,8 @@ from virttest import 
utils_misc from virttest import vt_console +LOG = logging.getLogger('avocado.' + __name__) + PACKAGE_MANAGERS = ['apt-get', 'yum', 'zypper', @@ -110,17 +112,14 @@ def operate(self, timeout, default_status, internal_timeout=2): timeout, internal_timeout) if status: - logging.error("'%s' execution failed " - "with %s", cmd, output) + LOG.error("'%s' execution failed with %s", cmd, output) # Try to clean the repo db and re-try installation if not self.clean(): - logging.error("Package %s was broken", - self.package_manager) + LOG.error("Package %s was broken", self.package_manager) return False status, output = self.session.cmd_status_output(cmd, timeout) if status: - logging.error("'%s' execution failed " - "with %s", cmd, output) + LOG.error("'%s' execution failed with %s", cmd, output) return False return True @@ -180,7 +179,7 @@ def operate(self, default_status): need = True if need: if not self.func(pkg): - logging.error("Operate %s on host failed", pkg) + LOG.error("Operate %s on host failed", pkg) return False return True diff --git a/virttest/utils_pyvmomi.py b/virttest/utils_pyvmomi.py index ee5ec5d484..ddb6f488d5 100644 --- a/virttest/utils_pyvmomi.py +++ b/virttest/utils_pyvmomi.py @@ -13,6 +13,8 @@ from pyVim.task import WaitForTask from pyVmomi import vim +LOG = logging.getLogger('avocado.' 
+ __name__) + def to_list(obj): tmp_list = [] @@ -241,7 +243,7 @@ def connect(self): self.service_instance = SmartConnect(**kwargs) if self.service_instance: - logging.debug( + LOG.debug( 'New vsphere connection established: %s (%s)', self.service_instance, id(self.service_instance)) @@ -252,8 +254,8 @@ def close(self): del self.target_vm if not self.service_instance: return - logging.debug('vsphere connection closed: %s (%s)', - self.service_instance, id(self.service_instance)) + LOG.debug('vsphere connection closed: %s (%s)', + self.service_instance, id(self.service_instance)) Disconnect(self.service_instance) self.service_instance = None @@ -321,7 +323,7 @@ def wraper(self, *args, **kwargs): self.target_vm = self._target_vm_name vmobj = self.target_vm if vm_name: - logging.warning( + LOG.warning( "Have you forgotten to reset target_vm to 'new vm name'?") elif vm_name: self.target_vm = vm_name @@ -375,7 +377,7 @@ def _set_vm(self, name): self._target_vm = tmp_vm raise VSphereVMNotFound(name) self._target_vm_name = self._target_vm.name - logging.debug('Current target VM is %s' % self._target_vm.name) + LOG.debug('Current target VM is %s' % self._target_vm.name) def _del_vm(self): """ @@ -450,7 +452,7 @@ def power_on(self, vm_obj=None, vm_name=None): :param vm_name: a vm's name """ WaitForTask(vm_obj.PowerOn()) - logging.debug('VM %s was powered on', vm_obj.name) + LOG.debug('VM %s was powered on', vm_obj.name) @vm_picker def power_off(self, vm_obj=None, vm_name=None): @@ -464,7 +466,7 @@ def power_off(self, vm_obj=None, vm_name=None): :param vm_name: a vm's name """ WaitForTask(vm_obj.PowerOff()) - logging.debug('VM %s was powered off', vm_obj.name) + LOG.debug('VM %s was powered off', vm_obj.name) @vm_picker def remove_all_snapshots(self, vm_obj=None, vm_name=None): @@ -476,7 +478,7 @@ def remove_all_snapshots(self, vm_obj=None, vm_name=None): """ if not vm_obj.snapshot: return - logging.debug('Remove all snapshots for VM %s', vm_obj.name) + LOG.debug('Remove all 
snapshots for VM %s', vm_obj.name) WaitForTask(vm_obj.RemoveAllSnapshots()) @vm_picker @@ -489,7 +491,7 @@ def remove_current_snapshot(self, vm_obj=None, vm_name=None): """ if not vm_obj.snapshot: return - logging.debug('Remove current snapshot for VM %s', vm_obj.name) + LOG.debug('Remove current snapshot for VM %s', vm_obj.name) WaitForTask( vm_obj.snapshot.currentSnapshot.Remove( removeChildren=True)) @@ -552,13 +554,13 @@ def remove_snapshot_by_id( if raise_not_found: raise VSphereSnapNotFound(vm_obj.name, snapshot_id) else: - logging.debug( + LOG.debug( 'Not found snapshot_id %s for VM %s', snapshot_id, vm_obj.name) return - logging.debug('Remove snapshot %s for VM %s', snap, vm_obj.name) + LOG.debug('Remove snapshot %s for VM %s', snap, vm_obj.name) WaitForTask(snap.Remove(removeChildren=remove_children)) @vm_picker @@ -662,7 +664,7 @@ def get_dev_by_key_or_label(self, devices, label=None, key=None): "Not found device for label(%s) or key(%s)" % (label, key)) - logging.debug( + LOG.debug( "Found device: label(%s) key(%s) summary(%s)", res[0].deviceInfo.label, res[0].key, diff --git a/virttest/utils_sasl.py b/virttest/utils_sasl.py index ebe6907d03..b507f8d5fe 100644 --- a/virttest/utils_sasl.py +++ b/virttest/utils_sasl.py @@ -14,6 +14,8 @@ from virttest import propcan from virttest import virsh +LOG = logging.getLogger('avocado.' 
+ __name__) + class SASL(propcan.PropCanBase): @@ -115,7 +117,7 @@ def list_users(self, remote=True, sasldb_path="/etc/libvirt/passwd.db"): else: return process.run(cmd).stdout_text except process.CmdError: - logging.error("Failed to set a user's sasl password %s", cmd) + LOG.error("Failed to set a user's sasl password %s", cmd) def setup(self, remote=True): """ @@ -132,7 +134,7 @@ def setup(self, remote=True): else: process.system(cmd) except process.CmdError: - logging.error("Failed to set a user's sasl password %s", cmd) + LOG.error("Failed to set a user's sasl password %s", cmd) def cleanup(self, remote=True): """ @@ -147,7 +149,7 @@ def cleanup(self, remote=True): else: process.system(cmd) except process.CmdError: - logging.error("Failed to disable a user's access %s", cmd) + LOG.error("Failed to disable a user's access %s", cmd) class VirshSessionSASL(virsh.VirshSession): @@ -178,6 +180,6 @@ def __init__(self, params): self.sendline(self.sasl_pwd) # make sure session is connected successfully if self.cmd_status('list', timeout=60) != 0: - logging.debug("Persistent virsh session is not responding, " - "libvirtd may be dead.") + LOG.debug("Persistent virsh session is not responding, " + "libvirtd may be dead.") raise aexpect.ShellStatusError(virsh.VIRSH_EXEC, 'list') diff --git a/virttest/utils_secret.py b/virttest/utils_secret.py index 1a13137e16..ada316c930 100644 --- a/virttest/utils_secret.py +++ b/virttest/utils_secret.py @@ -6,6 +6,8 @@ from virttest import virsh +LOG = logging.getLogger('avocado.' + __name__) + def get_secret_list(remote_virsh=None): """ @@ -14,14 +16,14 @@ def get_secret_list(remote_virsh=None): :param remote_virsh: remote virsh shell session. 
:return secret list including secret UUID """ - logging.info("Get secret list ...") + LOG.info("Get secret list ...") try: if remote_virsh: secret_list_result = remote_virsh.secret_list() else: secret_list_result = virsh.secret_list() except Exception as e: - logging.error("Exception thrown while getting secret lists: %s", str(e)) + LOG.error("Exception thrown while getting secret lists: %s", str(e)) raise secret_list = secret_list_result.stdout_text.strip().splitlines() # First two lines contain table header followed by entries @@ -56,5 +58,5 @@ def clean_up_secrets(remote_virsh=None): else: virsh.secret_undefine(secret_uuid) except Exception as e: - logging.error("Exception thrown while undefining secret: %s", str(e)) + LOG.error("Exception thrown while undefining secret: %s", str(e)) raise diff --git a/virttest/utils_selinux.py b/virttest/utils_selinux.py index 971f41aa73..480e3da090 100644 --- a/virttest/utils_selinux.py +++ b/virttest/utils_selinux.py @@ -12,6 +12,8 @@ ubuntu = distro.detect().name == 'Ubuntu' +LOG = logging.getLogger('avocado.' + __name__) + class SelinuxError(Exception): @@ -70,7 +72,7 @@ def get_status(selinux_force=False): but the output is not expected. """ if ubuntu and not selinux_force: - logging.warning("Ubuntu doesn't support selinux by default") + LOG.warning("Ubuntu doesn't support selinux by default") return 'disabled' cmd = 'getenforce' @@ -105,7 +107,7 @@ def set_status(status, selinux_force=False): but status of selinux is not set to expected. 
""" if ubuntu and not selinux_force: - logging.warning("Ubuntu doesn't support selinux by default") + LOG.warning("Ubuntu doesn't support selinux by default") return if status not in STATUS_LIST: @@ -132,7 +134,7 @@ def set_status(status, selinux_force=False): else: pass - logging.debug("Set status of selinux to %s success.", status) + LOG.debug("Set status of selinux to %s success.", status) def is_disabled(selinux_force=False): @@ -142,7 +144,7 @@ def is_disabled(selinux_force=False): :param selinux_force: True to force selinux configuration on Ubuntu """ if ubuntu and not selinux_force: - logging.warning("Ubuntu doesn't support selinux by default") + LOG.warning("Ubuntu doesn't support selinux by default") return True status = get_status(selinux_force) @@ -159,7 +161,7 @@ def is_not_disabled(selinux_force=False): :param selinux_force: True to force selinux configuration on Ubuntu """ if ubuntu and not selinux_force: - logging.warning("Ubuntu doesn't support selinux by default") + LOG.warning("Ubuntu doesn't support selinux by default") return False return not is_disabled(selinux_force) @@ -172,7 +174,7 @@ def is_enforcing(selinux_force=False): :param selinux_force: True to force selinux configuration on Ubuntu """ if ubuntu and not selinux_force: - logging.warning("Ubuntu doesn't support selinux by default") + LOG.warning("Ubuntu doesn't support selinux by default") return False return (get_status(selinux_force) == "enforcing") @@ -185,7 +187,7 @@ def is_permissive(selinux_force=False): :param selinux_force: True to force selinux configuration on Ubuntu """ if ubuntu and not selinux_force: - logging.warning("Ubuntu doesn't support selinux by default") + LOG.warning("Ubuntu doesn't support selinux by default") return False return (get_status(selinux_force) == "permissive") @@ -231,7 +233,7 @@ def get_context_of_file(filename, selinux_force=False): :raise SeCmdError: if execute 'getfattr' failed. 
""" if ubuntu and not selinux_force: - logging.warning("Ubuntu doesn't support selinux by default") + LOG.warning("Ubuntu doesn't support selinux by default") return # More direct than scraping 'ls' output. @@ -257,7 +259,7 @@ def set_context_of_file(filename, context, selinux_force=False): file is not setted to context. """ if ubuntu and not selinux_force: - logging.warning("Ubuntu doesn't support selinux by default") + LOG.warning("Ubuntu doesn't support selinux by default") return context = context.strip() @@ -274,7 +276,7 @@ def set_context_of_file(filename, context, selinux_force=False): "but not expected %s." % (filename, context_result, context)) - logging.debug("Set context of %s success.", filename) + LOG.debug("Set context of %s success.", filename) def check_context_of_file(filename, label, selinux_force=False): @@ -287,12 +289,12 @@ def check_context_of_file(filename, label, selinux_force=False): """ se_label = get_context_of_file(filename, selinux_force) if se_label is not None: - logging.debug("Context of shared filename '%s' is '%s'" % - (filename, se_label)) + LOG.debug("Context of shared filename '%s' is '%s'" % + (filename, se_label)) if label not in se_label: return False else: - logging.warning("Context of shared filename '%s' is None" % filename) + LOG.warning("Context of shared filename '%s' is None" % filename) return False return True @@ -326,7 +328,7 @@ def get_defcon(local=False, selinux_force=False): :return: list of dictionaries of default context attributes """ if ubuntu and not selinux_force: - logging.warning("Ubuntu doesn't support selinux by default") + LOG.warning("Ubuntu doesn't support selinux by default") return if local: @@ -412,7 +414,7 @@ def set_defcon(context_type, pathregex, context_range=None, selinux_force=False) :raise SeCmdError: if semanage exits non-zero """ if ubuntu and not selinux_force: - logging.warning("Ubuntu doesn't support selinux by default") + LOG.warning("Ubuntu doesn't support selinux by default") return 
cmd = "semanage fcontext --add" @@ -441,7 +443,7 @@ def del_defcon(context_type, pathregex, selinux_force=False): :raise SeCmdError: if semanage exits non-zero """ if ubuntu and not selinux_force: - logging.warning("Ubuntu doesn't support selinux by default") + LOG.warning("Ubuntu doesn't support selinux by default") return cmd = ("semanage fcontext --delete -t %s '%s'" % (context_type, pathregex)) @@ -466,7 +468,7 @@ def _run_restorecon(pathname, dirdesc, readonly=True, force=False, selinux_force :param selinux_force: True to force selinux configuration on Ubuntu """ if ubuntu and not selinux_force: - logging.warning("Ubuntu doesn't support selinux by default") + LOG.warning("Ubuntu doesn't support selinux by default") return 0 cmd = 'restorecon -v' @@ -494,7 +496,7 @@ def verify_defcon(pathname, dirdesc=False, readonly=True, forcedesc=False, selin :note: By default DOES NOT follow symlinks """ if ubuntu and not selinux_force: - logging.warning("Ubuntu doesn't support selinux by default") + LOG.warning("Ubuntu doesn't support selinux by default") return False # Default context path regexes only work on canonical paths changes = _run_restorecon(pathname, dirdesc, diff --git a/virttest/utils_spice.py b/virttest/utils_spice.py index 3e5258f76b..7415174585 100644 --- a/virttest/utils_spice.py +++ b/virttest/utils_spice.py @@ -12,6 +12,8 @@ from . import utils_net, utils_misc +LOG = logging.getLogger('avocado.' 
+ __name__) + class RVConnectError(Exception): @@ -35,7 +37,7 @@ def wait_timeout(timeout=10): :param timeout=10 """ - logging.debug("Waiting (timeout=%ss)", timeout) + LOG.debug("Waiting (timeout=%ss)", timeout) time.sleep(timeout) @@ -52,7 +54,7 @@ def kill_app(vm_name, app_name, params, env): vm_session = vm.wait_for_login( timeout=int(params.get("login_timeout", 360))) - logging.info("Try to kill %s", app_name) + LOG.info("Try to kill %s", app_name) if vm.params.get("os_type") == "linux": vm_session.cmd("pkill %s" % app_name .split(os.path.sep)[-1]) @@ -85,7 +87,7 @@ def verify_established(client_vm, host, port, rv_binary, cmd = ('(netstat -pn 2>&1| grep "^tcp.*:.*%s.*ESTABLISHED.*%s.*")' % (host, rv_binary)) netstat_out = client_session.cmd_output(cmd) - logging.info("netstat output: %s", netstat_out) + LOG.info("netstat output: %s", netstat_out) if tls_port: tls_count = netstat_out.count(tls_port) @@ -93,22 +95,22 @@ def verify_established(client_vm, host, port, rv_binary, tls_port = port if (netstat_out.count(port) + tls_count) < 4: - logging.error("Not enough channels were open") + LOG.error("Not enough channels were open") raise RVConnectError() if secure_channels: if tls_count < len(secure_channels.split(',')): - logging.error("Not enough secure channels open") + LOG.error("Not enough secure channels open") raise RVConnectError() for line in netstat_out.split('\n'): if ((port in line and "ESTABLISHED" not in line) or (tls_port in line and "ESTABLISHED" not in line)): - logging.error("Failed to get established connection from netstat") + LOG.error("Failed to get established connection from netstat") raise RVConnectError() if "ESTABLISHED" not in netstat_out: - logging.error("Failed to get established connection from netstat") + LOG.error("Failed to get established connection from netstat") raise RVConnectError() - logging.info("%s connection to %s:%s successful.", - rv_binary, host, port) + LOG.info("%s connection to %s:%s successful.", + rv_binary, host, 
port) client_session.close() @@ -122,16 +124,16 @@ def start_vdagent(guest_session, test_timeout): """ cmd = "service spice-vdagentd start" try: - guest_session.cmd(cmd, print_func=logging.info, + guest_session.cmd(cmd, print_func=LOG.info, timeout=test_timeout) except ShellStatusError: - logging.debug("Status code of \"%s\" was not obtained, most likely" - "due to a problem with colored output" % cmd) + LOG.debug("Status code of \"%s\" was not obtained, most likely" + "due to a problem with colored output" % cmd) except Exception: raise exceptions.TestFail("Guest Vdagent Daemon Start failed") - logging.debug("------------ End of guest checking for Spice Vdagent" - " Daemon ------------") + LOG.debug("------------ End of guest checking for Spice Vdagent" + " Daemon ------------") wait_timeout(3) @@ -144,15 +146,15 @@ def restart_vdagent(guest_session, test_timeout): """ cmd = "service spice-vdagentd restart" try: - guest_session.cmd(cmd, print_func=logging.info, + guest_session.cmd(cmd, print_func=LOG.info, timeout=test_timeout) except ShellCmdError: raise exceptions.TestFail("Couldn't restart spice vdagent process") except Exception: raise exceptions.TestFail("Guest Vdagent Daemon Check failed") - logging.debug("------------ End of Spice Vdagent" - " Daemon Restart ------------") + LOG.debug("------------ End of Spice Vdagent" + " Daemon Restart ------------") wait_timeout(3) @@ -165,18 +167,18 @@ def stop_vdagent(guest_session, test_timeout): """ cmd = "service spice-vdagentd stop" try: - guest_session.cmd(cmd, print_func=logging.info, + guest_session.cmd(cmd, print_func=LOG.info, timeout=test_timeout) except ShellStatusError: - logging.debug("Status code of \"%s\" was not obtained, most likely" - "due to a problem with colored output" % cmd) + LOG.debug("Status code of \"%s\" was not obtained, most likely" + "due to a problem with colored output" % cmd) except ShellCmdError: raise exceptions.TestFail("Couldn't turn off spice vdagent process") except Exception: 
raise exceptions.TestFail("Guest Vdagent Daemon Check failed") - logging.debug("------------ End of guest checking for Spice Vdagent" - " Daemon ------------") + LOG.debug("------------ End of guest checking for Spice Vdagent" + " Daemon ------------") wait_timeout(3) @@ -190,10 +192,10 @@ def verify_vdagent(guest_session, test_timeout): cmd = "rpm -qa | grep spice-vdagent" try: - guest_session.cmd(cmd, print_func=logging.info, timeout=test_timeout) + guest_session.cmd(cmd, print_func=LOG.info, timeout=test_timeout) finally: - logging.debug("----------- End of guest check to see if vdagent " - "package is available ------------") + LOG.debug("----------- End of guest check to see if vdagent " + "package is available ------------") wait_timeout(3) @@ -209,7 +211,7 @@ def get_vdagent_status(vm_session, test_timeout): wait_timeout(3) try: output = vm_session.cmd( - cmd, print_func=logging.info, timeout=test_timeout) + cmd, print_func=LOG.info, timeout=test_timeout) except ShellCmdError: # getting the status of vdagent stopped returns 3, which results in a # ShellCmdError @@ -231,10 +233,10 @@ def verify_virtio(guest_session, test_timeout): """ cmd = "ls /dev/virtio-ports/" try: - guest_session.cmd(cmd, print_func=logging.info, timeout=test_timeout) + guest_session.cmd(cmd, print_func=LOG.info, timeout=test_timeout) finally: - logging.debug("------------ End of guest check of the Virtio-Serial" - " Driver------------") + LOG.debug("------------ End of guest check of the Virtio-Serial" + " Driver------------") wait_timeout(3) @@ -285,7 +287,7 @@ def clear_interface(vm, login_timeout=360, timeout=5): try: session.cmd("taskkill /F /IM remote-viewer.exe") except Exception: - logging.info("Remote-viewer not running") + LOG.info("Remote-viewer not running") else: clear_interface_linux(vm, login_timeout, timeout) @@ -296,7 +298,7 @@ def clear_interface_linux(vm, login_timeout, timeout): :param vm: VM where cleaning is required """ - logging.info("restarting X/gdm on: %s", 
vm.name) + LOG.info("restarting X/gdm on: %s", vm.name) session = vm.wait_for_login(username="root", password="123456", timeout=login_timeout) @@ -341,21 +343,21 @@ def deploy_epel_repo(guest_session, params): if "release 5" in guest_session.cmd("cat /etc/redhat-release"): cmd = ("yum -y localinstall https://dl.fedoraproject.org/" "pub/epel/epel-release-latest-5.noarch.rpm") - logging.info("Installing epel repository to %s", - params.get("guest_vm")) - guest_session.cmd(cmd, print_func=logging.info, timeout=300) + LOG.info("Installing epel repository to %s", + params.get("guest_vm")) + guest_session.cmd(cmd, print_func=LOG.info, timeout=300) elif "release 6" in guest_session.cmd("cat /etc/redhat-release"): cmd = ("yum -y localinstall https://dl.fedoraproject.org/" "pub/epel/epel-release-latest-6.noarch.rpm") - logging.info("Installing epel repository to %s", - params.get("guest_vm")) - guest_session.cmd(cmd, print_func=logging.info, timeout=300) + LOG.info("Installing epel repository to %s", + params.get("guest_vm")) + guest_session.cmd(cmd, print_func=LOG.info, timeout=300) elif "release 7" in guest_session.cmd("cat /etc/redhat-release"): cmd = ("yum -y localinstall https://dl.fedoraproject.org/" "pub/epel/epel-release-latest-7.noarch.rpm") - logging.info("Installing epel repository to %s", - params.get("guest_vm")) - guest_session.cmd(cmd, print_func=logging.info, timeout=300) + LOG.info("Installing epel repository to %s", + params.get("guest_vm")) + guest_session.cmd(cmd, print_func=LOG.info, timeout=300) else: raise Exception("Unsupported RHEL guest") diff --git a/virttest/utils_split_daemons.py b/virttest/utils_split_daemons.py index b306114672..157ebdeed5 100644 --- a/virttest/utils_split_daemons.py +++ b/virttest/utils_split_daemons.py @@ -17,6 +17,8 @@ IS_MODULAR_DAEMON = {'local': None} +LOG = logging.getLogger('avocado.' 
+ __name__) + class VirtDaemonCommon(object): @@ -44,8 +46,8 @@ def __init__(self, daemon_name='', session=None): runner = process.run if not self.daemon_name: - logging.warning("libvirt split daemon service is not available in host, " - "utils_daemons module will not function normally") + LOG.warning("libvirt split daemon service is not available in host, " + "utils_daemons module will not function normally") self.virtdaemon = service.Factory.create_service(self.daemon_name, run=runner) def _wait_for_start(self, timeout=60): @@ -131,7 +133,7 @@ def __init__(self, gdb=False, self.virtqemud_service = VirtQemud() self.was_running = self.virtqemud_service.is_running() if self.was_running: - logging.debug('Stopping virtqemud service') + LOG.debug('Stopping virtqemud service') self.virtqemud_service.stop() self.logging_handler = logging_handler @@ -200,7 +202,7 @@ def set_callback(self, callback_type, callback_func, callback_params=None): self.gdb.set_callback( callback_type, callback_func, callback_params) else: - logging.error("Only gdb session supports setting callback") + LOG.error("Only gdb session supports setting callback") def start(self, arg_str='', wait_for_working=True): """ @@ -230,7 +232,7 @@ def cont(self): if self.gdb: self.gdb.cont() else: - logging.error("Only gdb session supports continue") + LOG.error("Only gdb session supports continue") def kill(self): """ @@ -248,7 +250,7 @@ def restart(self, arg_str='', wait_for_working=True): :param arg_str: Argument passing to the session :param wait_for_working: Whether wait for virtqemud finish loading """ - logging.debug("Restarting virtqemud session") + LOG.debug("Restarting virtqemud session") self.kill() self.start(arg_str=arg_str, wait_for_working=wait_for_working) @@ -258,7 +260,7 @@ def wait_for_working(self, timeout=60): :param timeout: Max wait time """ - logging.debug('Waiting for virtqemud to work') + LOG.debug('Waiting for virtqemud to work') return utils_misc.wait_for( self.is_working, 
timeout=timeout, @@ -271,7 +273,7 @@ def back_trace(self): if self.gdb: return self.gdb.back_trace() else: - logging.warning('Can not get back trace without gdb') + LOG.warning('Can not get back trace without gdb') def insert_break(self, break_func): """ @@ -282,7 +284,7 @@ def insert_break(self, break_func): if self.gdb: return self.gdb.insert_break(break_func) else: - logging.warning('Can not insert breakpoint without gdb') + LOG.warning('Can not insert breakpoint without gdb') def is_working(self): """ @@ -302,7 +304,7 @@ def wait_for_stop(self, timeout=60, step=0.1): :param timeout: Max wait time :param step: Checking interval """ - logging.debug('Waiting for virtqemud to stop') + LOG.debug('Waiting for virtqemud to stop') if self.gdb: return self.gdb.wait_for_stop(timeout=timeout) else: @@ -318,11 +320,11 @@ def wait_for_termination(self, timeout=60): :param timeout: Max wait time """ - logging.debug('Waiting for virtqemud to terminate') + LOG.debug('Waiting for virtqemud to terminate') if self.gdb: return self.gdb.wait_for_termination(timeout=timeout) else: - logging.error("Only gdb session supports wait_for_termination.") + LOG.error("Only gdb session supports wait_for_termination.") def exit(self): """ diff --git a/virttest/utils_sriov.py b/virttest/utils_sriov.py index 04da9acaf1..a810ab93b4 100644 --- a/virttest/utils_sriov.py +++ b/virttest/utils_sriov.py @@ -11,6 +11,8 @@ from virttest import utils_net from virttest import utils_package +LOG = logging.getLogger('avocado.' 
+ __name__) + def find_pf(driver, session=None): """ @@ -79,7 +81,7 @@ def get_pf_info(session=None): 'status': utils_net.get_net_if_operstate( iface_name.strip(), runner=runner)}) pf_info.update({pci: tmp_info}) - logging.debug("PF info: %s.", pf_info) + LOG.debug("PF info: %s.", pf_info) return pf_info @@ -109,7 +111,7 @@ def get_pf_info_by_pci(pci_id, session=None): pf_info = get_pf_info(session=session) for pf in pf_info.values(): if pf.get('pci_id') == pci_id: - logging.debug("PF %s details: %s.", pci_id, pf) + LOG.debug("PF %s details: %s.", pci_id, pf) return pf @@ -161,7 +163,7 @@ def set_vf(pci_addr, vf_no=4, session=None, timeout=60): :param timeout: Time limit in seconds to wait for cmd to complete :return: True if successful """ - logging.debug("pci_addr is %s", pci_addr) + LOG.debug("pci_addr is %s", pci_addr) cmd = "echo %s > %s/sriov_numvfs" % (vf_no, pci_addr) s, o = utils_misc.cmd_status_output(cmd, shell=True, timeout=timeout, verbose=True, session=session) @@ -260,7 +262,7 @@ def add_or_del_connection(params, session=None, is_del=False): return if not utils_package.package_install(["tmux", "dhcp-client"], session): - logging.error("Failed to install the required package") + LOG.error("Failed to install the required package") recover_cmd = 'tmux -c "ip link set {0} nomaster; ip link delete {1}; ' \ 'pkill dhclient; sleep 5; dhclient"'.format( pf_name, bridge_name) diff --git a/virttest/utils_stress.py b/virttest/utils_stress.py index 2f65562e8f..cb37abf5d1 100644 --- a/virttest/utils_stress.py +++ b/virttest/utils_stress.py @@ -12,6 +12,8 @@ from virttest.utils_test import libvirt from virttest.libvirt_xml.devices.disk import Disk +LOG = logging.getLogger('avocado.' 
+ __name__) + class VMStressEvents(): @@ -129,12 +131,12 @@ def vm_stress_events(self, event, vm, params): vm.reboot() elif "nethotplug" in event: for iface_num in range(int(iface_num)): - logging.debug("Try to attach interface %d" % iface_num) + LOG.debug("Try to attach interface %d" % iface_num) mac = utils_net.generate_mac_address_simple() options = ("%s %s --model %s --mac %s %s" % (iface_type, iface_source['network'], iface_model, mac, attach_option)) - logging.debug("VM name: %s , Options for Network attach: %s", vm.name, options) + LOG.debug("VM name: %s , Options for Network attach: %s", vm.name, options) ret = virsh.attach_interface(vm.name, options, ignore_status=True) time.sleep(self.event_sleep_time) @@ -143,7 +145,7 @@ def vm_stress_events(self, event, vm, params): if detach_option: options = ("--type %s --mac %s %s" % (iface_type, mac, detach_option)) - logging.debug("VM name: %s , Options for Network detach: %s", vm.name, options) + LOG.debug("VM name: %s , Options for Network detach: %s", vm.name, options) ret = virsh.detach_interface(vm.name, options, ignore_status=True) if not self.ignore_status: @@ -180,7 +182,7 @@ def host_stress_event(self, event): for itr in range(self.host_iterations): if "cpu_freq_governor" in event: cpu.set_freq_governor() if hasattr(cpu, 'set_freq_governor') else cpu.set_cpufreq_governor() - logging.debug("Current governor: %s", cpu.get_freq_governor() if hasattr(cpu, 'get_freq_governor') else cpu.get_cpufreq_governor()) + LOG.debug("Current governor: %s", cpu.get_freq_governor() if hasattr(cpu, 'get_freq_governor') else cpu.get_cpufreq_governor()) time.sleep(self.event_sleep_time) elif "cpu_idle" in event: idlestate = cpu.get_idle_state() if hasattr(cpu, 'get_idle_state') else cpu.get_cpuidle_state() diff --git a/virttest/utils_sys.py b/virttest/utils_sys.py index 1a28e38d5f..41bad66eb7 100644 --- a/virttest/utils_sys.py +++ b/virttest/utils_sys.py @@ -9,6 +9,8 @@ from avocado.utils import process +LOG = 
logging.getLogger('avocado.' + __name__) + # TODO: check function in avocado.utils after the next LTS def check_dmesg_output(pattern, expect=True, session=None): @@ -25,16 +27,16 @@ def check_dmesg_output(pattern, expect=True, session=None): dmesg = func_get_dmesg(dmesg_cmd) prefix = '' if expect else 'Not ' - logging.info('%sExpecting pattern: "%s".', prefix, pattern) + LOG.info('%sExpecting pattern: "%s".', prefix, pattern) # Search for pattern found = bool(re.search(pattern, dmesg)) log_content = ('' if found else 'Not') + 'Found "%s"' % pattern - logging.debug(log_content) + LOG.debug(log_content) if found ^ expect: - logging.error('Dmesg output does not meet expectation.') + LOG.error('Dmesg output does not meet expectation.') return False else: - logging.info('Dmesg output met expectation') + LOG.info('Dmesg output met expectation') return True diff --git a/virttest/utils_test/__init__.py b/virttest/utils_test/__init__.py index d9b64d16bf..58dbfd4277 100755 --- a/virttest/utils_test/__init__.py +++ b/virttest/utils_test/__init__.py @@ -78,6 +78,8 @@ ping = utils_net.ping raw_ping = utils_net.raw_ping +LOG = logging.getLogger('avocado.' 
+ __name__) + def update_boot_option_ubuntu(args, grub_key=None, session=None, remove_args=None): """ @@ -125,7 +127,7 @@ def update_boot_option_ubuntu(args, grub_key=None, session=None, remove_args=Non if status: raise exceptions.TestError("Failed to update grub to modify kernel " "cmdline") - logging.debug("updated boot option: %s with %s", grub_key, args) + LOG.debug("updated boot option: %s with %s", grub_key, args) def check_kernel_cmdline(session, remove_args="", args=""): @@ -158,10 +160,10 @@ def check_kernel_cmdline(session, remove_args="", args=""): def __run_cmd_and_handle_error(msg, cmd, session, test_fail_msg): - logging.info(msg) + LOG.info(msg) status, output = session.cmd_status_output(cmd) if status != 0: - logging.error(output) + LOG.error(output) raise exceptions.TestError(test_fail_msg) @@ -188,7 +190,7 @@ def update_boot_option(vm, args_removed="", args_added="", # (this function is not implement.) # here we just: msg = "update_boot_option() is supported only for Linux guest" - logging.warning(msg) + LOG.warning(msg) return login_timeout = int(vm.params.get("login_timeout")) session = vm.wait_for_login(timeout=login_timeout, serial=serial_login, @@ -227,7 +229,7 @@ def update_boot_option(vm, args_removed="", args_added="", # reboot is required only if we really add/remove any args if need_reboot and (req_args or req_remove_args): - logging.info("Rebooting guest ...") + LOG.info("Rebooting guest ...") session = vm.reboot(session=session, timeout=login_timeout, serial=serial_login) # check nothing is required to be added/removed by now @@ -386,8 +388,8 @@ def get_time(session, time_command, time_filter_re, time_format): time.strptime(host_time_out, time_format)) host_time += float(diff) except Exception as err: - logging.debug("(time_format, time_string): (%s, %s)", - time_format, host_time_out) + LOG.debug("(time_format, time_string): (%s, %s)", + time_format, host_time_out) raise err finally: locale.setlocale(locale.LC_TIME, loc) @@ -400,12 
+402,12 @@ def get_time(session, time_command, time_filter_re, time_format): diff = str_time.split()[-2] str_time = " ".join(str_time.split()[:-2]) except IndexError: - logging.debug("The time string from guest is:\n%s", str_time) + LOG.debug("The time string from guest is:\n%s", str_time) raise exceptions.TestError( "The time string from guest is unexpected.") except Exception as err: - logging.debug("(time_filter_re, time_string): (%s, %s)", - time_filter_re, str_time) + LOG.debug("(time_filter_re, time_string): (%s, %s)", + time_filter_re, str_time) raise err guest_time = None @@ -415,8 +417,8 @@ def get_time(session, time_command, time_filter_re, time_format): guest_time = time.mktime(time.strptime(str_time, time_format)) guest_time += float(diff) except Exception as err: - logging.debug("(time_format, time_string): (%s, %s)", - time_format, str_time) + LOG.debug("(time_format, time_string): (%s, %s)", + time_format, str_time) raise err finally: locale.setlocale(locale.LC_TIME, loc) @@ -432,14 +434,14 @@ def get_time(session, time_command, time_filter_re, time_format): if len(reo) > 1: num = float(reo[1]) except IndexError: - logging.debug("The time string from guest is:\n%s", output) + LOG.debug("The time string from guest is:\n%s", output) raise exceptions.TestError( "The time string from guest is unexpected.") except ValueError as err: - logging.debug("Couldn't parse float time offset from %s" % reo) + LOG.debug("Couldn't parse float time offset from %s" % reo) except Exception as err: - logging.debug("(time_filter_re, time_string): (%s, %s)", - time_filter_re, output) + LOG.debug("(time_filter_re, time_string): (%s, %s)", + time_filter_re, output) raise err guest_time = time.mktime(time.strptime(str_time, time_format)) + num @@ -520,7 +522,7 @@ def get_image_version(qemu_image): :return: compatibility level """ error_context.context("Get qcow2 image('%s') version" - % qemu_image.image_filename, logging.info) + % qemu_image.image_filename, LOG.info) info_out 
= qemu_image.info() compat = re.search(r'compat: +(.*)', info_out, re.M) if compat: @@ -542,7 +544,7 @@ def update_qcow2_image_version(qemu_image, ver_from, ver_to): if ver_from == ver_to: return None error_context.context("Update qcow2 image version from %s to %s" - % (ver_from, ver_to), logging.info) + % (ver_from, ver_to), LOG.info) qemu_image.params.update({"amend_compat": "%s" % ver_to}) qemu_image.amend(qemu_image.params) @@ -574,7 +576,7 @@ def run_image_copy(test, params, env): # Define special image to be taken as a source. source_image_name = params.get('source_image_name') if source_image_name: - logging.info('Using image as source image: %s', source_image_name) + LOG.info('Using image as source image: %s', source_image_name) asset_name = '%s' % (os.path.split(source_image_name)[1]) image = '%s.%s' % (params['image_name'], params['image_format']) dst_path = storage.get_image_filename(params, data_dir.get_data_dir()) @@ -592,7 +594,7 @@ def run_image_copy(test, params, env): try: os.makedirs(mount_dest_dir) except OSError as err: - logging.warning('mkdir %s error:\n%s', mount_dest_dir, err) + LOG.warning('mkdir %s error:\n%s', mount_dest_dir, err) if not os.path.exists(mount_dest_dir): raise exceptions.TestError('Failed to create NFS share dir %s' % @@ -619,7 +621,7 @@ def run_image_copy(test, params, env): force = params.get("force_copy", "yes") == "yes" try: - error_context.context("Copy image '%s'" % image, logging.info) + error_context.context("Copy image '%s'" % image, LOG.info) if aurl.is_url(asset_info['url']): asset.download_file(asset_info, interactive=False, force=force) @@ -629,7 +631,7 @@ def run_image_copy(test, params, env): finally: sub_type = params.get("sub_type") if sub_type: - error_context.context("Run sub test '%s'" % sub_type, logging.info) + error_context.context("Run sub test '%s'" % sub_type, LOG.info) params['image_name'] += "-error" params['boot_once'] = "c" vm.create(params=params) @@ -655,7 +657,7 @@ def 
run_file_transfer(test, params, env): vm.verify_alive() login_timeout = int(params.get("login_timeout", 360)) session = vm.wait_for_login(timeout=login_timeout) - error_context.context("Login to guest", logging.info) + error_context.context("Login to guest", LOG.info) transfer_timeout = int(params.get("transfer_timeout", 1000)) clean_cmd = params.get("clean_cmd", "rm -f") filesize = int(params.get("filesize", 4000)) @@ -671,11 +673,11 @@ def run_file_transfer(test, params, env): cmd = "dd if=/dev/zero of=%s bs=10M count=%d" % (host_path, count) try: error_context.context( - "Creating %dMB file on host" % filesize, logging.info) + "Creating %dMB file on host" % filesize, LOG.info) process.run(cmd) original_md5 = crypto.hash_file(host_path, algorithm="md5") error_context.context("Transferring file host -> guest, " - "timeout: %ss" % transfer_timeout, logging.info) + "timeout: %ss" % transfer_timeout, LOG.info) vm.copy_files_to( host_path, guest_path, @@ -683,7 +685,7 @@ def run_file_transfer(test, params, env): filesize=filesize) error_context.context("Transferring file guest -> host, " - "timeout: %ss" % transfer_timeout, logging.info) + "timeout: %ss" % transfer_timeout, LOG.info) vm.copy_files_from( guest_path, host_path, @@ -692,7 +694,7 @@ def run_file_transfer(test, params, env): current_md5 = crypto.hash_file(host_path, algorithm="md5") error_context.context("Compare md5sum between original file and " - "transferred file", logging.info) + "transferred file", LOG.info) if original_md5 != current_md5: raise exceptions.TestFail("File changed after transfer host -> guest " "and guest -> host") @@ -700,12 +702,12 @@ def run_file_transfer(test, params, env): try: os.remove(host_path) except OSError as detail: - logging.warn("Could not remove temp files in host: '%s'", detail) - logging.info('Cleaning temp file on guest') + LOG.warn("Could not remove temp files in host: '%s'", detail) + LOG.info('Cleaning temp file on guest') try: session.cmd("%s %s" % (clean_cmd, 
guest_path)) except aexpect.ShellError as detail: - logging.warn("Could not remove temp files in guest: '%s'", detail) + LOG.warn("Could not remove temp files in guest: '%s'", detail) finally: session.close() @@ -744,7 +746,7 @@ def transfer_data(session, host_cmd, guest_cmd, n_time, timeout, for num in xrange(n_time): md5_host = "1" md5_guest = "2" - logging.info("Data transfer repeat %s/%s." % (num + 1, n_time)) + LOG.info("Data transfer repeat %s/%s." % (num + 1, n_time)) try: args = (host_cmd, timeout) host_thread = utils_misc.InterruptedThread(run_host_cmd, args) @@ -757,7 +759,7 @@ def transfer_data(session, host_cmd, guest_cmd, n_time, timeout, if md5_check: raise exceptions.TestFail(err) else: - logging.warn(err) + LOG.warn(err) else: md5_re = "md5_sum = (\w{32})" try: @@ -777,7 +779,7 @@ def transfer_data(session, host_cmd, guest_cmd, n_time, timeout, if md5_check: raise exceptions.TestFail(err) else: - logging.warn(err) + LOG.warn(err) else: md5_re = "md5_sum = (\w{32})" try: @@ -793,7 +795,7 @@ def transfer_data(session, host_cmd, guest_cmd, n_time, timeout, if md5_check: raise exceptions.TestFail(err) else: - logging.warn(err) + LOG.warn(err) env["serial_file_transfer_start"] = False vm = env.get_vm(params["main_vm"]) @@ -805,7 +807,7 @@ def transfer_data(session, host_cmd, guest_cmd, n_time, timeout, port_name = params["file_transfer_serial_port"] guest_scripts = params["guest_scripts"] guest_path = params.get("guest_script_folder", "C:\\") - error_context.context("Copy test scripts to guest.", logging.info) + error_context.context("Copy test scripts to guest.", LOG.info) for script in guest_scripts.split(";"): link = os.path.join(data_dir.get_root_dir(), "shared", "deps", "serial", script) @@ -826,7 +828,7 @@ def transfer_data(session, host_cmd, guest_cmd, n_time, timeout, if sender == "host" or sender == "both": cmd = "dd if=/dev/zero of=%s bs=1M count=%d" % (host_data_file, count) error_context.context( - "Creating %dMB file on host" % filesize, 
logging.info) + "Creating %dMB file on host" % filesize, LOG.info) process.run(cmd) else: guest_file_create_cmd = "dd if=/dev/zero of=%s bs=1M count=%d" @@ -834,7 +836,7 @@ def transfer_data(session, host_cmd, guest_cmd, n_time, timeout, guest_file_create_cmd) cmd = guest_file_create_cmd % (guest_data_file, count) error_context.context( - "Creating %dMB file on host" % filesize, logging.info) + "Creating %dMB file on host" % filesize, LOG.info) session.cmd(cmd, timeout=600) if sender == "host": @@ -975,39 +977,39 @@ def env_check(self): for _, packages in self.prerequisites.items(): pacman = utils_package.package_manager(self.session, packages) if not pacman.install(timeout=self.timeout): - logging.error("Failed to install - %s", packages) + LOG.error("Failed to install - %s", packages) self.python = find_python(self.session, compat='python') if not self.python: - logging.error("Unable to find python.") + LOG.error("Unable to find python.") return False self.pip_bin = find_bin(self.session, ['pip3', 'pip2', 'pip']) if self.pip_bin: cmd = "%s install --upgrade pip;" % (self.pip_bin) if self.session.cmd_status(cmd, timeout=self.timeout) != 0: - logging.error("Unable to upgrade pip.") + LOG.error("Unable to upgrade pip.") return False if not utils_misc.make_dirs(self.test_path, session=self.session): - logging.error("Failed to create test path in guest") + LOG.error("Failed to create test path in guest") return False if self.avocado_vt: cmd = "lsmod | grep %s || modprobe %s" % (self.kvm_module, self.kvm_module) if self.session.cmd_status(cmd, timeout=self.timeout) != 0: - logging.error("nested kvm module not available") + LOG.error("nested kvm module not available") return False cmd = "service libvirtd restart" if self.session.cmd_status(cmd, timeout=self.timeout) != 0: - logging.error("Failed to restart libvirtd inside guest") + LOG.error("Failed to restart libvirtd inside guest") return False pip_pack = ['setuptools', 'netifaces', 'aexpect', 'netaddr'] cmd = "" for 
each in pip_pack: cmd = "%s install %s --upgrade" % (self.pip_bin, each) if self.session.cmd_status(cmd, timeout=self.timeout) != 0: - logging.error("Failed to update and install package %s" % each) + LOG.error("Failed to update and install package %s" % each) return False if not utils_misc.make_dirs(self.result_path, session=self.session): - logging.error("Failed to create result path in guest") + LOG.error("Failed to create result path in guest") return False return True @@ -1016,7 +1018,7 @@ def install_avocado(self): """ Method to install Avocado/Avocado-VT and its plugins """ - logging.debug("Installing avocado") + LOG.debug("Installing avocado") status = 0 if (self.session.cmd_status("which avocado") == 0) and not self.reinstall: return True @@ -1025,7 +1027,7 @@ def install_avocado(self): cmd = "%s avocado-framework" % pip_install_cmd status, output = self.session.cmd_status_output(cmd, timeout=self.timeout) if status != 0: - logging.error("Avocado pip installation failed:\n%s", output) + LOG.error("Avocado pip installation failed:\n%s", output) return False for plugin in self.plugins[self.installtype]: if self.pip_bin != "pip3": @@ -1038,27 +1040,27 @@ def install_avocado(self): cmd = "%s %s" % (pip_install_cmd, plugin) status, output = self.session.cmd_status_output(cmd, timeout=self.timeout) if status != 0: - logging.error("Avocado plugin %s pip " - "installation failed:\n%s", plugin, output) + LOG.error("Avocado plugin %s pip " + "installation failed:\n%s", plugin, output) return False elif "package" in self.installtype: raise NotImplementedError elif "git" in self.installtype: if not self.git_install(self.avocado_repo, branch=self.avocado_repo_branch): - logging.error("Avocado git installation failed") + LOG.error("Avocado git installation failed") return False for plugin in self.plugins[self.installtype]: cmd = "cd %s;" % os.path.join(self.plugins_path, plugin) cmd += "%s setup.py install" % self.python if self.session.cmd_status(cmd, 
timeout=self.timeout) != 0: - logging.error("Avocado plugin %s git " - "installation failed", plugin) + LOG.error("Avocado plugin %s git " + "installation failed", plugin) return False if self.avocado_vt and not self.git_install(self.avocado_vt_repo, make='requirements', branch=self.avocado_vt_repo_branch): - logging.error("Avocado-VT git installation failed") + LOG.error("Avocado-VT git installation failed") return False return True @@ -1090,7 +1092,7 @@ def runtest(self): """ Run test method to download the tests and trigger avocado command """ - logging.debug("Downloading Test") + LOG.debug("Downloading Test") if self.avocado_vt: cmd = "avocado vt-bootstrap --yes-to-all" if self.vt_type: @@ -1106,7 +1108,7 @@ def runtest(self): if not self.git_install(self.test_repo, install=False): raise exceptions.TestError("Downloading test failed") - logging.debug("Running Test") + LOG.debug("Running Test") avocado_cmd = "avocado run" if self.avocado_vt: avocado_cmd += " %s" % self.testlist[0].strip() @@ -1144,21 +1146,21 @@ def runtest(self): timeout=self.timeout) if status != 0: # TODO: Map test return status with error strings and print - logging.error("Avocado cmd: %s has failures consult " - "the logs for details\nstatus: " - "%s\nstdout: %s", avocado_cmd, status, output) + LOG.error("Avocado cmd: %s has failures consult " + "the logs for details\nstatus: " + "%s\nstdout: %s", avocado_cmd, status, output) return status == 0 def get_results(self): """ Copy avocado results present on the guest back to the host. 
""" - logging.debug("Trying to copy avocado results from guest") + LOG.debug("Trying to copy avocado results from guest") guest_results_dir = utils_misc.get_path(self.test.debugdir, self.vm.name) os.makedirs(guest_results_dir) - logging.debug("Guest avocado test results placed " - "under %s", guest_results_dir) + LOG.debug("Guest avocado test results placed " + "under %s", guest_results_dir) # result info tarball to host result dir results_tarball = os.path.join(self.test_path, "results.tgz") utils_package.package_install('tar', session=self.session) @@ -1281,15 +1283,15 @@ def copy_if_hash_differs(vm, local_path, remote_path): elif output: remote_hash = output.split()[0] else: - logging.warning("MD5 check for remote path %s did not return.", - remote_path) + LOG.warning("MD5 check for remote path %s did not return.", + remote_path) # Let's be a little more lenient here and see if it wasn't a # temporary problem remote_hash = "0" if remote_hash == local_hash and directory_exists(destination_autotest_path): return None - logging.debug("Copying %s to guest (remote hash: %s, local hash:%s)", - basename, remote_hash, local_hash) + LOG.debug("Copying %s to guest (remote hash: %s, local hash:%s)", + basename, remote_hash, local_hash) dest_dir = os.path.dirname(remote_path) if not directory_exists(dest_dir): session.cmd("mkdir -p %s" % dest_dir) @@ -1306,7 +1308,7 @@ def extract(vm, remote_path, dest_dir): :param dest_dir: Destination dir for the contents """ basename = os.path.basename(remote_path) - logging.debug("Extracting %s on VM %s", basename, vm.name) + LOG.debug("Extracting %s on VM %s", basename, vm.name) session.cmd("rm -rf %s" % dest_dir, timeout=240) dirname = os.path.dirname(remote_path) session.cmd("cd %s" % dirname) @@ -1347,7 +1349,7 @@ def get_results(base_results_dir): """ Copy autotest results present on the guest back to the host. 
""" - logging.debug("Trying to copy autotest results from guest") + LOG.debug("Trying to copy autotest results from guest") res_index = get_last_guest_results_index() guest_results_dir = os.path.join( outputdir, "guest_autotest_results%s" % (res_index + 1)) @@ -1395,18 +1397,18 @@ def get_results_summary(): try: output = process.run("cat %s" % status_path).stdout_text except process.CmdError as e: - logging.error("Error getting guest autotest status file: %s", e) + LOG.error("Error getting guest autotest status file: %s", e) return None try: results = scan_autotest_results.parse_results(output) # Report test results - logging.info("Results (test, status, duration, info):") + LOG.info("Results (test, status, duration, info):") for result in results: - logging.info("\t %s", str(result)) + LOG.info("\t %s", str(result)) return results except Exception as e: - logging.error("Error processing guest autotest results: %s", e) + LOG.error("Error processing guest autotest results: %s", e) return None def config_control(control_path, job_args=None): @@ -1625,8 +1627,8 @@ def config_control(control_path, job_args=None): (destination_autotest_path, destination_autotest_path)) # Run the test - logging.info("Running autotest control file %s on guest, timeout %ss", - os.path.basename(control_path), timeout) + LOG.info("Running autotest control file %s on guest, timeout %ss", + os.path.basename(control_path), timeout) # Start a background job to run server process if needed. 
server_process = None @@ -1641,21 +1643,21 @@ def config_control(control_path, job_args=None): bg = None try: start_time = time.time() - logging.info("---------------- Test output ----------------") + LOG.info("---------------- Test output ----------------") if migrate_background: mig_timeout = float(params.get("mig_timeout", "3600")) mig_protocol = params.get("migration_protocol", "tcp") cmd = "python -x ./autotest-local control" kwargs = {'cmd': cmd, 'timeout': timeout, - 'print_func': logging.info} + 'print_func': LOG.info} bg = utils_misc.InterruptedThread(session.cmd_output, kwargs=kwargs) bg.start() while bg.is_alive(): - logging.info("Autotest job did not end, start a round of " - "migration") + LOG.info("Autotest job did not end, start a round of " + "migration") vm.migrate(timeout=mig_timeout, protocol=mig_protocol) else: if params.get("guest_autotest_verbosity", "yes") == "yes": @@ -1666,9 +1668,9 @@ def config_control(control_path, job_args=None): "python -x ./autotest-local %s control & wait ${!}" % verbose, timeout=timeout, - print_func=logging.info) + print_func=LOG.info) finally: - logging.info("------------- End of test output ------------") + LOG.info("------------- End of test output ------------") if migrate_background and bg: bg.join() # Do some cleanup work on host if test need a server. 
@@ -1705,14 +1707,14 @@ def config_control(control_path, job_args=None): get_results(destination_autotest_path) raise exceptions.TestError("Autotest job on guest failed " "(VM terminated during job)") - logging.debug("Wait for autotest job finished on guest.") + LOG.debug("Wait for autotest job finished on guest.") session.close() session = vm.wait_for_login() while time.time() < start_time + timeout: ps_cmd = "ps ax" _, processes = session.cmd_status_output(ps_cmd) if "autotest-local" not in processes: - logging.debug("Autotest job finished on guest") + LOG.debug("Autotest job finished on guest") break time.sleep(1) else: @@ -1758,7 +1760,7 @@ def get_loss_ratio(output): try: return float(re.findall(r'(\d*\.?\d+)%.*loss', output)[0]) except IndexError: - logging.warn("Invalid output of ping command: %s" % output) + LOG.warn("Invalid output of ping command: %s" % output) return -1 @@ -1831,7 +1833,7 @@ def get_readable_cdroms(params, session): check_cdrom_patttern = params.get("cdrom_check_cdrom_pattern") o = session.get_command_output(get_cdrom_cmd) cdrom_list = re.findall(check_cdrom_patttern, o) - logging.debug("Found cdroms on guest: %s" % cdrom_list) + LOG.debug("Found cdroms on guest: %s" % cdrom_list) readable_cdroms = [] test_cmd = params.get("cdrom_test_cmd") @@ -1843,7 +1845,7 @@ def get_readable_cdroms(params, session): if not readable_cdroms: info_cmd = params.get("cdrom_info_cmd") output = session.cmd_output(info_cmd) - logging.debug("Guest cdroms info: %s" % output) + LOG.debug("Guest cdroms info: %s" % output) return readable_cdroms @@ -1855,9 +1857,9 @@ def service_setup(vm, session, directory): if rh_perf_envsetup_script: src = os.path.join(directory, rh_perf_envsetup_script) vm.copy_files_to(src, "/tmp/rh_perf_envsetup.sh") - logging.info("setup perf environment for host") + LOG.info("setup perf environment for host") process.getoutput("bash %s host %s" % (src, rebooted)) - logging.info("setup perf environment for guest") + LOG.info("setup perf 
environment for guest") session.cmd("bash /tmp/rh_perf_envsetup.sh guest %s" % rebooted) @@ -1958,7 +1960,7 @@ def get_driver_hardware_id(driver_path, process.system("umount %s" % mount_point) return hwid except Exception as e: - logging.error("Fail to get hardware id with exception: %s" % e) + LOG.error("Fail to get hardware id with exception: %s" % e) if txt_file: txt_file.close() process.system("umount %s" % mount_point, ignore_status=True) @@ -2067,7 +2069,7 @@ def run_avocado_bg(vm, params, test, testlist=[], avocado_vt=False, bt.start() return bt except Exception as info: - logging.warning("Background guest tests not run: %s", info) + LOG.warning("Background guest tests not run: %s", info) return None @@ -2183,7 +2185,7 @@ def load_stress_tool(self): self.base_name, self.work_path)) launch_cmds = 'nohup %s %s > /dev/null &' % ( self.stress_cmds, self.stress_args) - logging.info("Launch stress with command: %s", launch_cmds) + LOG.info("Launch stress with command: %s", launch_cmds) try: self.cmd_launch(launch_cmds) # The background process sometimes does not return to @@ -2209,7 +2211,7 @@ def _unload_stress(): return True return False - logging.info("stop stress app in guest/host/remote host") + LOG.info("stop stress app in guest/host/remote host") utils_misc.wait_for(_unload_stress, self.stress_wait_for_timeout, first=2.0, text="wait stress app quit", step=1.0) @@ -2258,7 +2260,7 @@ def download_stress(self): try: download_method = getattr( self, "_%s_download" % self.download_type) - logging.info('Download stress tool from %s', self.download_url) + LOG.info('Download stress tool from %s', self.download_url) download_method(self.download_url, tmp_path) except AttributeError: if not self.downloaded_file_path: @@ -2273,7 +2275,7 @@ def download_stress(self): self.base_name = self.downloaded_file_path source = os.path.join(tmp_path, self.base_name) if self.remote_host: - logging.info('Copy stress tool to remote host') + LOG.info('Copy stress tool to remote 
host') args = (self.remote_host.__getitem__('server_ip'), 'scp', self.remote_host.__getitem__('server_user'), self.remote_host.__getitem__('server_pwd'), '22', @@ -2281,7 +2283,7 @@ def download_stress(self): self.copy_files_to(*args) else: if self.session: - logging.info('Copy stress tool to work dir of guest') + LOG.info('Copy stress tool to work dir of guest') self.copy_files_to(source, self.dst_path) else: self.dst_path = os.path.abspath( @@ -2303,18 +2305,18 @@ def install(self): if not utils_package.package_install(self.stress_package, session=self.session): self.stress_install_from_repo = False - logging.debug("Fail to install stress tool via repo and " - "will download source to make and install it") + LOG.debug("Fail to install stress tool via repo and " + "will download source to make and install it") else: - logging.debug("Successful to install stress tool via repo") + LOG.debug("Successful to install stress tool via repo") return self.download_stress() install_path = os.path.join(self.dst_path, self.base_name, self.work_path) self.make_cmds = "cd %s;%s" % (install_path, self.make_cmds) - logging.info('installing the %s with %s', self.stress_type, - self.make_cmds) + LOG.info('installing the %s with %s', self.stress_type, + self.make_cmds) status, output = self.cmd_status_output(self.make_cmds, timeout=self.stress_shell_timeout) if status != 0: @@ -2331,21 +2333,21 @@ def clean(self): # If succeed, no need to uninstall and remove source any more if not utils_package.package_remove(self.stress_package, session=self.session): - logging.debug("Fail to remove stress tool via repo and " - "will continue to uninstall and remove source") + LOG.debug("Fail to remove stress tool via repo and " + "will continue to uninstall and remove source") else: - logging.debug("Successful to remove stress tool via repo") + LOG.debug("Successful to remove stress tool via repo") return install_path = os.path.join(self.dst_path, self.base_name) if self.cmd_status('cd %s' % 
install_path) != 0: - logging.error("No source files found in path %s", path) + LOG.error("No source files found in path %s", path) return - logging.info('Uninstall %s', self.stress_type) + LOG.info('Uninstall %s', self.stress_type) status, output = self.cmd_status_output(self.uninstall_cmds) if status != 0: - logging.error('Uninstall stress failed with error: %s', output) - logging.info('Remove the source files') + LOG.error('Uninstall stress failed with error: %s', output) + LOG.info('Remove the source files') rm_cmd = 'cd && rm -rf %s' % install_path if self.stress_type == "uperf": rm_cmd += " && rm -rf %s" % os.path.join( @@ -2644,12 +2646,12 @@ def prepare_profile(self, fpath, pat_repl): with open(fpath, 'r+') as profile_content: tempstr = profile_content.read() profile_content.truncate(0) - logging.debug( + LOG.debug( "In prepare profile: pattern and replacement : %s", pat_repl) for pattern, replace in pat_repl.items(): tempstr = tempstr.replace(pattern, replace) profile_content.write(tempstr) - logging.debug("Profile xml to be run : %s ", tempstr) + LOG.debug("Profile xml to be run : %s ", tempstr) except Exception: raise exceptions.TestError("Failed to update file : %s", fpath) @@ -2702,7 +2704,7 @@ def load_stress(self, params): self.stress_vm[client_vm.name].load_stress_tool() except exceptions.TestError as err_msg: error = True - logging.error(err_msg) + LOG.error(err_msg) return error def verify_unload_stress(self, params): @@ -2718,7 +2720,7 @@ def verify_unload_stress(self, params): vm.get_address(), count=10, timeout=20) if s_ping != 0: error = True - logging.error( + LOG.error( "%s seem to have gone out of network", vm.name) else: vm_params = params.object_params(vm.name) @@ -2727,14 +2729,14 @@ def verify_unload_stress(self, params): if self.iptables_rule: params['server_pwd'] = vm_params.get("password") params['server_ip'] = vm.get_address() - logging.debug("server_ip: %s", vm.get_address()) + LOG.debug("server_ip: %s", vm.get_address()) 
Iptables.setup_or_cleanup_iptables_rules( [self.iptables_rule], params=params, cleanup=True) self.stress_vm[vm.name].clean() vm.verify_dmesg() except exceptions.TestError as err_msg: error = True - logging.error(err_msg) + LOG.error(err_msg) return error @@ -2799,7 +2801,7 @@ def get_free_space(self, disk_type, path='/', vgname=None): try: output = self.runner.run(cmd).stdout_text except exceptions.CmdError as detail: - logging.debug(output) + LOG.debug(output) raise exceptions.TestError("Get space failed: %s." % str(detail)) if disk_type == "file": @@ -2826,7 +2828,7 @@ def occupy_space(self, disk_type, need_size, path=None, vgname=None, Create an image or volume to occupy the space of destination path """ free = self.get_free_space(disk_type, path, vgname) - logging.debug("Allowed space on remote path:%sGB", free) + LOG.debug("Allowed space on remote path:%sGB", free) occupied_size = int(free - need_size / 2) occupied_path = os.path.join(os.path.dirname(path), "occupied") return self.create_image(disk_type, occupied_path, occupied_size, @@ -2843,7 +2845,7 @@ def get_device_name(self): try: return "/dev/%s" % device_name[0] except IndexError: - logging.error("Can not find target '%s' after login." % self.target) + LOG.error("Can not find target '%s' after login." 
% self.target) def iscsi_login_setup(self, host, target_name, is_login=True): """ @@ -2876,7 +2878,7 @@ def iscsi_login_setup(self, host, target_name, is_login=True): output = self.runner.run(cmd, ignore_status=True).stdout_text if "successful" not in output: - logging.error("Logout to %s failed.", target_name) + LOG.error("Logout to %s failed.", target_name) def create_vg(self, vgname, device): """ @@ -2884,7 +2886,7 @@ def create_vg(self, vgname, device): """ try: self.runner.run("vgs | grep %s" % vgname) - logging.debug("Volume group %s does already exist.", vgname) + LOG.debug("Volume group %s does already exist.", vgname) return True except process.CmdError: pass # Not found @@ -2892,8 +2894,8 @@ def create_vg(self, vgname, device): self.runner.run("vgcreate %s %s" % (vgname, device)) return True except process.CmdError as detail: - logging.error("Create vgroup '%s' on remote host failed:%s", - vgname, detail) + LOG.error("Create vgroup '%s' on remote host failed:%s", + vgname, detail) return False def remove_vg(self, vgname): @@ -2931,7 +2933,7 @@ def create_image(self, disk_type, path=None, size=10, vgname=None, path = "/dev/%s/%s" % (vgname, lvname) result = self.runner.run(cmd, ignore_status=True, timeout=timeout) - logging.debug(result) + LOG.debug(result) if result.exit_status: raise exceptions.TestFail("Create image '%s' on remote host failed." % path) @@ -2960,7 +2962,7 @@ def check_dest_vm_network(vm, vm_ip, remote_host, username, password, password=password, prompt=shell_prompt) - logging.debug("Check VM network connectivity...") + LOG.debug("Check VM network connectivity...") ping_failed = True ping_cmd = "ping -c 5 %s" % vm_ip while timeout > 0: diff --git a/virttest/utils_test/libguestfs.py b/virttest/utils_test/libguestfs.py index 3f928e3a25..a676f0856b 100644 --- a/virttest/utils_test/libguestfs.py +++ b/virttest/utils_test/libguestfs.py @@ -11,6 +11,8 @@ from .. 
import qemu_storage from ..libvirt_xml import vm_xml, xcepts +LOG = logging.getLogger('avocado.' + __name__) + class VTError(Exception): pass @@ -60,7 +62,7 @@ def preprocess_image(params): image = qemu_storage.QemuImg(params, image_dir, image_name) image_path, _ = image.create(params) - logging.info("Image created in %s" % image_path) + LOG.info("Image created in %s" % image_path) return image_path @@ -101,7 +103,7 @@ def attach_additional_disk(vm, disksize, targetdev): :param disksize: size of attached disk :param targetdev: target of disk device """ - logging.info("Attaching disk...") + LOG.info("Attaching disk...") disk_path = os.path.join(data_dir.get_tmp_dir(), targetdev) cmd = "qemu-img create %s %s" % (disk_path, disksize) status, output = process.getstatusoutput(cmd) @@ -126,11 +128,11 @@ def define_new_vm(vm_name, new_name): vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) vmxml.vm_name = new_name del vmxml.uuid - logging.debug(str(vmxml)) + LOG.debug(str(vmxml)) vmxml.define() return True except xcepts.LibvirtXMLError as detail: - logging.error(detail) + LOG.error(detail) return False @@ -142,13 +144,13 @@ def cleanup_vm(vm_name=None, disk=None): if vm_name is not None: virsh.undefine(vm_name) except process.CmdError as detail: - logging.error("Undefine %s failed:%s", vm_name, detail) + LOG.error("Undefine %s failed:%s", vm_name, detail) try: if disk is not None: if os.path.exists(disk): os.remove(disk) except IOError as detail: - logging.error("Remove disk %s failed:%s", disk, detail) + LOG.error("Remove disk %s failed:%s", disk, detail) class VirtTools(object): @@ -205,7 +207,7 @@ def clone_vm_filesystem(self, newname=None): :param newname:if newname is None, create a new name with clone added. 
""" - logging.info("Cloning...") + LOG.info("Cloning...") # Init options for virt-clone options = {} autoclone = bool(self.params.get("autoclone", False)) @@ -233,7 +235,7 @@ def clone_vm_filesystem(self, newname=None): autoclone, **options) if result.exit_status: error_info = "Clone %s to %s failed." % (self.oldvm.name, newname) - logging.error(error_info) + LOG.error(error_info) return (False, result) else: self.newvm.name = newname @@ -246,9 +248,9 @@ def sparsify_disk(self): """ Sparsify a disk """ - logging.info("Sparsifing...") + LOG.info("Sparsifing...") if self.indisk is None: - logging.error("No disk can be sparsified.") + LOG.error("No disk can be sparsified.") return (False, "Input disk is None.") if self.outdisk is None: self.outdisk = "%s-sparsify" % self.indisk @@ -259,7 +261,7 @@ def sparsify_disk(self): if result.exit_status: error_info = "Sparsify %s to %s failed." % (self.indisk, self.outdisk) - logging.error(error_info) + LOG.error(error_info) return (False, result) return (True, result) @@ -272,7 +274,7 @@ def define_vm_with_newdisk(self): 2.delete uuid 3.replace disk """ - logging.info("Define a new vm:") + LOG.info("Define a new vm:") old_vm_name = self.oldvm.name new_vm_name = "%s-vtnewdisk" % old_vm_name self.newvm.name = new_vm_name @@ -284,10 +286,10 @@ def define_vm_with_newdisk(self): vmxml.uuid = "" vmxml.set_xml(re.sub(old_disk, new_disk, str(vmxml.__dict_get__('xml')))) - logging.debug(vmxml.__dict_get__('xml')) + LOG.debug(vmxml.__dict_get__('xml')) vmxml.define() except xcepts.LibvirtXMLError as detail: - logging.debug(detail) + LOG.debug(detail) return (False, detail) return (True, vmxml.xml) @@ -296,7 +298,7 @@ def expand_vm_filesystem(self, resize_part_num=2, resized_size="+1G", """ Expand vm's filesystem with virt-resize. 
""" - logging.info("Resizing vm's disk...") + LOG.info("Resizing vm's disk...") options = {} options['resize'] = "/dev/sda%s" % resize_part_num options['resized_size'] = resized_size @@ -310,7 +312,7 @@ def expand_vm_filesystem(self, resize_part_num=2, resized_size="+1G", options['timeout'] = int(self.params.get("timeout", 480)) result = lgf.virt_resize_cmd(self.indisk, self.outdisk, **options) if result.exit_status: - logging.error(result) + LOG.error(result) return (False, result) return (True, self.outdisk) @@ -320,7 +322,7 @@ def guestmount(self, mountpoint, disk_or_domain=None): :param disk_or_domain: if it is None, use default vm in params """ - logging.info("Mounting filesystems...") + LOG.info("Mounting filesystems...") if disk_or_domain is None: disk_or_domain = self.oldvm.name if not os.path.isdir(mountpoint): @@ -342,7 +344,7 @@ def guestmount(self, mountpoint, disk_or_domain=None): if result.exit_status: error_info = "Mount %s to %s failed." % (disk_or_domain, mountpoint) - logging.error(result) + LOG.error(result) return (False, error_info) return (True, mountpoint) @@ -352,12 +354,12 @@ def write_file_with_guestmount(self, mountpoint, path, """ Write content to file with guestmount """ - logging.info("Creating file...") + LOG.info("Creating file...") gms, gmo = self.guestmount(mountpoint, vm_ref) if gms is True: mountpoint = gmo else: - logging.error("Create file %s failed.", path) + LOG.error("Create file %s failed.", path) return (False, gmo) # file's path on host's mountpoint @@ -370,9 +372,9 @@ def write_file_with_guestmount(self, mountpoint, path, fd.write(content) fd.close() except IOError as detail: - logging.error(detail) + LOG.error(detail) return (False, detail) - logging.info("Create file %s successfully", file_path) + LOG.info("Create file %s successfully", file_path) # Cleanup created file if cleanup: process.run("rm -f %s" % file_path, ignore_status=True) @@ -467,8 +469,8 @@ def get_vm_info_with_inspector(self, vm_ref=None): sys_info = 
{} result = lgf.virt_inspector(vm_ref, ignore_status=True) if result.exit_status: - logging.error("Get %s information with inspector(2) failed:\n%s", - vm_ref, result) + LOG.error("Get %s information with inspector(2) failed:\n%s", + vm_ref, result) return sys_info # Analyse output to get information try: @@ -477,7 +479,7 @@ def get_vm_info_with_inspector(self, vm_ref=None): if os_root is None: raise VTXMLParseError("operatingsystem", os_root) except (IOError, VTXMLParseError) as detail: - logging.error(detail) + LOG.error(detail) return sys_info sys_info['root'] = os_root.findtext("root") sys_info['name'] = os_root.findtext("name") @@ -504,7 +506,7 @@ def get_vm_info_with_inspector(self, vm_ref=None): fs_detail['uuid'] = node.findtext("uuid") filesystems[fs_device] = fs_detail sys_info['filesystems'] = filesystems - logging.debug("VM information:\n%s", sys_info) + LOG.debug("VM information:\n%s", sys_info) return sys_info @@ -537,7 +539,7 @@ def get_root(self): getroot_result = self.inspect_os() roots_list = getroot_result.stdout_text.splitlines() if getroot_result.exit_status or not len(roots_list): - logging.error("Get root failed:%s", getroot_result) + LOG.error("Get root failed:%s", getroot_result) return (False, getroot_result) return (True, roots_list[0].strip()) @@ -545,11 +547,11 @@ def analyse_release(self): """ Analyse /etc/redhat-release """ - logging.info("Analysing /etc/redhat-release...") + LOG.info("Analysing /etc/redhat-release...") release_result = self.cat("/etc/redhat-release") - logging.debug(release_result) + LOG.debug(release_result) if release_result.exit_status: - logging.error("Cat /etc/redhat-release failed") + LOG.error("Cat /etc/redhat-release failed") return (False, release_result) release_type = {'rhel': "Red Hat Enterprise Linux", @@ -563,11 +565,11 @@ def write_file(self, path, content): """ Create a new file to vm with guestfish """ - logging.info("Creating file %s in vm...", path) + LOG.info("Creating file %s in vm...", path) 
write_result = self.write(path, content) if write_result.exit_status: - logging.error("Create '%s' with content '%s' failed:%s", - path, content, write_result) + LOG.error("Create '%s' with content '%s' failed:%s", + path, content, write_result) return False return True @@ -577,7 +579,7 @@ def get_partitions_info(self, device="/dev/sda"): """ list_result = self.part_list(device) if list_result.exit_status: - logging.error("List partition info failed:%s", list_result) + LOG.error("List partition info failed:%s", list_result) return (False, list_result) list_lines = list_result.stdout_text.splitlines() # This dict is a struct like this: {key:{a dict}, key:{a dict}} @@ -607,7 +609,7 @@ def get_partitions_info(self, device="/dev/sda"): if index != -1: partitions[index] = part_details - logging.info(partitions) + LOG.info(partitions) return (True, partitions) def get_part_size(self, part_num): @@ -639,7 +641,7 @@ def create_fs(self): return (False, "partition_type is incorrect, support [physical,lvm]") if partition_type == "lvm": - logging.info("create lvm partition...") + LOG.info("create lvm partition...") pv_name = self.params.get("pv_name", "/dev/sdb") vg_name = self.params.get("vg_name", "vol_test") lv_name = self.params.get("lv_name", "vol_file") @@ -654,7 +656,7 @@ def create_fs(self): self.lvcreate(lv_name, vg_name, lv_size) elif partition_type == "physical": - logging.info("create physical partition...") + LOG.info("create physical partition...") pv_name = self.params.get("pv_name", "/dev/sdb") mount_point = pv_name + "1" @@ -669,7 +671,7 @@ def create_fs(self): fs_type, mount_point, "blocksize:%s" % (blocksize)) self.vfs_type(mount_point) else: - logging.error("with_blocksize is set but blocksize not given") + LOG.error("with_blocksize is set but blocksize not given") self.umount_all() self.sync() return (False, "with_blocksize is set but blocksize not given") @@ -692,14 +694,14 @@ def create_msdos_part(self, device, start="1", end="-1"): Default partition 
section is whole disk(1~-1). And return its part name if part add succeed. """ - logging.info("Creating a new partition on %s...", device) + LOG.info("Creating a new partition on %s...", device) init_result = self.part_init(device, "msdos") if init_result.exit_status: - logging.error("Init disk failed:%s", init_result) + LOG.error("Init disk failed:%s", init_result) return (False, init_result) add_result = self.part_add(device, "p", start, end) if add_result.exit_status: - logging.error("Add a partition failed:%s", add_result) + LOG.error("Add a partition failed:%s", add_result) return (False, add_result) # Get latest created part num to return @@ -722,14 +724,14 @@ def create_whole_disk_msdos_part(self, device): Create only one msdos partition in given device. And return its part name if part add succeed. """ - logging.info("Creating one partition of whole %s...", device) + LOG.info("Creating one partition of whole %s...", device) init_result = self.part_init(device, "msdos") if init_result.exit_status: - logging.error("Init disk failed:%s", init_result) + LOG.error("Init disk failed:%s", init_result) return (False, init_result) disk_result = self.part_disk(device, "msdos") if disk_result.exit_status: - logging.error("Init disk failed:%s", disk_result) + LOG.error("Init disk failed:%s", disk_result) return (False, disk_result) # Get latest created part num to return @@ -779,10 +781,10 @@ def get_md5(self, path): """ Get files md5 value. 
""" - logging.info("Computing %s's md5...", path) + LOG.info("Computing %s's md5...", path) md5_result = self.checksum("md5", path) if md5_result.exit_status: - logging.error("Check %s's md5 failed:%s", path, md5_result) + LOG.error("Check %s's md5 failed:%s", path, md5_result) return (False, md5_result) return (True, md5_result.stdout_text.strip()) @@ -795,9 +797,9 @@ def reset_interface(self, iface_mac): if not vm_ref: vm_ref = self.params.get("disk_img") if not vm_ref: - logging.error("No object to edit.") + LOG.error("No object to edit.") return False - logging.info("Resetting %s's mac to %s", vm_ref, iface_mac) + LOG.info("Resetting %s's mac to %s", vm_ref, iface_mac) # Fix file which includes interface devices information # Default is /etc/udev/rules.d/70-persistent-net.rules @@ -816,15 +818,15 @@ def reset_interface(self, iface_mac): expr=edit_expr, debug=True, ignore_status=True) if result.exit_status: - logging.error("Edit %s failed:%s", devices_file, result) + LOG.error("Edit %s failed:%s", devices_file, result) return False except lgf.LibguestfsCmdError as detail: - logging.error("Edit %s failed:%s", devices_file, detail) + LOG.error("Edit %s failed:%s", devices_file, detail) return False self.new_session() # Just to keep output looking better self.is_ready() - logging.debug(self.cat(devices_file)) + LOG.debug(self.cat(devices_file)) # Fix interface file for ifcfg_file in ifcfg_files: @@ -838,15 +840,15 @@ def reset_interface(self, iface_mac): expr=edit_expr, debug=True, ignore_status=True) if result.exit_status: - logging.error("Edit %s failed:%s", ifcfg_file, result) + LOG.error("Edit %s failed:%s", ifcfg_file, result) return False except lgf.LibguestfsCmdError as detail: - logging.error("Edit %s failed:%s", ifcfg_file, detail) + LOG.error("Edit %s failed:%s", ifcfg_file, detail) return False self.new_session() # Just to keep output looking better self.is_ready() - logging.debug(self.cat(ifcfg_file)) + LOG.debug(self.cat(ifcfg_file)) return True def 
copy_ifcfg_back(self): @@ -859,6 +861,6 @@ def copy_ifcfg_back(self): if is_need.stdout.strip() == b"false": cp_result = self.cp(bak_file, ifcfg_file) if cp_result.exit_status: - logging.warn("Recover ifcfg file failed:%s", cp_result) + LOG.warn("Recover ifcfg file failed:%s", cp_result) return False return True diff --git a/virttest/utils_test/libvirt.py b/virttest/utils_test/libvirt.py index cbff1090f9..166eff666e 100644 --- a/virttest/utils_test/libvirt.py +++ b/virttest/utils_test/libvirt.py @@ -84,6 +84,8 @@ ping = utils_net.ping +LOG = logging.getLogger('avocado.' + __name__) + class LibvirtNetwork(object): @@ -375,7 +377,7 @@ def check_blockjob(vm_name, target, check_point="none", value="0"): :return: Boolean value, true for pass, false for fail """ if check_point not in ["progress", "bandwidth", "none"]: - logging.error("Check point must be: progress, bandwidth or none") + LOG.error("Check point must be: progress, bandwidth or none") return False try: cmd_result = virsh.blockjob( @@ -384,15 +386,15 @@ def check_blockjob(vm_name, target, check_point="none", value="0"): err = cmd_result.stderr_text.strip() status = cmd_result.exit_status except Exception as e: - logging.error("Error occurred: %s", e) + LOG.error("Error occurred: %s", e) return False if status: - logging.error("Run blockjob command fail") + LOG.error("Run blockjob command fail") return False # libvirt print block job progress to stderr if check_point == 'none': if len(err): - logging.error("Expect no job but find block job:\n%s", err) + LOG.error("Expect no job but find block job:\n%s", err) return False return True if check_point == "progress": @@ -417,12 +419,12 @@ def check_blockjob(vm_name, target, check_point="none", value="0"): unit = 'M' u_value = utils_misc.normalize_data_size(value, unit) if float(u_value) == float(bandwidth): - logging.debug("Bandwidth is equal to %s", bandwidth) + LOG.debug("Bandwidth is equal to %s", bandwidth) return True - logging.error("Bandwidth is not equal to 
%s", bandwidth) + LOG.error("Bandwidth is not equal to %s", bandwidth) return False except Exception as e: - logging.error("Fail to get bandwidth: %s", e) + LOG.error("Fail to get bandwidth: %s", e) return False @@ -493,13 +495,13 @@ def setup_or_cleanup_nfs(is_setup, mount_dir="nfs-mount", is_mount=False, if not ubuntu and utils_selinux.is_enforcing(): if set_selinux_permissive: utils_selinux.set_status("permissive") - logging.debug("selinux set to permissive mode, " - "this is not recommended, potential access " - "control error could be missed.") + LOG.debug("selinux set to permissive mode, " + "this is not recommended, potential access " + "control error could be missed.") else: - logging.debug("selinux is in enforcing mode, libvirt needs " - "\"setsebool virt_use_nfs on\" to get " - "nfs access right.") + LOG.debug("selinux is in enforcing mode, libvirt needs " + "\"setsebool virt_use_nfs on\" to get " + "nfs access right.") _nfs.setup() nfs_mount_info = process.run('nfsstat -m', shell=True).stdout_text.strip().split(",") for i in nfs_mount_info: @@ -552,10 +554,10 @@ def setup_or_cleanup_iscsi(is_setup, is_login=True, iscsi_device = utils_misc.wait_for(_iscsi.get_device_name, 5, 0, 1, "Searching iscsi device name.") if iscsi_device: - logging.debug("iscsi device: %s", iscsi_device) + LOG.debug("iscsi device: %s", iscsi_device) return iscsi_device if not iscsi_device: - logging.error("Not find iscsi device.") + LOG.error("Not find iscsi device.") # Cleanup and return "" - caller needs to handle that # _iscsi.export_target() will have set the emulated_id and # export_flag already on success... 
@@ -646,7 +648,7 @@ def define_pool(pool_name, pool_type, pool_target, cleanup_flag, **kwargs): # Prepare gluster service and create volume hostip = gluster.setup_or_cleanup_gluster(True, gluster_source_name, pool_name=pool_name, **kwargs) - logging.debug("hostip is %s", hostip) + LOG.debug("hostip is %s", hostip) # create image in gluster volume file_path = "gluster://%s/%s" % (hostip, gluster_source_name) for i in range(gluster_vol_number): @@ -673,7 +675,7 @@ def define_pool(pool_name, pool_type, pool_target, cleanup_flag, **kwargs): result = virsh.pool_define_as(pool_name, pool_type, pool_target, extra, ignore_status=True) except process.CmdError: - logging.error("Define '%s' type pool fail.", pool_type) + LOG.error("Define '%s' type pool fail.", pool_type) return result @@ -692,33 +694,33 @@ def verify_virsh_console(session, user, passwd, timeout=10, debug=False): if match == 0: if debug: - logging.debug("Got '^]', sending '\\n'") + LOG.debug("Got '^]', sending '\\n'") session.sendline() elif match == 1: if debug: - logging.debug("Got 'login:', sending '%s'", user) + LOG.debug("Got 'login:', sending '%s'", user) session.sendline(user) elif match == 2: if debug: - logging.debug("Got 'Password:', sending '%s'", passwd) + LOG.debug("Got 'Password:', sending '%s'", passwd) session.sendline(passwd) elif match == 3: if debug: - logging.debug("Got Shell prompt -- logged in") + LOG.debug("Got Shell prompt -- logged in") break status, output = session.cmd_status_output(console_cmd) - logging.info("output of command:\n%s", output) + LOG.info("output of command:\n%s", output) session.close() except (aexpect.ShellError, aexpect.ExpectError) as detail: log = session.get_output() - logging.error("Verify virsh console failed:\n%s\n%s", detail, log) + LOG.error("Verify virsh console failed:\n%s\n%s", detail, log) session.close() return False if not re.search("processor", output): - logging.error("Verify virsh console failed: Result does not match.") + LOG.error("Verify virsh 
console failed: Result does not match.") return False return True @@ -787,7 +789,7 @@ def mk_part(disk, size="100M", fs_type='ext4', session=None): output = to_text(run_cmd(print_cmd)) current_label = re.search(r'Partition Table: (\w+)', output).group(1) if current_label not in support_lable: - logging.error('Not support create partition on %s disk', current_label) + LOG.error('Not support create partition on %s disk', current_label) return disk_size = re.search(r"Disk %s: (\w+)" % disk, output).group(1) @@ -861,7 +863,7 @@ def check_actived_pool(pool_name): raise exceptions.TestFail("Can't find pool %s" % pool_name) if not sp.is_pool_active(pool_name): raise exceptions.TestFail("Pool %s is not active." % pool_name) - logging.debug("Find active pool %s", pool_name) + LOG.debug("Find active pool %s", pool_name) return True @@ -1036,7 +1038,7 @@ def pre_pool(self, pool_name, pool_type, pool_target, emulated_image, hostip = gluster.setup_or_cleanup_gluster(True, source_name, pool_name=pool_name, **kwargs) - logging.debug("hostip is %s", hostip) + LOG.debug("hostip is %s", hostip) extra = "--source-host %s --source-path %s" % (hostip, source_name) extra += " --source-format %s" % source_format @@ -1067,12 +1069,12 @@ def pre_pool(self, pool_name, pool_type, pool_target, emulated_image, else: ip_addr = "127.0.0.1" if iscsi_chap_user and iscsi_chap_password and iscsi_secret_usage: - logging.debug("setup %s pool with chap authentication", pool_type) + LOG.debug("setup %s pool with chap authentication", pool_type) extra = (" --auth-type chap --auth-username %s " "--secret-usage %s" % (iscsi_chap_user, iscsi_secret_usage)) else: - logging.debug("setup %s pool without authentication", pool_type) + LOG.debug("setup %s pool without authentication", pool_type) setup_or_cleanup_iscsi(is_setup=True, emulated_image=emulated_image, image_size=image_size, @@ -1114,17 +1116,17 @@ def pre_pool(self, pool_name, pool_type, pool_target, emulated_image, scsi_pool_source_xml.adp_wwnn = 
pool_wwnn scsi_pool_xml.set_source(scsi_pool_source_xml) - logging.debug("SCSI pool XML %s:\n%s", scsi_pool_xml.xml, - str(scsi_pool_xml)) + LOG.debug("SCSI pool XML %s:\n%s", scsi_pool_xml.xml, + str(scsi_pool_xml)) scsi_xml_file = scsi_pool_xml.xml self.params['scsi_xml_file'] = scsi_xml_file elif pool_type == "gluster": source_path = kwargs.get('source_path') - logging.info("source path is %s" % source_path) + LOG.info("source path is %s" % source_path) hostip = gluster.setup_or_cleanup_gluster(True, source_name, pool_name=pool_name, **kwargs) - logging.debug("Gluster host ip address: %s", hostip) + LOG.debug("Gluster host ip address: %s", hostip) extra = "--source-host %s --source-path %s --source-name %s" % \ (hostip, source_path, source_name) elif pool_type == "mpath": @@ -1134,8 +1136,8 @@ def pre_pool(self, pool_name, pool_type, pool_target, emulated_image, mpath_pool_xml.name = pool_name mpath_pool_xml.pool_type = "mpath" mpath_pool_xml.target_path = pool_target - logging.debug("mpath pool XML %s:\n%s", - mpath_pool_xml.xml, str(mpath_pool_xml)) + LOG.debug("mpath pool XML %s:\n%s", + mpath_pool_xml.xml, str(mpath_pool_xml)) mpath_xml_file = mpath_pool_xml.xml self.params['mpath_xml_file'] = mpath_xml_file @@ -1167,9 +1169,9 @@ def pre_pool(self, pool_name, pool_type, pool_target, emulated_image, emulated_image, **kwargs) raise exceptions.TestFail("Prepare pool failed") xml_str = virsh.pool_dumpxml(pool_name) - logging.debug("New prepared pool XML: %s", xml_str) + LOG.debug("New prepared pool XML: %s", xml_str) - logging.info("Refreshing pool") + LOG.info("Refreshing pool") virsh.pool_refresh(pool_name) def pre_vol(self, vol_name, vol_format, capacity, allocation, pool_name): @@ -1243,7 +1245,7 @@ def check_result(result, stderr = result.stderr_text stdout = result.stdout_text all_msg = '\n'.join([stdout, stderr]) - logging.debug("Command result: %s", all_msg) + LOG.debug("Command result: %s", all_msg) try: unicode @@ -1276,7 +1278,7 @@ def 
check_result(result, "but failed with:\n%s" % (expected_fails, all_msg)) else: - logging.info("Get expect error msg:%s" % msg_to_search) + LOG.info("Get expect error msg:%s" % msg_to_search) else: raise exceptions.TestFail( "Expect should succeed, but got: %s" % all_msg) @@ -1295,9 +1297,9 @@ def check_result(result, "but failed with: %s" % (expected_match, all_msg)) else: - logging.debug('Found expected content:\n%s', - [r.group(0) for r in search_result - if r is not None]) + LOG.debug('Found expected content:\n%s', + [r.group(0) for r in search_result + if r is not None]) def check_exit_status(result, expect_error=False): @@ -1311,8 +1313,8 @@ def check_exit_status(result, expect_error=False): if result.exit_status != 0: raise exceptions.TestFail(result.stderr_text) else: - logging.debug("Command output:\n%s", - result.stdout_text.strip()) + LOG.debug("Command output:\n%s", + result.stdout_text.strip()) elif expect_error and result.exit_status == 0: raise exceptions.TestFail("Run '%s' expect fail, but run " "successfully." 
% result.command) @@ -1386,23 +1388,23 @@ def check_iface(iface_name, checkpoint, extra="", **dargs): result.stdout_text) if list(filter(lambda x: x[0] == iface_name, output[1:])): list_find = True - logging.debug("Find '%s' in virsh iface-list output: %s", - iface_name, list_find) + LOG.debug("Find '%s' in virsh iface-list output: %s", + iface_name, list_find) # Check network script independent of distro iface_script = utils_net.get_network_cfg_file(iface_name) ifcfg_find = os.path.exists(iface_script) - logging.debug("Find '%s': %s", iface_script, ifcfg_find) + LOG.debug("Find '%s': %s", iface_script, ifcfg_find) check_pass = list_find and ifcfg_find elif checkpoint == "mac": # extra is the MAC address to compare iface_mac = iface.get_mac().lower() check_pass = iface_mac == extra - logging.debug("MAC address of %s: %s", iface_name, iface_mac) + LOG.debug("MAC address of %s: %s", iface_name, iface_mac) elif checkpoint == "ip": # extra is the IP address to compare iface_ip = iface.get_ip() check_pass = iface_ip == extra - logging.debug("IP address of %s: %s", iface_name, iface_ip) + LOG.debug("IP address of %s: %s", iface_name, iface_ip) elif checkpoint == "state": # check iface State result = virsh.iface_list(extra, ignore_status=True) @@ -1421,8 +1423,8 @@ def check_iface(iface_name, checkpoint, extra="", **dargs): timeout=timeout,) check_pass = ping_s == 0 else: - logging.debug("Support check points are: %s", support_check) - logging.error("Unsupport check point: %s", checkpoint) + LOG.debug("Support check points are: %s", support_check) + LOG.error("Unsupport check point: %s", checkpoint) except Exception as detail: raise exceptions.TestFail("Interface check failed: %s" % detail) return check_pass @@ -1461,7 +1463,7 @@ def create_hostdev_xml(pci_id, boot_order=None, hostdev_xml.teaming = eval(teaming) # Create attributes dict for device's address element - logging.info("pci_id/device id is %s" % pci_id) + LOG.info("pci_id/device id is %s" % pci_id) if dev_type 
in ["pci", "usb"]: device_domain = pci_id.split(':')[0] @@ -1489,7 +1491,7 @@ def create_hostdev_xml(pci_id, boot_order=None, **(dict(adapter_name="scsi_host%s" % id_parts[0], bus=id_parts[1], target=id_parts[2], unit=id_parts[3]))) - logging.debug("Hostdev XML:\n%s", str(hostdev_xml)) + LOG.debug("Hostdev XML:\n%s", str(hostdev_xml)) return hostdev_xml @@ -1700,7 +1702,7 @@ def create_disk_xml(params): if label: sec_dict.update({'label': label}) sec_xml.update(sec_dict) - logging.debug("The sec xml is %s", sec_xml.xmltreefile) + LOG.debug("The sec xml is %s", sec_xml.xmltreefile) source_seclabel.append(sec_xml) source_params = {"attrs": source_attrs, "seclabels": source_seclabel, @@ -1780,8 +1782,8 @@ def create_disk_xml(params): diskxml.rawio = rawio diskxml.xmltreefile.write() except Exception as detail: - logging.error("Fail to create disk XML:\n%s", detail) - logging.debug("Disk XML %s:\n%s", diskxml.xml, str(diskxml)) + LOG.error("Fail to create disk XML:\n%s", detail) + LOG.debug("Disk XML %s:\n%s", diskxml.xml, str(diskxml)) # Wait for file completed def file_exists(): @@ -1811,15 +1813,14 @@ def set_disk_attr(vmxml, target, tag, attr): if tag in ["driver", "boot", "address", "alias", "source"]: for key in attr: disk.find(tag).set(key, attr[key]) - logging.debug("key '%s' value '%s' pair is " - "set", key, attr[key]) + LOG.debug("key '%s' value '%s' pair is set", key, attr[key]) vmxml.xmltreefile.write() else: - logging.debug("tag '%s' is not supported now", tag) + LOG.debug("tag '%s' is not supported now", tag) return False except AttributeError: - logging.error("Fail to set attribute '%s' with value " - "'%s'.", key, attr[key]) + LOG.error("Fail to set attribute '%s' with value " + "'%s'.", key, attr[key]) return False return True @@ -1997,7 +1998,7 @@ def create_net_xml(net_name, params): if vf_list: netxml.vf_list = [netxml.new_vf_address(**{'attrs': attr}) for attr in vf_list] - logging.debug("New network xml file: %s", netxml) + LOG.debug("New network 
xml file: %s", netxml) netxml.xmltreefile.write() return netxml except Exception as detail: @@ -2114,7 +2115,7 @@ def create_nwfilter_xml(params): rulexml = rule.backup_rule() filterxml.xmltreefile.write() - logging.info("The network filter xml is:\n%s" % filterxml) + LOG.info("The network filter xml is:\n%s" % filterxml) wait_for_file_over('', filterxml.xml) return filterxml @@ -2171,7 +2172,7 @@ def create_channel_xml(params, alias=False, address=False): 'bus': '0'} channel_params['address'] = channel_address channelxml = channel.Channel.new_from_dict(channel_params) - logging.debug("Channel XML:\n%s", channelxml) + LOG.debug("Channel XML:\n%s", channelxml) return channelxml @@ -2200,7 +2201,7 @@ def add_panic_device(vm_name, model='isa', addr_type='isa', addr_iobase='0x505') vmxml = vm_xml.VMXML.new_from_dumpxml(vm_name) panic_dev = vmxml.xmltreefile.find('devices/panic') if panic_dev is not None: - logging.info("Panic device already exists") + LOG.info("Panic device already exists") return False else: panic_dev = panic.Panic() @@ -2266,7 +2267,7 @@ def create_vsock_xml(model, auto_cid='yes', invalid_cid=False): chars = string.ascii_letters + string.digits + '-_' alias_name = 'ua-' + ''.join(random.choice(chars) for _ in list(range(64))) vsock_dev.alias = {'name': alias_name} - logging.debug(vsock_dev) + LOG.debug(vsock_dev) return vsock_dev @@ -2306,7 +2307,7 @@ def create_rng_xml(dparams): if rng_alias: rng_xml.alias = dict(name=rng_alias) - logging.debug("Rng xml: %s", rng_xml) + LOG.debug("Rng xml: %s", rng_xml) return rng_xml @@ -2329,7 +2330,7 @@ def update_memballoon_xml(vmxml, membal_dict): if membal_alias_name: memballoon_xml.alias_name = membal_alias_name vmxml.add_device(memballoon_xml) - logging.info(memballoon_xml) + LOG.info(memballoon_xml) vmxml.sync() @@ -2398,13 +2399,13 @@ def set_guest_agent(vm): :param vm: the vm object """ - logging.warning("This function is going to be deprecated. 
" - "Please use vm.prepare_guest_agent() instead.") + LOG.warning("This function is going to be deprecated. " + "Please use vm.prepare_guest_agent() instead.") # reset domain state if vm.is_alive(): vm.destroy(gracefully=False) vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) - logging.debug("Attempting to set guest agent channel") + LOG.debug("Attempting to set guest agent channel") vmxml.set_agent_channel() vmxml.sync() vm.start() @@ -2469,7 +2470,7 @@ def set_vm_disk(vm, params, tmp_dir=None, test=None): :param params: dict, dict include setup vm disk xml configurations """ vmxml = vm_xml.VMXML.new_from_inactive_dumpxml(vm.name) - logging.debug("original xml is: %s", vmxml.xmltreefile) + LOG.debug("original xml is: %s", vmxml.xmltreefile) disk_device = params.get("disk_device", "disk") disk_snapshot_attr = params.get("disk_snapshot_attr") disk_type = params.get("disk_type", "file") @@ -2491,7 +2492,7 @@ def set_vm_disk(vm, params, tmp_dir=None, test=None): exp_opt = params.get("export_options", "rw,no_root_squash,fsid=0") exp_dir = params.get("export_dir", "nfs-export") first_disk = vm.get_first_disk_devices() - logging.debug("first disk is %s", first_disk) + LOG.debug("first disk is %s", first_disk) blk_source = first_disk['source'] blk_source = params.get("blk_source_name", blk_source) disk_xml = vmxml.devices.by_device_tag('disk')[0] @@ -2594,7 +2595,7 @@ def set_vm_disk(vm, params, tmp_dir=None, test=None): # Setup gluster. host_ip = gluster.setup_or_cleanup_gluster(True, brick_path=brick_path, **params) - logging.debug("host ip: %s " % host_ip) + LOG.debug("host ip: %s " % host_ip) dist_img = "gluster.%s" % disk_format if image_convert: @@ -2645,14 +2646,14 @@ def set_vm_disk(vm, params, tmp_dir=None, test=None): src_file_path = "%s/%s" % (mnt_path, dist_img) if params.get("change_file_uid") and params.get("change_file_gid"): - logging.debug("Changing the ownership of {} to {}.{}." 
- .format(src_file_path, params["change_file_uid"], - params["change_file_gid"])) + LOG.debug("Changing the ownership of {} to {}.{}." + .format(src_file_path, params["change_file_uid"], + params["change_file_gid"])) os.chown(src_file_path, params["change_file_uid"], params["change_file_gid"]) res = os.stat(src_file_path) - logging.debug("The ownership of {} is updated, uid: {}, gid: {}." - .format(src_file_path, res.st_uid, res.st_gid)) + LOG.debug("The ownership of {} is updated, uid: {}, gid: {}." + .format(src_file_path, res.st_uid, res.st_gid)) disk_params_src = {'source_file': src_file_path} params["source_file"] = src_file_path src_file_list.append(src_file_path) @@ -2742,7 +2743,7 @@ def set_vm_disk(vm, params, tmp_dir=None, test=None): dom_iothreads = params.get("dom_iothreads") if dom_iothreads: vmxml.iothreads = int(dom_iothreads) - logging.debug("The vm xml now is: %s" % vmxml.xmltreefile) + LOG.debug("The vm xml now is: %s" % vmxml.xmltreefile) vmxml.sync() vm.start() @@ -2756,7 +2757,7 @@ def attach_additional_device(vm_name, targetdev, disk_path, params, config=True) :param targetdev: target of disk device :param params: dict include necessary configurations of device """ - logging.info("Attaching disk...") + LOG.info("Attaching disk...") # Update params for source file params['source_file'] = disk_path @@ -2858,10 +2859,10 @@ def create_scsi_disk(scsi_option, scsi_size="2048"): result = process.run("lsscsi|grep scsi_debug|awk '{print $6}'", shell=True) scsi_disk = result.stdout_text.strip() - logging.info("scsi disk: %s" % scsi_disk) + LOG.info("scsi disk: %s" % scsi_disk) return scsi_disk except Exception as e: - logging.error(str(e)) + LOG.error(str(e)) return None @@ -2915,7 +2916,7 @@ def set_controller_multifunction(vm_name, controller_type='scsi'): expanded_controllers[key] = new_controller index += 1 - logging.debug("Expanded controllers: %s", list(expanded_controllers.values())) + LOG.debug("Expanded controllers: %s", 
list(expanded_controllers.values())) vmxml.del_controller(controller_type) vmxml.set_controller(list(expanded_controllers.values())) vmxml.sync() @@ -3018,7 +3019,7 @@ def generate_disks_index(count, target="virtio"): if result.exit_status: raise exceptions.TestFail("Attach device %s failed." % target_dev) - logging.debug("New VM XML:\n%s", vm.get_xml()) + LOG.debug("New VM XML:\n%s", vm.get_xml()) return added_disks @@ -3033,7 +3034,7 @@ def define_new_vm(vm_name, new_name): vmxml.define() return True except xcepts.LibvirtXMLError as detail: - logging.error(detail) + LOG.error(detail) return False @@ -3047,7 +3048,7 @@ def remotely_control_libvirtd(server_ip, server_user, server_pwd, session = remote.wait_for_login('ssh', server_ip, '22', server_user, server_pwd, r"[\#\$]\s*$") - logging.info("%s libvirt daemon\n", action) + LOG.info("%s libvirt daemon\n", action) service_libvirtd_control(action, session) session.close() except (remote.LoginError, aexpect.ShellError, process.CmdError) as detail: @@ -3057,7 +3058,7 @@ def remotely_control_libvirtd(server_ip, server_user, server_pwd, raise exceptions.TestFail("Failed to %s libvirtd service on " "server: %s\n", action, detail) else: - logging.info("It is an expect %s", detail) + LOG.info("It is an expect %s", detail) def connect_libvirtd(uri, read_only="", virsh_cmd="list", auth_user=None, @@ -3094,7 +3095,7 @@ def connect_libvirtd(uri, read_only="", virsh_cmd="list", auth_user=None, if su_user != "": command = "su %s -c '%s'" % (su_user, command) - logging.info("Execute %s", command) + LOG.info("Execute %s", command) # setup shell session session = aexpect.ShellSession(command, echo=True) @@ -3118,37 +3119,37 @@ def connect_libvirtd(uri, read_only="", virsh_cmd="list", auth_user=None, timeout=30, internal_timeout=1) if match == -patterns_list_len: - logging.info("Matched 'yes/no', details: <%s>", text) + LOG.info("Matched 'yes/no', details: <%s>", text) session.sendline("yes") continue elif match == 
-patterns_list_len + 1 or match == -patterns_list_len + 2: - logging.info("Matched 'username', details: <%s>", text) + LOG.info("Matched 'username', details: <%s>", text) session.sendline(auth_user) continue elif match == -patterns_list_len + 3: - logging.info("Matched 'password', details: <%s>", text) + LOG.info("Matched 'password', details: <%s>", text) if match_dict_item and second_pass: - logging.info('Prompt for a password when there is a ' - 'password in extra dict, trying that ' - 'one:{}.'.format(second_pass)) + LOG.info('Prompt for a password when there is a ' + 'password in extra dict, trying that ' + 'one:{}.'.format(second_pass)) session.sendline(second_pass) else: session.sendline(auth_pwd) continue elif match == -patterns_list_len + 4: - logging.info("Expected output of virsh command: <%s>", text) + LOG.info("Expected output of virsh command: <%s>", text) break if patterns_list_len > 5: extra_len = len(patterns_extra_dict) index_in_extra_dict = match + extra_len key = list(patterns_extra_dict.keys())[index_in_extra_dict] value = patterns_extra_dict.get(key, "") - logging.info("Matched '%s', details:<%s>", key, text) + LOG.info("Matched '%s', details:<%s>", key, text) session.sendline(value) match_dict_item = True continue else: - logging.error("The real prompt text: <%s>", text) + LOG.error("The real prompt text: <%s>", text) break log = session.get_output() @@ -3157,7 +3158,7 @@ def connect_libvirtd(uri, read_only="", virsh_cmd="list", auth_user=None, except (aexpect.ShellError, aexpect.ExpectError) as details: log = session.get_output() session.close() - logging.error("Failed to connect libvirtd: %s\n%s", details, log) + LOG.error("Failed to connect libvirtd: %s\n%s", details, log) return (False, log) @@ -3169,7 +3170,7 @@ def get_all_vol_paths(): sp = libvirt_storage.StoragePool() for pool_name in list(sp.list_pools().keys()): if sp.list_pools()[pool_name]['State'] != "active": - logging.warning( + LOG.warning( "Inactive pool '%s' cannot be 
processed" % pool_name) continue pv = libvirt_storage.PoolVolume(pool_name) @@ -3194,7 +3195,7 @@ def do_migration(vm_name, uri, extra, auth_pwd, auth_user="root", if su_user != "": command = "su %s -c '%s'" % (su_user, command) - logging.info("Execute %s", command) + LOG.info("Execute %s", command) # setup shell session session = aexpect.ShellSession(command, echo=True) @@ -3207,19 +3208,19 @@ def do_migration(vm_name, uri, extra, auth_pwd, auth_user="root", timeout=timeout, internal_timeout=1) if match == -4: - logging.info("Matched 'yes/no', details: <%s>", text) + LOG.info("Matched 'yes/no', details: <%s>", text) session.sendline("yes") elif match == -3: - logging.info("Matched 'username', details: <%s>", text) + LOG.info("Matched 'username', details: <%s>", text) session.sendline(auth_user) elif match == -2: - logging.info("Matched 'password', details: <%s>", text) + LOG.info("Matched 'password', details: <%s>", text) session.sendline(auth_pwd) elif match == -1: - logging.info("Expected output of virsh migrate: <%s>", text) + LOG.info("Expected output of virsh migrate: <%s>", text) break else: - logging.error("The real prompt text: <%s>", text) + LOG.error("The real prompt text: <%s>", text) break log = session.get_output() session.close() @@ -3228,7 +3229,7 @@ def do_migration(vm_name, uri, extra, auth_pwd, auth_user="root", except (aexpect.ShellError, aexpect.ExpectError) as details: log = session.get_output() session.close() - logging.error("Failed to migrate %s: %s\n%s", vm_name, details, log) + LOG.error("Failed to migrate %s: %s\n%s", vm_name, details, log) return (False, log) @@ -3252,16 +3253,16 @@ def update_vm_disk_driver_cache(vm_name, driver_cache="none", disk_index=0): driver_dict = disk.driver driver_dict['cache'] = driver_cache disk.driver = driver_dict - logging.debug("The new vm disk driver cache is %s", disk.driver['cache']) + LOG.debug("The new vm disk driver cache is %s", disk.driver['cache']) vmxml.devices = devices # SYNC VM XML change - 
logging.debug("The new VM XML:\n%s", vmxml) + LOG.debug("The new VM XML:\n%s", vmxml) vmxml.sync() return True except Exception as e: - logging.error("Can't update disk driver cache!! %s", e) + LOG.error("Can't update disk driver cache!! %s", e) return False @@ -3274,7 +3275,7 @@ def update_vm_disk_source(vm_name, disk_source_path, :param source_type: it may be 'dev' or 'file' type, which is default """ if not os.path.isdir(disk_source_path): - logging.error("Require disk source path!!") + LOG.error("Require disk source path!!") return False # Prepare to update VM first disk source file @@ -3285,11 +3286,11 @@ def update_vm_disk_source(vm_name, disk_source_path, # Generate a disk image name if it doesn't exist if not disk_image_name: disk_source = disks.source.get_attrs().get(source_type) - logging.debug("The disk source file of the VM: %s", disk_source) + LOG.debug("The disk source file of the VM: %s", disk_source) disk_image_name = os.path.basename(disk_source) new_disk_source = os.path.join(disk_source_path, disk_image_name) - logging.debug("The new disk source file of the VM: %s", new_disk_source) + LOG.debug("The new disk source file of the VM: %s", new_disk_source) # Update VM disk source file try: @@ -3297,11 +3298,11 @@ def update_vm_disk_source(vm_name, disk_source_path, "%s" % new_disk_source}}) # SYNC VM XML change vmxml.devices = devices - logging.debug("The new VM XML:\n%s", vmxml.xmltreefile) + LOG.debug("The new VM XML:\n%s", vmxml.xmltreefile) vmxml.sync() return True except Exception as e: - logging.error("Can't update disk source!! %s", e) + LOG.error("Can't update disk source!! %s", e) return False @@ -3313,7 +3314,7 @@ def exec_virsh_edit(source, edit_cmd, connect_uri="qemu:///system"): :param edit_cmd: Edit command list to execute. :return: True if edit is successful, False if edit is failure. 
""" - logging.info("Trying to edit xml with cmd %s", edit_cmd) + LOG.info("Trying to edit xml with cmd %s", edit_cmd) session = aexpect.ShellSession("sudo -s") try: session.sendline("virsh -c %s edit %s" % (connect_uri, source)) @@ -3326,7 +3327,7 @@ def exec_virsh_edit(source, edit_cmd, connect_uri="qemu:///system"): return True except Exception as e: session.close() - logging.error("Error occurred: %s", e) + LOG.error("Error occurred: %s", e) return False @@ -3341,7 +3342,7 @@ def new_disk_vol_name(pool_name): """ poolxml = pool_xml.PoolXML.new_from_dumpxml(pool_name) if poolxml.get_type(pool_name) != "disk": - logging.error("This is not a disk pool") + LOG.error("This is not a disk pool") return None disk = poolxml.get_source().device_path[5:] part_num = len(list(filter(lambda s: s.startswith(disk), @@ -3367,10 +3368,10 @@ def update_polkit_rule(params, pattern, new_value): polkit_f.truncate() polkit_f.write(new_rule) polkit_f.close() - logging.debug("New polkit config rule is:\n%s", new_rule) + LOG.debug("New polkit config rule is:\n%s", new_rule) polkit.polkitd.restart() except IOError as e: - logging.error(e) + LOG.error(e) def get_vol_list(pool_name, vol_check=True, timeout=5): @@ -3428,7 +3429,7 @@ def get_iothreadsinfo(vm_name, options=None): ret = virsh.iothreadinfo(vm_name, options, debug=True, ignore_status=True) if ret.exit_status: - logging.warning(ret.stderr_text.strip()) + LOG.warning(ret.stderr_text.strip()) return info_dict info_list = re.findall(r"(\d+) +(\S+)", ret.stdout_text, re.M) for info in info_list: @@ -3453,7 +3454,7 @@ def virsh_cmd_has_option(cmd, option, raise_skip=True): if not found and raise_skip: raise exceptions.TestSkipError(msg) else: - logging.debug(msg) + LOG.debug(msg) return found @@ -3499,7 +3500,7 @@ def create_secret(params, remote_args=None): if sec_usage_type in ['iscsi']: sec_xml.target = sec_target sec_xml.xmltreefile.write() - logging.debug("The secret xml is: %s" % sec_xml) + LOG.debug("The secret xml is: %s" % 
sec_xml) # define the secret and get its uuid if remote_args: @@ -3658,7 +3659,7 @@ def modify_vm_iface(vm_name, oper, iface_dict, index=0, virsh_instance=virsh): vmxml.xmltreefile.write() vmxml.sync() elif oper == "get_xml": - logging.info("iface xml is %s", iface) + LOG.info("iface xml is %s", iface) wait_for_file_over('', iface.xml) return iface.xml @@ -3750,11 +3751,11 @@ def customize_libvirt_config(params, "virtstoraged", "virtinterfaced", "virtnodedevd", "virtnwfilterd", "virtsecretd", "libvirt"] if config_type not in config_list_support: - logging.debug("'%s' is not in the support list '%s'", - config_type, config_list_support) + LOG.debug("'%s' is not in the support list '%s'", + config_type, config_list_support) return None else: - logging.debug("The '%s' config file will be updated.", config_type) + LOG.debug("The '%s' config file will be updated.", config_type) if not is_recover: target_conf = None @@ -3765,8 +3766,8 @@ def customize_libvirt_config(params, #if params and isinstance(params, dict): for key, value in params.items(): target_conf[key] = value - logging.debug("The '%s' config file is updated with:\n %s", - target_conf.conf_path, params) + LOG.debug("The '%s' config file is updated with:\n %s", + target_conf.conf_path, params) if restart_libvirt: libvirtd = utils_libvirtd.Libvirtd() libvirtd.restart() @@ -3819,7 +3820,7 @@ def check_logfile(search_str, log_file, str_in_log=True, .format(search_str, "is not" if str_in_log else "is", log_file)) else: - logging.debug('Log check for "%s" PASS', search_str) + LOG.debug('Log check for "%s" PASS', search_str) def check_qemu_cmd_line(content, err_ignore=False, @@ -3840,7 +3841,7 @@ def check_qemu_cmd_line(content, err_ignore=False, cmd_result = remote_old.run_remote_cmd(cmd, remote_params, runner_on_target) qemu_line = cmd_result.stdout if re.search(r'%s' % content, qemu_line): - logging.info("Expected '%s' was found in qemu command line" % content) + LOG.info("Expected '%s' was found in qemu command 
line" % content) else: if err_ignore: return False @@ -3891,10 +3892,10 @@ def get_disk_alias(vm, source_file=None): try: if ((find_source and disk.source.attrs['file'] == source_file) or (not find_source and not source_file)): - logging.info("Get alias name %s", disk.alias['name']) + LOG.info("Get alias name %s", disk.alias['name']) return disk.alias['name'] except KeyError as e: - logging.info("Ignore error of source attr getting for file: %s" % e) + LOG.info("Ignore error of source attr getting for file: %s" % e) pass return None diff --git a/virttest/utils_test/libvirt_domjobinfo.py b/virttest/utils_test/libvirt_domjobinfo.py index 2d7fafcb21..37d54ed691 100644 --- a/virttest/utils_test/libvirt_domjobinfo.py +++ b/virttest/utils_test/libvirt_domjobinfo.py @@ -11,6 +11,8 @@ from virttest import virsh +LOG = logging.getLogger('avocado.' + __name__) + # pylint: disable=E1121 def check_domjobinfo(vm, params, option="", remote_virsh_dargs=None): @@ -34,7 +36,7 @@ def _search_jobinfo(jobinfo, ignore_status=False): for item in jobinfo.stdout.splitlines(): if item.count(jobinfo_item): groups = re.findall(r'[0-9.]+', item.strip()) - logging.debug("In '%s' search '%s'\n", item, groups[0]) + LOG.debug("In '%s' search '%s'\n", item, groups[0]) if (math.fabs(float(groups[0]) - float(compare_to_value)) // float(compare_to_value) > diff_rate): err_msg = ("{} {} has too much difference from " @@ -42,7 +44,7 @@ def _search_jobinfo(jobinfo, ignore_status=False): groups[0], compare_to_value)) if ignore_status: - logging.error(err_msg) + LOG.error(err_msg) else: raise exceptions.TestFail(err_msg) break @@ -50,7 +52,7 @@ def _search_jobinfo(jobinfo, ignore_status=False): jobinfo_item = params.get("jobinfo_item") compare_to_value = params.get("compare_to_value") ignore_status = params.get("domjob_ignore_status", False) - logging.debug("compare_to_value:%s", compare_to_value) + LOG.debug("compare_to_value:%s", compare_to_value) diff_rate = float(params.get("diff_rate", "0")) if not 
jobinfo_item or not compare_to_value: return diff --git a/virttest/utils_test/qemu/__init__.py b/virttest/utils_test/qemu/__init__.py index 728f7f05d3..f235079195 100644 --- a/virttest/utils_test/qemu/__init__.py +++ b/virttest/utils_test/qemu/__init__.py @@ -34,6 +34,8 @@ from virttest.qemu_devices import qdevices from virttest.staging import utils_memory +LOG = logging.getLogger('avocado.' + __name__) + def guest_active(vm): o = vm.monitor.info("status") @@ -71,8 +73,8 @@ def get_numa_status(numa_node_info, qemu_pid, debug=True): cpu = [_ for _ in cpus if _ in numa_node_info.nodes[node_id].cpus] qemu_cpu.append(cpu) if debug: - logging.debug("qemu-kvm process using %s pages and cpu %s in " - "node %s" % (memory, " ".join(cpu), node_id)) + LOG.debug("qemu-kvm process using %s pages and cpu %s in " + "node %s" % (memory, " ".join(cpu), node_id)) return (qemu_memory, qemu_cpu) @@ -85,21 +87,21 @@ def pin_vm_threads(vm, node): """ if len(vm.vcpu_threads) + len(vm.vhost_threads) < len(node.cpus): for i in vm.vcpu_threads: - logging.info("pin vcpu thread(%s) to cpu(%s)" % - (i, node.pin_cpu(i))) + LOG.info("pin vcpu thread(%s) to cpu(%s)" % + (i, node.pin_cpu(i))) for i in vm.vhost_threads: - logging.info("pin vhost thread(%s) to cpu(%s)" % - (i, node.pin_cpu(i))) + LOG.info("pin vhost thread(%s) to cpu(%s)" % + (i, node.pin_cpu(i))) elif (len(vm.vcpu_threads) <= len(node.cpus) and len(vm.vhost_threads) <= len(node.cpus)): for i in vm.vcpu_threads: - logging.info("pin vcpu thread(%s) to cpu(%s)" % - (i, node.pin_cpu(i))) + LOG.info("pin vcpu thread(%s) to cpu(%s)" % + (i, node.pin_cpu(i))) for i in vm.vhost_threads: - logging.info("pin vhost thread(%s) to extra cpu(%s)" % - (i, node.pin_cpu(i, extra=True))) + LOG.info("pin vhost thread(%s) to extra cpu(%s)" % + (i, node.pin_cpu(i, extra=True))) else: - logging.info("Skip pinning, no enough nodes") + LOG.info("Skip pinning, no enough nodes") def _check_driver_verifier(session, driver, verifier_flags=None, timeout=300): 
@@ -112,7 +114,7 @@ def _check_driver_verifier(session, driver, verifier_flags=None, timeout=300): :param timeout: Timeout in seconds """ - logging.info("Check %s driver verifier status" % driver) + LOG.info("Check %s driver verifier status" % driver) query_cmd = "verifier /querysettings" output = session.cmd_output(query_cmd, timeout=timeout) status = True @@ -141,7 +143,7 @@ def setup_win_driver_verifier(session, driver, vm, timeout=300): win_verifier_flags)[0] if not verifier_status: error_context.context("Enable %s driver verifier" % driver, - logging.info) + LOG.info) if win_verifier_flags: verifier_setup_cmd = "verifier /flags %s /driver %s.sys" % ( win_verifier_flags, ".sys ".join(driver.split())) @@ -155,7 +157,7 @@ def setup_win_driver_verifier(session, driver, vm, timeout=300): if not verifier_status: msg = "%s verifier is not enabled, details: %s" % (driver, output) raise exceptions.TestFail(msg) - logging.info("%s verifier is enabled already" % driver) + LOG.info("%s verifier is enabled already" % driver) return session @@ -171,7 +173,7 @@ def clear_win_driver_verifier(driver, vm, timeout=300): try: verifier_status = _check_driver_verifier(session, driver)[1] if verifier_status: - logging.info("Clear driver verifier") + LOG.info("Clear driver verifier") verifier_clear_cmd = "verifier /reset" session.cmd(verifier_clear_cmd, timeout=timeout, @@ -203,7 +205,7 @@ def _check_driver_stat(): return False for drv in driver.split(): - error_context.context("Check %s driver state." % drv, logging.info) + error_context.context("Check %s driver state." 
% drv, LOG.info) driver_check_cmd = (r'wmic sysdriver where PathName="C:\\Windows\\System32' r'\\drivers\\%s.sys" get State /value') % drv @@ -246,14 +248,14 @@ def setup_runlevel(params, session): ori_runlevel = ori_runlevel.split()[-1] if ori_runlevel == expect_runlevel: - logging.info("Guest runlevel is already %s as expected" % ori_runlevel) + LOG.info("Guest runlevel is already %s as expected" % ori_runlevel) else: session.cmd("init %s" % expect_runlevel) tmp_runlevel = session.cmd(cmd) tmp_runlevel = tmp_runlevel.split()[-1] if tmp_runlevel != expect_runlevel: - logging.warn("Changing runlevel from %s to %s failed (%s)!", - ori_runlevel, expect_runlevel, tmp_runlevel) + LOG.warn("Changing runlevel from %s to %s failed (%s)!", + ori_runlevel, expect_runlevel, tmp_runlevel) class GuestSuspend(object): @@ -308,11 +310,11 @@ def setup_bg_program(self, **args): suspend_bg_program_setup_cmd = args.get("suspend_bg_program_setup_cmd") error_context.context( - "Run a background program as a flag", logging.info) + "Run a background program as a flag", LOG.info) session = self._get_session() self._open_session_list.append(session) - logging.debug("Waiting all services in guest are fully started.") + LOG.debug("Waiting all services in guest are fully started.") time.sleep(self.services_up_timeout) session.sendline(suspend_bg_program_setup_cmd) @@ -325,7 +327,7 @@ def check_bg_program(self, **args): suspend_bg_program_chk_cmd = args.get("suspend_bg_program_chk_cmd") error_context.context( - "Verify background program is running", logging.info) + "Verify background program is running", LOG.info) session = self._get_session() s, _ = self._session_cmd_close(session, suspend_bg_program_chk_cmd) if s: @@ -341,13 +343,13 @@ def kill_bg_program(self, **args): session = self._get_session() self._session_cmd_close(session, suspend_bg_program_kill_cmd) except Exception as e: - logging.warn("Could not stop background program: '%s'", e) + LOG.warn("Could not stop background program: 
'%s'", e) pass @error_context.context_aware def _check_guest_suspend_log(self, **args): error_context.context("Check whether guest supports suspend", - logging.info) + LOG.info) suspend_support_chk_cmd = args.get("suspend_support_chk_cmd") session = self._get_session() @@ -364,7 +366,7 @@ def verify_guest_support_suspend(self, **args): def start_suspend(self, **args): suspend_start_cmd = args.get("suspend_start_cmd") error_context.context( - "Start suspend [%s]" % (suspend_start_cmd), logging.info) + "Start suspend [%s]" % (suspend_start_cmd), LOG.info) session = self._get_session() self._open_session_list.append(session) @@ -392,7 +394,7 @@ def resume_guest_disk(self, **args): @error_context.context_aware def verify_guest_up(self, **args): - error_context.context("Verify guest system log", logging.info) + error_context.context("Verify guest system log", LOG.info) suspend_log_chk_cmd = args.get("suspend_log_chk_cmd") session = self._get_session() @@ -409,13 +411,13 @@ def action_before_suspend(self, **args): @error_context.context_aware def action_during_suspend(self, **args): error_context.context( - "Sleep a while before resuming guest", logging.info) + "Sleep a while before resuming guest", LOG.info) time.sleep(10) if self.os_type == "windows": # Due to WinXP/2003 won't suspend immediately after issue S3 cmd, # delay 10~60 secs here, maybe there's a bug in windows os. 
- logging.info("WinXP/2003 need more time to suspend, sleep 50s.") + LOG.info("WinXP/2003 need more time to suspend, sleep 50s.") time.sleep(50) @error_context.context_aware @@ -455,8 +457,8 @@ def get_vm_mem(self, vm): obj_devs = map(lambda x: vm.devices.get_by_qid(x)[0], obj_ids) obj_size = map(lambda x: x.get_param('size'), obj_devs) total_mem += sum(map(self.normalize_mem_size, obj_size)) - logging.info("Assigned %s%s " % (total_mem, self.UNIT) + - "memory to '%s'" % vm.name) + LOG.info("Assigned %s%s " % (total_mem, self.UNIT) + + "memory to '%s'" % vm.name) return total_mem @classmethod @@ -472,7 +474,7 @@ def normalize_mem_size(cls, str_size): size = utils_misc.normalize_data_size(*args) return int(float(size)) except ValueError as details: - logging.debug("Convert memory size error('%s')" % details) + LOG.debug("Convert memory size error('%s')" % details) return 0 @classmethod @@ -638,7 +640,7 @@ def hotplug_memory(self, vm, name): dev.set_param("addr", addr) dev_type = "pc-dimm" step = "Hotplug %s '%s' to VM" % (dev_type, dev.get_qid()) - error_context.context(step, logging.info) + error_context.context(step, LOG.info) _, ver_out = vm.devices.simple_hotplug(dev, vm.monitor) if ver_out is False: raise exceptions.TestFail("Verify hotplug memory failed") @@ -661,10 +663,10 @@ def unplug_memory(self, vm, name): try: dimm = vm.devices.get_by_qid(qid_dimm)[0] except IndexError: - logging.warn("'%s' is not used by any dimm" % qid_mem) + LOG.warn("'%s' is not used by any dimm" % qid_mem) else: step = "Unplug pc-dimm '%s'" % qid_dimm - error_context.context(step, logging.info) + error_context.context(step, LOG.info) _, ver_out = vm.devices.simple_unplug(dimm, vm.monitor) if ver_out is False: raise exceptions.TestFail("Verify unplug memory failed") @@ -672,15 +674,15 @@ def unplug_memory(self, vm, name): self.update_vm_after_unplug(vm, dimm) step = "Unplug memory object '%s'" % qid_mem - error_context.context(step, logging.info) + error_context.context(step, 
LOG.info) try: mem = vm.devices.get_by_qid(qid_mem)[0] except IndexError: output = vm.monitor.query("memory-devices") - logging.debug("Memory devices: %s" % output) + LOG.debug("Memory devices: %s" % output) msg = "Memory object '%s' not exists" % qid_mem raise exceptions.TestError(msg) - error_context.context(step, logging.info) + error_context.context(step, LOG.info) vm.devices.simple_unplug(mem, vm.monitor) devices.append(mem) self.update_vm_after_unplug(vm, mem) @@ -694,13 +696,13 @@ def get_mem_addr(self, vm, qid): :param vm: VM object :param qid: memory device qid """ - error_context.context("Get hotpluged memory address", logging.info) + error_context.context("Get hotpluged memory address", LOG.info) if not isinstance(vm.monitor, qemu_monitor.QMPMonitor): raise NotImplementedError for info in vm.monitor.info("memory-devices"): if str(info['data']['id']) == qid: address = info['data']['addr'] - logging.info("Memory address: %s" % address) + LOG.info("Memory address: %s" % address) return address @error_context.context_aware @@ -710,7 +712,7 @@ def check_memory(self, vm=None): :param vm: VM object, get VM object from env if vm is None. 
""" - error_context.context("Verify memory info", logging.info) + error_context.context("Verify memory info", LOG.info) if not vm: vm = self.env.get_vm(self.params["main_vm"]) vm.verify_alive() @@ -734,7 +736,7 @@ def check_memory(self, vm=None): def memory_operate(self, vm, memory, operation='online'): error_context.context( "%s %s in guest OS" % - (operation, memory), logging.info) + (operation, memory), LOG.info) mem_sys_path = "/sys/devices/system/memory/%s" % memory mem_state_path = os.path.join(mem_sys_path, 'state') session = self.get_session(vm) diff --git a/virttest/utils_test/qemu/migration.py b/virttest/utils_test/qemu/migration.py index 834e5abb6c..96a309b9fc 100644 --- a/virttest/utils_test/qemu/migration.py +++ b/virttest/utils_test/qemu/migration.py @@ -40,6 +40,8 @@ except ImportError: from virttest import aexpect +LOG = logging.getLogger('avocado.' + __name__) + def guest_active(vm): o = vm.monitor.info("status") @@ -198,7 +200,7 @@ def wait_for_migration(): finally: if (dest_host == 'localhost') and stable_check and clean: - logging.debug("Cleaning the state files") + LOG.debug("Cleaning the state files") if os.path.isfile(save1): os.remove(save1) if os.path.isfile(save2): @@ -206,7 +208,7 @@ def wait_for_migration(): # Report migration status if mig_succeeded(): - logging.info("Migration finished successfully") + LOG.info("Migration finished successfully") elif mig_failed(): raise exceptions.TestFail("Migration failed") else: @@ -216,7 +218,7 @@ def wait_for_migration(): if dest_host == 'localhost': if dest_vm.monitor.verify_status("paused"): - logging.debug("Destination VM is paused, resuming it") + LOG.debug("Destination VM is paused, resuming it") dest_vm.resume() # Kill the source VM @@ -412,7 +414,7 @@ def mig_wrapper(vm, cancel_delay, dsthost, vm_ports, vm_ports, not_wait_for_migration, None, mig_data) - logging.info("Start migrating now...") + LOG.info("Start migrating now...") cancel_delay = mig_data.params.get("cancel_delay") if 
cancel_delay is not None: cancel_delay = int(cancel_delay) @@ -459,11 +461,11 @@ def master_id(self): def _hosts_barrier(self, hosts, session_id, tag, timeout): from autotest.client.shared.syncdata import SyncData - logging.debug("Barrier timeout: %d tags: %s" % (timeout, tag)) + LOG.debug("Barrier timeout: %d tags: %s" % (timeout, tag)) tags = SyncData(self.master_id(), self.hostid, hosts, "%s,%s,barrier" % (str(session_id), tag), self.sync_server).sync(tag, timeout) - logging.debug("Barrier tag %s" % (tags)) + LOG.debug("Barrier tag %s" % (tags)) def preprocess_env(self): """ @@ -482,15 +484,15 @@ def _check_vms_source(self, mig_data): sync = SyncData(self.master_id(), self.hostid, mig_data.hosts, mig_data.mig_id, self.sync_server) mig_data.vm_ports = sync.sync(timeout=240)[mig_data.dst] - logging.info("Received from destination the migration port %s", - str(mig_data.vm_ports)) + LOG.info("Received from destination the migration port %s", + str(mig_data.vm_ports)) def _check_vms_dest(self, mig_data): from autotest.client.shared.syncdata import SyncData mig_data.vm_ports = {} for vm in mig_data.vms: - logging.info("Communicating to source migration port %s", - vm.migration_port) + LOG.info("Communicating to source migration port %s", + vm.migration_port) mig_data.vm_ports[vm.name] = vm.migration_port if mig_data.params.get("host_mig_offline") != "yes": @@ -527,12 +529,12 @@ def _quick_check_vms(self, mig_data): :param vms: list of vms. :param source: Must be True if is source machine. 
""" - logging.info("Try check vms %s" % (mig_data.vms_name)) + LOG.info("Try check vms %s" % (mig_data.vms_name)) for vm in mig_data.vms_name: if self.env.get_vm(vm) not in mig_data.vms: mig_data.vms.append(self.env.get_vm(vm)) for vm in mig_data.vms: - logging.info("Check vm %s on host %s" % (vm.name, self.hostid)) + LOG.info("Check vm %s on host %s" % (vm.name, self.hostid)) vm.verify_alive() def prepare_for_migration(self, mig_data, migration_mode): @@ -614,9 +616,9 @@ def check_vms_dst(self, mig_data): if not guest_active(vm): raise exceptions.TestFail("Guest not active after migration") - logging.info("Migrated guest appears to be running") + LOG.info("Migrated guest appears to be running") - logging.info("Logging into migrated guest after migration...") + LOG.info("Logging into migrated guest after migration...") for vm in mig_data.vms: if self.regain_ip_cmd is not None: session_serial = vm.wait_for_serial_login( @@ -686,8 +688,8 @@ def migrate(self, vms_name, srchost, dsthost, start_work=None, """ def migrate_wrap(vms_name, srchost, dsthost, start_work=None, check_work=None, params_append=None): - logging.info("Starting migrate vms %s from host %s to %s" % - (vms_name, srchost, dsthost)) + LOG.info("Starting migrate vms %s from host %s to %s" % + (vms_name, srchost, dsthost)) pause = self.params.get("paused_after_start_vm") mig_error = None mig_data = MigrationData(self.params, srchost, dsthost, @@ -720,8 +722,7 @@ def migrate_wrap(vms_name, srchost, dsthost, start_work=None, for vm in mig_data.vms: vm.resume() wait = self.params.get("start_migration_timeout", 0) - logging.debug("Wait for migration %s seconds." % - (wait)) + LOG.debug("Wait for migration %s seconds." 
% (wait)) time.sleep(int(wait)) self.before_migration(mig_data) @@ -857,7 +858,7 @@ def mig_wrapper(vm, cancel_delay, mig_offline, dsthost, vm_ports, self.post_migration(vm, cancel_delay, mig_offline, dsthost, vm_ports, not_wait_for_migration, fd, mig_data) - logging.info("Start migrating now...") + LOG.info("Start migrating now...") cancel_delay = mig_data.params.get("cancel_delay") if cancel_delay is not None: cancel_delay = int(cancel_delay) @@ -943,7 +944,7 @@ def migrate_wait(self, vms_name, srchost, dsthost, start_work=None, mig_ports = sync.sync(mig_ports, timeout=120) mig_ports = mig_ports[srchost] - logging.debug("Migration port %s" % (mig_ports)) + LOG.debug("Migration port %s" % (mig_ports)) if self.params.get("hostid") != srchost: sockets = [] @@ -953,8 +954,8 @@ def migrate_wait(self, vms_name, srchost, dsthost, start_work=None, fds = {} for s, vm_name in list(zip(sockets, vms_name)): fds["migration_fd_%s" % vm_name] = s.fileno() - logging.debug("File descriptors %s used for" - " migration." % (fds)) + LOG.debug("File descriptors %s used for" + " migration." % (fds)) super_cls = super(MultihostMigrationFd, self) super_cls.migrate_wait(vms_name, srchost, dsthost, @@ -974,8 +975,8 @@ def migrate_wait(self, vms_name, srchost, dsthost, start_work=None, fds = {} for conn, vm_name in list(zip(conns, vms_name)): fds["migration_fd_%s" % vm_name] = conn.fileno() - logging.debug("File descriptors %s used for" - " migration." % (fds)) + LOG.debug("File descriptors %s used for" + " migration." % (fds)) # Prohibits descriptor inheritance. 
for fd in list(fds.values()): @@ -1033,7 +1034,7 @@ def mig_wrapper(vm, cancel_delay, mig_offline, dsthost, mig_exec_cmd, dsthost, mig_exec_cmd, not_wait_for_migration, None, mig_data) - logging.info("Start migrating now...") + LOG.info("Start migrating now...") cancel_delay = mig_data.params.get("cancel_delay") if cancel_delay is not None: cancel_delay = int(cancel_delay) @@ -1096,7 +1097,7 @@ def migrate_wait(self, vms_name, srchost, dsthost, start_work=None, mig_ports = sync.sync(mig_ports, timeout=120) mig_ports = mig_ports[dsthost] - logging.debug("Migration port %s" % (mig_ports)) + LOG.debug("Migration port %s" % (mig_ports)) mig_cmds = {} for mig_port, vm_name in list(zip(mig_ports, vms_name)): mig_dst_cmd = "nc -l %s %s" % (dsthost, mig_port) @@ -1137,7 +1138,7 @@ def migrate_wait(self, vms_name, srchost, dsthost, start_work=None, mig_params["migration_exec_cmd_dst_%s" % (vm_name)] = ( mig_dst_cmd % mig_fs[dsthost][vm_name]) - logging.debug("Exec commands %s", mig_cmds) + LOG.debug("Exec commands %s", mig_cmds) super_cls = super(MultihostMigrationExec, self) super_cls.migrate_wait(vms_name, srchost, dsthost, @@ -1193,7 +1194,7 @@ def mig_wrapper(vm, cancel_delay, dsthost, vm_ports, vm_ports, not_wait_for_migration, None, mig_data) - logging.info("Start migrating now...") + LOG.info("Start migrating now...") # Use of RDMA during migration requires pinning and registering memory # with the hardware. 
enable_rdma_pin_all = mig_data.params.get("enable_rdma_pin_all", @@ -1285,7 +1286,7 @@ def run_pre_sub_test(self): vm = self.env.get_vm(self.params["main_vm"]) vm.wait_for_login(timeout=self.login_timeout) error.context("Run sub test '%s' before migration on src" - % self.pre_sub_test, logging.info) + % self.pre_sub_test, LOG.info) utils_test.run_virt_sub_test(self.test, self.params, self.env, self.pre_sub_test) @@ -1298,7 +1299,7 @@ def run_post_sub_test(self): if not self.is_src: if self.post_sub_test: error.context("Run sub test '%s' after migration on dst" - % self.post_sub_test, logging.info) + % self.post_sub_test, LOG.info) utils_test.run_virt_sub_test(self.test, self.params, self.env, self.post_sub_test) @@ -1328,10 +1329,10 @@ def start_worker(self): """ if self.is_src: - logging.info("Try to login guest before migration test.") + LOG.info("Try to login guest before migration test.") vm = self.env.get_vm(self.params["main_vm"]) session = vm.wait_for_login(timeout=self.login_timeout) - logging.debug("Sending command: '%s'" % self.mig_bg_command) + LOG.debug("Sending command: '%s'" % self.mig_bg_command) s, o = session.cmd_status_output(self.mig_bg_command) if s != 0: raise exceptions.TestError("Failed to run bg cmd in guest," @@ -1344,19 +1345,19 @@ def check_worker(self): """ if not self.is_src: - logging.info("Try to login guest after migration test.") + LOG.info("Try to login guest after migration test.") vm = self.env.get_vm(self.params["main_vm"]) serial_login = self.params.get("serial_login") if serial_login == "yes": session = vm.wait_for_serial_login(timeout=self.login_timeout) else: session = vm.wait_for_login(timeout=self.login_timeout) - logging.info("Check the background command in the guest.") + LOG.info("Check the background command in the guest.") s, o = session.cmd_status_output(self.mig_bg_check_command) if s: raise exceptions.TestFail("Background command not found," " Output is '%s'." 
% o) - logging.info("Kill the background command in the guest.") + LOG.info("Kill the background command in the guest.") session.sendline(self.mig_bg_kill_command) session.close() @@ -1374,7 +1375,7 @@ def ping_pong_migrate(self, mig_type, sync, start_work=None, while True: if self.stop_migrate: break - logging.info("ping pong migration...") + LOG.info("ping pong migration...") mig_type(self.test, self.params, self.env).migrate_wait( [self.vm], self.srchost, self.dsthost, start_work=start_work, check_work=check_work) @@ -1399,17 +1400,17 @@ def get_migration_info(self, vm): error.context("Get 'xbzrle-cache/status/setup-time/downtime/" "total-time/ram' info after migration.", - logging.info) + LOG.info) xbzrle_cache = vm.monitor.info("migrate").get("xbzrle-cache") status = vm.monitor.info("migrate").get("status") setup_time = vm.monitor.info("migrate").get("setup-time") downtime = vm.monitor.info("migrate").get("downtime") total_time = vm.monitor.info("migrate").get("total-time") ram = vm.monitor.info("migrate").get("ram") - logging.info("Migration info:\nxbzrle-cache: %s\nstatus: %s\n" - "setup-time: %s\ndowntime: %s\ntotal-time: " - "%s\nram: %s" % (xbzrle_cache, status, setup_time, - downtime, total_time, ram)) + LOG.info("Migration info:\nxbzrle-cache: %s\nstatus: %s\n" + "setup-time: %s\ndowntime: %s\ntotal-time: " + "%s\nram: %s" % (xbzrle_cache, status, setup_time, + downtime, total_time, ram)) @error.context_aware def get_migration_capability(self, index=0): @@ -1422,7 +1423,7 @@ def get_migration_capability(self, index=0): if self.is_src: for i in range(index, len(self.capabilitys)): error.context("Get capability '%s' state." - % self.capabilitys[i], logging.info) + % self.capabilitys[i], LOG.info) vm = self.env.get_vm(self.params["main_vm"]) self.state = vm.monitor.get_migrate_capability( self.capabilitys[i]) @@ -1445,7 +1446,7 @@ def set_migration_capability(self, state, capability): if self.is_src: error.context("Set '%s' state to '%s'." 
% (capability, state), - logging.info) + LOG.info) vm = self.env.get_vm(self.params["main_vm"]) vm.monitor.set_migrate_capability(state, capability) @@ -1458,10 +1459,10 @@ def get_migration_cache_size(self, index=0): """ if self.is_src: - error.context("Try to get cache size.", logging.info) + error.context("Try to get cache size.", LOG.info) vm = self.env.get_vm(self.params["main_vm"]) cache_size = vm.monitor.get_migrate_cache_size() - error.context("Get cache size: %s" % cache_size, logging.info) + error.context("Get cache size: %s" % cache_size, LOG.info) if cache_size != int(self.cache_size[index]): raise exceptions.TestFail( "The expected cache size: %s," @@ -1477,7 +1478,7 @@ def set_migration_cache_size(self, value): """ if self.is_src: - error.context("Set cache size to %s." % value, logging.info) + error.context("Set cache size to %s." % value, LOG.info) vm = self.env.get_vm(self.params["main_vm"]) qemu_migration.set_cache_size(vm, value) @@ -1492,7 +1493,7 @@ def get_migration_parameter(self, index=0): if self.is_src: for i in range(index, len(self.parameters)): error.context("Get parameter '%s' value." - % self.parameters[i], logging.info) + % self.parameters[i], LOG.info) vm = self.env.get_vm(self.params["main_vm"]) self.value = vm.monitor.get_migrate_parameter( self.parameters[i]) @@ -1516,7 +1517,7 @@ def set_migration_parameter(self, index=0): for i in range(index, len(self.parameters)): error.context("Set '%s' value to '%s'." % ( self.parameters[i], - self.parameters_value[i]), logging.info) + self.parameters_value[i]), LOG.info) vm = self.env.get_vm(self.params["main_vm"]) vm.monitor.set_migrate_parameter(self.parameters[i], int(self.parameters_value[i])) @@ -1530,7 +1531,7 @@ def set_migration_speed(self, value): """ if self.is_src: - error.context("Set migration speed to %s." % value, logging.info) + error.context("Set migration speed to %s." 
% value, LOG.info) vm = self.env.get_vm(self.params["main_vm"]) qemu_migration.set_speed(vm, "%sB" % value) @@ -1543,7 +1544,7 @@ def set_migration_downtime(self, value): """ if self.is_src: - error.context("Set downtime to %s." % value, logging.info) + error.context("Set downtime to %s." % value, LOG.info) vm = self.env.get_vm(self.params["main_vm"]) qemu_migration.set_downtime(vm, value) @@ -1554,7 +1555,7 @@ def set_migration_cancel(self): """ if self.is_src: - error.context("Cancel migration.", logging.info) + error.context("Cancel migration.", LOG.info) vm = self.env.get_vm(self.params["main_vm"]) vm.monitor.cmd("migrate_cancel") @@ -1584,20 +1585,20 @@ def clean_up(self, kill_bg_cmd, vm): """ error.context("Kill the background test by '%s' in guest" - "." % kill_bg_cmd, logging.info) + "." % kill_bg_cmd, LOG.info) session = vm.wait_for_login(timeout=self.login_timeout) if session.cmd_status(self.check_running_cmd) != 0: - logging.info("The background test in guest is finished, " - "no need to kill.") + LOG.info("The background test in guest is finished, " + "no need to kill.") else: try: s, o = session.cmd_status_output(kill_bg_cmd) - logging.info("The output after run kill command: %r" % o) + LOG.info("The output after run kill command: %r" % o) if "No such process" in o or "not found" in o \ or "no running instance" in o: if session.cmd_status(self.check_running_cmd) != 0: - logging.info("The background test in guest is " - "finished before kill it.") + LOG.info("The background test in guest is " + "finished before kill it.") elif s: raise exceptions.TestFail("Failed to kill the background" " test in guest.") @@ -1611,10 +1612,10 @@ def start_stress(self): start stress test on src before migration """ - logging.info("Try to login guest before migration test.") + LOG.info("Try to login guest before migration test.") vm = self.env.get_vm(self.params["main_vm"]) session = vm.wait_for_login(timeout=self.login_timeout) - error.context("Do stress test before 
migration.", logging.info) + error.context("Do stress test before migration.", LOG.info) bg = utils_misc.InterruptedThread( utils_test.run_virt_sub_test, args=(self.test, self.params, self.env,), @@ -1649,7 +1650,7 @@ def install_stressapptest(self): stressapptest_insatll_cmd = \ self.params.get("stressapptest_insatll_cmd", stressapptest_insatll_cmd) - error.context("Install stressapptest.", logging.info) + error.context("Install stressapptest.", LOG.info) s, o = session.cmd_status_output(stressapptest_insatll_cmd) session.close() if s: diff --git a/virttest/utils_time.py b/virttest/utils_time.py index 12726e7bdb..19ec99baba 100644 --- a/virttest/utils_time.py +++ b/virttest/utils_time.py @@ -12,6 +12,8 @@ # so get explicit command 'grep' with path grep_binary = path.find_command("grep") +LOG = logging.getLogger('avocado.' + __name__) + @error_context.context_aware def get_host_timezone(): @@ -20,7 +22,7 @@ def get_host_timezone(): """ timezone_cmd = 'timedatectl | %s "Time zone"' % grep_binary timezone_pattern = '^(?:\s+Time zone:\s)(\w+\/\S+|UTC)(?:\s\(\S+,\s)([+|-]\d{4})\)$' - error_context.context("Get host's timezone", logging.info) + error_context.context("Get host's timezone", LOG.info) host_timezone = process.run(timezone_cmd, timeout=240, shell=True).stdout_text try: host_timezone_set = re.match(timezone_pattern, host_timezone).groups() @@ -37,7 +39,7 @@ def verify_timezone_linux(session): :param session: VM session """ - error_context.context("Verify guest's timezone", logging.info) + error_context.context("Verify guest's timezone", LOG.info) timezone_cmd = 'timedatectl | %s "Time zone"' % grep_binary timezone_pattern = '(?:\s+Time zone:\s)(\w+\/\S+|UTC)(?:\s\(\S+,\s)([+|-]\d{4})\)' guest_timezone = session.cmd_output_safe(timezone_cmd, timeout=240) @@ -57,7 +59,7 @@ def sync_timezone_linux(vm, login_timeout=360): :param login_timeout: Time (seconds) to keep trying to log in. 
""" session = vm.wait_for_login(timeout=login_timeout, serial=True) - error_context.context("Sync guest's timezone", logging.info) + error_context.context("Sync guest's timezone", LOG.info) set_timezone_cmd = "timedatectl set-timezone %s" if not verify_timezone_linux(session): host_timezone_city = get_host_timezone()['timezone_city'] @@ -107,7 +109,7 @@ def get_timezone_name(timezone_code): return value[1] return None - error_context.context("Verify guest's timezone", logging.info) + error_context.context("Verify guest's timezone", LOG.info) timezone_cmd = 'tzutil /g' host_timezone_code = get_host_timezone()['timezone_code'] # Workaround to handle two line prompts in serial session @@ -130,12 +132,12 @@ def sync_timezone_win(vm, login_timeout=360): (ver_result, output) = verify_timezone_win(session) if ver_result is not True: - error_context.context("Sync guest's timezone.", logging.info) + error_context.context("Sync guest's timezone.", LOG.info) session.cmd(set_timezone_cmd % output) vm_params = vm.params - error_context.context("Shutdown guest...", logging.info) + error_context.context("Shutdown guest...", LOG.info) vm.destroy() - error_context.context("Boot guest...", logging.info) + error_context.context("Boot guest...", LOG.info) vm.create(params=vm_params) vm.verify_alive() session = vm.wait_for_login(serial=True) @@ -162,7 +164,7 @@ def execute(cmd, timeout=360, session=None): else: ret = process.getoutput(cmd) target = 'guest' if session else 'host' - logging.debug("(%s) Execute command('%s')" % (target, cmd)) + LOG.debug("(%s) Execute command('%s')" % (target, cmd)) return ret @@ -173,7 +175,7 @@ def verify_clocksource(expected, session=None): :param expected: Expected clocksource :param session: VM session """ - error_context.context("Check the current clocksource", logging.info) + error_context.context("Check the current clocksource", LOG.info) cmd = "cat /sys/devices/system/clocksource/" cmd += "clocksource0/current_clocksource" return expected in 
execute(cmd, session=session) @@ -185,7 +187,7 @@ def sync_time_with_ntp(session=None): Sync guest or host time with ntp server :param session: VM session or None """ - error_context.context("Sync time from ntp server", logging.info) + error_context.context("Sync time from ntp server", LOG.info) cmd = "ntpdate clock.redhat.com; hwclock -w" return execute(cmd, session) @@ -205,7 +207,7 @@ def update_clksrc(vm, clksrc=None): error_context.context("Update guest kernel cli to '%s'" % (clksrc or "kvm-clock"), - logging.info) + LOG.info) if clksrc: boot_option_added = "clocksource=%s" % clksrc utils_test.update_boot_option(vm, args_added=boot_option_added) diff --git a/virttest/utils_v2v.py b/virttest/utils_v2v.py index c7da582402..dd6d1e6c63 100644 --- a/virttest/utils_v2v.py +++ b/virttest/utils_v2v.py @@ -43,6 +43,8 @@ except path.CmdNotFoundError: V2V_EXEC = None +LOG = logging.getLogger('avocado.' + __name__) + class Uri(object): @@ -133,7 +135,7 @@ def cleanup_authorized_keys(self): for session, key, server_type in self.authorized_keys: if not session or not key: continue - logging.debug( + LOG.debug( "session=%s key=%s server_type=%s", session, key, @@ -145,7 +147,7 @@ def cleanup_authorized_keys(self): # session, although it could not happen in general. for session, _, _ in self.authorized_keys: if session: - logging.debug("closed session = %s", session) + LOG.debug("closed session = %s", session) session.close() def get_cmd_options(self, params): @@ -210,14 +212,14 @@ def _compose_vmx_filename(): self._vmx_filename_fullpath = vmxfiles[0] self._vmx_filename = os.path.basename(vmxfiles[0]) - logging.debug( + LOG.debug( 'vmx file full path is %s' % self._vmx_filename_fullpath) else: # This only works for -i vmx -it ssh, because it only needs an vmx filename, # and doesn't have to mount the nfs storage. If the guessed name is wrong, # v2v will report an error. 
- logging.info( + LOG.info( 'vmx_nfs_src is not set in cfg file, try to guess vmx filename') # some guest's directory name ends with '_1', # e.g. esx5.5-win10-x86_64_1/esx5.5-win10-x86_64.vmx @@ -237,7 +239,7 @@ def _compose_vmx_filename(): self._vmx_filename = self._nfspath self._vmx_filename = self._vmx_filename + '.vmx' - logging.debug( + LOG.debug( 'Guessed vmx file name is %s' % self._vmx_filename) @@ -255,7 +257,7 @@ def _compose_input_transport_options(): self.vddk_libdir): # Invalid nfs mount source if no ':' if self.vddk_libdir_src is None or ':' not in self.vddk_libdir_src: - logging.error( + LOG.error( 'Neither vddk_libdir nor vddk_libdir_src was set') raise exceptions.TestError( "VDDK library directory or NFS mount point must be set") @@ -275,7 +277,7 @@ def _compose_input_transport_options(): mount_point = v2v_mount( self.vddk_libdir_src, 'vddk_libdir') - logging.info('Preparing vddklib on local server') + LOG.info('Preparing vddklib on local server') if os.path.exists(vddk_lib_rootdir): if os.path.exists(vddk_libdir): os.unlink(vddk_libdir) @@ -306,7 +308,7 @@ def _compose_input_transport_options(): shutil.copytree(mount_point, vddk_lib) os.symlink(vddk_lib, vddk_libdir, True) - logging.info('vddklib on local server is %s', vddk_lib) + LOG.info('vddklib on local server is %s', vddk_lib) self.vddk_libdir = vddk_libdir utils_misc.umount(self.vddk_libdir_src, mount_point, None) @@ -358,14 +360,14 @@ def _compose_input_transport_options(): # Just warning invalid values in case for negative testing if len(mac_i_list) != 3 or mac_i_list[1] not in [ 'bridge', 'network']: - logging.warning( + LOG.warning( "Invalid value for --mac '%s'" % mac_i_list) mac, net_type, netname = mac_i_list self.net_vm_opts += " --mac %s:%s:%s" % ( mac, net_type, netname) else: - logging.info("auto set --mac option") + LOG.info("auto set --mac option") for mac, _ in self._iface_list: # Randomly cover both 'bridge' and 'network' even thought there is no # difference. 
@@ -381,7 +383,7 @@ def _compose_input_transport_options(): if not self.net_vm_opts: if supported_mac: - logging.warning("auto set --mac failed, roll back to -b/-n") + LOG.warning("auto set --mac failed, roll back to -b/-n") if self.bridge: self.net_vm_opts += " -b %s" % self.bridge if self.network: @@ -422,7 +424,7 @@ def _get_os_directory(self): # Pass the json directory to testcase for checking self.params.get('params').update({'os_directory': self.os_directory}) - logging.debug( + LOG.debug( 'The os directory(-os DIRECTORY) is %s.', self.os_directory) return self.os_directory @@ -547,7 +549,7 @@ def __init__(self, test, params, env): self.session = None if self.name is None: - logging.error("vm name not exist") + LOG.error("vm name not exist") # libvirt is a default target if self.target == "libvirt" or self.target is None: @@ -562,20 +564,20 @@ def __init__(self, test, params, env): def create_session(self, timeout=480): if self.session: - logging.debug('vm session %s exists', self.session) + LOG.debug('vm session %s exists', self.session) return self.session = self.vm.wait_for_login(nic_index=self.nic_index, timeout=timeout, username=self.username, password=self.password) - logging.debug('A new vm session %s was created', self.session) + LOG.debug('A new vm session %s was created', self.session) def cleanup(self): """ Cleanup VM and remove all of storage files about guest """ if self.session: - logging.debug('vm session %s is closing', self.session) + LOG.debug('vm session %s is closing', self.session) self.session.close() self.session = None @@ -593,7 +595,7 @@ def cleanup(self): self.vm.undefine() if self.target == "ovirt": - logging.debug("Deleting VM %s in Ovirt", self.name) + LOG.debug("Deleting VM %s in Ovirt", self.name) self.vm.delete() # When vm is deleted, the disk will also be removed from # data domain, so it's not necessary to delete disk from @@ -627,8 +629,8 @@ def run_cmd(self, cmd, debug=True): raise exceptions.TestError("Incorrect cmd: %s" % 
cmd) if debug: - logging.debug("Command return status: %s", status) - logging.debug("Command output:\n%s", output) + LOG.debug("Command return status: %s", status) + LOG.debug("Command output:\n%s", output) return status, output @@ -660,7 +662,7 @@ def get_vm_os_info(self): else: os_info = re.search(r'PRETTY_NAME="(.+)"', output).group(1) except Exception as e: - logging.error("Fail to get os distribution: %s", e) + LOG.error("Fail to get os distribution: %s", e) return os_info def get_vm_os_vendor(self): @@ -680,8 +682,8 @@ def get_vm_os_vendor(self): vendor = 'Debian' else: vendor = 'Unknown' - logging.debug("The os vendor of VM '%s' is: %s" % - (self.vm.name, vendor)) + LOG.debug("The os vendor of VM '%s' is: %s" % + (self.vm.name, vendor)) return vendor def get_vm_dmesg(self): @@ -801,7 +803,7 @@ def vm_xorg_search(self, substr): xorg_log_chk, break_if_found) get_xorg_logs = "for i in $(%s);do found=false; %s done" % ( extract_normal_users, xorg_logs_loop) - logging.debug("Get xorg logs shell script:\n%s", get_xorg_logs) + LOG.debug("Get xorg logs shell script:\n%s", get_xorg_logs) # The first element is a malformed get_xorg_logs string, it # should be removed. 
@@ -810,9 +812,9 @@ def vm_xorg_search(self, substr): if len(xorg_files) > 0: xorg_file_list.extend(xorg_files[1:]) else: - logging.debug("Get UID_MIN failed: %s", uid_min) + LOG.debug("Get UID_MIN failed: %s", uid_min) - logging.debug("xorg files: %s", xorg_file_list) + LOG.debug("xorg files: %s", xorg_file_list) for file_i in xorg_file_list: cmd = 'grep -i "%s" "%s"' % (substr, file_i) if self.run_cmd(cmd)[0] == 0: @@ -874,7 +876,7 @@ def is_net_virtio(self): if re.search("virtio", output.split('/')[-1]): return True except IndexError: - logging.error("Fail to find virtio driver") + LOG.error("Fail to find virtio driver") return False def is_disk_virtio(self): @@ -997,12 +999,12 @@ def wait_for_match(self, images, similar_degree=0.98, timeout=300): ppm_utils.image_crop_save(vm_screenshot, vm_screenshot) img_index = 0 for image in images: - logging.debug("Compare vm screenshot with image %s", image) + LOG.debug("Compare vm screenshot with image %s", image) ppm_utils.image_crop_save(image, cropped_image) h_degree = ppm_utils.image_histogram_compare(cropped_image, vm_screenshot) if h_degree >= similar_degree: - logging.debug("Image %s matched", image) + LOG.debug("Image %s matched", image) image_matched = True break img_index += 1 @@ -1023,7 +1025,7 @@ def boot_windows(self, timeout=300): Click buttons to activate windows and install ethernet controller driver to boot windows. 
""" - logging.info("Booting Windows in %s seconds", timeout) + LOG.info("Booting Windows in %s seconds", timeout) compare_screenshot_vms = ["win2003"] timeout_msg = "No matching screenshots found after %s seconds" % timeout timeout_msg += ", trying to log into the VM directly" @@ -1034,14 +1036,14 @@ def boot_windows(self, timeout=300): for image_name in image_name_list: match_image = os.path.join(data_dir.get_data_dir(), image_name) if not os.path.exists(match_image): - logging.error( + LOG.error( "Screenshot '%s' does not exist", match_image) return match_image_list.append(match_image) img_match_ret = self.wait_for_match(match_image_list, timeout=timeout) if img_match_ret < 0: - logging.error(timeout_msg) + LOG.error(timeout_msg) else: if self.os_version == "win2003": if img_match_ret == 0: @@ -1076,8 +1078,8 @@ def boot_windows(self, timeout=300): self.click_install_driver() else: # No need sendkey/click button for any os except Win2003 - logging.info("%s is booting up without program intervention", - self.os_version) + LOG.info("%s is booting up without program intervention", + self.os_version) def reboot_windows(self): """ @@ -1114,19 +1116,19 @@ def get_driver_info(self, signed=True): # Try 5 times to get driver info output, count = '', 5 while count > 0: - logging.debug('%d times remaining for getting driver info' % count) + LOG.debug('%d times remaining for getting driver info' % count) try: # Clean up output self.session.cmd('cls') output = self.session.cmd_output(cmd) except Exception as detail: - logging.error(detail) + LOG.error(detail) count -= 1 else: break if not output: - logging.error('Fail to get driver info') - logging.debug("Command output:\n%s", output) + LOG.error('Fail to get driver info') + LOG.debug("Command output:\n%s", output) return output def get_windows_event_info(self): @@ -1261,7 +1263,7 @@ def _v2v_post_cmd(): global_params = params.get('params', {}) if not global_params: # For the back compatibility reason, only report a warning 
message - logging.warning( + LOG.warning( "The global params in run() need to be passed into v2v_cmd as an" "item of params, like {'params': params}. " "If not, some latest functions may not work as expected.") @@ -1405,7 +1407,7 @@ def cmd_run(cmd, obj_be_cleaned=None, auto_clean=True, timeout=18000): ignore_status=True) finally: if auto_clean and obj_be_cleaned: - logging.debug('Running cleanup for %s', obj_be_cleaned) + LOG.debug('Running cleanup for %s', obj_be_cleaned) if isinstance(obj_be_cleaned, list): for obj in obj_be_cleaned: obj.cleanup() @@ -1428,17 +1430,17 @@ def import_vm_to_ovirt(params, address_cache, timeout=600): output_method = params.get('output_method') # Check oVirt status dc = ovirt.DataCenterManager(params) - logging.info("Current data centers list: %s", dc.list()) + LOG.info("Current data centers list: %s", dc.list()) cm = ovirt.ClusterManager(params) - logging.info("Current cluster list: %s", cm.list()) + LOG.info("Current cluster list: %s", cm.list()) hm = ovirt.HostManager(params) - logging.info("Current host list: %s", hm.list()) + LOG.info("Current host list: %s", hm.list()) sdm = ovirt.StorageDomainManager(params) - logging.info("Current storage domain list: %s", sdm.list()) + LOG.info("Current storage domain list: %s", sdm.list()) vm = ovirt.VMManager(vm_name, params, address_cache=address_cache) - logging.info("Current VM list: %s", vm.list()) + LOG.info("Current VM list: %s", vm.list()) if vm_name in vm.list() and output_method != 'rhv_upload': - logging.error("%s already exist", vm_name) + LOG.error("%s already exist", vm_name) return False wait_for_up = True if os_type == 'windows': @@ -1453,22 +1455,22 @@ def import_vm_to_ovirt(params, address_cache, timeout=600): storage_name, cluster_name, timeout=timeout) - logging.info("The latest VM list: %s", vm.list()) + LOG.info("The latest VM list: %s", vm.list()) except Exception as e: # Try to delete the vm from export domain vm.delete_from_export_domain(export_name) - 
logging.error("Import %s failed: %s", vm.name, e) + LOG.error("Import %s failed: %s", vm.name, e) return False try: if not is_option_in_v2v_cmd(v2v_cmd, '--no-copy'): # Start VM vm.start(wait_for_up=wait_for_up) else: - logging.debug( + LOG.debug( 'Skip starting VM: --no-copy is in cmdline:\n%s', v2v_cmd) except Exception as e: - logging.error("Start %s failed: %s", vm.name, e) + LOG.error("Start %s failed: %s", vm.name, e) vm.delete() if output_method != 'rhv_upload': vm.delete_from_export_domain(export_name) @@ -1490,15 +1492,15 @@ def _check_log(pattern_list, expect=True): for pattern in pattern_list: line = r'\s*'.join(pattern.split()) expected = 'expected' if expect else 'not expected' - logging.info('Searching for %s log: %s' % (expected, pattern)) + LOG.info('Searching for %s log: %s' % (expected, pattern)) compiled_pattern = re.compile(line, flags=re.S) search = re.search(compiled_pattern, log) if search: - logging.info('Found log: %s', search.group(0)) + LOG.info('Found log: %s', search.group(0)) if not expect: return False else: - logging.info('Not find log: %s', pattern) + LOG.info('Not find log: %s', pattern) if expect: return False return True @@ -1506,13 +1508,13 @@ def _check_log(pattern_list, expect=True): expect_msg = params.get('expect_msg') ret = '' if not expect_msg: - logging.info('No need to check v2v log') + LOG.info('No need to check v2v log') else: expect = expect_msg == 'yes' if params.get('msg_content'): msg_list = params['msg_content'].split('%') if _check_log(msg_list, expect=expect): - logging.info('Finish checking v2v log') + LOG.info('Finish checking v2v log') else: ret = 'Check v2v log failed' else: @@ -1575,7 +1577,7 @@ def get_vddk_thumbprint(host, password, uri_type, prompt=r"[\#\$\[\]]"): prompt=prompt, preferred_authenticaton='password,keyboard-interactive') cmdresult = r_runner.run(cmd) - logging.debug("vddk thumbprint:\n%s", cmdresult.stdout) + LOG.debug("vddk thumbprint:\n%s", cmdresult.stdout) vddk_thumbprint = 
cmdresult.stdout.strip().split('=')[1] return vddk_thumbprint @@ -1608,8 +1610,8 @@ def v2v_setup_ssh_key( :return: A tuple (public_key, session) will always be returned """ session = None - logging.debug('Performing SSH key setup on %s:%d as %s.' % - (hostname, port, username)) + LOG.debug('Performing SSH key setup on %s:%d as %s.' % + (hostname, port, username)) try: # Both Xen and ESX can work with following settings. if not preferred_authenticaton: @@ -1649,7 +1651,7 @@ def v2v_setup_ssh_key( session.cmd("echo '%s' >> ~/.ssh/authorized_keys; " % public_key) session.cmd('chmod 600 ~/.ssh/authorized_keys') - logging.debug('SSH key setup complete, session is %s', session) + LOG.debug('SSH key setup complete, session is %s', session) return public_key, session except Exception as err: @@ -1658,7 +1660,7 @@ def v2v_setup_ssh_key( raise exceptions.TestFail("SSH key setup failed: '%s'" % err) finally: if auto_close and session: - logging.debug('cleaning session: %s', session) + LOG.debug('cleaning session: %s', session) session.close() @@ -1687,7 +1689,7 @@ def v2v_setup_ssh_key_cleanup(session=None, key=None, server_type=None): session.cmd(cmd) finally: if session: - logging.debug('cleaning session: %s', session) + LOG.debug('cleaning session: %s', session) session.close() @@ -1747,7 +1749,7 @@ def create_virsh_instance( :param remote_pwd: Password to use, or None for host/pubkey :param debug: Whether to enable debug """ - logging.debug( + LOG.debug( "virsh connection info: hypervisor=%s uri=%s ip=%s", hypervisor, uri, @@ -1762,7 +1764,7 @@ def create_virsh_instance( 'auto_close': True, 'debug': debug} v2v_virsh = wait_for(virsh.VirshPersistent, **virsh_dargs) - logging.debug('A new virsh persistent session %s was created', v2v_virsh) + LOG.debug('A new virsh persistent session %s was created', v2v_virsh) return v2v_virsh @@ -1773,7 +1775,7 @@ def close_virsh_instance(virsh_instance=None): :param v2v_virsh_instance: a virsh instance """ - logging.debug('Closing 
session (%s) in VT', virsh_instance) + LOG.debug('Closing session (%s) in VT', virsh_instance) if virsh_instance and hasattr(virsh_instance, 'close_session'): virsh_instance.close_session() @@ -1799,7 +1801,7 @@ def get_all_ifaces_info(vm_name, virsh_instance): for mac, iface in interfaces.items(): vm_ifaces.append((mac, iface.get('type'))) - logging.debug("Iface information for vm %s: %s", vm_name, vm_ifaces) + LOG.debug("Iface information for vm %s: %s", vm_name, vm_ifaces) return vm_ifaces @@ -1853,7 +1855,7 @@ def _parse_file_info(path): disks_info[file_info[0]][file_info[1]] = [] disks_info[file_info[0]][file_info[1]].append(file_info[2]) - logging.debug("source disk info vm %s: %s", vm_name, disks_info) + LOG.debug("source disk info vm %s: %s", vm_name, disks_info) return disks_info @@ -1907,7 +1909,7 @@ def wait_for(func, timeout=300, interval=10, *args, **kwargs): count += 1 time.sleep(interval) - logging.debug("Tried %s times", count) + LOG.debug("Tried %s times", count) # Run once more, raise exception or success return func(*args, **kwargs) @@ -2077,20 +2079,20 @@ def handle_prompts(session, timeout=300, interval=1.0): match, _ = session.read_until_last_line_matches( LAST_LINE_PROMPTS, timeout=timeout, internal_timeout=0.5) if match in [0, 1]: # "username:" - logging.debug( + LOG.debug( "Got username prompt; sending '%s'", username) session.sendline(username) elif match in [2, 3, 4, 5]: - logging.debug( + LOG.debug( "Got password prompt, sending '%s'", asterisk_passwd(password)) session.sendline(password) elif match == 6: # Wait for custom input - logging.debug( + LOG.debug( "Got console '%s', send input list %s", match, choices) session.sendline(choices) elif match == 7: # LUKS password - logging.debug( + LOG.debug( "Got password prompt, sending '%s'", asterisk_passwd(luks_password)) session.sendline(luks_password) @@ -2102,7 +2104,7 @@ def handle_prompts(session, timeout=300, interval=1.0): # when free_running is true and timeout happens, it means # 
the command doesn't have any response, may be dead or # performance is quite poor. - logging.debug("timeout happens") + LOG.debug("timeout happens") if free_running: raise @@ -2115,7 +2117,7 @@ def handle_prompts(session, timeout=300, interval=1.0): if nonempty_lines: new_last_line = nonempty_lines[-1] if last_line and last_line == new_last_line: - logging.debug( + LOG.debug( 'v2v command may be dead or have bad performance') raise last_line = new_last_line @@ -2123,7 +2125,7 @@ def handle_prompts(session, timeout=300, interval=1.0): # Set a big timeout value when interaction finishes for pattern in FREE_RUNNING_PROMPTS: if re.search(pattern, cont): - logging.debug( + LOG.debug( "interaction finished and begin running freely") free_running = True timeout = running_timeout @@ -2131,7 +2133,7 @@ def handle_prompts(session, timeout=300, interval=1.0): try: subproc = aexpect.Expect(*args, **kwargs) - logging.debug('Running command: %s', subproc.command) + LOG.debug('Running command: %s', subproc.command) handle_prompts(subproc, timeout) except aexpect.ExpectProcessTerminatedError: # v2v cmd is dead or finished @@ -2142,7 +2144,7 @@ def handle_prompts(session, timeout=300, interval=1.0): subproc.sendcontrol('c') raise finally: - logging.debug( + LOG.debug( "Command '%s' finished with status %s", subproc.command, subproc.get_status()) diff --git a/virttest/utils_virtio_port.py b/virttest/utils_virtio_port.py index 7c64476d75..42f704c865 100644 --- a/virttest/utils_virtio_port.py +++ b/virttest/utils_virtio_port.py @@ -6,6 +6,8 @@ from . import error_context from . import qemu_virtio_port +LOG = logging.getLogger('avocado.' + __name__) + class VirtioPortTest(object): @@ -64,7 +66,7 @@ def get_vm_with_ports(self, no_consoles=0, no_serialports=0, spread=None, out += "consoles(%d), " % no_consoles if spread != _spread: out += "spread(%s), " % spread - logging.warning(out[:-2] + ". Modify config to speedup tests.") + LOG.warning(out[:-2] + ". 
Modify config to speedup tests.") params['serials'] = params.objects('serials')[0] if spread: @@ -83,9 +85,9 @@ def get_vm_with_ports(self, no_consoles=0, no_serialports=0, spread=None, params['serial_type_%s' % name] = "virtserialport" if quiet: - logging.debug("Recreating VM with more virtio ports.") + LOG.debug("Recreating VM with more virtio ports.") else: - logging.warning("Recreating VM with more virtio ports.") + LOG.warning("Recreating VM with more virtio ports.") env_process.preprocess_vm(self.test, params, self.env, main_vm) vm = self.env.get_vm(main_vm) diff --git a/virttest/utils_windows/virtio_win.py b/virttest/utils_windows/virtio_win.py index 03eb1796ab..5993908295 100644 --- a/virttest/utils_windows/virtio_win.py +++ b/virttest/utils_windows/virtio_win.py @@ -13,6 +13,8 @@ ARCH_MAP_ISO = {"32-bit": "x86", "64-bit": "amd64"} ARCH_MAP_VFD = {"32-bit": "i386", "64-bit": "amd64"} +LOG = logging.getLogger('avocado.' + __name__) + def arch_dirname_iso(session): """ @@ -133,7 +135,7 @@ def _get_netkvmco_path(session): find_cmd = 'dir /b /s %s\\netkvmco.dll | findstr "\\%s\\\\"' find_cmd %= (viowin_ltr, middle_path) netkvmco_path = session.cmd(find_cmd).strip() - logging.info("Found netkvmco.dll file at %s" % netkvmco_path) + LOG.info("Found netkvmco.dll file at %s" % netkvmco_path) return netkvmco_path @@ -143,7 +145,7 @@ def prepare_netkvmco(vm): param vm: the target vm """ - logging.info("Prepare the netkvmco.dll") + LOG.info("Prepare the netkvmco.dll") session = vm.wait_for_login(timeout=360) try: netkvmco_path = _get_netkvmco_path(session) diff --git a/virttest/video_maker.py b/virttest/video_maker.py index f482a9fb9d..7c72ee8896 100644 --- a/virttest/video_maker.py +++ b/virttest/video_maker.py @@ -56,6 +56,8 @@ CONTAINER_PREFERENCE = ['ogg', 'webm'] ENCODER_PREFERENCE = ['theora', 'vp8'] +LOG = logging.getLogger('avocado.' 
+ __name__) + class EncodingError(Exception): @@ -130,7 +132,7 @@ def normalize_images(self, input_dir): image_size[1] = 480 if self.verbose: - logging.debug('Normalizing image files to size: %s' % (image_size,)) + LOG.debug('Normalizing image files to size: %s' % (image_size,)) image_files = glob.glob(os.path.join(input_dir, '*.jpg')) for f in image_files: i = PIL.Image.open(f) @@ -182,14 +184,14 @@ def encode(self, input_dir, output_file): no_files = len(file_list) if no_files == 0: if self.verbose: - logging.debug("Number of files to encode as video is zero") + LOG.debug("Number of files to encode as video is zero") return index_list = [int(path[-8:-4]) for path in file_list] index_list.sort() if self.verbose: - logging.debug('Number of files to encode as video: %s' % no_files) + LOG.debug('Number of files to encode as video: %s' % no_files) # Define the gstreamer pipeline pipeline = Gst.Pipeline() @@ -247,7 +249,7 @@ def encode(self, input_dir, output_file): if t == Gst.MessageType.EOS: pipeline.set_state(Gst.State.NULL) if self.verbose: - logging.debug("Video %s encoded successfully" % output_file) + LOG.debug("Video %s encoded successfully" % output_file) break elif t == Gst.MessageType.ERROR: err, debug = msg.parse_error() @@ -319,7 +321,7 @@ def normalize_images(self, input_dir): image_size = (800, 600) if self.verbose: - logging.debug('Normalizing image files to size: %s', image_size) + LOG.debug('Normalizing image files to size: %s', image_size) image_files = glob.glob(os.path.join(input_dir, '*.jpg')) for f in image_files: i = PIL.Image.open(f) @@ -359,7 +361,7 @@ def get_element(self, name): Makes and returns and element from the gst factory interface """ if self.verbose: - logging.debug('GStreamer element requested: %s', name) + LOG.debug('GStreamer element requested: %s', name) return gst.element_factory_make(name, name) def encode(self, input_dir, output_file): @@ -371,21 +373,21 @@ def encode(self, input_dir, output_file): no_files = len(file_list) 
if no_files == 0: if self.verbose: - logging.debug("Number of files to encode as video is zero") + LOG.debug("Number of files to encode as video is zero") return index_list = [] for ifile in file_list: index_list.append(int(re.findall(r"/+.*/(\d{4})\.jpg", ifile)[0])) index_list.sort() if self.verbose: - logging.debug('Number of files to encode as video: %s', no_files) + LOG.debug('Number of files to encode as video: %s', no_files) pipeline = gst.Pipeline("pipeline") source = self.get_element("multifilesrc") source_location = os.path.join(input_dir, "%04d.jpg") if self.verbose: - logging.debug("Source location: %s", source_location) + LOG.debug("Source location: %s", source_location) source.set_property('location', source_location) source.set_property('index', index_list[0]) source_caps = gst.Caps() @@ -424,8 +426,8 @@ def encode(self, input_dir, output_file): while True: if source.get_property('index') <= no_files: if self.verbose: - logging.debug("Currently processing image number: %s", - source.get_property('index')) + LOG.debug("Currently processing image number: %s", + source.get_property('index')) time.sleep(1) else: break diff --git a/virttest/virsh.py b/virttest/virsh.py index 8eef2ebb0a..afbdfc506f 100644 --- a/virttest/virsh.py +++ b/virttest/virsh.py @@ -48,6 +48,8 @@ from virttest import data_dir +LOG = logging.getLogger('avocado.' 
+ __name__) + # list of symbol names NOT to wrap as Virsh class methods # Everything else from globals() will become a method of Virsh class NOCLOSE = list(globals().keys()) + [ @@ -195,8 +197,8 @@ def __init__(self, virsh_exec=None, uri=None, a_id=None, # fail if libvirtd is not running if check_libvirtd: if self.cmd_status('list', timeout=60) != 0: - logging.debug("Persistent virsh session is not responding, " - "libvirtd may be dead.") + LOG.debug("Persistent virsh session is not responding, " + "libvirtd may be dead.") self.auto_close = True raise aexpect.ShellStatusError(virsh_exec, 'list') @@ -243,7 +245,7 @@ def cmd_result(self, cmd, ignore_status=False, debug=False, timeout=60): raise process.CmdError(cmd, result, "Virsh Command returned non-zero exit status") if debug: - logging.debug(result) + LOG.debug(result) return result def read_until_output_matches(self, patterns, filter_func=lambda x: x, @@ -650,7 +652,7 @@ def start_get_event(vm_name): """ virsh_session = aexpect.ShellSession(VIRSH_EXEC) event_cmd = 'event %s --all --loop' % vm_name - logging.info('Sending "%s" to virsh shell', event_cmd) + LOG.info('Sending "%s" to virsh shell', event_cmd) virsh_session.sendline(event_cmd) return virsh_session @@ -674,7 +676,7 @@ def finish_get_event(virsh_session): time.sleep(5) event_output = virsh_session.get_stripped_output() virsh_session.close() - logging.debug('Event output is %s:', event_output) + LOG.debug('Event output is %s:', event_output) return event_output @@ -700,7 +702,7 @@ def _get_arg_value(arg): def _get_event_output(session): output = session.get_stripped_output() - logging.debug(output) + LOG.debug(output) return output wait_for_event = _get_arg_value('wait_for_event') @@ -712,7 +714,7 @@ def _get_event_output(session): ret = func(*args, **kwargs) if ret and ret.exit_status: - logging.error('Command execution failed. Skip waiting for event') + LOG.error('Command execution failed. 
Skip waiting for event') virsh_session.close() return ret @@ -765,19 +767,19 @@ def command(cmd, **dargs): session = None if debug: - logging.debug("Running virsh command: %s", cmd) + LOG.debug("Running virsh command: %s", cmd) if timeout: try: timeout = int(timeout) except ValueError: - logging.error("Ignore the invalid timeout value: %s", timeout) + LOG.error("Ignore the invalid timeout value: %s", timeout) timeout = None if session: # Utilize persistent virsh session, not suit for readonly mode if readonly: - logging.debug("Ignore readonly flag for this virsh session") + LOG.debug("Ignore readonly flag for this virsh session") if timeout is None: timeout = 60 ret = session.cmd_result(cmd, ignore_status=ignore_status, @@ -816,9 +818,9 @@ def command(cmd, **dargs): # Always log debug info, if persistent session or not if debug: - logging.debug("status: %s", ret.exit_status) - logging.debug("stdout: %s", ret.stdout_text.strip()) - logging.debug("stderr: %s", ret.stderr_text.strip()) + LOG.debug("status: %s", ret.exit_status) + LOG.debug("stdout: %s", ret.stdout_text.strip()) + LOG.debug("stderr: %s", ret.stderr_text.strip()) # Return CmdResult instance when ignore_status is True return ret @@ -1224,10 +1226,10 @@ def screenshot(name, filename, **dargs): command("screenshot %s %s" % (name, filename), **dargs) except process.CmdError as detail: if SCREENSHOT_ERROR_COUNT < 1: - logging.error("Error taking VM %s screenshot. You might have to " - "set take_regular_screendumps=no on your " - "tests.cfg config file \n%s. This will be the " - "only logged error message.", name, detail) + LOG.error("Error taking VM %s screenshot. You might have to " + "set take_regular_screendumps=no on your " + "tests.cfg config file \n%s. 
This will be the " + "only logged error message.", name, detail) SCREENSHOT_ERROR_COUNT += 1 return filename @@ -1423,7 +1425,7 @@ def is_dead(name, **dargs): return True if state not in ('running', 'idle', 'paused', 'in shutdown', 'shut off', 'crashed', 'pmsuspended', 'no state'): - logging.debug("State '%s' not known", state) + LOG.debug("State '%s' not known", state) if state in ('shut off', 'crashed', 'no state'): return True return False @@ -1547,7 +1549,7 @@ def define(xml_path, options=None, **dargs): cmd = "define --file %s" % xml_path if options is not None: cmd += " %s" % options - logging.debug("Define VM from %s", xml_path) + LOG.debug("Define VM from %s", xml_path) return command(cmd, **dargs) @@ -1567,7 +1569,7 @@ def undefine(name, options=None, **dargs): if options is None or "--nvram" not in options: cmd += " --nvram" - logging.debug("Undefine VM %s", name) + LOG.debug("Undefine VM %s", name) return command(cmd, **dargs) @@ -1586,7 +1588,7 @@ def remove_domain(name, options=None, **dargs): dargs['ignore_status'] = False undefine(name, options, **dargs) except process.CmdError as detail: - logging.error("Undefine VM %s failed:\n%s", name, detail) + LOG.error("Undefine VM %s failed:\n%s", name, detail) return False return True @@ -1604,9 +1606,9 @@ def domain_exists(name, **dargs): command("domstate %s" % name, **dargs) return True except process.CmdError as detail: - logging.warning("VM %s does not exist", name) + LOG.warning("VM %s does not exist", name) if dargs.get('debug', False): - logging.warning(str(detail)) + LOG.warning(str(detail)) return False @@ -2136,8 +2138,8 @@ def _pool_type_check(pool_type): 'gluster', 'rbd', 'scsi', 'iscsi-direct'] if pool_type and pool_type not in valid_types: - logging.error("Specified pool type '%s' not in '%s'", - pool_type, valid_types) + LOG.error("Specified pool type '%s' not in '%s'", + pool_type, valid_types) pool_type = None elif not pool_type: # take the first element as default pool_type @@ -2169,7 
+2171,7 @@ def pool_destroy(name, **dargs): command(cmd, **dargs) return True except process.CmdError as detail: - logging.error("Failed to destroy pool: %s.", detail) + LOG.error("Failed to destroy pool: %s.", detail) return False @@ -2198,13 +2200,13 @@ def pool_create_as(name, pool_type, target, extra="", **dargs): """ if not name: - logging.error("Please give a pool name") + LOG.error("Please give a pool name") pool_type = _pool_type_check(pool_type) if pool_type is None: return False - logging.info("Create %s type pool %s", pool_type, name) + LOG.info("Create %s type pool %s", pool_type, name) cmd = "pool-create-as --name %s --type %s --target %s %s" \ % (name, pool_type, target, extra) dargs['ignore_status'] = False @@ -2212,7 +2214,7 @@ def pool_create_as(name, pool_type, target, extra="", **dargs): command(cmd, **dargs) return True except process.CmdError as detail: - logging.error("Failed to create pool: %s.", detail) + LOG.error("Failed to create pool: %s.", detail) return False @@ -2359,7 +2361,7 @@ def pool_define_as(name, pool_type, target="", extra="", **dargs): if pool_type is None: return False - logging.debug("Try to define %s type pool %s", pool_type, name) + LOG.debug("Try to define %s type pool %s", pool_type, name) cmd = "pool-define-as --name %s --type %s %s" \ % (name, pool_type, extra) # Target is not a must @@ -2797,7 +2799,7 @@ def memtune_get(name, key): :return: the memory value of a key in Kbs """ memtune_output = memtune_list(name).stdout_text.strip() - logging.info("memtune output is %s" % memtune_output) + LOG.info("memtune output is %s" % memtune_output) memtune_value = re.findall(r"%s\s*:\s+(\S+)" % key, str(memtune_output)) if memtune_value: return int(memtune_value[0] if memtune_value[0] != "unlimited" else -1) @@ -3861,7 +3863,7 @@ def secret_define(xml_file, options=None, **dargs): cmd = "secret-define --file %s" % xml_file if options is not None: cmd += " %s" % options - logging.debug("Define secret from %s", xml_file) + 
LOG.debug("Define secret from %s", xml_file) return command(cmd, **dargs) @@ -3877,7 +3879,7 @@ def secret_undefine(uuid, options=None, **dargs): if options is not None: cmd += " %s" % options - logging.debug("Undefine secret %s", uuid) + LOG.debug("Undefine secret %s", uuid) return command(cmd, **dargs) @@ -3964,7 +3966,7 @@ def nodedev_create(xml_file, options=None, **dargs): if options is not None: cmd += " %s" % options - logging.debug("Create the device from %s", xml_file) + LOG.debug("Create the device from %s", xml_file) return command(cmd, **dargs) @@ -3980,7 +3982,7 @@ def nodedev_destroy(dev_name, options=None, **dargs): if options is not None: cmd += " %s" % options - logging.debug("Destroy the device %s on the node", dev_name) + LOG.debug("Destroy the device %s on the node", dev_name) return command(cmd, **dargs) diff --git a/virttest/virt_admin.py b/virttest/virt_admin.py index c3dfb67863..c2c2db3db7 100644 --- a/virttest/virt_admin.py +++ b/virttest/virt_admin.py @@ -67,6 +67,8 @@ " will not function normally") VIRTADMIN_EXEC = '/bin/true' +LOG = logging.getLogger('avocado.' 
+ __name__) + class VirtadminBase(propcan.PropCanBase): @@ -147,7 +149,7 @@ def __init__(self, virtadmin_exec=None, uri=None, a_id=None, # Special handling if setting up a remote session if ssh_remote_auth: # remote to remote - logging.error("remote session is not supported by virt-admin yet.") + LOG.error("remote session is not supported by virt-admin yet.") if remote_pwd: pref_auth = "-o PreferredAuthentications=password" else: @@ -185,8 +187,8 @@ def __init__(self, virtadmin_exec=None, uri=None, a_id=None, # fail if libvirtd is not running if check_libvirtd: if self.cmd_status('uri', timeout=60) != 0: - logging.debug("Persistent virt-admin session is not responding, " - "libvirtd may be dead.") + LOG.debug("Persistent virt-admin session is not responding, " + "libvirtd may be dead.") self.auto_close = True raise aexpect.ShellStatusError(virtadmin_exec, 'uri') @@ -237,7 +239,7 @@ def cmd_result(self, cmd, ignore_status=False, debug=False, timeout=60): raise process.CmdError(cmd, result, "Virtadmin Command returned non-zero exit status") if debug: - logging.debug(result) + LOG.debug(result) return result def read_until_output_matches(self, patterns, filter_func=lambda x: x, @@ -650,19 +652,19 @@ def command(cmd, **dargs): session = None if debug: - logging.debug("Running virtadmin command: %s", cmd) + LOG.debug("Running virtadmin command: %s", cmd) if timeout: try: timeout = int(timeout) except ValueError: - logging.error("Ignore the invalid timeout value: %s", timeout) + LOG.error("Ignore the invalid timeout value: %s", timeout) timeout = None if session: # Utilize persistent virtadmin session, not suit for readonly mode if readonly: - logging.debug("Ignore readonly flag for this virtadmin session") + LOG.debug("Ignore readonly flag for this virtadmin session") if timeout is None: timeout = 60 ret = session.cmd_result(cmd, ignore_status=ignore_status, @@ -673,7 +675,7 @@ def command(cmd, **dargs): # Normal call to run virtadmin command # Readonly mode if 
readonly: - logging.error("readonly mode is not supported by virt-admin yet.") + LOG.error("readonly mode is not supported by virt-admin yet.") # cmd = " -r " + cmd if uri: @@ -697,9 +699,9 @@ def command(cmd, **dargs): # Always log debug info, if persistent session or not if debug: - logging.debug("status: %s", ret.exit_status) - logging.debug("stdout: %s", ret.stdout_text.strip()) - logging.debug("stderr: %s", ret.stderr_text.strip()) + LOG.debug("status: %s", ret.exit_status) + LOG.debug("stdout: %s", ret.stdout_text.strip()) + LOG.debug("stderr: %s", ret.stderr_text.strip()) # Return CmdResult instance when ignore_status is True return ret diff --git a/virttest/virt_vm.py b/virttest/virt_vm.py index 5396e68a30..dbb902b730 100644 --- a/virttest/virt_vm.py +++ b/virttest/virt_vm.py @@ -26,6 +26,9 @@ from virttest import vt_console +LOG = logging.getLogger('avocado.' + __name__) + + class VMError(Exception): def __init__(self, *args): @@ -183,7 +186,7 @@ class VMDeadKernelCrashError(VMError): def __init__(self, kernel_crash): VMError.__init__(self, kernel_crash) - logging.debug(kernel_crash) + LOG.debug(kernel_crash) def __str__(self): return ("VM is dead due to a kernel crash, " @@ -656,10 +659,10 @@ def needs_restart(self, name, params, basedir): need_restart = (self.make_create_command() != self.make_create_command(name, params, basedir)) except Exception: - logging.error(traceback.format_exc()) + LOG.error(traceback.format_exc()) need_restart = True if need_restart: - logging.debug( + LOG.debug( "VM params in env don't match requested, restarting.") return True else: @@ -667,14 +670,14 @@ def needs_restart(self, name, params, basedir): # TODO: Check more than just networking other_virtnet = utils_net.VirtNet(params, name, self.instance) if self.virtnet != other_virtnet: - logging.debug("VM params in env match, but network differs, " - "restarting") - logging.debug("\t" + str(self.virtnet)) - logging.debug("\t!=") - logging.debug("\t" + str(other_virtnet)) + 
LOG.debug("VM params in env match, but network differs, " + "restarting") + LOG.debug("\t" + str(self.virtnet)) + LOG.debug("\t!=") + LOG.debug("\t" + str(other_virtnet)) return True else: - logging.debug( + LOG.debug( "VM params in env do match requested, continuing.") return False @@ -721,8 +724,7 @@ def sosreport(self, path=None, connect_uri=None): """ log_path = None if not self.params["os_type"] == "linux": - logging.warn("sosreport not applicable for %s", - self.params["os_type"]) + LOG.warn("sosreport not applicable for %s", self.params["os_type"]) return None try: pkg = "sos" @@ -843,7 +845,7 @@ def _get_address(self, index=0, ip_version="ipv4", session=None, msg = "Could not verify DHCP lease: %s-> %s." % (mac, ip_addr) msg += " Maybe %s is not in the same subnet " % ip_addr msg += "as the host (%s in use)" % nic_backend - logging.error(msg) + LOG.error(msg) return ip_addr @@ -918,7 +920,7 @@ def _get_address(): msg = 'Found/Verified IP %s for VM %s NIC %s' % (ipaddr, self.name, nic_index) - logging.debug(msg) + LOG.debug(msg) return ipaddr # Adding/setup networking devices methods split between 'add_*' for @@ -940,7 +942,7 @@ def add_nic(self, **params): self.virtnet.append(params) nic = self.virtnet[nic_name] if 'mac' not in nic: # generate random mac - logging.debug("Generating random mac address for nic") + LOG.debug("Generating random mac address for nic") self.virtnet.generate_mac_address(nic_name) # mac of '' or invalid format results in not setting a mac if 'ip' in nic and 'mac' in nic: @@ -974,7 +976,7 @@ def get_nic_index_by_mac(self, mac): continue elif nic.mac == mac: return index - logging.warn("Not find nic by '%s'", mac) + LOG.warn("Not find nic by '%s'", mac) return -1 def verify_kernel_crash(self): @@ -991,7 +993,7 @@ def verify_kernel_crash(self): if self.serial_console: data = self.serial_console.get_output() if data is None: - logging.warn("Unable to read serial console") + LOG.warn("Unable to read serial console") return match = 
re.search(panic_re, data, re.DOTALL | re.MULTILINE | re.I) if match: @@ -1044,7 +1046,7 @@ def verify_illegal_instruction(self): if self.serial_console is not None: data = self.serial_console.get_output() if data is None: - logging.warn("Unable to read serial console") + LOG.warn("Unable to read serial console") return match = re.findall(r".*trap invalid opcode.*\n", data, re.MULTILINE) @@ -1182,17 +1184,17 @@ def print_guest_network_info(): txt = ["Guest network status:\n %s" % out] out = session.cmd_output("ip route || route print", timeout=60) txt += ["Guest route table:\n %s" % out] - logging.error("\n".join(txt)) + LOG.error("\n".join(txt)) except Exception as e: - logging.error("Can't get guest network status " - "information, reason: %s", e) + LOG.error("Can't get guest network status " + "information, reason: %s", e) finally: if session: session.close() error = None - logging.debug("Attempting to log into '%s' (timeout %ds)", - self.name, timeout) + LOG.debug("Attempting to log into '%s' (timeout %ds)", + self.name, timeout) start_time = time.time() try: self.wait_for_get_address(nic_index, @@ -1382,8 +1384,8 @@ def wait_for_serial_login(self, timeout=LOGIN_WAIT_TIMEOUT, eg. during reboot or pause) :return: ConsoleSession instance. 
""" - logging.debug("Attempting to log into '%s' via serial console " - "(timeout %ds)", self.name, timeout) + LOG.debug("Attempting to log into '%s' via serial console " + "(timeout %ds)", self.name, timeout) end_time = time.time() + timeout while time.time() < end_time: try: @@ -1402,7 +1404,7 @@ def wait_for_serial_login(self, timeout=LOGIN_WAIT_TIMEOUT, timeout) if restart_network: try: - logging.debug("Attempting to restart guest network") + LOG.debug("Attempting to restart guest network") os_type = self.params.get('os_type') utils_net.restart_guest_network(session, os_type=os_type) except (ShellError, ExpectError): diff --git a/virttest/xml_utils.py b/virttest/xml_utils.py index 276f0eb57f..bdb116d6df 100644 --- a/virttest/xml_utils.py +++ b/virttest/xml_utils.py @@ -52,6 +52,8 @@ class should be an ElementTree.TreeBuilder class or subclass. Instances EXSFX = '_exception_retained' ENCODING = "UTF-8" +LOG = logging.getLogger('avocado.' + __name__) + class TempXMLFile(object): @@ -78,7 +80,7 @@ def _info(self): """ Inform user that file was not auto-deleted due to exceptional exit. 
""" - logging.info("Retaining %s", self.name + EXSFX) + LOG.info("Retaining %s", self.name + EXSFX) def unlink(self): """ @@ -88,7 +90,7 @@ def unlink(self): os.unlink(self.name) self.close() except (OSError, IOError): - logging.info("unlink file fail") + LOG.info("unlink file fail") def close(self): """ @@ -97,7 +99,7 @@ def close(self): try: self.open_file.close() except IOError: - logging.info("close file fail") + LOG.info("close file fail") def seek(self, offset, whence=0): """ @@ -106,7 +108,7 @@ def seek(self, offset, whence=0): try: self.open_file.seek(offset, whence) except IOError: - logging.info("seek file fail") + LOG.info("seek file fail") def flush(self): """ @@ -115,7 +117,7 @@ def flush(self): try: self.open_file.flush() except IOError: - logging.info("flush file fail") + LOG.info("flush file fail") def truncate(self, size): """ @@ -124,7 +126,7 @@ def truncate(self, size): try: self.open_file.truncate(size) except IOError: - logging.info("truncate file fail") + LOG.info("truncate file fail") def tell(self): """ @@ -133,7 +135,7 @@ def tell(self): try: return self.open_file.tell() except IOError: - logging.info("tell file fail") + LOG.info("tell file fail") def write(self, content): """ @@ -142,7 +144,7 @@ def write(self, content): try: self.open_file.write(content) except IOError: - logging.info("write file fail") + LOG.info("write file fail") def read(self, size=None): """ @@ -154,7 +156,7 @@ def read(self, size=None): else: return self.open_file.read() except IOError: - logging.info("read file fail") + LOG.info("read file fail") def readline(self, size=None): """ @@ -166,7 +168,7 @@ def readline(self, size=None): else: return self.open_file.readline() except IOError: - logging.info("readline file fail") + LOG.info("readline file fail") def readlines(self, size=None): """ @@ -178,7 +180,7 @@ def readlines(self, size=None): else: return self.open_file.readlines() except IOError: - logging.info("readlines file fail") + LOG.info("readlines file fail") 
def __exit__(self, exc_type, exc_value, traceback): """ @@ -228,8 +230,8 @@ def _info(self): """ Inform user that file was not auto-deleted due to exceptional exit. """ - logging.info("Retaining backup of %s in %s", self.sourcefilename, - self.name + EXSFX) + LOG.info("Retaining backup of %s in %s", self.sourcefilename, + self.name + EXSFX) def backup(self): """