diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..1b12d31 --- /dev/null +++ b/.gitignore @@ -0,0 +1,45 @@ +# make output +/dist +# apps +src/orchestrator/orchestrator* +src/reporter/reporter* +src/collector/collector +src/collector/collector.exe +src/collector/collector_arm64 +src/rdmsr/rdmsr +src/msrbusy/msrbusy +src/calcfreq/calcfreq +# src +async-profiler +burn +cpuid +dmidecode +ethtool +fio +flamegraph +ipmitool +linux +lshw +mlc +pmu-checker +spectre-meltdown-checker +sshpass +stress-ng +sysstat +src/async-profiler-* +src/linux-* +src/cpuid-* +src/libcrypt* +src/glibc* +src/zlib* +# build output in config +collector_deps_a* +# run/debug output +src/collector/collector.log +src/collector/collector.pid +src/orchestrator/orchestrator_20* +src/reporter/debug_out/* +__debug_bin*.log +# other +src/orchestrator/targets +config/sums.md5 diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..7e69232 --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +Copyright (C) 2023 Intel Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), +to deal in the Software without restriction, including without limitation +the rights to use, copy, modify, merge, publish, distribute, sublicense, +and/or sell copies of the Software, and to permit persons to whom +the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included +in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL +THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES +OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE +OR OTHER DEALINGS IN THE SOFTWARE. + +SPDX-License-Identifier: MIT \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..4ba1c6d --- /dev/null +++ b/Makefile @@ -0,0 +1,95 @@ +#!make +# +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: MIT +# +COMMIT_ID := $(shell git rev-parse --short=8 HEAD) +COMMIT_DATE := $(shell git show -s --format=%cd --date=short HEAD) +VERSION_FILE := version.txt +VERSION_NUMBER := $(shell cat ${VERSION_FILE}) +VERSION := $(VERSION_NUMBER)_$(COMMIT_DATE)_$(COMMIT_ID) +TARBALL := svr-info-$(VERSION_NUMBER).tgz + +default: apps collector-deps dist-amd64 +.PHONY: default clean test apps tools dist dist-amd64 collector-deps collector-deps-amd64 collector-deps-arm64 + +apps: + cd src && VERSION=$(VERSION) make apps + +tools: + cd src && VERSION=$(VERSION) make tools + +collector-deps-amd64: + $(eval TMPDIR := $(shell mktemp -d build.XXXXXX)) + cp src/calcfreq/calcfreq $(TMPDIR) + cp src/cpuid/cpuid $(TMPDIR) + cp src/dmidecode/dmidecode $(TMPDIR) + cp src/ethtool/ethtool $(TMPDIR) + cp src/fio/fio $(TMPDIR) + cp src/ipmitool/src/ipmitool.static $(TMPDIR)/ipmitool + cp src/lshw/src/lshw-static $(TMPDIR)/lshw + -cp src/mlc/mlc $(TMPDIR) + cp src/msrbusy/msrbusy $(TMPDIR) + cp src/linux/tools/perf/perf $(TMPDIR) + cp -R src/async-profiler $(TMPDIR) + cp src/flamegraph/stackcollapse-perf.pl $(TMPDIR) + cp src/rdmsr/rdmsr $(TMPDIR) + cp src/spectre-meltdown-checker/spectre-meltdown-checker.sh $(TMPDIR) + cp src/stress-ng/stress-ng $(TMPDIR) + cp src/sysstat/mpstat $(TMPDIR) + cp src/sysstat/iostat $(TMPDIR) + cp src/sysstat/sar $(TMPDIR) + cp src/sysstat/sadc $(TMPDIR) + cp src/linux/tools/power/x86/turbostat/turbostat $(TMPDIR) + -cp -r bin/* $(TMPDIR) + for f in 
$(TMPDIR)/*; do strip -s -p --strip-unneeded $$f; done + cd $(TMPDIR) && tar -czf ../config/collector_deps_amd64.tgz . + rm -rf $(TMPDIR) + +collector-deps-arm64: + $(eval TMPDIR := $(shell mktemp -d build.XXXXXX)) + cp src/spectre-meltdown-checker/spectre-meltdown-checker.sh $(TMPDIR) + cd $(TMPDIR) && tar -czf ../config/collector_deps_arm64.tgz . + rm -rf $(TMPDIR) + +collector-deps: collector-deps-amd64 collector-deps-arm64 + +dist-amd64: + rm -rf dist/svr-info + mkdir -p dist/svr-info/tools + cp src/orchestrator/orchestrator dist/svr-info/tools + cp src/collector/collector dist/svr-info/tools + cp src/collector/collector_arm64 dist/svr-info/tools + cp src/reporter/reporter dist/svr-info/tools + cp src/sshpass/sshpass dist/svr-info/tools + cp src/burn/burn dist/svr-info/tools + mkdir -p dist/svr-info/config/extras + rsync config/* dist/svr-info/config + cp LICENSE dist/svr-info + cp README dist/svr-info + cp RELEASE_NOTES dist/svr-info + cp third-party-programs.txt dist/svr-info + cp src/orchestrator/targets.example dist/svr-info + cp documentation/ServerInfoUserGuide.pdf dist/svr-info/UserGuide.pdf + cd dist/svr-info && ln -s tools/orchestrator ./svr-info + cd dist/svr-info && find . -type f -exec md5sum {} + > config/sums.md5 + cd dist && tar -czf $(TARBALL) svr-info + cd dist && md5sum $(TARBALL) > $(TARBALL).md5 + cp dist/svr-info/config/sums.md5 config/sums.md5 + rm -rf dist/svr-info/ + +dist: apps tools collector-deps dist-amd64 oss + +oss: + cd src && make oss-source + mv src/oss_source* dist/ + +clean: + cd src && make clean + rm -rf dist + +test: + rm -rf test/svr-info + cd test && tar -xf ../dist/$(TARBALL) + cd test && ./functional + rm -rf test/svr-info diff --git a/README b/README new file mode 100644 index 0000000..d411155 --- /dev/null +++ b/README @@ -0,0 +1,14 @@ +Intel® System Health Inspector (aka svr-info) is a Linux OS utility for assessing +the state and health of Intel Xeon computers. 
+ +HOW TO REPORT SECURITY VULNERABILITIES: If you have information about a +security issue or vulnerability with Intel® System Health Inspector, please +send an e-mail to secure@intel.com. Encrypt sensitive information using our +PGP public key. + +To see program options and usage, run the program with the -h option, e.g., + ./svr-info -h + +See the included UserGuide.pdf for more details. + +Note: additional data collection tools can be used by svr-info by placing them in the config/extras directory. For example, Intel® Memory Latency Checker can be downloaded from here: https://www.intel.com/content/www/us/en/download/736633/intel-memory-latency-checker-intel-mlc.html. Once downloaded, extract the Linux executable and place in the config/extras directory. \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..2d91a40 --- /dev/null +++ b/README.md @@ -0,0 +1,55 @@ +# Intel® System Health Inspector (aka svr-info) + +## Getting svr-info + +Download the latest release of svr-info from the repository's Releases page. + +## Running svr-info + +### Extract files from the archive and step into the directory + +`tar -xf svr-info.tgz` + +`cd svr-info` + +### View options + +`./svr-info -h` + +### Collect configuration data on local host + +`./svr-info` + +## Contributing + +We welcome bug reports, questions and feature requests. Please submit via Github Issues. + +## Building svr-info + +Due to the large number of build dependencies required, a Docker container-based build environment is provided. Assuming your system has Docker installed (instructions not provided here), the following steps are required to build svr-info: + +- `builder/build_docker_image` creates the docker image +- `builder/build` runs `make dist` in the container + +After a successful build, you will find the build output in the `dist` folder. 
+ +Other builder commands available: + +- `builder/test` runs the automated tests in the container via `make test` +- `builder/shell` starts the container and provides a bash prompt useful for debugging build problems + +### Incremental Builds +After a complete build using the build container, you can perform incremental builds directly on your host assuming dependencies are installed there. This can make the code/build/test cycle much quicker than rebuilding everything using the Docker container. You can look at the Dockerfile in the builder directory to get the build dependencies for everything or, more likely, you only need go(lang) so install the latest and get to work. + +From the project's root directory, you can use the makefile. There are quite a few targets. Most useful may be `make apps`. This will build all the go-based apps. + +If you are working on a single go-based app. You can run `go build` in the app's source directory to build it. + +### Additional Collection Tools +Additional data collection tools can be built into svr-info by placing binaries in the bin directory before starting the build. For example, Intel® Memory Latency Checker is a useful tool for identifying the health and performance of a server's memory subsystem. It can be downloaded from here: https://www.intel.com/content/www/us/en/download/736633/intel-memory-latency-checker-intel-mlc.html. Once downloaded, extract the Linux executable and place in the bin directory before starting the build. + +## Architecture +There are three primary applications that make up svr-info. They are written in go and can all be run/tested independently. +1. orchestrator - runs on local host, communicates with remote targets via SSH, configures and runs the collector component on selected targets, then runs the reporter component to generate reports. Svr-info is a symbolic link to the orchestrator application. +2. 
collector - runs on local and/or remote targets to collect information to be fed into the reporter +3. reporter - generates reports from collector output diff --git a/RELEASE_NOTES b/RELEASE_NOTES new file mode 100644 index 0000000..72aef5b --- /dev/null +++ b/RELEASE_NOTES @@ -0,0 +1,185 @@ +RELEASE NOTES +Intel® System Health Inspector (AKA svr-info) + +Fully Supported Platforms +- Xeon Micro-Architectures: SPR,SNR,CPX,ICX,CLX,SKX,BDX,HSX +- Core Micro-Architectures: TGL,RKL,CFL,KBL,SKL,BDW,HSW +- Operating Systems: Ubuntu 16.04 and newer, CentOS 7 and newer + Note: svr-info may work on other micro-architectures and Linux distributions, but has not been thoroughly tested + +2.2.0 - First Open Source Release +Features Added +- New/changed command line arguments. + - "-all" deprecated, use "-benchmark all" + - "-profile" deprecated, use "-profile all" + - "-analyze " added + - "-cmd_timeout added for limiting the time on individual collection commands + - if in doubt, check the built-in help with the "-h" option +- Recommendations are now called Insights +- Insights expanded to include hi/low CPU utilization, system vulnerabilities, old CPU. +- Insights format now includes recommendation and justification. +- Option added to collect software call stacks for system and Java processes with -analyze system|java|all options. +- Memory profiling option added to Profile option. +- Profiling collection executed before configuration collection for more deterministic timeframe. +- Summary profile table added for profiling data. +- Benchmark Health table renamed to Benchmark Summary and moved to top of Benchmark tab. +- Virtualization feature indicator added to CPU table. +- CPU QDF, if accessible, will be collected by the -megadata data collection option. +- Accelerators table in Configuration now includes full name and description. +- Targets file now supports leading label field and trailing comment, see targets.example file. 
+Bugs Fixed +- Log error if no targets defined in targets file +- Support passwords that contain '$' in targets file +Known Issues +- The storage micro-benchmark may not run on CentOS due to locale settings. +- HTML report may scroll out of view when menu is minimized and window is small. +- CPU cache sizes are reported in aggregate on Ubuntu 20.04 and newer. +- DRAM population may be incorrectly reported on public cloud IaaS VMs. + +2.1.2 +Bugs Fixed: +- Correctly identify on-CPU QAT accelerator devices for SPR MCC SKUs. Previous release correctly identified these devices for SPR XCC SKUs but not MCC SKUs. + +2.1.1 +Bugs Fixed: +- HTML encode (escape) data for insertion into HTML report to limit possibility of script injection security issue + +2.1.0 +Features Added +- Added perf FlameGraph to Profile report (svr-info -profile). +- JSON report format simplified. +- CXL devices listed in Configuration report. +- PCIe slots listed in Configuration report. +- Uncore table added to Configuration report. +- SPR tunable features listed in Configuration report. +- User Guide now included in installation package. +- Improve usability of charts in HTML report. +- Include Brief report content in JSON report. +Bugs Fixed +- CPU prefetcher settings incorrectly reported +- Incorrectly reporting DDR5 DIMMs as "" +Known Issues +- The storage micro-benchmark may not run on CentOS due to locale settings. +- HTML report may scroll out of view when menu is minimized and window is small. +- CPU cache sizes are reported in aggregate on Ubuntu 20.04 and newer. +- DRAM population may be incorrectly reported on public cloud IaaS VMs. + +2.0.3 +Bugs Fixed +- properly identify SPR Architecture when stepping is 8 +- add check for missing 2022 CVEs + +2.0.2 +Bugs Fixed +- JSON report now includes all data included in HTML and XLSX reports...previously included configuration data only. +- Fix Y-Axis labels on charts in HTML report's profile tab. +- Add missing profile data to XLSX report. 
+- Do not include loopback NICs or Ubuntu's Snap "loop#" devices in profile data. +- Year was missing from the data field of mktg. claim table in the XLSX brief report. +- Add missing DLB accelerator count field. + +2.0.1 +Bugs Fixed +- Expand path and file arguments that contain '~' to user's home directory for cases where svr-info is not launched from a shell. +- Fix runtime error that can occur in HTML report generation when some types of PMEM are present in system. + +2.0.0 +Notable Interface Changes (from 1.x to 2.x) + - The program name has changed from svr_info to svr-info. + - The command line arguments can be accessed with a single or double dash, e.g., -cpu or --cpu. + - HTML, JSON, and XLSX report types are generated by default. Override with the -format command line argument. + - HTML and XLSX report data is presented on multiple tabs. + - Combined (all_hosts.*) HTML and XLSX reports will be generated by default when there are two or more hosts specified in the targets file. + - The JSON report data structure has changed significantly. + - The CSV report type has been removed. +Features Added + - System profiling option added (-profile). Collects system performance metrics at configurable frequency (see help). + - A rules engine is used to offer system configuration recommendations based on current configuration. Rules are configurable. + - Megadata (--megadata) feature optionally captures additional information in a series of text files. Information collected is configurable. 
+ - Data from multiple hosts combined into one sheet in combined/multi-host Excel report + - Excel, HTML and JSON reports now rendered from same data structure for consistency across report types + - Display versions of Python, Java, and OpenSSL in report + - Temporary directories now include "svr-info" in their name to more easily identify their origin + - Full support for Sapphire Rapids - performance reference data, PMU registers, frequency measurement, accelerators + Bugs Fixed + - Display full name of CentOS release in report + - Correctly render DIMM population chart for SPR 8 socket system + - Do not attempt to resolve hostname during input parameter validation as it may fail under certain circumstances + - Passwords that include a dollar sign are now supported in targets file if dollar sign proceeded by a backslash + - If the tar utility is not on the target, display a reasonable error message +Known Issues +- The storage micro-benchmark may not run on CentOS due to locale settings. +- HTML report may scroll out of view when menu is minimized when window is small. + +1.4.0 +Features Added +- Various report additions and format improvements +- Detect Intel GPUs and report model name +- Report more ISA extensions, e.g. AMX and VAES +- Detection of QAT on platform +- Display PPIN for all installed CPUs +- Ice Lake reference performance data updated +- Sapphire Rapids (D-Step) reference performance data added +- Command line option --input will now search the given directory recursively for .out files +- Align csv/Excel report structure with HTML report +Bugs Fixed +- Can now measure core frequency on Sapphire Rapids +Known Issues +- The storage micro-benchmark may not run on CentOS 8 due to locale settings. 
+- svr_info will fail to run on a fresh install of CentOS 8 because tar is not installed by default +- passwords that include a dollar sign are not supported for remote target data collection + +1.3.0 +Features Added +- Support for Sapphire Rapids (no reference performance data) +- Update Ice Lake reference performance data +- Cleanup HTML report layout and add new Power section +- Improved default host data collection performance +Bugs Fixed +- Excel formatted report generation would fail when host names were longer than 32 characters +- Removed micro-benchmarks that measure network metrics due to inconsistency +Known Issues +- The storage micro-benchmark may not run on CentOS 8 due to locale settings. + +1.2.4 +Features Added +- Recognize additional CPUs for report generation + +1.2.3 +Bugs Fixed +- Update collector to fix issue with PATH on CentOS 7 + +1.2.2 +Bugs Fixed +- CSV report crash on GCP due to NIC naming scheme in lshw output + +1.2.1 +Bugs Fixed +- Error when retrieving maximum frequency from dmidecode output + +1.2.0 +Features Added +- DIMM rank now included in DIMM table +- Maximum Frequency and All-core Maximum Frequency now included in CPU details +Bugs Fixed +- Prefetcher MSRs now retrieved after installing msr kernel driver +- Base Frequency now retrieved from more reliable source +- Data collection now succeeds when PATH is exported in ~/.bashrc on target machine + +1.1.0 +Features Added +- add support for Ice Lake and Cooper Lake Xeon processors +- add support for Kaby Lake and Coffee Lake Core processors +- reports now include PMU in-use detection +- csv and xlsx reports expanded and reorganized +Bugs Fixed +- crash when output directory is set to current directory using the --output flag +- connection to remote target times out when no user_id is specified +- connection to remote target fails -- retry up to 3 times +- using --ip without --user fails to connect to remote target +- failed to collect requested information if user is 'root' +- 
crash on AWS EC2 g4dn.metal +- crash if ports used for network performance test are in use + +1.0.0 +- First Official Release diff --git a/bin/.README b/bin/.README new file mode 100644 index 0000000..bd0523f --- /dev/null +++ b/bin/.README @@ -0,0 +1 @@ +The build will add files placed in this directory to the collector dependencies tarball...for use when the collector is running on target. \ No newline at end of file diff --git a/builder/Dockerfile b/builder/Dockerfile new file mode 100644 index 0000000..afa9bbe --- /dev/null +++ b/builder/Dockerfile @@ -0,0 +1,46 @@ +FROM ubuntu:16.04 +VOLUME /scripts +VOLUME /workdir +ENV LANG en_US.UTF-8 +ARG DEBIAN_FRONTEND=noninteractive +RUN apt-get update && apt-get install -y apt-utils locales wget curl git netcat-openbsd +RUN locale-gen en_US.UTF-8 && echo "LANG=en_US.UTF-8" > /etc/default/locale + +# Needed for Celine CI/CD +RUN apt-get update && apt-get install -y software-properties-common +RUN add-apt-repository ppa:git-core/ppa -y +RUN apt-get update && apt-get install -y jq zip unzip git + +# Install Go +RUN wget https://go.dev/dl/go1.20.linux-amd64.tar.gz +RUN tar -C /usr/local -xzf go1.20.linux-amd64.tar.gz +RUN rm go1.20.linux-amd64.tar.gz +ENV PATH="$PATH:/usr/local/go/bin" + +# Install build dependencies for third party tools +RUN apt-get update && apt-get install -y build-essential autotools-dev automake \ + gawk zlib1g-dev libtool libaio-dev libaio1 pandoc pkgconf libcap-dev docbook-utils \ + libreadline-dev default-jre default-jdk cmake flex bison + +ENV JAVA_HOME=/usr/lib/jvm/java-1.11.0-openjdk-amd64 + +# need up-to-date zlib (used by fio and stress-ng static builds) to fix security vulnerabilities +RUN git clone https://github.com/madler/zlib.git && cd zlib && ./configure && make install +RUN cp /usr/local/lib/libz.a /usr/lib/x86_64-linux-gnu/libz.a + +# so that build output files have the correct owner +# add non-root user +ARG USERNAME +ARG USERID +ARG LOCALBUILD +RUN if [ ! 
-z "${LOCALBUILD}" ] ; then \ + adduser --disabled-password --uid ${USERID} --gecos '' ${USERNAME} \ + && adduser ${USERNAME} sudo \ + && echo "${USERNAME} ALL=(ALL) NOPASSWD: ALL" >> /etc/sudoers; \ + fi + +# Run container as non-root user from here onwards +USER ${USERNAME} + +# run bash script and process the input command +ENTRYPOINT [ "/bin/bash", "/scripts/entrypoint"] diff --git a/builder/build b/builder/build new file mode 100755 index 0000000..bf26f6c --- /dev/null +++ b/builder/build @@ -0,0 +1,10 @@ +#!/usr/bin/env bash +docker container run \ + --volume "$(pwd)"/builder/scripts:/scripts \ + --volume "$(pwd)":/workdir \ + --volume "/home/$USER/.ssh":/home/$USER/.ssh \ + --user $(id -u):$(id -g) \ + --rm \ + --name build_svr_info4 \ + svr_info_builder:v4 \ + build diff --git a/builder/build_docker_image b/builder/build_docker_image new file mode 100755 index 0000000..8de7381 --- /dev/null +++ b/builder/build_docker_image @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +docker image build \ + --build-arg USERNAME="${USER}" \ + --build-arg USERID="$(id -u ${USER})" \ + --build-arg LOCALBUILD="true" \ + --file builder/Dockerfile \ + --tag svr_info_builder:v4 \ + . 
diff --git a/builder/scripts/entrypoint b/builder/scripts/entrypoint new file mode 100755 index 0000000..bb7c3bb --- /dev/null +++ b/builder/scripts/entrypoint @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +if [ "$1" = "shell" ]; then + echo "Starting Bash Shell" + /bin/bash +elif [ "$1" = "build" ]; then + echo "Starting Build" + cd workdir && make clean && make dist +elif [ "$1" = "test" ]; then + echo "Starting Tests" + cd workdir && make test +fi \ No newline at end of file diff --git a/builder/shell b/builder/shell new file mode 100755 index 0000000..8086034 --- /dev/null +++ b/builder/shell @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +docker container run \ + --volume "$(pwd)"/builder/scripts:/scripts \ + --volume "$(pwd)":/workdir \ + --volume "/home/$USER/.ssh":/home/$USER/.ssh \ + --user $(id -u):$(id -g) \ + --rm \ + -it \ + --name build_svr_info4 \ + svr_info_builder:v4 \ + shell \ No newline at end of file diff --git a/builder/test b/builder/test new file mode 100755 index 0000000..9d659ad --- /dev/null +++ b/builder/test @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +docker container run \ + --volume "$(pwd)"/builder/scripts:/scripts \ + --volume "$(pwd)":/workdir \ + --volume "/home/$USER/.ssh":/home/$USER/.ssh \ + --rm \ + --name build_svr_info4 \ + svr_info_builder:v4 \ + test \ No newline at end of file diff --git a/config/accelerators.yaml b/config/accelerators.yaml new file mode 100644 index 0000000..2c202cf --- /dev/null +++ b/config/accelerators.yaml @@ -0,0 +1,34 @@ +######### +# Intel Accelerators (sorted by devid) +# references: +# https://pci-ids.ucw.cz/read/PC/8086 +######### +- name: DLB + mfgid: 8086 + devid: 2710 + full_name: Intel Dynamic Load Balancer + description: hardware managed system of queues and arbiters connecting producers and consumers + +- name: DSA + mfgid: 8086 + devid: B25 + full_name: Intel Data Streaming Accelerator + description: a high-performance data copy and transformation accelerator + +- name: IAX + mfgid: 8086 + devid: CFE + 
full_name: Intel Analytics Accelerator + description: accelerates compression and decompression for big data applications and in-memory analytic databases + +- name: QAT (on CPU) + mfgid: 8086 + devid: (4940|4942) + full_name: Intel Quick Assist Technology + description: accelerates data encryption and compression for applications from networking to enterprise, cloud to storage, and content delivery to database + +- name: QAT (on chipset) + mfgid: 8086 + devid: 37C8 + full_name: Intel Quick Assist Technology + description: accelerates data encryption and compression for applications from networking to enterprise, cloud to storage, and content delivery to database diff --git a/config/collector_megadata.yaml.tmpl b/config/collector_megadata.yaml.tmpl new file mode 100644 index 0000000..9f75585 --- /dev/null +++ b/config/collector_megadata.yaml.tmpl @@ -0,0 +1,270 @@ +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: MIT +# +# Template file used to generate the configuration (YAML) for the collector +############ +# Collector's YAML format +# Root level keys +# arguments +# commands +# Commands are list items. Command names label the command output. 
+# Required command attributes: +# label - a unique label for each command +# command - will be executed by bash +# Optional command attributes: +# superuser - bool indicates need for elevated privilege (default: false) +# run - bool indicates if command will be run (default: true) +# modprobe - comma separated list of kernel modules required to run command +# parallel - bool indicates if command can be run in parallel with other commands (default: false) +########### + +############ +# global arguments +############ +arguments: + name: + bin_path: + command_timeout: +############ +# commands -- +############ +commands: + - label: date_timestamp + command: date 2>&1 | tee date_timestamp + parallel: true + superuser: false + - label: hdparm + command: hdparm -I /dev/sd* 2>&1 | tee hdparm + parallel: true + superuser: true + - label: dmidecode + command: dmidecode 2>&1 | tee dmidecode + parallel: true + superuser: true + - label: dmidecode_bin + command: dmidecode --dump-bin dmidecode_bin + parallel: true + superuser: true + - label: lspci + command: lspci -vv 2>&1 | tee lspci + parallel: true + superuser: true + - label: lspci_tv + command: lspci -tv 2>&1 | tee lspci_tv + parallel: true + superuser: true + - label: conrep + command: hp-conrep -s -f conrep + parallel: true + superuser: true + - label: uname + command: uname -a 2>&1 | tee uname + parallel: true + superuser: false + - label: numactl + command: numactl --hardware 2>&1 | tee numactl + parallel: true + superuser: true + - label: hostname + command: hostname 2>&1 | tee hostname + parallel: true + superuser: false + - label: ifconfig + command: ifconfig 2>&1 | tee ifconfig + parallel: true + superuser: false + - label: dmesg + command: dmesg 2>&1 | tee dmesg + parallel: true + superuser: true + - label: emon_v + command: emon -v 2>&1 | tee emon_v + parallel: true + superuser: true + - label: emon_M + command: emon -M 2>&1 | tee emon_M + parallel: true + superuser: true + - label: lshal + command: lshal 2>&1 
| tee lshal + parallel: true + superuser: true + - label: lsblk + command: lsblk -a 2>&1 | tee lsblk + parallel: true + superuser: false + - label: lscpu + command: lscpu 2>&1 | tee lscpu + parallel: true + superuser: false + - label: cpupower + command: cpupower frequency-info 2>&1 | tee cpupower + parallel: true + superuser: true + - label: cpupower_idle + command: cpupower idle-info 2>&1 | tee cpupower_idle + parallel: true + superuser: true + - label: lmi + command: lmi 2>&1 | tee lmi + parallel: true + superuser: true + - label: lsusb + command: lsusb -v 2>&1 | tee lsusb + parallel: true + superuser: true + - label: lsmod + command: lsmod 2>&1 | tee lsmod + parallel: true + superuser: true + - label: rpm + command: rpm -qa 2>&1 | tee rpm + parallel: true + superuser: true + - label: nstat + command: nstat -az 2>&1 | tee nstat + parallel: true + superuser: true + - label: netstat + command: netstat -sn 2>&1 | tee netstat + parallel: true + superuser: true + - label: dpkg + command: dpkg -l 2>&1 | tee dpkg + parallel: true + superuser: true + - label: iptables + command: iptables -L 2>&1 | tee iptables + parallel: true + superuser: true + - label: irqbalance + command: pgrep irqbalance 2>&1 | tee irqbalance + parallel: true + superuser: false + - label: getenforce + command: getenforce 2>&1 | tee getenforce + parallel: true + superuser: true + - label: sysctl + command: sysctl -a 2>&1 | tee sysctl + parallel: true + superuser: false + - label: smp_affinity + command: find /proc/irq/ -iname "*smp_affinity*" -print -exec cat {} \; 2>&1 | tee smp_affinity + parallel: true + superuser: false + - label: module_parameters + command: find /sys/module/*/parameters/* -print -exec cat {} \; 2>&1 | tee module_parameters + parallel: true + superuser: false + - label: systool + command: systool 2>&1 | tee systool + parallel: true + superuser: true + - label: ulimit + command: ulimit -a 2>&1 | tee ulimit + parallel: true + superuser: false + - label: boot_md5sum + command: 
md5sum /boot/* 2>&1 | tee boot_md5sum + parallel: true + superuser: true + - label: vmmctrl_v + command: vmmctrl -v &> vmmctrl_v + parallel: true + superuser: true + - label: vmmctrl + command: vmmctrl sysconf get 2>&1 | tee vmmctrl + parallel: true + superuser: true + - label: syscfg + command: bash -c "(/usr/bin/syscfg/syscfg /s bios.ini > /dev/null; mv bios.ini syscfg)" + parallel: true + superuser: true + - label: syscfg_dell + command: /opt/dell/toolkit/bin/syscfg --ox syscfg_dell + parallel: true + superuser: true + - label: mlc + command: mlc 2>&1 | tee mlc + parallel: false + superuser: true + - label: lsof + command: lsof 2>&1 | tee lsof + parallel: true + superuser: true + - label: lshw + command: lshw 2>&1 | tee lshw + parallel: true + superuser: true +# files + - label: release + command: cat /etc/*-release 2>&1 | tee release + parallel: true + - label: cmdline + command: cat /proc/cmdline 2>&1 | tee cmdline + parallel: true + - label: cpuinfo + command: cat /proc/cpuinfo 2>&1 | tee cpuinfo + parallel: true + - label: meminfo + command: cat /proc/meminfo 2>&1 | tee meminfo + parallel: true + - label: partitions + command: cat /proc/partitions 2>&1 | tee partitions + parallel: true + - label: scsi + command: cat /proc/scsi/scsi 2>&1 | tee scsi + parallel: true + - label: version + command: cat /proc/version 2>&1 | tee version + parallel: true + - label: modules + command: cat /proc/modules 2>&1 | tee modules + parallel: true + - label: mounts + command: cat /proc/mounts 2>&1 | tee mounts + parallel: true + - label: interrupts + command: cat /proc/interrupts 2>&1 | tee interrupts + parallel: true + - label: kernel_config + command: |- + uname_r=$(uname -r) + cat /boot/config-$uname_r 2>&1 | tee kernel_config + parallel: true + - label: modules_config + command: |- + uname_r=$(uname -r) + cat /lib/modules/$uname_r/source/.config 2>&1 | tee modules_config + parallel: true + - label: sysctl_conf + command: cat /etc/sysctl.conf 2>&1 | tee sysctl_conf + - 
label: hugepage_enable + command: cat /sys/kernel/mm/transparent_hugepage/enabled 2>&1 | tee hugepage_enable + parallel: true + - label: hugepage_defrag + command: cat /sys/kernel/mm/transparent_hugepage/defrag 2>&1 | tee hugepage_defrag + parallel: true + - label: nic info + command: |- + lshw -businfo -numeric | grep -E "^(pci|usb).*? \S+\s+network\s+\S.*?" \ + | while read -r a NIC c ; do + ethtool $NIC 2>&1 | tee ethtool_$NIC + ethtool -i $NIC 2>&1 | tee ethtool_i_$NIC + ethtool -k $NIC 2>&1 | tee ethtool_k_$NIC + ethtool -c $NIC 2>&1 | tee ethtool_c_$NIC + ethtool -l $NIC 2>&1 | tee ethtool_l_$NIC + done + parallel: true + superuser: true + - label: ipmitool_QDF_12 + command: LC_ALL=C ipmitool raw 0x3e 0x52 0x40 12 0x50 19 0 | tr "\n" " " | cut -d " " -f 17- | xxd -r -p | tee qdf_12 + superuser: true + modprobe: ipmi_devintf, ipmi_si + parallel: true + - label: ipmitool_QDF_13 + command: LC_ALL=C ipmitool raw 0x3e 0x52 0x40 13 0x50 19 0 | tr "\n" " " | cut -d " " -f 17- | xxd -r -p | tee qdf_13 + superuser: true + modprobe: ipmi_devintf, ipmi_si + parallel: true diff --git a/config/collector_reports.yaml.tmpl b/config/collector_reports.yaml.tmpl new file mode 100644 index 0000000..6b6890d --- /dev/null +++ b/config/collector_reports.yaml.tmpl @@ -0,0 +1,432 @@ +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: MIT +# +# Template file used to generate the configuration (YAML) for the collector +############ +# Collector's YAML format +# Root level keys +# arguments +# commands +# Commands are list items to maintain order. 
+# Required command attributes: +# label - a unique label for each command +# command - will be executed by bash +# Optional command attributes: +# superuser - bool indicates need for elevated privilege (default: false) +# run - bool indicates if command will be run (default: true) +# modprobe - comma separated list of kernel modules required to run command +# parallel - bool indicates if command can be run in parallel with other commands (default: false) +########### + +############ +# global arguments +############ +arguments: + name: + bin_path: + command_timeout: +############ +# commands -- +############ +commands: + - label: date -u + command: date -u + parallel: true + - label: date + command: date +%m/%d/%y + parallel: true + - label: lscpu + command: lscpu + parallel: true + - label: cpuid -1 + command: cpuid -1 + modprobe: cpuid + parallel: true + - label: max_cstate + command: |- + cat /sys/module/intel_idle/parameters/max_cstate + parallel: true + - label: cpu_freq_driver + command: |- + cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_driver + parallel: true + - label: cpu_freq_governor + command: |- + cat /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor + parallel: true + - label: base frequency + command: cat /sys/devices/system/cpu/cpu0/cpufreq/base_frequency + parallel: true + - label: maximum frequency + command: cat /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq + parallel: true + - label: lsblk -r -o + command: lsblk -r -o NAME,MODEL,SIZE,MOUNTPOINT,FSTYPE,RQ-SIZE,MIN-IO -e7 -e1 + parallel: true + - label: df -h + command: df -h + parallel: true + - label: uname -a + command: uname -a + parallel: true + - label: ps -eo + command: ps -eo pid,ppid,%cpu,%mem,rss,command --sort=-%cpu,-pid | grep -v "]" | head -n 20 + parallel: false + - label: irqbalance + command: pgrep irqbalance + parallel: true + - label: /proc/cpuinfo + command: cat /proc/cpuinfo + parallel: true + - label: /proc/meminfo + command: cat /proc/meminfo + parallel: true 
+ - label: /proc/cmdline + command: cat /proc/cmdline + parallel: true + - label: transparent huge pages + command: cat /sys/kernel/mm/transparent_hugepage/enabled + parallel: true + - label: automatic numa balancing + command: cat /proc/sys/kernel/numa_balancing + parallel: true + - label: /etc/*-release + command: cat /etc/*-release + parallel: true + - label: gcc version + command: gcc --version + parallel: true + - label: binutils version + command: ld -v + parallel: true + - label: glibc version + command: ldd --version + parallel: true + - label: python version + command: python --version 2>&1 + parallel: true + - label: python3 version + command: python3 --version + parallel: true + - label: java version + command: java -version 2>&1 + parallel: true + - label: openssl version + command: openssl version + parallel: true + - label: dmidecode + command: dmidecode + superuser: true + parallel: true + - label: lshw + command: lshw -businfo -numeric + superuser: true + parallel: true + - label: spectre-meltdown-checker + command: spectre-meltdown-checker.sh --batch text + superuser: true + parallel: true + - label: rdmsr 0x1a4 + command: rdmsr -f 3:0 0x1a4 # MSR_MISC_FEATURE_CONTROL: L2 and DCU Prefetcher enabled/disabled + superuser: true + modprobe: msr + parallel: true + - label: rdmsr 0x1b0 + command: rdmsr -f 3:0 0x1b0 # IA32_ENERGY_PERF_BIAS: Performance Energy Bias Hint (0 is highest perf, 15 is highest energy saving) + superuser: true + modprobe: msr + parallel: true + - label: rdmsr 0x1ad + command: rdmsr 0x1ad # MSR_TURBO_RATIO_LIMIT: Maximum Ratio Limit of Turbo Mode + superuser: true + modprobe: msr + parallel: true + - label: rdmsr 0x1ae + command: rdmsr 0x1ae # MSR_TURBO_GROUP_CORE_CNT: Group Size of Active Cores for Turbo Mode Operation + superuser: true + modprobe: msr + parallel: true + - label: rdmsr 0x4f + command: rdmsr -a 0x4f # MSR_PPIN: Protected Processor Inventory Number + superuser: true + modprobe: msr + parallel: true + - label: rdmsr 
0x610 + command: rdmsr -f 14:0 0x610 # MSR_PKG_POWER_LIMIT: Package limit in bits 14:0 + superuser: true + modprobe: msr + parallel: true + - label: rdmsr 0x6d + command: rdmsr 0x6d # TODO: what is the name/ID of this MSR? SPR Features + superuser: true + modprobe: msr + parallel: true + - label: uncore cha count + command: rdmsr 0x702 + superuser: true + modprobe: msr + parallel: true + - label: uncore client cha count + command: rdmsr 0x396 + superuser: true + modprobe: msr + parallel: true + - label: uncore cha count spr + command: rdmsr 0x2FFE + superuser: true + modprobe: msr + parallel: true + - label: uncore max frequency + command: rdmsr -f 6:0 0x620 # MSR_UNCORE_RATIO_LIMIT: MAX_RATIO in bits 6:0 + superuser: true + modprobe: msr + parallel: true + - label: uncore min frequency + command: rdmsr -f 14:8 0x620 # MSR_UNCORE_RATIO_LIMIT: MIN_RATIO in bits 14:8 + superuser: true + modprobe: msr + parallel: true + - label: ipmitool sel time get + command: LC_ALL=C ipmitool sel time get + superuser: true + modprobe: ipmi_devintf, ipmi_si + parallel: true + - label: ipmitool sel elist + command: LC_ALL=C ipmitool sel elist | tail -n20 | cut -d'|' -f2- + superuser: true + modprobe: ipmi_devintf, ipmi_si + parallel: true + - label: ipmitool chassis status + command: LC_ALL=C ipmitool chassis status + superuser: true + modprobe: ipmi_devintf, ipmi_si + parallel: true + - label: ipmitool sdr list full + command: LC_ALL=C ipmitool sdr list full + superuser: true + modprobe: ipmi_devintf, ipmi_si + parallel: true + - label: dmesg + command: dmesg --kernel --human --nopager | tail -n20 + superuser: true + parallel: true + - label: msrbusy + command: msrbusy 0x30a 0x309 0x30b 0x30c 0xc1 0xc2 0xc3 0xc4 0xc5 0xc6 0xc7 0xc8 + superuser: true + modprobe: msr + parallel: true + - label: lspci -vmm + command: lspci -vmm + parallel: true + - label: hdparm + command: |- + lsblk -d -r -o NAME -e7 -e1 -n \ + | while read -r device ; do + hdparm -i /dev/"$device" + done + superuser: 
true + parallel: true + - label: nic info + command: |- + lshw -businfo -numeric | grep -E "^(pci|usb).*? \S+\s+network\s+\S.*?" \ + | while read -r a ifc c ; do + ethtool "$ifc" + ethtool -i "$ifc" + echo -n "MAC ADDRESS $ifc: " + cat /sys/class/net/"$ifc"/address + echo -n "NUMA NODE $ifc: " + cat /sys/class/net/"$ifc"/device/numa_node + echo -n "CPU AFFINITY $ifc: " + intlist=$( grep -e "$ifc" /proc/interrupts | cut -d':' -f1 | sed -e 's/^[[:space:]]*//' ) + for int in $intlist; do + cpu=$( cat /proc/irq/"$int"/smp_affinity_list ) + printf "%s:%s;" "$int" "$cpu" + done + printf "\n" + done + superuser: true + parallel: true +############ +# Profile command below +# Note that this is one command because we want the profiling options to run in parallel with +# each other but not with parallel commands, i.e., the configuration collection commands. +############ + - label: profile + run: false + superuser: true + command: |- + duration={{.Duration}} + interval={{.Interval}} + samples=$( awk -v d=$duration -v f=$interval 'BEGIN {print int(d / f)}') + if {{.ProfileCPU}}; then + mpstat -u -T -I SCPU -P ALL "$interval" "$samples" > mpstat.out & + fi + if {{.ProfileStorage}}; then + iostat -d -t "$interval" "$samples" | sed '/^loop/d' > iostat.out & + fi + if {{.ProfileMemory}}; then + sar -r "$interval" "$samples" > sar-memory.out & + fi + if {{.ProfileNetwork}}; then + sar -n DEV "$interval" "$samples" > sar-network.out & + fi + wait + if [ -f "iostat.out" ]; then + echo "########## iostat ##########" + cat iostat.out + fi + if [ -f "sar-memory.out" ]; then + echo "########## sar-memory ##########" + cat sar-memory.out + fi + if [ -f "sar-network.out" ]; then + echo "########## sar-network ##########" + cat sar-network.out + fi + if [ -f "mpstat.out" ]; then + echo "########## mpstat ##########" + cat mpstat.out + fi +############ +# Analyze command below +# Note that this is one command because we want the analyzing options to run in parallel with +# each other but 
not with parallel commands, i.e., the configuration collection commands. +############ + - label: analyze + run: false + superuser: true + command: |- + duration={{.Duration}} + frequency={{.Frequency}} + ap_interval=$( awk -v x=$frequency 'BEGIN {print int(1 / x * 1000000000)}' ) + PERF_EVENT_PARANOID=$( cat /proc/sys/kernel/perf_event_paranoid ) + echo -1 >/proc/sys/kernel/perf_event_paranoid + KPTR_RESTRICT=$( cat /proc/sys/kernel/kptr_restrict ) + echo 0 >/proc/sys/kernel/kptr_restrict + declare -a java_pids=() + declare -a java_cmds=() + if {{.AnalyzeJava}}; then + # JAVA app call stack collection (run in background) + for pid in $( pgrep java ) ; do + # verify pid is still running + if [ -d "/proc/$pid" ]; then + java_pids+=($pid) + java_cmds+=("$( tr '\000' ' ' < /proc/$pid/cmdline )") + # profile pid in background + async-profiler/profiler.sh start -i "$ap_interval" -o collapsed "$pid" + fi + done + fi + if {{.AnalyzeSystem}}; then + # system-wide call stack collection - frame pointer mode + perf record -F $frequency -a -g -o perf_fp.data -m 129 -- sleep $duration & + PERF_FP_PID=$! + # system-wide call stack collection - dwarf mode + perf record -F $frequency -a -g -o perf_dwarf.data -m 257 --call-graph dwarf,8192 -- sleep $duration & + PERF_SYS_PID=$! 
+ fi + sleep $duration + if {{.AnalyzeJava}}; then + # stop java profiling for each java pid + for idx in "${!java_pids[@]}"; do + pid="${java_pids[$idx]}" + cmd="${java_cmds[$idx]}" + echo "########## async-profiler $pid $cmd ##########" + async-profiler/profiler.sh stop -o collapsed "$pid" + done + fi + if {{.AnalyzeSystem}}; then + # wait for perf to finish + wait ${PERF_FP_PID} + wait ${PERF_SYS_PID} + perf script -i perf_dwarf.data | stackcollapse-perf.pl > perf_dwarf.folded + perf script -i perf_fp.data | stackcollapse-perf.pl > perf_fp.folded + if [ -f "perf_dwarf.folded" ]; then + echo "########## perf_dwarf ##########" + cat perf_dwarf.folded + fi + if [ -f "perf_fp.folded" ]; then + echo "########## perf_fp ##########" + cat perf_fp.folded + fi + fi + echo "$PERF_EVENT_PARANOID" > /proc/sys/kernel/perf_event_paranoid + echo "$KPTR_RESTRICT" > /proc/sys/kernel/kptr_restrict +############ +# Benchmarking commands below +# Note that these do not run in parallel +############ + - label: Memory MLC Loaded Latency Test + run: false + command: |- + # measure memory loaded latency + mlc --loaded_latency + superuser: true + - label: Memory MLC Bandwidth + run: false + command: |- + # measure memory bandwidth matrix + mlc --bandwidth_matrix + superuser: true + - label: stress-ng cpu methods + run: false + command: |- + # measure cpu performance + methods=$( stress-ng --cpu 1 --cpu-method x 2>&1 | cut -d":" -f2 | cut -c 6- ) + for method in $methods; do + printf "%s " "$method" + stress-ng --cpu 0 -t 1 --cpu-method "$method" --metrics-brief 2>&1 | tail -1 | awk '{print $9}' + done + - label: Measure Turbo Frequencies + run: false + command: |- + # measure turbo frequencies using calcfreq utility + num_vcpus=$( lscpu | grep 'CPU(s):' | head -1 | awk '{print $2}' ) + threads_per_core=$( lscpu | grep 'Thread(s) per core:' | head -1 | awk '{print $4}' ) + num_sockets=$( lscpu | grep 'Socket(s):' | head -1 | awk '{print $2}' ) + num_cores=$(( num_vcpus / threads_per_core 
)) + num_cores_per_socket=$(( num_cores / num_sockets )) + # if the first two 'core ids' from /proc/cpuinfo are the same then use -a1 option + cpu_0_core_id=$( grep 'core id' /proc/cpuinfo | head -1 | awk '{print $4}' ) + cpu_1_core_id=$( grep 'core id' /proc/cpuinfo | head -2 | tail -1 | awk '{print $4}' ) + if [ "$cpu_0_core_id" -eq "$cpu_1_core_id" ]; then + calcfreq_option="-a1" + fi + calcfreq -t"$num_cores_per_socket" "$calcfreq_option" + superuser: true + modprobe: msr + - label: CPU Turbo Test + run: false + command: |- + # measure tdp and all-core turbo frequency + ((turbostat -i 2 2>/dev/null &) ; stress-ng --cpu 1 -t 9s 2>&1 ; stress-ng --cpu 0 -t 5s 2>&1 ; pkill -9 -f turbostat) | awk '$0~"stress" {print $0} $1=="Package" || $1=="CPU" || $1=="Core" || $1=="Node" {if(f!=1) print $0;f=1} $1=="-" {print $0}' + superuser: true + modprobe: msr + - label: CPU Idle + run: false + command: |- + # measure TDP at idle using turbostat + turbostat --show PkgWatt -n 1 | sed -n 2p + superuser: true + modprobe: msr + - label: fio + run: false + command: |- + # measure storage performance + file_dir={{.FioDir}} + file_name="fio_file" + file_size_g=4 + runtime=30 + if [[ -d "$file_dir" && -w "$file_dir" ]]; then + available_space=$(df -hP "$file_dir") + count=$( echo "$available_space" | awk '/[0-9]%/{print substr($4,1,length($4)-1)}' ) + unit=$( echo "$available_space" | awk '/[0-9]%/{print substr($4,length($4),1)}' ) + if [[ "$unit" == "G" && $(awk -v c="$count" -v f=$file_size_g 'BEGIN{print (c>f)?1:0}') == 1 ]] || (echo "TPEZY" | grep -F -q "$unit" ); then + fio --randrepeat=1 --ioengine=sync --direct=1 --gtod_reduce=1 --name=test --filename="$file_dir"/"$file_name" --runtime=$runtime --bs=4k --iodepth=64 --size="$file_size_g"G --readwrite=randrw --rwmixread=75 + rm "$file_dir"/"$file_name" + else + echo "$file_dir does not have enough available space - $file_size_g Gigabytes required" + fi + else + echo "$file_dir does not exist or is not writeable" + fi diff --git 
a/config/cpus.yaml b/config/cpus.yaml new file mode 100644 index 0000000..3dde1a4 --- /dev/null +++ b/config/cpus.yaml @@ -0,0 +1,163 @@ +########## +# CPUS - used to lookup architecture and channels by family, model, and stepping +# The model and stepping fields will be interpreted as regular expressions +# An empty stepping field means 'any' stepping + +########## +# Intel Core CPUs +########## +# Haswell +- architecture: HSW + family: 6 + model: (50|69|70) + stepping: + channels: 2 + +# Broadwell +- architecture: BDW + family: 6 + model: (61|71) + stepping: + channels: 2 + +# Skylake +- architecture: SKL + family: 6 + model: (78|94) + stepping: + channels: 2 + +# Kabylake +- architecture: KBL + family: 6 + model: (142|158) + stepping: 9 + channels: 2 + +# Coffeelake +- architecture: CFL + family: 6 + model: (142|158) + stepping: (10|11|12|13) + channels: 2 + +# Rocket Lake +- architecture: RKL + family: 6 + model: 167 + stepping: + channels: 2 + +# Tiger Lake +- architecture: TGL + family: 6 + model: (140|141) + stepping: + channels: 2 + +# Alder Lake +- architecture: ADL + family: 6 + model: (151|154) + stepping: + channels: 2 + +########## +# Intel Xeon CPUs +########## +# Haswell +- architecture: HSX + family: 6 + model: 63 + stepping: + channels: 4 + +# Broadwell +- architecture: BDX + family: 6 + model: (79|86) + stepping: + channels: 4 + +# Skylake +- architecture: SKX + family: 6 + model: 85 + stepping: (0|1|2|3|4) + channels: 6 + +# Cascadelake +- architecture: CLX + family: 6 + model: 85 + stepping: (5|6|7) + channels: 6 + +# Cooperlake +- architecture: CPX + family: 6 + model: 85 + stepping: 11 + channels: 6 + +# Icelake +- architecture: ICX + family: 6 + model: (106|108) + stepping: + channels: 8 + +# Sapphire Rapids +- architecture: SPR + family: 6 + model: 143 + stepping: (0|1|2|3|4|5|6|7|8) + channels: 8 + +# Emerald Rapids +- architecture: EMR + family: 6 + model: 143 + stepping: (9|10|11|12) + channels: 8 + +########## +# AMD CPUs +########## +# 
Naples +- architecture: Zen + family: 23 + model: 1 + stepping: + channels: 8 + +# Rome +- architecture: Zen2 + family: 23 + model: 49 + stepping: + channels: 8 + +# Milan +- architecture: Zen3 + family: 25 + model: 1 + stepping: + channels: 8 + +########## +# ARM CPUs +######### +# AWS Graviton 2 +- architecture: Neoverse N1 + family: 0 + model: 1 + stepping: r3p1 + channels: 8 + +# AWS Graviton 3 +- architecture: Neoverse V1 + family: 0 + model: 1 + stepping: r1p1 + channels: 8 diff --git a/config/extras/.gitkeep b/config/extras/.gitkeep new file mode 100644 index 0000000..e69de29 diff --git a/config/gpus.yaml b/config/gpus.yaml new file mode 100644 index 0000000..e755c08 --- /dev/null +++ b/config/gpus.yaml @@ -0,0 +1,87 @@ +######### +# Intel Discrete GPUs (sorted by devid) +# references: +# https://pci-ids.ucw.cz/read/PC/8086 +# https://dgpu-docs.intel.com/devices/hardware-table.html +# +# The devid field will be interpreted as a regular expression. +######### +- model: ATS-P + mfgid: 8086 + devid: 201 + +- model: Ponte Vecchio 2T + mfgid: 8086 + devid: BD0 + +- model: Ponte Vecchio 1T + mfgid: 8086 + devid: BD5 + +- model: Intel® Iris® Xe MAX Graphics (DG1) + mfgid: 8086 + devid: 4905 + +- model: Intel® Iris® Xe Pod (DG1) + mfgid: 8086 + devid: 4906 + +- model: SG1 + mfgid: 8086 + devid: 4907 + +- model: Intel® Iris® Xe Graphics (DG1) + mfgid: 8086 + devid: 4908 + +- model: Intel® Iris® Xe MAX 100 (DG1) + mfgid: 8086 + devid: 4909 + +- model: DG2 + mfgid: 8086 + devid: (4F80|4F81|4F82) + +- model: Intel® Arc ™ A770M Graphics + mfgid: 8086 + devid: 5690 + +- model: Intel® Arc ™ A730M Graphics (Alchemist) + mfgid: 8086 + devid: 5691 + +- model: Intel® Arc ™ A550M Graphics (Alchemist) + mfgid: 8086 + devid: 5692 + +- model: Intel® Arc ™ A370M Graphics (Alchemist) + mfgid: 8086 + devid: 5693 + +- model: Intel® Arc ™ A350M Graphics (Alchemist) + mfgid: 8086 + devid: 5694 + +- model: Intel® Arc ™ A770 Graphics + mfgid: 8086 + devid: 56A0 + +- model: Intel® Arc ™ 
A750 Graphics (Alchemist) + mfgid: 8086 + devid: 56A1 + +- model: Intel® Arc ™ A380 Graphics (Alchemist) + mfgid: 8086 + devid: 56A5 + +- model: Intel® Arc ™ A310 Graphics (Alchemist) + mfgid: 8086 + devid: 56A6 + +- model: Intel® Data Center GPU Flex 170 + mfgid: 8086 + devid: 56C0 + +- model: Intel® Data Center GPU Flex 140 + mfgid: 8086 + devid: 56C1 diff --git a/config/insights.grl b/config/insights.grl new file mode 100644 index 0000000..25f87b2 --- /dev/null +++ b/config/insights.grl @@ -0,0 +1,209 @@ +// Copyright (C) 2023 Intel Corporation +// SPDX-License-Identifier: MIT + +// +// hardware insights +// +rule XeonGeneration { + when + Report.CompareMicroarchitecture(Report.GetValue("Configuration", "CPU", "Microarchitecture"), "ICX") == -1 + then + Report.AddInsight( + "CPU is 2 or more generations behind current generation Xeon.", + "Consider upgrading to the latest generation Xeon CPU." + ); + Retract("XeonGeneration"); +} + +rule DIMMSpeed { + when + Report.GetValue("Configuration", "DIMM", "Speed") != "" && Report.GetValue("Configuration", "DIMM", "Speed") != "Unknown" && + ( + (Report.GetValue("Configuration", "CPU", "Microarchitecture") == "CLX" && Report.GetValueAsInt("Configuration", "DIMM", "Speed") < 2933) || + (Report.GetValue("Configuration", "CPU", "Microarchitecture") == "ICX" && Report.GetValueAsInt("Configuration", "DIMM", "Speed") < 3200) || + (Report.GetValue("Configuration", "CPU", "Microarchitecture") == "SPR" && Report.GetValueAsInt("Configuration", "DIMM", "Speed") < 4800) || + (Report.GetValue("Configuration", "CPU", "Microarchitecture") == "EMR" && Report.GetValueAsInt("Configuration", "DIMM", "Speed") < 4800) + ) + then + Report.AddInsight( + "DRAM DIMMs are running at a speed less than the maximum speed supported by system's CPU.", + "Consider upgrading and/or configuring DIMMs for maximum supported speed." 
+ ); + Retract("DIMMSpeed"); +} + +rule ConfiguredDIMMSpeed { + when + Report.GetValue("Configuration", "DIMM", "Configured Speed") != "" && Report.GetValue("Configuration", "DIMM", "Configured Speed") != "Unknown" && + Report.GetValue("Configuration", "DIMM", "Speed") != "" && Report.GetValue("Configuration", "DIMM", "Speed") != "Unknown" && + Report.GetValueAsInt("Configuration", "DIMM", "Configured Speed") < Report.GetValueAsInt("Configuration", "DIMM", "Speed") + then + Report.AddInsight( + "DRAM DIMMs are configured at less than their maximum supported speed.", + "Consider configuring DIMMs for their maximum supported speed." + ); + Retract("ConfiguredDIMMSpeed"); +} + +rule MemoryChannels { + when + Report.GetValue("Configuration", "CPU", "Memory Channels") != "" && + Report.GetValue("Configuration", "CPU", "Sockets") != "" && + Report.GetValue("Configuration", "Memory", "Populated Memory Channels") != "" && + Report.GetValueAsInt("Configuration", "CPU", "Memory Channels") * Report.GetValueAsInt("Configuration", "CPU", "Sockets") != + Report.GetValueAsInt("Configuration", "Memory", "Populated Memory Channels") + then + Report.AddInsight( + "Not all memory channels are populated with DRAM DIMMs.", + "Populate all memory channels for best memory bandwidth." + ); + Retract("MemoryChannels"); +} + +rule Vulnerabilities { + when + Report.GetValuesFromColumn("Configuration", "Vulnerability", 0).Count("Vuln") != 0 + then + Report.AddInsight( + "Detected '" + Report.GetValuesFromColumn("Configuration", "Vulnerability", 0).Count("Vuln") + "' security vulnerabilities.", + "Consider mitigating security vulnerabilities." 
+ ); + Retract("Vulnerabilities"); +} + +// +// configuration insights +// +rule PowerPerfPolicy { + when + Report.GetValue("Configuration", "Power", "Power & Perf Policy") != "" && + Report.GetValue("Configuration", "Power", "Power & Perf Policy") != "Performance" + then + Report.AddInsight( + "Power and Performance policy is set to '" + Report.GetValue("Configuration", "Power", "Power & Perf Policy") + "'.", + "Consider setting the Power and Performance policy to 'Performance'." + ); + Retract("PowerPerfPolicy"); +} + +rule FrequencyDriver { + when + Report.GetValue("Configuration", "Power", "Frequency Driver") != "" && + Report.GetValue("Configuration", "Power", "Frequency Driver") != "intel_pstate" + then + Report.AddInsight( + "Frequency driver is '" + Report.GetValue("Configuration", "Power", "Frequency Driver") + "'.", + "Consider using the 'Intel PState' frequency driver." + ); + Retract("FrequencyDriver"); +} + +rule FrequencyGovernor { + when + Report.GetValue("Configuration", "Power", "Frequency Governor") != "" && + Report.GetValue("Configuration", "Power", "Frequency Governor") != "performance" + then + Report.AddInsight("CPU frequency governors are set to '" + Report.GetValue("Configuration", "Power", "Frequency Governor") + "'.", + "Consider setting the CPU frequency governors to 'performance'." + ); + Retract("FrequencyGovernor"); +} + +rule IRQBalance { + when + Report.GetValue("Configuration", "NIC", "IRQBalance") != "" && + Report.GetValue("Configuration", "NIC", "IRQBalance") == "Enabled" + then + Report.AddInsight( + "System is using the IRQ Balance service to manage IRQ CPU affinity.", + "Consider manually configuring IRQ CPU affinity for network intensive workloads." 
+ ); + Retract("IRQBalance"); +} + +rule TurboBoost { + when + Report.GetValue("Configuration", "CPU", "Intel Turbo Boost") != "" && + Report.GetValue("Configuration", "CPU", "Intel Turbo Boost") != "Enabled" + then + Report.AddInsight( + "Intel Turbo Boost is not enabled.", + "Consider enabling Intel Turbo Boost." + ); + Retract("TurboBoost"); +} + +rule Hyperthreading { + when + Report.GetValue("Configuration", "CPU", "Hyperthreading") != "" && + Report.GetValue("Configuration", "CPU", "Hyperthreading") != "Enabled" + then + Report.AddInsight( + "Hyper-threading is not enabled.", + "Consider enabling hyper-threading." + ); + Retract("Hyperthreading"); +} + +// +// software insights +// +rule JAVAVersion { + when + Report.CompareVersions(Report.GetValue("Configuration", "Software Version", "Java"), "11.0.11") == -1 && + Report.CompareMicroarchitecture(Report.GetValue("Configuration", "CPU", "Microarchitecture"), "ICX") >= 0 + then + Report.AddInsight( + "Detected Java JDK '" + Report.GetValue("Configuration", "Software Version", "Java") +"' and Xeon '" + Report.GetValue("Configuration", "CPU", "Microarchitecture") + "' CPU.", + "Consider upgrading Java to extract the best performance from Xeon CPUs." + ); + Retract("JAVAVersion"); +} + +rule GLIBCVersion { + when + Report.CompareVersions(Report.GetValue("Configuration", "Software Version", "GLIBC"), "2.31") == -1 + then + Report.AddInsight( + "Detected GLIBC '" + Report.GetValue("Configuration", "Software Version", "GLIBC") + "'.", + "Consider upgrading GLIBC to extract the best performance from Xeon CPUs." + ); + Retract("GLIBCVersion"); +} + +rule OpenSSLVersion { + when + Report.CompareVersions(Report.GetValue("Configuration", "Software Version", "OpenSSL"), "1.1.1e") == -1 + then + Report.AddInsight( + "Detected OpenSSL '" + Report.GetValue("Configuration", "Software Version", "OpenSSL") + "'.", + "Consider upgrading OpenSSL to extract the best performance from Xeon CPUs." 
+ ); + Retract("OpenSSLVersion"); +} + +// +// Profile insights +// +rule CPUUtilizationHigh { + when + Report.GetValueAsFloat("Profile", "Summary", "CPU Utilization (%)") > 80 + then + Report.AddInsight( + "Average CPU Utilization is approaching saturation...measured: '" + Report.GetValue("Profile", "Summary", "CPU Utilization (%)") + "%'.", + "Consider changing the distribution of work among available systems and/or applying hardware and software optimizations." + ); + Retract("CPUUtilizationHigh"); +} + +rule CPUUtilizationLow { + when + Report.GetValue("Profile", "Summary", "CPU Utilization (%)") != "" && + Report.GetValueAsFloat("Profile", "Summary", "CPU Utilization (%)") < 40 + then + Report.AddInsight( + "Average CPU Utilization is relatively low...measured: '" + Report.GetValue("Profile", "Summary", "CPU Utilization (%)") + "%'.", + "Consider adding more load to system or selecting a system with less CPU capacity." + ); + Retract("CPUUtilizationLow"); +} diff --git a/config/reference.yaml b/config/reference.yaml new file mode 100644 index 0000000..34f004c --- /dev/null +++ b/config/reference.yaml @@ -0,0 +1,333 @@ +# Note: section names, e.g., Host, Summary, etc. must match table names and +# field names, e.g., CPU Speed, Idle TDP, defined in report.go. +# Note: the Hostref section does not match a table name...this is intentional as +# it is used only to label the data from the other sections. 
+BDX_2: + Hostref: + Name: Reference (Intel 2S Xeon E5-2699A v4) + Summary: + CPU Speed: "403415 ops/s" + Single-core Turbo: "3509 MHz" + All-core Turbo: "2980 MHz" + Turbo TDP: "289.90 Watts" + Memory Peak Bandwidth: "138.1 GB/s" + Memory Minimum Latency: "78 ns" + Memory NUMA Bandwidth: + - - 67528.4 # 0, 0 + - 30178.1 # 0, 1 + - - 30177.9 # 1, 0 + - 66665.4 # 1, 1 + Memory Bandwidth and Latency: + - - 260.83 + - 138143.3 + - - 260.46 + - 138140.4 + - - 258.29 + - 138157.3 + - - 255.44 + - 138157.6 + - - 244.98 + - 137865.8 + - - 237.88 + - 137261.6 + - - 183.86 + - 134105.2 + - - 117.99 + - 104405.7 + - - 103.18 + - 79949.2 + - - 96.78 + - 64741.7 + - - 92.01 + - 46826.3 + - - 87.52 + - 33253.9 + - - 85.84 + - 25834.4 + - - 84.74 + - 19985.6 + - - 82.61 + - 13877.7 + - - 81.18 + - 10159.7 + - - 80.17 + - 7364.5 + - - 79.07 + - 4460.8 + - - 78.45 + - 2460.2 + +SKX_2: + Hostref: + Name: Reference (Intel 2S Xeon Platinum 8180) + Summary: + CPU Speed: "585157 ops/s" + Single-core Turbo: "3758 MHz" + All-core Turbo: "3107 MHz" + Turbo TDP: "429.07 Watts" + Memory Peak Bandwidth: "225.1 GB/s" + Memory Minimum Latency: "71 ns" + Memory NUMA Bandwidth: + - - 112716.9 # 0, 0 + - 34291.2 # 0, 1 + - - 34257.8 # 1, 0 + - 113079.8 # 1, 1 + Memory Bandwidth and Latency: + - - 261.65 + - 225060.9 + - - 261.63 + - 225040.5 + - - 261.54 + - 225073.3 + - - 261.03 + - 225391.4 + - - 259.18 + - 225531.5 + - - 258.36 + - 223341.7 + - - 130.38 + - 191690.7 + - - 104.12 + - 137739.3 + - - 95.75 + - 105822.1 + - - 91.50 + - 86089.3 + - - 88.78 + - 62492.2 + - - 85.23 + - 44499.9 + - - 81.25 + - 34629.0 + - - 80.27 + - 26765.5 + - - 78.69 + - 18546.2 + - - 77.70 + - 13517.3 + - - 77.02 + - 9729.6 + - - 75.32 + - 5800.3 + - - 71.93 + - 3120.3 + +CLX_2: + Hostref: + Name: Reference (Intel 2S Xeon Platinum 8280) + Summary: + CPU Speed: "548644 ops/s" + Single-core Turbo: "3928 MHz" + All-core Turbo: "3296 MHz" + Turbo TDP: "415.93 Watts" + Memory Peak Bandwidth: "223.9 GB/s" + Memory 
Minimum Latency: "72 ns" + Memory NUMA Bandwidth: + - - 111839.9 # 0, 0 + - 34387.8 # 0, 1 + - - 34380.3 # 1, 0 + - 112027.7 # 1, 1 + Memory Bandwidth and Latency: + - - 268.27 + - 223908.5 + - - 268.64 + - 223891.8 + - - 268.53 + - 223828.3 + - - 267.58 + - 224106.5 + - - 265.87 + - 224268.6 + - - 264.55 + - 224256.8 + - - 162.33 + - 215762.2 + - - 104.92 + - 150466.7 + - - 95.38 + - 113869.3 + - - 91.80 + - 91630.5 + - - 88.08 + - 65975.0 + - - 85.60 + - 46608.5 + - - 83.85 + - 36100.3 + - - 81.31 + - 27844.9 + - - 79.09 + - 19233.6 + - - 77.85 + - 13992.8 + - - 76.77 + - 10059.2 + - - 74.92 + - 5984.2 + - - 72.38 + - 3193.8 + +ICX_2: + Hostref: + Name: Reference (Intel 2S Xeon Platinum 8380) + Summary: + CPU Speed: "933644 ops/s" + Single-core Turbo: "3334 MHz" + All-core Turbo: "2950 MHz" + Turbo TDP: "552.00 Watts" + Idle TDP: "175.38 Watts" + Memory Peak Bandwidth: "350.7 GB/s" + Memory Minimum Latency: "70 ns" + Memory NUMA Bandwidth: + - - 175610.3 # 0, 0 + - 55579.7 # 0, 1 + - - 55575.2 # 1, 0 + - 175656.7 # 1, 1 + Memory Bandwidth and Latency: + - - 290.48 + - 350735.9 + - - 290.30 + - 350805.9 + - - 289.14 + - 350576.1 + - - 284.21 + - 350422.2 + - - 259.29 + - 348951.0 + - - 254.07 + - 346280.0 + - - 120.78 + - 247546.4 + - - 102.95 + - 186700.8 + - - 97.06 + - 144059.6 + - - 93.23 + - 116781.5 + - - 90.17 + - 84848.4 + - - 85.63 + - 60229.3 + - - 81.36 + - 46738.1 + - - 80.28 + - 36037.0 + - - 79.26 + - 24840.0 + - - 78.57 + - 18011.3 + - - 76.28 + - 12895.6 + - - 73.85 + - 7572.9 + - - 70.75 + - 3925.1 + +SPR_2: + Hostref: + Name: Reference (Intel 2S Xeon Platinum 8480+) + Summary: + CPU Speed: "1678712 ops/s" + Single-core Turbo: "3776 MHz" + All-core Turbo: "2996 MHz" + Turbo TDP: "698.35 Watts" + Idle TDP: "349.21 Watts" + Memory Peak Bandwidth: "524.6 GB/s" + Memory Minimum Latency: "111.8 ns" + Memory NUMA Bandwidth: + - - 262558.6 # 0, 0 + - 126800.3 # 0, 1 + - - 126781.7 # 1, 0 + - 263070.9 # 1, 1 + Memory Bandwidth and Latency: + - - 267.75 + 
- 524605.9 + - - 268.16 + - 524342.0 + - - 265.58 + - 523682.5 + - - 247.12 + - 521625.5 + - - 237.96 + - 523357.2 + - - 137.62 + - 313650.5 + - - 121.98 + - 152560.4 + - - 118.63 + - 103609.7 + - - 116.84 + - 77599.0 + - - 116.04 + - 62391.6 + - - 115.08 + - 44766.1 + - - 114.76 + - 31628.2 + - - 114.33 + - 24443.1 + - - 113.74 + - 18683.9 + - - 113.45 + - 12907.2 + - - 113.07 + - 9346.8 + - - 113.33 + - 6864.3 + - - 113.09 + - 4093.3 + - - 111.80 + - 2200.9 + +SPR_1: + Hostref: + Name: Reference (Intel 1S Xeon Platinum 8480+) + Summary: + All-core Turbo: "2999 MHz" + CPU Speed: "845743 ops/s" + Idle TDP: "163.79 Watts" + Memory Minimum Latency: "112.2 ns" + Memory Peak Bandwidth: "264.0 GB/s" + Single-core Turbo: "3783 MHz" + Turbo TDP: "334.68 Watts" + Memory NUMA Bandwidth: + - - 263646.3 # 0, 0 + Memory Bandwidth and Latency: + - - 272.24 + - 263696.7 + - - 272.30 + - 263816.2 + - - 269.44 + - 263469.2 + - - 254.81 + - 262244.5 + - - 252.53 + - 263984.0 + - - 135.86 + - 155828.1 + - - 119.94 + - 75906.8 + - - 117.47 + - 51345.4 + - - 116.02 + - 38755.0 + - - 115.51 + - 31200.3 + - - 114.71 + - 22499.1 + - - 114.26 + - 15980.3 + - - 113.88 + - 12418.5 + - - 113.59 + - 9584.2 + - - 113.45 + - 6658.4 + - - 113.32 + - 4937.0 + - - 113.00 + - 3697.8 + - - 113.12 + - 2292.5 + - - 112.24 + - 1383.6 diff --git a/config/report.html.tmpl b/config/report.html.tmpl new file mode 100644 index 0000000..2b14794 --- /dev/null +++ b/config/report.html.tmpl @@ -0,0 +1,467 @@ + + + + + + + System Health Inspector + + + + + + + + + + + + + + + +
+

Intel® System Health Inspector

+
+ +
+
+ + + + + + +
+ + {{$reportGen := .}} + {{range .ConfigurationReport.Tables}} +
+ {{$reportGen.RenderDataTable . $reportGen.ConfigurationReportReferenceData}} +
+ {{end}} +

svr-info version: {{.Version}}

+
+
+
+
+
+

Use the "-benchmark all" option to collect all micro-benchmarking data. See "-help" for finer control.

+ {{$reportGen := .}} + {{range .BenchmarkReport.Tables}} +
+ {{$reportGen.RenderDataTable . $reportGen.BenchmarkReportReferenceData}} +
+ {{end}} +

 

+
+
+
+
+

Use the "-profile all" option to collect all system profiling data. See "-help" for finer control.

+ {{$reportGen := .}} + {{range .ProfileReport.Tables}} +
+ {{$reportGen.RenderDataTable . $reportGen.ProfileReportReferenceData}} +
+ {{end}} +

 

+
+
+
+
+

Use the "-analyze all" option to collect all analysis data. See "-help" for finer control.

+ Upload your Intel® System Health Inspector JSON-formatted reports to Intel® Optimization Hub for deeper analysis. + {{$reportGen := .}} + {{range .AnalyzeReport.Tables}} +
+ {{$reportGen.RenderDataTable . $reportGen.AnalyzeReportReferenceData}} +
+ {{end}} +

 

+
+
+
+
+

Insights are derived from data collected by Intel® System Health Inspector. They are provided for consideration but may not always be relevant.

+ {{$reportGen := .}} + {{range .InsightsReport.Tables}} +
+ {{$reportGen.RenderDataTable . $reportGen.InsightsReportReferenceData}} +
+ {{end}} +

 

+
+
+ + + + + \ No newline at end of file diff --git a/documentation/ServerInfoUserGuide.pdf b/documentation/ServerInfoUserGuide.pdf new file mode 100644 index 0000000..3296539 Binary files /dev/null and b/documentation/ServerInfoUserGuide.pdf differ diff --git a/documentation/source/ServerInfoUserGuide.docx b/documentation/source/ServerInfoUserGuide.docx new file mode 100644 index 0000000..b60ebca Binary files /dev/null and b/documentation/source/ServerInfoUserGuide.docx differ diff --git a/go.work b/go.work new file mode 100644 index 0000000..be88d92 --- /dev/null +++ b/go.work @@ -0,0 +1,7 @@ +go 1.19 + +use ./src/collector +use ./src/msrbusy +use ./src/orchestrator +use ./src/rdmsr +use ./src/reporter \ No newline at end of file diff --git a/go.work.sum b/go.work.sum new file mode 100644 index 0000000..f35dbe0 --- /dev/null +++ b/go.work.sum @@ -0,0 +1,21 @@ +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/xuri/excelize/v2 v2.7.0 h1:Hri/czwyRCW6f6zrCDWXcXKshlq4xAZNpNOpdfnFhEw= +github.com/xuri/excelize/v2 v2.7.0/go.mod h1:ebKlRoS+rGyLMyUx3ErBECXs/HNYqyj+PbkkKRK5vSI= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/image v0.0.0-20220902085622-e7cb96979f69/go.mod h1:doUCurBvlfPMKfmIpRIywoHmhN3VyhnoFDbvIEWF4hY= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod 
h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/src/Makefile b/src/Makefile new file mode 100644 index 0000000..ea34aa5 --- /dev/null +++ b/src/Makefile @@ -0,0 +1,241 @@ +#!make +# +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: MIT +# + +VERSION ?= dev + +default: apps +.PHONY: default all linux-source collector reporter orchestrator rdmsr msrbusy reset clean tools libs async-profiler burn calcfreq cpuid dmidecode ethtool fio flamegraph ipmitool lshw mlc perf spectre-meltdown-checker sshpass stress-ng sysstat turbostat oss-source + +package_tests: + cd pkg/commandfile && go test -v -vet=all . + cd pkg/core && go test -v -vet=all . + cd pkg/cpu && go test -v -vet=all . + # these tests require access to MSRs which we don't have on WSL2 and may not have on build machine -- cd pkg/msr && go test -v -vet=all . + cd pkg/progress && go test -v -vet=all . + cd pkg/target && go test -v -vet=all . + +orchestrator: package_tests + cd orchestrator && go test -v -vet=all . + cd orchestrator && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags '-s -w -X main.gVersion=$(VERSION)' -o orchestrator + +collector: + cd collector && go test -v -vet=all . 
+ cd collector && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags '-s -w -X main.gVersion=$(VERSION)' -o collector + cd collector && CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags '-s -w -X main.gVersion=$(VERSION)' -o collector_arm64 + +reporter: + cd reporter && go test -v -vet=all . + cd reporter && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags '-s -w -X main.gVersion=$(VERSION)' -o reporter + +rdmsr: + cd rdmsr && go test -v -vet=all . + cd rdmsr && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags '-s -w -X main.gVersion=$(VERSION)' -o rdmsr + +msrbusy: + cd msrbusy && go test -v -vet=all . + cd msrbusy && CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags '-s -w -X main.gVersion=$(VERSION)' -o msrbusy + +async-profiler: +ifeq ("$(wildcard async-profiler)","") +ifeq ("$(wildcard async-profiler-2.9-linux-x64.tar.gz)","") + wget https://github.com/jvm-profiling-tools/async-profiler/releases/download/v2.9/async-profiler-2.9-linux-x64.tar.gz +endif + tar -xf async-profiler-2.9-linux-x64.tar.gz && mv async-profiler-2.9-linux-x64 async-profiler +endif + +burn: +ifeq ("$(wildcard burn)","") + git clone https://github.com/Granulate/burn.git + cd burn && git checkout v1.0.1g2 +endif + cd burn && GOWORK=off CGO_ENABLED=0 go build + +calcfreq: + cd calcfreq && VERSION=$(VERSION) make + +cpuid: +ifeq ("$(wildcard cpuid)","") +ifeq ("$(wildcard cpuid-20221003.src.tar.gz)","") + wget http://www.etallen.com/cpuid/cpuid-20221003.src.tar.gz +endif + tar -xf cpuid-20221003.src.tar.gz && mv cpuid-20221003/ cpuid/ +endif + # gcc 4.8 doesn't support -Wimplicit-fallthrough option + cd cpuid && sed -i s/"-Wimplicit-fallthrough"/""/ Makefile + cd cpuid && make + +dmidecode: +ifeq ("$(wildcard dmidecode)","") + git clone https://github.com/mirror/dmidecode.git +else + cd dmidecode && git checkout master && git pull +endif + cd dmidecode && git checkout dmidecode-3-4 + cd dmidecode && make + +ethtool: +ifeq ("$(wildcard ethtool)","") + git clone 
https://git.kernel.org/pub/scm/network/ethtool/ethtool.git +else + cd ethtool && git checkout master && git pull +endif + cd ethtool && git checkout v5.15 +ifeq ("$(wildcard ethtool/Makefile)","") + cd ethtool && ./autogen.sh && ./configure enable_netlink=no +endif + cd ethtool && make + +fio: +ifeq ("$(wildcard fio)","") + git clone https://github.com/axboe/fio.git +else + cd fio && git checkout master && git pull +endif + cd fio && git checkout fio-3.28 +ifeq ("$(wildcard fio/config.log)","") + cd fio && ./configure --build-static --disable-native +endif + cd fio && make + +flamegraph: +ifeq ("$(wildcard flamegraph)","") + git clone https://github.com/brendangregg/FlameGraph.git flamegraph + # small modification to script to include module name in output + cd flamegraph && sed -i '382 a \\t\t\t\t$$func = \$$func."'" "'".\$$mod;\t# add module name' stackcollapse-perf.pl +endif + +ipmitool: +ifeq ("$(wildcard ipmitool)","") + git clone https://github.com/ipmitool/ipmitool.git +endif + cd ipmitool && git checkout IPMITOOL_1_8_19 +ifeq ("$(wildcard ipmitool/Makefile)","") + # hack to get around static build problem - don't check for libreadline + sed -i "s#x\$$xenable_ipmishell#xno#" ipmitool/configure.ac + cd ipmitool && ./bootstrap && LDFLAGS=-static ./configure +endif + cd ipmitool && make + cd ipmitool/src && ../libtool --silent --tag=CC --mode=link gcc -fno-strict-aliasing -Wreturn-type -all-static -o ipmitool.static ipmitool.o ipmishell.o ../lib/libipmitool.la plugins/libintf.la + +lshw: +ifeq ("$(wildcard lshw)","") + git clone https://github.com/lyonel/lshw.git +else + cd lshw && git checkout master && git pull +endif + cd lshw && git checkout B.02.19 + cd lshw/src && make static + +mlc: +ifeq ("$(wildcard mlc)","") + -if [ ${CI} = true ]; then\ + git clone https://${CELINE_ACCESS_TOKEN}@github.com/intel-innersource/applications.benchmarking.cpu-micros.mlc.git mlc; \ + else\ + git clone 
git@github.com:intel-innersource/applications.benchmarking.cpu-micros.mlc.git mlc; \ + fi +else + cd mlc && git checkout master && git pull +endif + -cd mlc && git checkout v3.10 + -cd mlc && make version && STATIC="-static" make + +perf: linux-source + cd linux/tools/perf && make LDFLAGS=-static + +spectre-meltdown-checker: + mkdir -p spectre-meltdown-checker + rm -f spectre-meltdown-checker/spectre-meltdown-checker.sh + # get script from this PR https://github.com/speed47/spectre-meltdown-checker/pull/418 + cd spectre-meltdown-checker && wget https://raw.githubusercontent.com/speed47/spectre-meltdown-checker/3c4f5e4d8e0fc6fc828c5bc164f20372eb2537ac/spectre-meltdown-checker.sh + chmod +x spectre-meltdown-checker/spectre-meltdown-checker.sh + +sshpass: +ifeq ("$(wildcard sshpass)","") + wget https://cytranet.dl.sourceforge.net/project/sshpass/sshpass/1.09/sshpass-1.09.tar.gz + tar -xf sshpass-1.09.tar.gz + mv sshpass-1.09 sshpass + rm sshpass-1.09.tar.gz + cd sshpass && ./configure +endif + cd sshpass && make + +stress-ng: +ifeq ("$(wildcard stress-ng)","") + git clone https://github.com/ColinIanKing/stress-ng.git +else + cd stress-ng && git checkout master && git pull +endif + cd stress-ng && git checkout V0.13.08 + cd stress-ng && STATIC=1 make + +sysstat: +ifeq ("$(wildcard sysstat)","") + git clone https://github.com/sysstat/sysstat.git +else + cd sysstat && git checkout master && git pull +endif +ifeq ("$(wildcard sysstat/Makefile)","") + cd sysstat && ./configure +endif + cd sysstat && make + +linux-source: +ifeq ("$(wildcard linux)","") +ifeq ("$(wildcard linux-5.19.16.tar.xz)","") + wget https://cdn.kernel.org/pub/linux/kernel/v5.x/linux-5.19.16.tar.xz +endif + tar -xf linux-5.19.16.tar.xz && mv linux-5.19.16/ linux/ +endif + +turbostat: linux-source + cd linux/tools/power/x86/turbostat && make + +apps: collector reporter orchestrator +tools: rdmsr msrbusy async-profiler burn calcfreq cpuid dmidecode ethtool fio flamegraph ipmitool lshw mlc perf 
spectre-meltdown-checker sshpass stress-ng sysstat turbostat + +clean: + rm -rf async-profiler burn cpuid dmidecode ethtool fio flamegraph ipmitool linux lshw mlc spectre-meltdown-checker sshpass stress-ng sysstat oss_source.* linux-*.xz cpuid-*.gz glibc-*.bz2 libcrypt*.gz zlib.*.gz + rm -f calcfreq/calcfreq + rm -f collector/collector + rm -f collector/collector_arm64 + rm -f collector/collector.exe + rm -f reporter/reporter + rm -f reporter/reporter_arm64 + rm -f reporter/reporter.exe + rm -f orchestrator/orchestrator + rm -f orchestrator/orchestrator_arm64 + rm -f orchestrator/orchestrator.exe + rm -f rdmsr/rdmsr + rm -f msrbusy/msrbusy + +reset: + cd burn && git clean -fdx && git reset --hard + cd cpuid && make clean + cd dmidecode && git clean -fdx && git reset --hard + cd ethtool && git clean -fdx && git reset --hard + cd fio && git clean -fdx && git reset --hard + cd flamegraph && git clean -fdx && git reset --hard + cd ipmitool && git clean -fdx && git reset --hard + cd lshw && git clean -fdx && git reset --hard + -cd mlc && git clean -fdx && git reset --hard + cd linux/tools/perf && make clean + cd sshpass && make clean + cd stress-ng && git clean -fdx && git reset --hard + cd sysstat && git clean -fdx && git reset --hard + cd linux/tools/power/x86/turbostat && make clean + +# not used in build but required in oss archive file because some of the tools are statically linked +glibc-2.19.tar.bz2: + wget http://ftp.gnu.org/gnu/glibc/glibc-2.19.tar.bz2 +zlib.tar.gz: + wget https://github.com/madler/zlib/archive/refs/heads/master.tar.gz -O zlib.tar.gz +libcrypt.tar.gz: + wget https://github.com/gpg/libgcrypt/archive/refs/heads/master.tar.gz -O libcrypt.tar.gz +libs: glibc-2.19.tar.bz2 zlib.tar.gz libcrypt.tar.gz + +oss-source: reset libs + tar --exclude-vcs -czf oss_source.tgz async-profiler/ dmidecode/ ethtool/ fio/ flamegraph/ ipmitool/ linux/ lshw/ spectre-meltdown-checker/ sshpass/ sysstat/ stress-ng/ glibc-2.19.tar.bz2 zlib.tar.gz libcrypt.tar.gz + md5sum 
oss_source.tgz > oss_source.tgz.md5 diff --git a/src/calcfreq/Makefile b/src/calcfreq/Makefile new file mode 100644 index 0000000..519a275 --- /dev/null +++ b/src/calcfreq/Makefile @@ -0,0 +1,15 @@ +#!make +# +# Copyright (C) 2023 Intel Corporation +# SPDX-License-Identifier: MIT +# + +VERSION ?= dev + +calcfreq : calcfreq.c + gcc -D VERSION=\"$(VERSION)\" calcfreq.c -lpthread -o calcfreq -static + +all : calcfreq + +clean : + rm -f calcfreq \ No newline at end of file diff --git a/src/calcfreq/README.md b/src/calcfreq/README.md new file mode 100644 index 0000000..136c021 --- /dev/null +++ b/src/calcfreq/README.md @@ -0,0 +1,7 @@ +# Calcfreq + +Calcfreq is a micro utility that can individually stress cores and figure out the actual running frequency including P0 and P1n. + +Since the turbo algorithm uses the Turbo core ratios to judge what frequency the cores can run at based on how many such cores are active and at what TDP the CPU is running, it's important to know if the system is adhering to this expected spec. + +Many times, BIOS knobs and thermals can throw this off, resulting in lower frequency and thereby performance. 
diff --git a/src/calcfreq/calcfreq.c b/src/calcfreq/calcfreq.c new file mode 100644 index 0000000..fdce2da --- /dev/null +++ b/src/calcfreq/calcfreq.c @@ -0,0 +1,607 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT +*/ +#define _GNU_SOURCE + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef unsigned long long int UINT64; +typedef long long int __int64; + +#define IA32_APERF_MSR 0xe8 + +struct _p { + + __int64 total_time; + __int64 total_aperf_cycles; + __int64 iterations; + int id; + int id2; + __int64 junk[5]; +} param[128]; + +pthread_t td[1024]; + +__int64 iterations = 100LL*1000000LL; // 100 million iteration as default +UINT64 len=0; +UINT64 num_cpus=0; +UINT64 start_cpu=1; +UINT64 freq; +double NsecClk; +__int64 cycles_expected, actual_cycles, running_freq, actual_aperf_cycles; +int cpu_assignment=0; +int use_aperf=0; + +int BindToCpu(int cpu_num); +int check_whether_ia32_aperf_is_accessible(); +UINT64 get_msr_value(int cpu, unsigned long msrNum); +UINT64 read_msr(char * msrDevPName, unsigned long msrNum, UINT64 *msrValueP); +void NopLoop(__int64 iter); +void Calibrate(UINT64 *ClksPerSec); + +static inline unsigned long rdtsc () +{ + unsigned long var; + unsigned int hi, lo; + asm volatile ("lfence"); + asm volatile ("rdtsc" : "=a" (lo), "=d" (hi)); + var = ((unsigned long long int) hi << 32) | lo; + + return var; +} + +void BusyLoop() +{ + __int64 start, end; + + // run for about 200 milliseconds assuming a speed of 2GHz - need not be precise + // this is done so the core has enough time to ramp up the frequency + + start = rdtsc(); + while (1) { + end = rdtsc(); + if ((end - start) > 400000000LL) { + break; + } + } + +} + +void execNopLoop(void* p) +{ + char *buf; + int id, blk_start,i,j; + __int64 start, end, delta, start_aperf, end_aperf; + struct _p *pp; + + pp = (struct _p *)p; // cpu# + BindToCpu(pp->id); // pin to that cpu + 
pp->total_time = 0; + + // crank up the frequency to make sure it reaches the max limit + BusyLoop(); + + if (use_aperf) { + // just do one loop + start = rdtsc(); + start_aperf = get_msr_value(pp->id, IA32_APERF_MSR); + NopLoop((__int64)iterations); + end_aperf = get_msr_value(pp->id, IA32_APERF_MSR); + end = rdtsc(); + pp->total_time = end - start; + pp->total_aperf_cycles = end_aperf - start_aperf; + + } + else { + // repeat the measurement for 3 times and take the best value + for (i=0; i < 3; i++) { + start = rdtsc(); + NopLoop((__int64)iterations); + end = rdtsc(); + delta = end - start; + if (delta > pp->total_time) pp->total_time = delta; + } + } + +} + +int get_retire_per_cycle(int family, int model, int stepping) { + /* only Intel */ + if (family == 6 /*Intel*/) { + /* Note: this approach doesn't work for SPR, 5 is too low, six is too high, so using APERF. */ + if (model == 106 /*ICX*/ || model == 108 /*ICX*/) { + return 5; + } + if (model == 63 /*HSX*/ || model == 79 /*BDX*/ || model == 86 /*BDX2*/ || model == 85 /*SKX,CLX,CPX*/) { + return 4; + } + } + return -1; +} + +void get_arch(int *family, int *model, int *stepping) { + FILE *fp = fopen("/proc/cpuinfo", "r"); + assert(fp != NULL); + size_t n = 0; + char *line = NULL; + int info_count=0; + while (getline(&line, &n, fp) > 0) { + if (strstr(line, "model\t")) { + sscanf(line,"model : %d",model); + info_count++; + } + if (strstr(line, "cpu family\t")) { + sscanf(line,"cpu family : %d",family); + info_count++; + } + if (strstr(line, "stepping\t")) { + sscanf(line,"stepping : %d",stepping); + info_count++; + } + if(info_count==3) { + //printf("model=%d, family=%d, stepping=%d\n",*model,*family,*stepping); + break; + } + } + free(line); + fclose(fp); +} + +void Version() +{ + fprintf(stderr, "calcfreq %s\n", VERSION); +} + +void Usage(const char* error) +{ + if (error) { + fprintf(stderr, "%s\n\n", error); + } + + fprintf(stderr, " -t : number of physical cores to run this on...optional, defaults to 
all\n"); + fprintf(stderr, " -c : core count at which to start...optional, defaults to 1\n"); + fprintf(stderr, " -x : iterations in millions...optional, defaults to 100M\n"); + fprintf(stderr, " -a : set to 1 if HT threads get consecutive cpu #s...optional, defaults to 0 (alternating cpu #s)\n"); + fprintf(stderr, " -h : display this usage information\n"); + fprintf(stderr, " -v : display calcfreq version\n"); + fprintf(stderr, "\nExamples:\n"); + fprintf(stderr, " ./calcfreq\n"); + fprintf(stderr, " ./calcfreq -t4 -c2 -x10 -a1\n"); + + if (error) { + exit(1); + } + exit(0); + +} + +int main(int argc, char **argv) +{ + for (int i = 1; (i < argc && argv[i][0] == '-'); i++) { + switch (argv[i][1]) { + case 'h': { + /* Help - print usage and exit */ + Usage((char*) 0); + } + + case 'v': { + Version(); + exit(0); + } + + case 't': { + num_cpus = atoi(&argv[i][2]); + break; + } + + case 'a': { + cpu_assignment = atoi(&argv[i][2]); + break; + } + + case 'x': { + iterations = (UINT64)(atoi(&argv[i][2]))*1000000LL; + break; + } + + case 'c': { + start_cpu = (atoi(&argv[i][2])); + if (start_cpu < 1) { + start_cpu = 1; + } + break; + } + + default: { + fprintf(stderr, "Invalid Argument:%s\n", &argv[i][0]); + Usage((char*) 0); + } + } + } + + // Detect architecture to determine cycles_expected + int family, model, stepping; + get_arch(&family, &model, &stepping); + if (model == 143 /*SPR*/) { + use_aperf = check_whether_ia32_aperf_is_accessible(); + if (!use_aperf) { + fprintf(stderr, "Failed to read APERF MSR. 
Cannot proceed on SPR.\n"); + return 1; + } + } + int retiring = get_retire_per_cycle(family, model, stepping); + if (retiring == -1 && !use_aperf) { + fprintf(stderr, "Unsupported architecture: Family %d, Model %d, Stepping %d\n", family, model, stepping); + return 1; + } + // we are executing 200 instructions and in each cycle we can retire 4 or 5 based on architecture + cycles_expected = iterations * 200 / retiring; + + // ramp up the processor frequency and measure the TSC frequency + BusyLoop(); + Calibrate(&freq); // Get the P1 freq + printf("P1 freq = %lld MHz\n",freq/1000000); + + // measure specified cpu counts + for (int idx = start_cpu; idx <= num_cpus; idx++) { + __int64 tt, tt_aperf; + for (int i=0, j=0; i < idx; i++, j+=2) { + if (cpu_assignment == 1) { + // CPU#s are assigned consecutively. i.e cpu0&1 will map to the same physical core + param[i].id = j; + } + else { + param[i].id = i; + } + pthread_create(&td[i], NULL, (void*)execNopLoop, (void*)¶m[i]); + } + tt=0; + tt_aperf=0; + for (int i=0; i < idx; i++) { + pthread_join(td[i], NULL); + tt += param[i].total_time; + tt_aperf += param[i].total_aperf_cycles; + } + actual_cycles = tt / idx; + if (use_aperf) { + actual_aperf_cycles = tt_aperf / idx; + running_freq = (double)actual_aperf_cycles/((double)(actual_cycles)*NsecClk/(double)1000000000LL), + printf("%d-core turbo\t%lld MHz\n", idx, running_freq/1000000); + } + else { + running_freq = (__int64) ((double) cycles_expected * (double) freq / (double) actual_cycles); + printf("%d-core turbo\t%lld MHz\n", idx, running_freq/1000000); + } + + } + return 0; +} + +// pin to a specific cpu +int BindToCpu(int cpu_num) +{ + long status; + cpu_set_t cs; + + CPU_ZERO (&cs); + CPU_SET (cpu_num, &cs); + status = sched_setaffinity (0, sizeof(cs), &cs); + if (status < 0) { + printf ("Error: unable to bind thread to core %d\n", cpu_num); + exit(1); + } + return 1; +} + +// 200 instuctions are executed per iteration and in each cycle we can retire 4 of these 
instructions +void NopLoop(__int64 iter) +{ + asm ( + "xor %%r9, %%r9\n\t" + "mov %0,%%r8\n\t" + "loop1:\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, 
%%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, 
%%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "xor %%rax, %%rax\n\t" + "inc %%r9\n\t" + "cmp %%r8, %%r9\n\t" + "jb loop1\n\t" + + ::"r"(iter)); +} + +static inline unsigned long long int GetTickCount() +{ //Return ns counts + struct timeval tp; + gettimeofday(&tp,NULL); + return tp.tv_sec*1000+tp.tv_usec/1000; +} + +// Get P1 freq +void Calibrate(UINT64 *ClksPerSec) +{ + UINT64 start; + UINT64 end; + UINT64 diff; + + unsigned long long int starttick, endtick; + unsigned long long int tickdiff; + + endtick = GetTickCount(); + + while(endtick == (starttick=GetTickCount()) ); + + asm("mfence"); + start = rdtsc(); + asm("mfence"); + while((endtick=GetTickCount()) < (starttick + 500)); + asm("mfence"); + end = rdtsc(); + asm("mfence"); + // printf("start tick=%llu, end tick=%llu\n",starttick,endtick); + + diff = end - start; + tickdiff = endtick - starttick; + // printf("end=%llu,start=%llu,diff=%llu\n",end,start,diff); + *ClksPerSec = ( diff * (UINT64)1000 )/ (unsigned long long int)(tickdiff); + NsecClk = 
(double)1000000000 / (double)(__int64)*ClksPerSec; +} + +UINT64 read_msr(char * msrDevPName, unsigned long msrNum, UINT64 *msrValueP) +{ + int fh; + off_t fpos; + ssize_t countBy; + + if ((fh= open(msrDevPName,O_RDWR))<0) { + return 0; + } + if ((fpos= lseek(fh,msrNum,SEEK_SET))<0) { + return 0; + } + if ((countBy= read(fh,msrValueP,sizeof(UINT64)))<0) { + close(fh); + return 0; + } + else if (countBy!=sizeof(UINT64)) { + close(fh); + return 0; + } + close(fh); + return 1; +} + +int check_whether_ia32_aperf_is_accessible() +{ + char msrDevPName[1024]; + UINT64 msrValue; + int cpu=0; + + snprintf(msrDevPName,sizeof(msrDevPName)-1,"/dev/cpu/%d/msr",cpu); + if (read_msr(msrDevPName, IA32_APERF_MSR, &msrValue) == 0) { + //fprintf(stderr,"\n** Unable to read IA32_APERF MSR. So, frequency will be estimated\n"); + return 0; + } + return 1; +} + +UINT64 get_msr_value(int cpu, unsigned long msrNum) +{ + char msrDevPName[1024]; + UINT64 msrValue; + + snprintf(msrDevPName,sizeof(msrDevPName)-1,"/dev/cpu/%d/msr",cpu); + if (read_msr(msrDevPName, msrNum, &msrValue) == 0) { + fprintf(stderr, "failed to read msr %lx\n", msrNum); + exit(1); + } + return msrValue; +} + diff --git a/src/collector/README.md b/src/collector/README.md new file mode 100644 index 0000000..ff46fc7 --- /dev/null +++ b/src/collector/README.md @@ -0,0 +1,67 @@ +# Collector +This program reads and executes commands (bash script) defined in YAML format and outputs results in JSON format. + +Note: collector.log and collector.pid file will also be created at runtime in the same directory as the binary. + +## Running +$ [SUDO_PASSWORD=*password*] ./collector < run_these_commands.yaml > command_results.json + +## Input +Collector's input format (YAML) description. + +Root level keys: +- Arguments +- Commands + +Arguments are in key:value format. 
Valid keys are: +- **name**: typically the host name, will be the top-level key in the JSON output +- **bin_path**: path to dependencies of the commands, will be inserted at the beginning of the PATH environment variable +- **command_timeout**: maximum time to wait for any one command + +Commands are list items. + +Required command attributes: +- **label**: unique name for the command +- **command**: bash command or script + +Optional command attributes: +- **superuser**: bool indicates need for elevated privilege (default: false) +- **run**: bool indicates if command will be run (default: true) +- **modprobe**: comma separated list of kernel modules required to run command +- **parallel**: bool indicates if command can be run in parallel with other commands (default: false) + +Example: +```yaml +arguments: + name: my_server + bin_path: /home/me/my_bin_files + command_timeout: 300 +commands: + - label: date + command: date -u + - label: hardware info + command: lshw -businfo + superuser: true + - label: cpuid -1 + command: cpuid -1 + modprobe: cpuid +``` + +## Output +Collector's output format: +```JSON +{ + hostname { + [ + {'label': "command label", + 'command': "full command", + 'superuser': "true" or "false", + 'stdout': "command output", + 'stderr': "", + 'exitstatus': "0" + }, + ... 
+ ] + } +} +``` diff --git a/src/collector/collector_linux.go b/src/collector/collector_linux.go new file mode 100644 index 0000000..c788322 --- /dev/null +++ b/src/collector/collector_linux.go @@ -0,0 +1,104 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package main + +import ( + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "strings" + + "intel.com/svr-info/pkg/target" +) + +func getUserPath() string { + // get user's PATH environment variable, verify that it only contains paths (mitigate risk raised by Checkmarx) + var verifiedPaths []string + pathEnv := os.Getenv("PATH") + pathEnvPaths := strings.Split(pathEnv, ":") + for _, p := range pathEnvPaths { + files, err := filepath.Glob(p) + // Goal is to filter out any non path strings + // Glob will throw an error on pattern mismatch and return no files if no files + if err == nil && len(files) > 0 { + verifiedPaths = append(verifiedPaths, p) + } + } + return strings.Join(verifiedPaths, ":") +} + +func runCommand(label string, command string, superuser bool, superuserPassword string, binPath string, timeout int) (stdout string, stderr string, exitCode int, err error) { + // explicitly set PATH by pre-pending to command + cmdWithPath := command + if binPath != "" { + path := getUserPath() + newPath := fmt.Sprintf("%s%c%s", binPath, os.PathListSeparator, path) + cmdWithPath = fmt.Sprintf("PATH=\"%s\" %s", newPath, cmdWithPath) + } + if superuser { + return runSuperUserCommand(cmdWithPath, superuserPassword, timeout) + } + return runRegularUserCommand(cmdWithPath, timeout) +} + +func runRegularUserCommand(command string, timeout int) (stdout string, stderr string, exitCode int, err error) { + log.Printf("runRegularUserCommand Start: %s", command) + defer log.Printf("runRegularUserCommand Finish: %s", command) + return target.RunLocalCommandWithTimeout(exec.Command("bash", "-c", command), timeout) +} + +func runSuperUserCommand(command string, sudoPassword string, timeout 
int) (stdout string, stderr string, exitCode int, err error) { + // if running as root/super-user, run the command as is + if os.Geteuid() == 0 { + return runRegularUserCommand(command, timeout) + } + log.Printf("runSuperUserCommand Start: %s", command) + defer log.Printf("runSuperUserCommand Finish: %s", command) + // if sudo password was provided, send it to sudo via stdin + if sudoPassword != "" { + cmd := exec.Command("sudo", "-kSE", "bash", "-c", command) + pwdNewline := fmt.Sprintf("%s\n", sudoPassword) + return target.RunLocalCommandWithInputWithTimeout(cmd, pwdNewline, timeout) + } + // if password is not required for sudo, simply prepend 'sudo' + cmd := exec.Command("sudo", "-kn", "ls") + _, _, _, err = target.RunLocalCommandWithTimeout(cmd, timeout) + if err == nil { + cmd := exec.Command("sudo", "-E", "bash", "-c", command) + return target.RunLocalCommandWithTimeout(cmd, timeout) + } + // no other options, fail + err = fmt.Errorf("no option available to run command as super-user using sudo") + return +} + +func installMods(mods string, sudoPassword string) (installedMods []string) { + if len(mods) > 0 { + modList := strings.Split(mods, ",") + for _, mod := range modList { + log.Printf("Attempting to install kernel module: %s", mod) + _, _, _, err := runSuperUserCommand(fmt.Sprintf("modprobe --first-time %s > /dev/null 2>&1", mod), sudoPassword, 0) + if err == nil { + log.Printf("Installed kernel module %s", mod) + installedMods = append(installedMods, mod) + } + } + } + return installedMods +} + +func uninstallMods(modList []string, sudoPassword string) (err error) { + for _, mod := range modList { + log.Printf("Uninstalling kernel module %s", mod) + _, _, _, err = runSuperUserCommand(fmt.Sprintf("modprobe -r %s", mod), sudoPassword, 0) + if err != nil { + log.Printf("Error: %v", err) + return + } + } + return +} diff --git a/src/collector/collector_windows.go b/src/collector/collector_windows.go new file mode 100644 index 0000000..2cffc4d --- /dev/null 
+++ b/src/collector/collector_windows.go @@ -0,0 +1,45 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package main + +import ( + "log" + "os/exec" + "strings" + + "intel.com/svr-info/pkg/target" +) + +func runCommand(label string, command string, superuser bool, sudoPassword string, binPath string, timeout int) (stdout string, stderr string, exitCode int, err error) { + if superuser { + return runSuperUserCommand(command, sudoPassword, timeout) + } + return runRegularUserCommand(command, timeout) +} + +func runRegularUserCommand(command string, timeout int) (stdout string, stderr string, exitCode int, err error) { + log.Printf("runRegularUserCommand Start: %s", command) + defer log.Printf("runRegularUserCommand Finish: %s", command) + cmdList := strings.Split(command, " ") + var cmd *exec.Cmd + if len(cmdList) > 1 { + cmd = exec.Command(cmdList[0], cmdList[1:]...) + } else { + cmd = exec.Command(command) + } + return target.RunLocalCommand(cmd) +} + +func runSuperUserCommand(command string, sudoPassword string, timeout int) (stdout string, stderr string, exitCode int, err error) { + return runRegularUserCommand(command, timeout) +} + +func installMods(mods string, sudoPassword string) (installedMods []string) { + return +} + +func uninstallMods(modList []string, sudoPassword string) (err error) { + return +} diff --git a/src/collector/go.mod b/src/collector/go.mod new file mode 100644 index 0000000..a45987c --- /dev/null +++ b/src/collector/go.mod @@ -0,0 +1,24 @@ +module intel.com/svr-info/collector/v2 + +go 1.19 + +require ( + gopkg.in/yaml.v2 v2.4.0 + intel.com/svr-info/pkg/commandfile v0.0.0-00010101000000-000000000000 + intel.com/svr-info/pkg/core v0.0.0-00010101000000-000000000000 + intel.com/svr-info/pkg/target v0.0.0-00010101000000-000000000000 +) + +require github.com/creasty/defaults v1.6.0 // indirect + +replace intel.com/svr-info/pkg/core => ../pkg/core + +replace intel.com/svr-info/pkg/cpu => ../pkg/cpu + 
+replace intel.com/svr-info/pkg/msr => ../pkg/msr + +replace intel.com/svr-info/pkg/progress => ../pkg/progress + +replace intel.com/svr-info/pkg/target => ../pkg/target + +replace intel.com/svr-info/pkg/commandfile => ../pkg/commandfile diff --git a/src/collector/go.sum b/src/collector/go.sum new file mode 100644 index 0000000..833aefc --- /dev/null +++ b/src/collector/go.sum @@ -0,0 +1,6 @@ +github.com/creasty/defaults v1.6.0 h1:ltuE9cfphUtlrBeomuu8PEyISTXnxqkBIoQfXgv7BSc= +github.com/creasty/defaults v1.6.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/src/collector/main.go b/src/collector/main.go new file mode 100644 index 0000000..78244b5 --- /dev/null +++ b/src/collector/main.go @@ -0,0 +1,273 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package main + +import ( + "encoding/json" + "flag" + "fmt" + "io" + "log" + "os" + "path/filepath" + "strings" + + "gopkg.in/yaml.v2" + "intel.com/svr-info/pkg/commandfile" + "intel.com/svr-info/pkg/core" +) + +// globals +var ( + gVersion string = "dev" // build overrides this, see makefile +) + +type ResultType map[string]string + +type RunConfiguration struct { + cmdFile commandfile.CommandFile + sudo string +} + +func newRunConfiguration(yamlData []byte) (config *RunConfiguration, err error) { + config = new(RunConfiguration) + err = yaml.Unmarshal(yamlData, &(config.cmdFile)) + return +} + +func showUsage() { + fmt.Printf("%s Version: %s\n", filepath.Base(os.Args[0]), gVersion) + fmt.Println("Reads password from environment variable SUDO_PASSWORD, if provided.") + fmt.Println("Usage:") + 
fmt.Println(" [SUDO_PASSWORD=*********] collector < file[.yaml]") + fmt.Println(" [SUDO_PASSWORD=*********] collector [OPTION...] file[.yaml]") + fmt.Println("Options:") + flag.PrintDefaults() + fmt.Println( + `YAML Format: + Root level keys: + arguments + commands + Required arguments: + name - a string that will be the primary key of the output + Optional arguments + bin_path - a string containing the path to executables + Commands are list items. Command names label the command output. + Required command attributes: + command - will be executed by bash: + Optional command attributes: + superuser: bool indicates need for elevated privilege (default: false) + run: bool indicates if command will be run (default: true) + modprobe: comma separated list of kernel modules required to run command + parallel: bool indicates if command can be run in parallel with other commands (default: false)`) + fmt.Println( + `YAML Example: + arguments: + name: json output will be a dictionary with this -name- as the root key + bin_path: . 
+ command_timeout: 300 + commands: + - date -u: + command: date -u + parallel: true + - cpuid -1: + command: cpuid -1 | grep family + modprobe: cpuid + parallel: true`) +} + +func printResult(out io.Writer, result ResultType, firstCommand bool) error { + b, err := json.MarshalIndent(result, "", " ") + if err != nil { + return err + } + if firstCommand { + fmt.Fprintf(out, "%s\n", string(b)) + } else { + fmt.Fprintf(out, ",%s\n", string(b)) + } + return nil +} + +func runConfigCommand(cmd commandfile.Command, args commandfile.Arguments, sudo string, ch chan ResultType) { + result := make(ResultType) + result["label"] = cmd.Label + result["command"] = cmd.Command + if cmd.Superuser { + result["superuser"] = "true" + } else { + result["superuser"] = "false" + } + stdout, stderr, exitCode, err := runCommand(cmd.Label, cmd.Command, cmd.Superuser, sudo, args.Binpath, args.Timeout) + if err != nil { + log.Printf("Error: %v Stderr: %s, Exit Code: %d", err, stderr, exitCode) + } + // if a sudo password was provided, make sure it doesn't show up in any of the command results + if sudo != "" { + result["stdout"] = strings.ReplaceAll(stdout, sudo, "********") + } else { + result["stdout"] = stdout + } + result["stderr"] = stderr + result["exitstatus"] = fmt.Sprint(exitCode) + ch <- result +} + +func runConfigCommands(config *RunConfiguration, out io.Writer) error { + // build a unique list of loadable kernel modules that must be installed + install := make(map[string]int) + for _, cmd := range config.cmdFile.Commands { + if cmd.Run && cmd.Modprobe != "" { + modList := strings.Split(cmd.Modprobe, ",") + for _, mod := range modList { + install[mod] = 1 + } + } + } + // install all loadable kernel modules + mods := make([]string, 0, len(install)) + for mod := range install { + mods = append(mods, mod) + } + modList := strings.Join(mods, ",") + installedMods := installMods(modList, config.sudo) + defer uninstallMods(installedMods, config.sudo) + // separate commands into parallel 
(those that can run in parallel) and serial + var parallelCommands []commandfile.Command + var serialCommands []commandfile.Command + for _, cmd := range config.cmdFile.Commands { + if cmd.Run { + if cmd.Parallel { + parallelCommands = append(parallelCommands, cmd) + } else { + serialCommands = append(serialCommands, cmd) + } + } + } + // run serial commands one at a time + // we run these first because they, typically, are more time sensitive...especially for profiling + ch := make(chan ResultType) + for idx, cmd := range serialCommands { + go runConfigCommand(cmd, config.cmdFile.Args, config.sudo, ch) + result := <-ch + err := printResult(out, result, idx == 0) + if err != nil { + log.Printf("Error: %v", err) + return err + } + } + // run parallel commands in parallel goroutines + for _, cmd := range parallelCommands { + go runConfigCommand(cmd, config.cmdFile.Args, config.sudo, ch) + } + for idx := range parallelCommands { + result := <-ch + err := printResult(out, result, (idx+len(serialCommands)) == 0) + if err != nil { + log.Printf("Error: %v", err) + return err + } + } + return nil +} + +func mainReturnWithCode() int { + var showHelp bool + var showVersion bool + flag.Usage = func() { showUsage() } // override default usage output + flag.BoolVar(&showHelp, "h", false, "Print this usage message.") + flag.BoolVar(&showVersion, "v", false, "Print program version.") + flag.Parse() + if showHelp { + showUsage() + return 0 + } + if showVersion { + fmt.Println(gVersion) + return 0 + } + + // configure logging + logFilename := filepath.Base(os.Args[0]) + ".log" + logFile, err := os.OpenFile(logFilename, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) + if err != nil { + log.Printf("Error: %v", err) + return 1 + } + defer logFile.Close() + log.SetOutput(logFile) + log.SetFlags(log.LstdFlags | log.Lmicroseconds) + + log.Printf("Starting up %s, version %s, PID %d, PPID %d, arguments: %s", + filepath.Base(os.Args[0]), + gVersion, + os.Getpid(), + os.Getppid(), + 
strings.Join(os.Args, " "), + ) + + // write pid to file + pidFilename := filepath.Base(os.Args[0]) + ".pid" + pidFile, err := os.OpenFile(pidFilename, os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + log.Printf("Error: %v", err) + return 1 + } + pidFile.WriteString(fmt.Sprintf("%d", os.Getpid())) + pidFile.Close() + + // read input + var data []byte + if flag.NArg() == 0 { + log.Print("Reading data from stdin") + data, err = io.ReadAll(os.Stdin) + if err != nil { + log.Printf("Error: %v", err) + return 1 + } + } else if flag.NArg() == 1 { + absFilename, err := core.AbsPath(flag.Arg(0)) + if err != nil { + log.Printf("Error: %v", err) + return 1 + } + log.Printf("Reading data from file: %s", absFilename) + data, err = os.ReadFile(absFilename) + if err != nil { + log.Printf("Error: %v", err) + return 1 + } + } else { + log.Print("Incorrect usage.") + showUsage() + return 1 + } + + // parse input data into config + runConfig, err := newRunConfiguration(data) + if err != nil { + log.Printf("Error: %v", err) + return 1 + } + runConfig.sudo = os.Getenv("SUDO_PASSWORD") + + // start json + fmt.Printf("{\n\"%s\": [\n", runConfig.cmdFile.Args.Name) + + // run commands - prints json formatted output for each command + err = runConfigCommands(runConfig, os.Stdout) + if err != nil { + return 1 + } + + // end json + fmt.Printf("]\n}\n") + + log.Print("All done.") + + return 0 +} + +func main() { os.Exit(mainReturnWithCode()) } diff --git a/src/collector/test/test_linux.yaml b/src/collector/test/test_linux.yaml new file mode 100644 index 0000000..c07e6ec --- /dev/null +++ b/src/collector/test/test_linux.yaml @@ -0,0 +1,49 @@ +############ +# Collector's YAML format +# Root level keys +# arguments +# commands +# Commands are list items. 
+# Required command attributes: +# label - unique name for the command +# command - will be executed by bash +# Optional command attributes: +# superuser - bool indicates need for elevated privilege, default is false +# run - bool indicates if command will be run, default is true +# modprobe - kernel module required for command +# parallel - bool indicates if command can be run in parallel with other commands (default: false) +########### + +############ +# global arguments +############ +arguments: + name: test_linux + bin_path: ../../build/amd64 + command_timeout: 30 +############ +# commands -- +############ +commands: + - label: date -u + command: date -u + parallel: true + - label: cpuid -1 + command: cpuid -1 | grep family + modprobe: cpuid + parallel: true + - label: gcc version + command: gcc --version + parallel: true + - label: binutils version + command: ld -v + parallel: true +############ +# Micro-benchmark commands below. +############ + - label: stress-ng --vm + run: true + command: stress-ng --vm 0 -t 1s --metrics-brief 2>&1 + - label: stress-ng --cpu + run: true + command: stress-ng --vm 0 -t 5s --metrics-brief 2>&1 diff --git a/src/collector/test/test_windows.yaml b/src/collector/test/test_windows.yaml new file mode 100644 index 0000000..ede2815 --- /dev/null +++ b/src/collector/test/test_windows.yaml @@ -0,0 +1,28 @@ +############ +# Collector's YAML format +# Root level keys +# arguments +# commands +# Commands are list items. Command names label the command output. 
+# Required command attributes: +# command - will be executed by bash +# Optional command attributes: +# superuser - bool indicates need for elevated privilege, default is false +# run - bool indicates if command will be run, default is true +# modprobe - kernel module required for command +########### + +############ +# global arguments +############ +arguments: + name: windows_example + bin_path: +############ +# commands -- +############ +commands: +- systeminfo: + command: systeminfo +- cpu name: + command: wmic cpu get name diff --git a/src/msrbusy/go.mod b/src/msrbusy/go.mod new file mode 100644 index 0000000..fae545f --- /dev/null +++ b/src/msrbusy/go.mod @@ -0,0 +1,7 @@ +module intel.com/svr-info/msrbusy/v2 + +go 1.19 + +replace intel.com/svr-info/pkg/msr => ../pkg/msr + +require intel.com/svr-info/pkg/msr v0.0.0-00010101000000-000000000000 diff --git a/src/msrbusy/main.go b/src/msrbusy/main.go new file mode 100644 index 0000000..85b987c --- /dev/null +++ b/src/msrbusy/main.go @@ -0,0 +1,165 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package main + +import ( + "flag" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "intel.com/svr-info/pkg/msr" +) + +type CmdLineArgs struct { + help bool + version bool + processor int + iterations int + msrs []uint64 +} + +// globals +var ( + gVersion string = "dev" // build overrides this, see makefile + gCmdLineArgs CmdLineArgs +) + +func showUsage() { + appName := filepath.Base(os.Args[0]) + fmt.Fprintf(os.Stderr, "Usage: %s msr1 msr2 msr3\n", appName) + fmt.Fprintf(os.Stderr, "Example: %s -i 6 -p 0 0x123 0x234\n", appName) + flag.PrintDefaults() +} + +func showVersion() { + fmt.Println(gVersion) +} + +func init() { + // init command line flags + flag.Usage = func() { showUsage() } // override default usage output + flag.BoolVar(&gCmdLineArgs.help, "h", false, "Print this usage message.") + flag.BoolVar(&gCmdLineArgs.version, "v", false, "Print program version.") + 
flag.IntVar(&gCmdLineArgs.iterations, "i", 6, "Number of iterations.") + flag.IntVar(&gCmdLineArgs.processor, "p", 0, "Select processor number.") + flag.Parse() + if gCmdLineArgs.help || gCmdLineArgs.version { + return + } + // positional args + if flag.NArg() < 1 { + flag.Usage() + os.Exit(1) + } else { + for _, arg := range flag.Args() { + if len(arg) > 2 && arg[:2] == "0x" { + arg = arg[2:] + } + msr, err := strconv.ParseInt(arg, 16, 0) + if err != nil { + fmt.Fprintf(os.Stderr, "Could not parse provided msr address: %s, %v\n", arg, err) + showUsage() + os.Exit(1) + } + gCmdLineArgs.msrs = append(gCmdLineArgs.msrs, uint64(msr)) + } + } +} + +type msrVals struct { + msrTxt string + msr uint64 + vals []uint64 +} + +func getMSRVals(msrReader *msr.MSR, msrTxt string, msrNum uint64, processor int, iterations int, ch chan msrVals) { + var m msrVals + m.msrTxt = msrTxt + m.msr = msrNum + for i := 0; i < iterations; i++ { + var vals []uint64 + if processor == 0 { + //read msr off of core 0 on processor 0 + val, err := msrReader.ReadOne(msrNum, 0) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to read MSR %s: %v\n", msrTxt, err) + break + } + vals = append(vals, val) + } else { + // ReadPackages will fail if PPID msr can't be read, so only call it if processor > 0 + var err error + vals, err = msrReader.ReadPackages(msrNum) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to read package MSR %s: %v\n", msrTxt, err) + break + } + } + if len(vals) <= processor { + fmt.Fprintf(os.Stderr, "Invalid processor number specified: %d\n", processor) + break + } + m.vals = append(m.vals, vals[processor]) + } + ch <- m +} + +func mainReturnWithCode() int { + if gCmdLineArgs.help { + showUsage() + return 0 + } + if gCmdLineArgs.version { + showVersion() + return 0 + } + msrReader, err := msr.NewMSR() + if err != nil { + fmt.Fprintf(os.Stderr, "%v", err) + return 1 + } + // run in parallel + ch := make(chan msrVals) + for i, msr := range gCmdLineArgs.msrs { + go 
getMSRVals(msrReader, flag.Arg(i), msr, gCmdLineArgs.processor, gCmdLineArgs.iterations, ch) + } + // wait for completion + msrVals := make(map[string][]uint64) + for range gCmdLineArgs.msrs { + x := <-ch + msrVals[x.msrTxt] = x.vals + } + var results []string + for _, msrTxt := range flag.Args() { + var prevVal uint64 + busy := false + for i, val := range msrVals[msrTxt] { + if i != 0 { + if val != prevVal { + busy = true + break + } + } + prevVal = val + } + if len(msrVals[msrTxt]) > 1 { + if busy { + results = append(results, "Active") + } else { + results = append(results, "Inactive") + } + } else { + results = append(results, "Unknown") + } + } + fmt.Printf("%s\n", strings.Join(flag.Args(), "|")) + fmt.Printf("%s\n", strings.Join(results, "|")) + return 0 +} + +func main() { os.Exit(mainReturnWithCode()) } diff --git a/src/orchestrator/collection.go b/src/orchestrator/collection.go new file mode 100644 index 0000000..c7ca453 --- /dev/null +++ b/src/orchestrator/collection.go @@ -0,0 +1,425 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package main + +import ( + "bytes" + "errors" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "strings" + "text/template" + + "gopkg.in/yaml.v2" + "intel.com/svr-info/pkg/commandfile" + "intel.com/svr-info/pkg/core" + "intel.com/svr-info/pkg/target" +) + +type Collection struct { + target target.Target + cmdLineArgs *CmdLineArgs + assets *core.Assets + outputDir string + outputFilePath string + stdout string + stderr string + ok bool +} + +func newCollection(target target.Target, cmdLineArgs *CmdLineArgs, assets *core.Assets, outputDir string) *Collection { + c := Collection{ + target: target, + cmdLineArgs: cmdLineArgs, + assets: assets, + outputDir: outputDir, + stdout: "", + stderr: "", + ok: false, + } + return &c +} + +// getCommandFilePath returns full local path to target specific command file used by collector +func (c *Collection) getCommandFilePath(extra string) 
(commandFilePath string) { + commandFilePath = filepath.Join(c.outputDir, c.target.GetName()+extra+"_collector.yaml") + return +} + +func customizeCommandYAML(sourceFilePath string, cmdLineArgs *CmdLineArgs, targetBinDir string, targetHostName string) (customized []byte, err error) { + defaultCollectorYAML, err := os.ReadFile(sourceFilePath) + if err != nil { + return + } + var cf commandfile.CommandFile + err = yaml.Unmarshal(defaultCollectorYAML, &cf) + if err != nil { + return + } + cf.Args.Name = targetHostName + cf.Args.Binpath = targetBinDir + cf.Args.Timeout = cmdLineArgs.cmdTimeout + for idx := range cf.Commands { + cmd := &cf.Commands[idx] + if cmd.Label == "Memory MLC Bandwidth" || cmd.Label == "Memory MLC Loaded Latency Test" { + cmd.Run = strings.Contains(cmdLineArgs.benchmark, "memory") || strings.Contains(cmdLineArgs.benchmark, "all") + } else if cmd.Label == "stress-ng cpu methods" { + cmd.Run = strings.Contains(cmdLineArgs.benchmark, "cpu") || strings.Contains(cmdLineArgs.benchmark, "all") + } else if cmd.Label == "Measure Turbo Frequencies" { + cmd.Run = strings.Contains(cmdLineArgs.benchmark, "frequency") || strings.Contains(cmdLineArgs.benchmark, "all") + } else if cmd.Label == "CPU Turbo Test" || cmd.Label == "CPU Idle" { + cmd.Run = strings.Contains(cmdLineArgs.benchmark, "turbo") || strings.Contains(cmdLineArgs.benchmark, "all") + } else if cmd.Label == "fio" { + cmd.Run = strings.Contains(cmdLineArgs.benchmark, "storage") || strings.Contains(cmdLineArgs.benchmark, "all") + if cmd.Run { + fioDir := cmdLineArgs.storageDir + if fioDir == "" { + fioDir = targetBinDir + } + tmpl := template.Must(template.New("fioCommand").Parse(cmd.Command)) + buf := new(bytes.Buffer) + err = tmpl.Execute(buf, struct { + FioDir string + }{ + FioDir: fioDir, + }) + if err != nil { + return + } + cmd.Command = buf.String() + } + } else if cmd.Label == "profile" { + cmd.Run = cmdLineArgs.profile != "" + if cmd.Run { + tmpl := 
template.Must(template.New("profileCommand").Parse(cmd.Command)) + buf := new(bytes.Buffer) + err = tmpl.Execute(buf, struct { + Duration int + Interval int + ProfileCPU bool + ProfileStorage bool + ProfileMemory bool + ProfileNetwork bool + }{ + Duration: cmdLineArgs.profileDuration, + Interval: cmdLineArgs.profileInterval, + ProfileCPU: strings.Contains(cmdLineArgs.profile, "cpu") || strings.Contains(cmdLineArgs.profile, "all"), + ProfileStorage: strings.Contains(cmdLineArgs.profile, "storage") || strings.Contains(cmdLineArgs.profile, "all"), + ProfileMemory: strings.Contains(cmdLineArgs.profile, "memory") || strings.Contains(cmdLineArgs.profile, "all"), + ProfileNetwork: strings.Contains(cmdLineArgs.profile, "network") || strings.Contains(cmdLineArgs.profile, "all"), + }) + if err != nil { + return + } + cmd.Command = buf.String() + } + } else if cmd.Label == "analyze" { + cmd.Run = cmdLineArgs.analyze != "" + if cmd.Run { + tmpl := template.Must(template.New("analyzeCommand").Parse(cmd.Command)) + buf := new(bytes.Buffer) + err = tmpl.Execute(buf, struct { + Duration int + Frequency int + AnalyzeSystem bool + AnalyzeJava bool + }{ + Duration: cmdLineArgs.analyzeDuration, + Frequency: cmdLineArgs.analyzeFrequency, + AnalyzeSystem: strings.Contains(cmdLineArgs.analyze, "system") || strings.Contains(cmdLineArgs.analyze, "all"), + AnalyzeJava: strings.Contains(cmdLineArgs.analyze, "java") || strings.Contains(cmdLineArgs.analyze, "all"), + }) + if err != nil { + return + } + cmd.Command = buf.String() + } + } + } + customized, err = yaml.Marshal(cf) + return +} + +func (c *Collection) customizeCommandFile(sourceFilePath string, targetFilePath string, targetBinDir string) (err error) { + return customizeCmdFile(sourceFilePath, targetFilePath, targetBinDir, c.target.GetName(), c.cmdLineArgs) +} + +func customizeCmdFile(sourceFilePath string, targetFilePath string, targetBinDir string, targetHostName string, cmdLineArgs *CmdLineArgs) (err error) { + customized, err := 
customizeCommandYAML(sourceFilePath, cmdLineArgs, targetBinDir, targetHostName) + if err != nil { + return + } + err = os.WriteFile(targetFilePath, customized, 0644) + return +} + +func (c *Collection) getDepsFile() (depsFile string, err error) { + arch, err := c.target.GetArchitecture() + if err != nil { + return + } + switch arch { + case "x86_64", "amd64": + depsFile = c.assets[core.Amd64Deps] + case "aarch64", "arm64": + depsFile = c.assets[core.Arm64Deps] + } + if depsFile == "" { + err = fmt.Errorf("unsupported architecture: '%s'", arch) + } + return +} + +func (c *Collection) getCollectorFile() (collectorFile string, err error) { + arch, err := c.target.GetArchitecture() + if err != nil { + return + } + switch arch { + case "x86_64", "amd64": + collectorFile = c.assets[core.Amd64Collector] + case "aarch64", "arm64": + collectorFile = c.assets[core.Arm64Collector] + } + if collectorFile == "" { + err = errors.New("unsupported architecture: " + "'" + arch + "'") + } + return +} + +func (c *Collection) extractArchive(filename string, tempDir string) (err error) { + cmd := exec.Command("tar", "-C", tempDir, "-xf", filename) + _, _, _, err = c.target.RunCommand(cmd) + return +} + +func (c *Collection) cleanupTarget(tempDir string) { + if !c.cmdLineArgs.debug { + err := c.target.RemoveDirectory(tempDir) + if err != nil { + log.Printf("failed to remove temporary directory for %s", c.target.GetName()) + } + } +} + +func hasPreReqs(t target.Target, preReqs []string) bool { + for _, pr := range preReqs { + cmd := exec.Command("which", pr) + _, _, _, err := t.RunCommand(cmd) + if err != nil { + return false + } + } + return true +} + +func (c *Collection) getCollectorOutputFile(workingDirectory string) (outputFilePath string, err error) { + outputFilePath = filepath.Join(c.outputDir, c.target.GetName()+".raw.json") + err = c.target.PullFile(filepath.Join(workingDirectory, "collector.stdout"), outputFilePath) + return +} + +func (c *Collection) getExtraFiles() (extras 
[]string, err error) { + extrasDir, err := core.FindAsset("extras") + if err != nil { + return + } + dir, err := os.Open(extrasDir) + if err != nil { + return + } + defer dir.Close() + files, err := dir.ReadDir(-1) + if err != nil { + return + } + for _, f := range files { + if f.Type().IsRegular() { + extras = append(extras, filepath.Join(extrasDir, f.Name())) + } + } + return +} + +func (c *Collection) runCollector(collectorFilePath string, yamlFilePath string, workingDirectory string) (stdout string, stderr string, err error) { + var cmd *exec.Cmd + bashCmd := fmt.Sprintf("%s %s > collector.stdout", collectorFilePath, yamlFilePath) + tType := fmt.Sprintf("%T", c.target) + if tType == "*target.LocalTarget" { + cmd = exec.Command("bash", "-c", bashCmd) + if c.target.GetSudo() != "" { + cmd.Env = append(os.Environ(), "SUDO_PASSWORD="+c.target.GetSudo()) + } + cmd.Dir = workingDirectory + } else { // RemoteTarget + if c.target.GetSudo() != "" { + cmd = exec.Command(fmt.Sprintf("cd %s && SUDO_PASSWORD=%s %s", workingDirectory, c.target.GetSudo(), bashCmd)) + } else { + cmd = exec.Command(fmt.Sprintf("cd %s && %s", workingDirectory, bashCmd)) + } + } + stdout, stderr, _, err = c.target.RunCommand(cmd) + return +} + +func (c *Collection) Collect() (err error) { + log.Printf("collection starting for target: %s", c.target.GetName()) + if !c.target.CanConnect() { + err = fmt.Errorf("failed to connect to target: %s", c.target.GetName()) + log.Print(err) + return + } + if !hasPreReqs(c.target, []string{"tar"}) { + err = fmt.Errorf("tar not found on target: %s", c.target.GetName()) + log.Print(err) + return + } + + if (strings.Contains(c.cmdLineArgs.analyze, "system") || strings.Contains(c.cmdLineArgs.analyze, "all")) && + !hasPreReqs(c.target, []string{"perl"}) { + log.Printf("perl not found on target: %s. 
Analyze system requires perl to process data.", c.target.GetName()) + } + + tempDir, err := c.target.CreateTempDirectory(c.cmdLineArgs.temp) + if err != nil { + log.Printf("failed to create temporary directory for %s", c.target.GetName()) + return + } + defer c.cleanupTarget(tempDir) + commandFilePath := c.getCommandFilePath("_reports") + err = c.customizeCommandFile(c.assets[core.ReportsYaml], commandFilePath, tempDir) + if err != nil { + log.Print("failed to customize command file path") + return + } + var depsFilename string + depsFilename, err = c.getDepsFile() + if err != nil || depsFilename == "" { + log.Printf("failed to get dependencies file for %s", c.target.GetName()) + return + } + err = c.target.PushFile(depsFilename, tempDir) + if err != nil { + log.Printf("failed to push dependencies file to temporary directory for %s", c.target.GetName()) + return + } + err = c.extractArchive(filepath.Join(tempDir, filepath.Base(depsFilename)), tempDir) + if err != nil { + log.Printf("failed to extract dependencies file in temporary directory for %s", c.target.GetName()) + return + } + var collectorFilename string + collectorFilename, err = c.getCollectorFile() + if err != nil { + log.Printf("failed to get collector file for %s", c.target.GetName()) + return + } + err = c.target.PushFile(collectorFilename, filepath.Join(tempDir, "collector")) + if err != nil { + log.Printf("failed to push collector to temporary directory for %s", c.target.GetName()) + return + } + err = c.target.PushFile(commandFilePath, tempDir) + if err != nil { + log.Printf("failed to push command file to temporary directory for %s", c.target.GetName()) + return + } + extraFilenames, err := c.getExtraFiles() + if err != nil { + log.Printf("failed to get extra file names: %v", err) + } + for _, extraFile := range extraFilenames { + err = c.target.PushFile(extraFile, tempDir) + if err != nil { + log.Printf("failed to push extra file %s to target at %s", extraFile, tempDir) + return + } + } + 
c.stdout, c.stderr, err = c.runCollector( + filepath.Join(tempDir, "collector"), + filepath.Join(tempDir, filepath.Base(commandFilePath)), + tempDir, + ) + if err != nil { + log.Printf("failed to run collector for %s", c.target.GetName()) + log.Printf("collector error output for %s: %s", c.target.GetName(), c.stderr) + return + } + c.outputFilePath, err = c.getCollectorOutputFile(tempDir) + if err != nil { + log.Printf("failed to retrieve collector output file for %s", c.target.GetName()) + return + } + if c.cmdLineArgs.megadata { + commandFilePath := c.getCommandFilePath("_megadata") + err = c.customizeCommandFile(c.assets[core.MegadataYaml], commandFilePath, tempDir) + if err != nil { + log.Print("failed to customize command file path") + return + } + err = c.target.PushFile(commandFilePath, tempDir) + if err != nil { + log.Printf("failed to push megadata command file to temporary directory for %s", c.target.GetName()) + return + } + megaDir := c.target.GetName() + "_" + "megadata" + var megaPath string + megaPath, err = c.target.CreateDirectory(tempDir, megaDir) + if err != nil { + log.Printf("failed to create megadata directory on %s", c.target.GetName()) + return + } + // run collector in the megadata directory so output from commands will land in that directory + _, _, err = c.runCollector( + filepath.Join(tempDir, "collector"), + filepath.Join(tempDir, filepath.Base(commandFilePath)), + megaPath, + ) + if err != nil { + log.Printf("failed to run megadata collector for %s", c.target.GetName()) + log.Printf("megadata collector error output for %s: %s", c.target.GetName(), c.stderr) + return + } + megadataTarball := filepath.Join(tempDir, c.target.GetName()+"_megadata.tgz") + cmd := exec.Command("tar", "-C", tempDir, "-czf", megadataTarball, megaDir) + _, _, _, err = c.target.RunCommand(cmd) + if err != nil { + log.Printf("failed to create megadata tarball") + return + } + err = c.target.PullFile(megadataTarball, c.outputDir) + if err != nil { + 
log.Printf("failed to retrieve megadata tarball") + return + } + err = c.target.PullFile(filepath.Join(tempDir, megaDir, "collector.log"), filepath.Join(c.outputDir, c.target.GetName()+"_megadata_collector.log")) + if err != nil { + log.Printf("failed to retrieve megadata collector.log") + return + } + cmd = exec.Command("tar", "-C", c.outputDir, "-xf", filepath.Join(c.outputDir, c.target.GetName()+"_megadata.tgz")) + _, _, _, err = target.RunLocalCommand(cmd) + if err != nil { + log.Printf("failed to extract megadata tarball") + return + } + cmd = exec.Command("rm", filepath.Join(c.outputDir, c.target.GetName()+"_megadata.tgz")) + _, _, _, err = target.RunLocalCommand(cmd) + if err != nil { + log.Printf("failed to remove megadata tarball") + return + } + } + err = c.target.PullFile(filepath.Join(tempDir, "collector.log"), filepath.Join(c.outputDir, c.target.GetName()+"_collector.log")) + if err != nil { + log.Printf("failed to retrieve collector.log") + return + } + c.ok = true + return +} diff --git a/src/orchestrator/command_line_args.go b/src/orchestrator/command_line_args.go new file mode 100644 index 0000000..0b56f3d --- /dev/null +++ b/src/orchestrator/command_line_args.go @@ -0,0 +1,330 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package main + +import ( + "flag" + "fmt" + "io/fs" + "os" + "path/filepath" + "strings" + + "intel.com/svr-info/pkg/core" +) + +type CmdLineArgs struct { + help bool + version bool + format string + benchmark string + storageDir string + profile string + profileDuration int + profileInterval int + analyze string + analyzeDuration int + analyzeFrequency int + all bool + ipAddress string + port int + user string + key string + targets string + megadata bool + output string + temp string + dumpConfig bool + cmdTimeout int + debug bool +} + +var benchmarkTypes = []string{"cpu", "frequency", "memory", "storage", "turbo", "all"} +var profileTypes = []string{"cpu", "network", "storage", 
"memory", "all"} +var analyzeTypes = []string{"system", "java", "all"} + +func showUsage() { + fmt.Fprintf(os.Stderr, "usage: %s [-h] [-v]\n", filepath.Base(os.Args[0])) + fmt.Fprintf(os.Stderr, " [-format SELECT]\n") + fmt.Fprintf(os.Stderr, " [-benchmark SELECT] [-storage_dir DIR]\n") + fmt.Fprintf(os.Stderr, " [-profile SELECT] [-profile_duration SECONDS] [-profile_interval N]\n") + fmt.Fprintf(os.Stderr, " [-analyze SELECT] [-analyze_duration SECONDS] [-analyze_frequency N]\n") + fmt.Fprintf(os.Stderr, " [-megadata]\n") + fmt.Fprintf(os.Stderr, " [-ip IP] [-port PORT] [-user USER] [-key KEY] [-targets TARGETS]\n") + fmt.Fprintf(os.Stderr, " [-output OUTPUT] [-temp TEMP] [-dumpconfig] [-cmd_timeout] [-debug]\n") + + longHelp := ` +Intel System Health Inspector. Creates configuration, benchmark, profile, and insights reports for one or more systems. + +general arguments: + -h show this help message and exit + -v show version number and exit + +report arguments: + -format SELECT comma separated list of desired output format(s): %[2]s, + e.g., -format json (default: html,xlsx,json) + +benchmark arguments: + -benchmark SELECT comma separated list of benchmarks: %[3]s, + e.g., -benchmark cpu,turbo (default: None) + -storage_dir DIR Path to directory on target (default: -temp DIR) + +profile arguments: + -profile SELECT comma separated list of profile options: %[4]s, + e.g., -profile cpu,memory (default: None) + -profile_duration N time, in seconds, to collect profiling data (default: 60) + -profile_interval N the amount of time in seconds between each sample (default: 2) + +analyze arguments: + -analyze SELECT comma separated list of profile options: %[5]s, + e.g., -analyze system,java (default: None) + -analyze_duration N time, in seconds, to collect analysis data (default: 60) + -analyze_frequency N the number of samples taken per second (default: 11) + +additional data collection arguments: + -megadata collect additional data specified in megadata template file 
(default: False) + +remote target arguments: + -ip IP ip address or hostname (default: Nil) + -port PORT ssh port (default: 22) + -user USER user on remote target (default: Nil) + -key KEY local path to ssh private key file (default: Nil) + -targets TARGETS path to targets file, one line per target. + Line format: + 'ip_address:ssh_port:user_name:private_key_path:ssh_password:sudo_password' + - Provide private_key_path or ssh_password. + If provided, overrides single target arguments. (default: Nil) + +advanced arguments: + -output DIR path to output directory. Directory must exist. (default: $PWD/orchestrator_timestamp) + -temp DIR path to temporary directory on target. Directory must exist. (default: system default) + -dumpconfig dump the collector configuration file and exit (default: False) + -cmd_timeout the maximum number of seconds to wait for each data collection command (default: 300) + -debug additional logging and retain temporary files + +Examples: +$ ./%[1]s + Collect configuration data on local machine. +$ ./%[1]s -benchmark all + Collect configuration and benchmark data on local machine. +$ ./%[1]s -profile all -targets ./targets + Collect configuration and profile data on remote machines defined in targets file. +$ ./%[1]s -format all + Collect configuration data on local machine. Generate all report formats. +$ ./%[1]s -ip 198.51.100.255 -port 22 -user user83767 -key ~/.ssh/id_rsa + Collect configuration data on one remote target. 
+` + fmt.Fprintf(os.Stderr, longHelp, filepath.Base(os.Args[0]), strings.Join(core.ReportTypes, ","), strings.Join(benchmarkTypes, ","), strings.Join(profileTypes, ","), strings.Join(analyzeTypes, ",")) +} + +func showVersion() { + fmt.Println(gVersion) +} + +func newCmdLineArgs() *CmdLineArgs { + cmdLineArgs := CmdLineArgs{} + return &cmdLineArgs +} + +func (cmdLineArgs *CmdLineArgs) parse(name string, arguments []string) (err error) { + flagSet := flag.NewFlagSet(name, flag.ContinueOnError) + flagSet.Usage = func() { showUsage() } // override default usage output + flagSet.BoolVar(&cmdLineArgs.help, "h", false, "") + flagSet.BoolVar(&cmdLineArgs.version, "v", false, "") + flagSet.StringVar(&cmdLineArgs.output, "output", "", "") + flagSet.StringVar(&cmdLineArgs.temp, "temp", "", "") + flagSet.BoolVar(&cmdLineArgs.dumpConfig, "dumpconfig", false, "") + flagSet.IntVar(&cmdLineArgs.cmdTimeout, "cmd_timeout", 300, "") + flagSet.StringVar(&cmdLineArgs.format, "format", "html,xlsx,json", "") + flagSet.StringVar(&cmdLineArgs.benchmark, "benchmark", "", "") + flagSet.StringVar(&cmdLineArgs.profile, "profile", "", "") + flagSet.StringVar(&cmdLineArgs.analyze, "analyze", "", "") + flagSet.StringVar(&cmdLineArgs.storageDir, "storage_dir", "", "") + flagSet.BoolVar(&cmdLineArgs.all, "all", false, "") + flagSet.StringVar(&cmdLineArgs.ipAddress, "ip", "", "") + flagSet.IntVar(&cmdLineArgs.port, "port", 22, "") + flagSet.StringVar(&cmdLineArgs.user, "user", "", "") + flagSet.StringVar(&cmdLineArgs.key, "key", "", "") + flagSet.StringVar(&cmdLineArgs.targets, "targets", "", "") + flagSet.BoolVar(&cmdLineArgs.debug, "debug", false, "") + flagSet.BoolVar(&cmdLineArgs.megadata, "megadata", false, "") + flagSet.IntVar(&cmdLineArgs.profileDuration, "profile_duration", 60, "") + flagSet.IntVar(&cmdLineArgs.analyzeDuration, "analyze_duration", 60, "") + flagSet.IntVar(&cmdLineArgs.profileInterval, "profile_interval", 2, "") + flagSet.IntVar(&cmdLineArgs.analyzeFrequency, 
"analyze_frequency", 11, "") + err = flagSet.Parse(arguments) + if err != nil { + return + } + if flagSet.NArg() != 0 { + err = fmt.Errorf("unrecognized argument(s): %s", strings.Join(flagSet.Args(), " ")) + return + } + return +} + +func argDirExists(dir string, label string) (err error) { + if dir != "" { + var path string + path, err = core.AbsPath(dir) + if err != nil { + return + } + var fileInfo fs.FileInfo + fileInfo, err = os.Stat(path) + if err != nil { + err = fmt.Errorf("-%s %s : directory does not exist", label, path) + return + } + if !fileInfo.IsDir() { + err = fmt.Errorf("-%s %s : must be a directory", label, path) + return + } + } + return +} + +func isValidType(validTypes []string, input string) (valid bool) { + inputTypes := strings.Split(input, ",") + for _, inputType := range inputTypes { + for _, validType := range validTypes { + if inputType == validType { + return true + } + } + } + return false +} + +func (cmdLineArgs *CmdLineArgs) validate() (err error) { + // -all (deprecated) TODO: remove the -all option in a future release + if cmdLineArgs.all { + fmt.Fprintf(os.Stderr, "\nWARNING: the -all flag is deprecated and will be removed soon. 
Use '-benchmark all' to run all benchmarks.\n\n") + cmdLineArgs.benchmark = "all" + } + + // -output dir + if cmdLineArgs.output != "" { + // if dir is specified, make sure it is a dir and that it exists + err = argDirExists(cmdLineArgs.output, "output") + if err != nil { + return + } + } + // -format + if cmdLineArgs.format != "" { + if !isValidType(core.ReportTypes, cmdLineArgs.format) { + err = fmt.Errorf("-format %s : invalid format type: %s", cmdLineArgs.format, cmdLineArgs.format) + return + } + } + // -benchmark + if cmdLineArgs.benchmark != "" { + if !isValidType(benchmarkTypes, cmdLineArgs.benchmark) { + err = fmt.Errorf("-benchmark %s : invalid benchmark type: %s", cmdLineArgs.benchmark, cmdLineArgs.benchmark) + return + } + } + // -profile + if cmdLineArgs.profile != "" { + if !isValidType(profileTypes, cmdLineArgs.profile) { + err = fmt.Errorf("-profile %s : invalid profile type: %s", cmdLineArgs.profile, cmdLineArgs.profile) + return + } + if cmdLineArgs.profileDuration <= 0 { + err = fmt.Errorf("-profile_duration %d : invalid value", cmdLineArgs.profileDuration) + return + } + if cmdLineArgs.profileInterval <= 0 { + err = fmt.Errorf("-profile_interval %d : invalid value", cmdLineArgs.profileInterval) + return + } + numSamples := cmdLineArgs.profileDuration / cmdLineArgs.profileInterval + maxSamples := (5 * 60) / 2 // 5 minutes at default interval + if numSamples > maxSamples { + err = fmt.Errorf("-profile_duration %d -profile_interval %d may result in too much data. 
Please reduce total samples (duration/interval) to %d or less", cmdLineArgs.profileDuration, cmdLineArgs.profileInterval, maxSamples) + return + } + } + // -analyze + if cmdLineArgs.analyze != "" { + if !isValidType(analyzeTypes, cmdLineArgs.analyze) { + err = fmt.Errorf("-analyze %s : invalid profile type: %s", cmdLineArgs.analyze, cmdLineArgs.analyze) + return + } + if cmdLineArgs.analyzeDuration <= 0 { + err = fmt.Errorf("-analyze_duration %d : invalid value", cmdLineArgs.analyzeDuration) + return + } + if cmdLineArgs.analyzeFrequency <= 0 { + err = fmt.Errorf("-analyze_frequency %d : invalid value", cmdLineArgs.analyzeFrequency) + return + } + numSamples := cmdLineArgs.analyzeDuration * cmdLineArgs.analyzeFrequency + maxSamples := (5 * 60) * 11 // 5 minutes at default frequency + if numSamples > maxSamples { + err = fmt.Errorf("-analyze_duration %d -analyze_frequency %d may result in too much data. Please reduce total samples (duration*frequency) to %d or less", cmdLineArgs.analyzeDuration, cmdLineArgs.analyzeFrequency, maxSamples) + return + } + } + // -ip + if cmdLineArgs.ipAddress != "" { + // make sure it isn't too long (max FQDN length is 255) + if len(cmdLineArgs.ipAddress) > 255 { + err = fmt.Errorf("-ip %s : longer than allowed max (255)", cmdLineArgs.ipAddress) + return + } + } + if cmdLineArgs.ipAddress != "" && cmdLineArgs.user == "" { + // if ip is provided, user is required + err = fmt.Errorf("-user : user required when -ip %s provided", cmdLineArgs.ipAddress) + return + } + if cmdLineArgs.ipAddress == "" && cmdLineArgs.user != "" { + // if user is provided, ip is required + err = fmt.Errorf("-ip : ip required when -user %s provided", cmdLineArgs.user) + return + } + // -port + if cmdLineArgs.port <= 0 { + err = fmt.Errorf("-port %d : port must be a positive integer", cmdLineArgs.port) + return + } + if cmdLineArgs.port != 22 && (cmdLineArgs.ipAddress == "" || cmdLineArgs.user == "") { + err = fmt.Errorf("-port %d : user and ip required when port 
provided", cmdLineArgs.port) + return + } + // -key + if cmdLineArgs.key != "" { + var path string + path, err = core.AbsPath(cmdLineArgs.key) + if err != nil { + return + } + err = core.FileExists(path) + if err != nil { + err = fmt.Errorf("-key %s : %s", path, err.Error()) + return + } + if cmdLineArgs.ipAddress == "" || cmdLineArgs.user == "" { + err = fmt.Errorf("-key %s : user and ip required when key provided", cmdLineArgs.key) + return + } + } + // -targets + if cmdLineArgs.targets != "" { + var path string + path, err = core.AbsPath(cmdLineArgs.targets) + if err != nil { + return + } + err = core.FileExists(path) + if err != nil { + err = fmt.Errorf("-targets %s : %s", path, err.Error()) + return + } + } + return +} diff --git a/src/orchestrator/command_line_args_test.go b/src/orchestrator/command_line_args_test.go new file mode 100644 index 0000000..c6f3f6c --- /dev/null +++ b/src/orchestrator/command_line_args_test.go @@ -0,0 +1,155 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package main + +import ( + "fmt" + "testing" +) + +// helper +func isValid(arguments []string) bool { + args := newCmdLineArgs() + err := args.parse("tester", arguments) + if err != nil { + fmt.Printf("%v\n", err) + return false + } + err = args.validate() + if err != nil { + fmt.Printf("%v\n", err) + } + return err == nil +} + +func TestParseInvalidArgs(t *testing.T) { + if isValid([]string{"-foo"}) { + t.Fail() + } + if isValid([]string{"foo"}) { + t.Fail() + } +} + +func TestParseNoArgs(t *testing.T) { + if !isValid([]string{}) { + t.Fail() + } +} + +func TestTooMuchAnalysis(t *testing.T) { + if isValid([]string{"-analyze", "all", "-analyze_duration", "301"}) { + t.Fail() + } + if isValid([]string{"-analyze", "all", "-analyze_frequency", "66"}) { + t.Fail() + } +} + +func TestTooMuchProfiling(t *testing.T) { + if isValid([]string{"-profile", "all", "-profile_duration", "302"}) { + t.Fail() + } + if isValid([]string{"-profile", "all", 
"-profile_interval", "1", "-profile_duration", "200"}) { + t.Fail() + } +} + +func TestFormat(t *testing.T) { + if !isValid([]string{"-format", "all"}) { + t.Fail() + } + if isValid([]string{"-format", "foo"}) { + t.Fail() + } + if !isValid([]string{"-format", "txt,xlsx,html,json"}) { + t.Fail() + } +} + +func TestAllExceptTargetsFile(t *testing.T) { + args := []string{ + "-format", "all", + "-benchmark", "all", + "-storage_dir", "/tmp", // any dir + "-analyze", "all", + "-analyze_duration", "20", + "-analyze_frequency", "22", + "-profile", "all", + "-profile_duration", "30", + "-profile_interval", "3", + "-temp", "/tmp", // any dir + "-output", "/tmp", // any dir + "-megadata", + "-debug", + "-cmd_timeout", "150", + "-dumpconfig", + "-ip", "192.168.1.1", + "-port", "20", + "-user", "foo", + "-key", "go.mod", // any file + } + if !isValid(args) { + t.Fail() + } +} + +func TestHelp(t *testing.T) { + if !isValid([]string{"-h"}) { + t.Fail() + } +} + +func TestVersion(t *testing.T) { + if !isValid([]string{"-v"}) { + t.Fail() + } +} + +func TestIPAddressTooLong(t *testing.T) { + b := make([]byte, 256) + for i := range b { + b[i] = 'x' + } + if isValid([]string{"-ip", string(b), "-user", "foo"}) { + t.Fail() + } +} + +func TestIPNoUser(t *testing.T) { + if isValid(([]string{"-ip", "192.168.1.1"})) { + t.Fail() + } +} + +func TestUserNoIp(t *testing.T) { + if isValid(([]string{"-user", "foo"})) { + t.Fail() + } +} + +func TestTargetsFile(t *testing.T) { + if !isValid(([]string{"-targets", "targets.example"})) { + t.Fail() + } +} + +func TestKeyFile(t *testing.T) { + if !isValid(([]string{"-key", "targets.example", "-ip", "192.168.1.1", "-user", "foo"})) { // any file will do + t.Fail() + } +} + +func TestKeyNoIpUser(t *testing.T) { + if isValid(([]string{"-key", "targets.example"})) { + t.Fail() + } +} + +func TestPortNoIpUser(t *testing.T) { + if isValid(([]string{"-port", "2022"})) { + t.Fail() + } +} diff --git a/src/orchestrator/go.mod b/src/orchestrator/go.mod new 
file mode 100644 index 0000000..e0f214d --- /dev/null +++ b/src/orchestrator/go.mod @@ -0,0 +1,29 @@ +module intel.com/svr-info/orchestrator/v2 + +go 1.19 + +replace intel.com/svr-info/pkg/core => ../pkg/core + +replace intel.com/svr-info/pkg/cpu => ../pkg/cpu + +replace intel.com/svr-info/pkg/msr => ../pkg/msr + +replace intel.com/svr-info/pkg/progress => ../pkg/progress + +replace intel.com/svr-info/pkg/target => ../pkg/target + +replace intel.com/svr-info/pkg/commandfile => ../pkg/commandfile + +require ( + golang.org/x/term v0.5.0 + gopkg.in/yaml.v2 v2.4.0 + intel.com/svr-info/pkg/commandfile v0.0.0-00010101000000-000000000000 + intel.com/svr-info/pkg/core v0.0.0-00010101000000-000000000000 + intel.com/svr-info/pkg/progress v0.0.0-00010101000000-000000000000 + intel.com/svr-info/pkg/target v0.0.0-00010101000000-000000000000 +) + +require ( + github.com/creasty/defaults v1.6.0 // indirect + golang.org/x/sys v0.5.0 // indirect +) diff --git a/src/orchestrator/go.sum b/src/orchestrator/go.sum new file mode 100644 index 0000000..eb489ac --- /dev/null +++ b/src/orchestrator/go.sum @@ -0,0 +1,10 @@ +github.com/creasty/defaults v1.6.0 h1:ltuE9cfphUtlrBeomuu8PEyISTXnxqkBIoQfXgv7BSc= +github.com/creasty/defaults v1.6.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/src/orchestrator/main.go 
b/src/orchestrator/main.go new file mode 100644 index 0000000..e49cae0 --- /dev/null +++ b/src/orchestrator/main.go @@ -0,0 +1,399 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package main + +import ( + "archive/tar" + "compress/gzip" + "fmt" + "io" + "io/fs" + "log" + "os" + "os/exec" + "path/filepath" + "strings" + "time" + + "golang.org/x/term" + "intel.com/svr-info/pkg/core" + "intel.com/svr-info/pkg/progress" + "intel.com/svr-info/pkg/target" +) + +// globals +var ( + gVersion string = "dev" // build overrides this, see makefile +) + +func getTargets(assets *core.Assets, cmdLineArgs *CmdLineArgs) (targets []target.Target, err error) { + // if we have a targets file + if cmdLineArgs.targets != "" { + targetsFile := newTargetsFile(cmdLineArgs.targets) + var targetsFromFile []targetFromFile + targetsFromFile, err = targetsFile.parse() + if err != nil { + return + } + for _, t := range targetsFromFile { + if t.ip == "localhost" { // special case, "localhost" in targets file + var hostname string + hostname, err = os.Hostname() + if err != nil { + return + } + localTarget := target.NewLocalTarget(hostname, t.sudo) + if !localTarget.CanElevatePrivileges() { + log.Print("local target in targets file without root privileges.") + fmt.Println("WARNING: User does not have root privileges. 
Not all data will be collected.") + } + targets = append(targets, localTarget) + } else { + targets = append(targets, target.NewRemoteTarget(t.label, t.ip, t.port, t.user, t.key, t.pwd, assets[core.Sshpass], t.sudo)) + } + } + } else { + // if collecting on localhost + if cmdLineArgs.ipAddress == "" { + var hostname string + hostname, err = os.Hostname() + if err != nil { + return + } + localTarget := target.NewLocalTarget(hostname, "") + // ask for password if can't elevate privileges without it, but only if getting + // input from a terminal, i.e., not from a script (for testing) + if !localTarget.CanElevatePrivileges() { + fmt.Println("WARNING: Some data items cannot be collected without elevated privileges.") + if !term.IsTerminal(int(os.Stdin.Fd())) { + log.Print("NOT prompting for password because STDIN isn't coming from a terminal.") + } else { + log.Print("Prompting for password.") + fmt.Print("To collect all data, enter sudo password followed by Enter. Otherwise, press Enter:") + var pwd []byte + pwd, err = term.ReadPassword(0) + if err != nil { + return + } + fmt.Printf("\n") // newline after password + localTarget.SetSudo(string(pwd)) + if localTarget.GetSudo() != "" && !localTarget.CanElevatePrivileges() { + log.Print("Password provided but failed to elevate privileges.") + fmt.Println("WARNING: Not able to establish elevated privileges with provided password.") + fmt.Println("Continuing with regular user privileges. 
Some data will not be collected.") + localTarget.SetSudo("") + } + } + } + targets = append(targets, localTarget) + } else { + targets = append(targets, target.NewRemoteTarget(cmdLineArgs.ipAddress, cmdLineArgs.ipAddress, fmt.Sprintf("%d", cmdLineArgs.port), cmdLineArgs.user, cmdLineArgs.key, "", "", "")) + } + } + return +} + +// go routine +func doCollection(collection *Collection, ch chan *Collection, statusUpdate progress.MultiSpinnerUpdateFunc) { + if statusUpdate != nil { + statusUpdate(collection.target.GetName(), "collecting data") + } + err := collection.Collect() + if err != nil { + log.Printf("Error: %v", err) + if statusUpdate != nil { + statusUpdate(collection.target.GetName(), "error collecting data") + } + } else { + if statusUpdate != nil { + statusUpdate(collection.target.GetName(), "finished collecting data") + } + } + ch <- collection +} + +func getCollections(targets []target.Target, assets *core.Assets, workDir string, cmdLineArgs *CmdLineArgs, statusUpdate progress.MultiSpinnerUpdateFunc) (collections []*Collection, err error) { + // run collections in parallel + ch := make(chan *Collection) + for _, target := range targets { + collection := newCollection(target, cmdLineArgs, assets, workDir) + go doCollection(collection, ch, statusUpdate) + } + // wait for all collections to complete collecting + for range targets { + collection := <-ch + collections = append(collections, collection) + } + return +} + +func getReports(collections []*Collection, assets *core.Assets, outputDir string, cmdLineArgs *CmdLineArgs, statusUpdate progress.MultiSpinnerUpdateFunc) (reportFilePaths []string, err error) { + var okCollections = make([]*Collection, 0) + for _, collection := range collections { + if collection.ok { + okCollections = append(okCollections, collection) + if statusUpdate != nil { + statusUpdate(collection.target.GetName(), "creating report(s)") + } + } + } + if len(okCollections) == 0 { + err = fmt.Errorf("no data collected") + return + } + var 
collectionFilePaths []string + for _, collection := range okCollections { + collectionFilePaths = append(collectionFilePaths, collection.outputFilePath) + } + cmd := exec.Command(assets[core.Reporter], "-input", strings.Join(collectionFilePaths, ","), "-output", outputDir, "-format", cmdLineArgs.format) + log.Printf("run: %s", strings.Join(cmd.Args, " ")) + stdout, _, _, err := target.RunLocalCommand(cmd) + if err != nil { + for _, collection := range collections { + if statusUpdate != nil { + statusUpdate(collection.target.GetName(), "error creating report(s)") + } + } + return + } + reportFilePaths = strings.Split(stdout, "\n") + reportFilePaths = reportFilePaths[:len(reportFilePaths)-1] + for _, collection := range collections { + if collection.ok { + if statusUpdate != nil { + statusUpdate(collection.target.GetName(), "finished creating report(s)") + } + } + } + return +} + +func archiveOutputDir(outputDir string) (err error) { + tarFilePath := filepath.Join(outputDir, filepath.Base(outputDir)+".tgz") + out, err := os.Create(tarFilePath) + if err != nil { + return + } + defer out.Close() + gw := gzip.NewWriter(out) + defer gw.Close() + tw := tar.NewWriter(gw) + defer tw.Close() + baseDir, err := os.Getwd() + if err != nil { + return + } + err = os.Chdir(outputDir) + if err != nil { + return + } + defer os.Chdir(baseDir) + err = filepath.WalkDir(".", func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + if !d.IsDir() && filepath.Base(path) != filepath.Base(tarFilePath) { + info, err := d.Info() + if err != nil { + return err + } + var header *tar.Header + header, err = tar.FileInfoHeader(info, info.Name()) + if err != nil { + return err + } + header.Name = filepath.Join(filepath.Base(outputDir), path) + err = tw.WriteHeader(header) + if err != nil { + return err + } + var file *os.File + file, err = os.Open(path) + if err != nil { + return err + } + _, err = io.Copy(tw, file) + file.Close() + if err != nil { + return err + } + 
} + return nil + }) + return +} + +func cleanupOutputDir(outputDir string, collections []*Collection, reportFilePaths []string) (err error) { + var filesToRemove []string + for _, collection := range collections { + hostname := collection.target.GetName() + filesToRemove = append(filesToRemove, filepath.Join(outputDir, getLogfileName())) + filesToRemove = append(filesToRemove, filepath.Join(outputDir, hostname+"_reports_collector.yaml")) + filesToRemove = append(filesToRemove, filepath.Join(outputDir, hostname+"_collector.log")) + filesToRemove = append(filesToRemove, filepath.Join(outputDir, hostname+"_megadata_collector.yaml")) + filesToRemove = append(filesToRemove, filepath.Join(outputDir, hostname+"_megadata_collector.log")) + filesToRemove = append(filesToRemove, filepath.Join(outputDir, hostname+"_megadata", "collector.log")) + filesToRemove = append(filesToRemove, filepath.Join(outputDir, hostname+"_megadata", "collector.pid")) + filesToRemove = append(filesToRemove, filepath.Join(outputDir, hostname+".raw.json")) + } + filesToRemove = append(filesToRemove, filepath.Join(outputDir, "reporter.log")) + for _, file := range filesToRemove { + os.Remove(file) + } + return +} + +func doWork(outputDir string, cmdLineArgs *CmdLineArgs) error { + assets, err := core.NewAssets() + if err != nil { + return err + } + _, nomatch, nodata, err := assets.Verify() + if err != nil { + log.Printf("Warning: %v", err) + } + for _, a := range nomatch { + log.Printf("checksum does not match -- %s has been modified.", a) + } + for _, a := range nodata { + log.Printf("checksum not found for %s.", a) + } + if cmdLineArgs.dumpConfig { + customized, err := customizeCommandYAML(assets[core.ReportsYaml], cmdLineArgs, ".", "target_hostname") + if err != nil { + return err + } + fmt.Print(string(customized)) + return nil + } + targets, err := getTargets(assets, cmdLineArgs) + if err != nil { + return err + } + if len(targets) == 0 { + return fmt.Errorf("no targets provided") + } + 
multiSpinner := progress.NewMultiSpinner() + for _, t := range targets { + multiSpinner.AddSpinner(t.GetName()) + } + multiSpinner.Start() + defer multiSpinner.Finish() + collections, err := getCollections(targets, assets, outputDir, cmdLineArgs, multiSpinner.Status) + if err != nil { + return err + } + var reportFilePaths []string + reportFilePaths, err = getReports(collections, assets, outputDir, cmdLineArgs, multiSpinner.Status) + if err != nil { + return err + } + err = archiveOutputDir(outputDir) + if err != nil { + return err + } + if !cmdLineArgs.debug { + err = cleanupOutputDir(outputDir, collections, reportFilePaths) + if err != nil { + return err + } + } + multiSpinner.Finish() + fmt.Print("Reports:\n") + for _, reportFilePath := range reportFilePaths { + relativePath, err := filepath.Rel(filepath.Join(outputDir, ".."), reportFilePath) + if err != nil { + return err + } + fmt.Printf(" %s\n", relativePath) + } + return nil +} + +func getLogfileName() string { + return filepath.Base(os.Args[0]) + ".log" +} + +const ( + retNoError = 0 + retError = 1 +) + +func mainReturnWithCode() int { + // command line + cmdLineArgs := newCmdLineArgs() + err := cmdLineArgs.parse(os.Args[0], os.Args[1:]) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return retError + } + err = cmdLineArgs.validate() + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + return retError + } + // show help + if cmdLineArgs.help { + showUsage() + return retNoError + } + // show version + if cmdLineArgs.version { + showVersion() + return retNoError + } + // output directory + var outputDir string + if cmdLineArgs.output != "" { + var err error + outputDir, err = core.AbsPath(cmdLineArgs.output) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + return retError + } + } else { + outputDirName := filepath.Base(os.Args[0]) + "_" + time.Now().Local().Format("2006-01-02_15-04-05") + var err error + // outputDir will be created in current working directory + outputDir, err 
= core.AbsPath(outputDirName) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + return retError + } + err = os.Mkdir(outputDir, 0755) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + return retError + } + } + // logging + logFilename := getLogfileName() + logFile, err := os.OpenFile(filepath.Join(outputDir, logFilename), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) + if err != nil { + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + return retError + } + defer logFile.Close() + log.SetOutput(logFile) + log.SetFlags(log.LstdFlags | log.Lmicroseconds | log.Lshortfile) + + log.Printf("Starting up %s, version %s, PID %d, PPID %d, arguments: %s", + filepath.Base(os.Args[0]), + gVersion, + os.Getpid(), + os.Getppid(), + strings.Join(os.Args, " "), + ) + // get to work + err = doWork(outputDir, cmdLineArgs) + if err != nil { + log.Printf("Error: %v", err) + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + return retError + } + return retNoError +} + +func main() { os.Exit(mainReturnWithCode()) } diff --git a/src/orchestrator/targets.example b/src/orchestrator/targets.example new file mode 100644 index 0000000..f1c31d8 --- /dev/null +++ b/src/orchestrator/targets.example @@ -0,0 +1,19 @@ +# example targets file +# for use with the -targets command line option +# Line format: +# ip_address::user_name::: # trailing comments are supported +# - ip_address and user_name are required +# - ssh_port defaults to 22 +# - Field separators required (except for label separator) + +# example - ip address, user name, and ssh key +192.168.1.1::elaine:/home/elaine/.ssh/id_rsa:: + +# example - ip address, user name, ssh key, and sudo password +192.168.1.2::jerry:/home/jerry/.ssh/id_rsa::elevateme + +# example - optional label, ip address, user name, ssh password, sudo password, and trailing comment +Xeon_Gen_4:192.168.1.3::kramer::logmein:logmein # example comment + +# example - minimum required, e.g., passwordless ssh and passwordless sudo are configured 
+192.168.1.2::george::: diff --git a/src/orchestrator/targets_file.go b/src/orchestrator/targets_file.go new file mode 100644 index 0000000..6e59bad --- /dev/null +++ b/src/orchestrator/targets_file.go @@ -0,0 +1,107 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package main + +import ( + "bufio" + "bytes" + "fmt" + "os" + "strconv" + "strings" + + "intel.com/svr-info/pkg/core" +) + +type targetFromFile struct { + label string + ip string + port string + user string + key string + pwd string + sudo string + lineNo int +} + +type TargetsFile struct { + path string +} + +func newTargetsFile(path string) *TargetsFile { + return &TargetsFile{path: path} +} + +func (tf *TargetsFile) parse() (targets []targetFromFile, err error) { + content, err := os.ReadFile(tf.path) + if err != nil { + return + } + return tf.parseContent(content) +} + +func (tf *TargetsFile) parseContent(content []byte) (targets []targetFromFile, err error) { + scanner := bufio.NewScanner(bytes.NewReader(content)) + lineNo := 0 + var fileErrors []string + for scanner.Scan() { + lineNo += 1 + line := scanner.Text() + line = strings.Split(line, "#")[0] // strip trailing comment + line = strings.TrimSpace(line) + // skip blank and commented lines + if line == "" || line[0] == '#' { + continue + } + tokens := strings.Split(line, ":") + var t targetFromFile + if len(tokens) != 6 && len(tokens) != 7 { + fileErrors = append(fileErrors, fmt.Sprintf("-targets %s : format error, line %d\n", tf.path, lineNo)) + } else { + i := 0 + t.lineNo = lineNo + t.label = tokens[0] + if len(tokens) == 7 { + i++ + } + t.ip = tokens[i] + // ip is required + if t.ip == "" { + fileErrors = append(fileErrors, fmt.Sprintf("-targets %s : IP Address (or hostname) is required, line %d\n", tf.path, lineNo)) + } + // port is optional, but must be an integer if provided + t.port = tokens[i+1] + if t.port != "" { + _, err := strconv.Atoi(t.port) + if err != nil { + fileErrors = append(fileErrors, 
fmt.Sprintf("-targets %s : invalid port %s, line %d\n", tf.path, t.port, lineNo)) + } + } + // user is required + t.user = tokens[i+2] + if t.user == "" { + fileErrors = append(fileErrors, fmt.Sprintf("-targets %s : user name is required, line %d\n", tf.path, lineNo)) + } + // key, pwd, and sudo are all optional + t.key = tokens[i+3] + if t.key != "" { + err = core.FileExists(t.key) + if err != nil { + fileErrors = append(fileErrors, fmt.Sprintf("-targets %s : key file (%s) not a file, line %d\n", tf.path, t.key, lineNo)) + return + } + } + t.pwd = tokens[i+4] + t.sudo = tokens[i+5] + t.sudo = strings.ReplaceAll(t.sudo, "$", "\\$") // escape $ in sudo password + targets = append(targets, t) + } + } + if len(fileErrors) > 0 { + err = fmt.Errorf("%s", strings.Join(fileErrors, "\n")) + return + } + return +} diff --git a/src/orchestrator/targets_file_test.go b/src/orchestrator/targets_file_test.go new file mode 100644 index 0000000..789a9df --- /dev/null +++ b/src/orchestrator/targets_file_test.go @@ -0,0 +1,205 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package main + +import ( + "strings" + "testing" +) + +func TestParseAllFields(t *testing.T) { + content := ` + label:ip:22:user:targets.example:sshpassword:sudopassword # ignored comment + ` + tf := newTargetsFile("testing") + targets, err := tf.parseContent([]byte(content)) + if err != nil { + t.Fail() + } + if len(targets) != 1 { + t.Fail() + } + if targets[0].label != "label" { + t.Fail() + } + if targets[0].ip != "ip" { + t.Fail() + } + if targets[0].port != "22" { + t.Fail() + } + if targets[0].user != "user" { + t.Fail() + } + if targets[0].key != "targets.example" { + t.Fail() + } + if targets[0].pwd != "sshpassword" { + t.Fail() + } + if targets[0].sudo != "sudopassword" { + t.Fail() + } +} + +func TestParseNoLabel(t *testing.T) { + content := ` + ip:22:user:targets.example:sshpassword:sudopassword + ` + tf := newTargetsFile("testing") + targets, err := 
tf.parseContent([]byte(content)) + if err != nil { + t.Fail() + } + if len(targets) != 1 { + t.Fail() + } + if targets[0].label != "ip" { + t.Fail() + } + if targets[0].ip != "ip" { + t.Fail() + } + if targets[0].port != "22" { + t.Fail() + } + if targets[0].user != "user" { + t.Fail() + } + if targets[0].key != "targets.example" { + t.Fail() + } + if targets[0].pwd != "sshpassword" { + t.Fail() + } + if targets[0].sudo != "sudopassword" { + t.Fail() + } +} + +func TestParseMultiLine(t *testing.T) { + content := ` + + # this is a commented line + label:ip::user:targets.example:sshpassword:sudopassword + #label:ip:port:user::sshpassword:sudopassword # trailing comment + label:ip:22:user::sshpassword:sudopassword + # another commented line + + ` + tf := newTargetsFile("testing") + targets, err := tf.parseContent([]byte(content)) + if err != nil { + t.Fail() + } + if len(targets) != 2 { + t.Fail() + } + if targets[0].label != "label" { + t.Fail() + } + if targets[0].ip != "ip" { + t.Fail() + } + if targets[0].port != "" { + t.Fail() + } + if targets[0].user != "user" { + t.Fail() + } + if targets[0].key != "targets.example" { + t.Fail() + } + if targets[0].pwd != "sshpassword" { + t.Fail() + } + if targets[0].sudo != "sudopassword" { + t.Fail() + } +} + +func TestParseEmpty(t *testing.T) { + content := "" + tf := newTargetsFile("testing") + targets, err := tf.parseContent([]byte(content)) + if err != nil { + t.Fail() + } + if len(targets) != 0 { + t.Fail() + } +} + +func TestParseAllComments(t *testing.T) { + content := ` + # ip:22:user::sshpassword:sudopassword # comment + # foo + + ` + tf := newTargetsFile("testing") + targets, err := tf.parseContent([]byte(content)) + if err != nil { + t.Fail() + } + if len(targets) != 0 { + t.Fail() + } +} + +func TestParseMissingFields(t *testing.T) { + content := ` + # valid line + label:ip:22:user::sshpassword:sudopassword # comment + # invalid line + ip:22:user:key:sshpassword + ` + tf := newTargetsFile("testing") + _, err := 
tf.parseContent([]byte(content)) + if err == nil { + t.Fail() + } + if !strings.Contains(err.Error(), "format error, line 5") { + t.Fail() + } +} + +func TestParseInvalidPort(t *testing.T) { + content := "ip:invalid_port:user::sshpassword:sudopassword" + tf := newTargetsFile("testing") + _, err := tf.parseContent([]byte(content)) + if err == nil { + t.Fail() + } + if !strings.Contains(err.Error(), "invalid port invalid_port, line 1") { + t.Fail() + } +} + +func TestMissingIpAndUser(t *testing.T) { + content := ":22::targets.example:sshpassword:sudopassword" + tf := newTargetsFile("testing") + _, err := tf.parseContent([]byte(content)) + if err == nil { + t.Fail() + } + if !strings.Contains(err.Error(), "user name is required, line 1") { + t.Fail() + } + if !strings.Contains(err.Error(), "IP Address (or hostname) is required, line 1") { + t.Fail() + } +} + +func TestEscapeSudo(t *testing.T) { + content := "ip::user:::$foo$bar" + tf := newTargetsFile("testing") + targets, err := tf.parseContent([]byte(content)) + if err != nil { + t.Fail() + } + if targets[0].sudo != "\\$foo\\$bar" { + t.Fail() + } +} diff --git a/src/pkg/commandfile/commandfile.go b/src/pkg/commandfile/commandfile.go new file mode 100644 index 0000000..3e3ef62 --- /dev/null +++ b/src/pkg/commandfile/commandfile.go @@ -0,0 +1,48 @@ +/* +Package commandfile provides common interface to collector input file +*/ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package commandfile + +import "github.com/creasty/defaults" + +type Command struct { + Label string `yaml:"label"` + Command string `yaml:"command"` + Modprobe string `yaml:"modprobe"` + Superuser bool `default:"false" yaml:"superuser"` + Run bool `default:"true" yaml:"run"` + Parallel bool `default:"false" yaml:"parallel"` +} + +type Arguments struct { + Name string `default:"test" yaml:"name"` + Binpath string `default:"." 
yaml:"bin_path"` + Timeout int `default:"300" yaml:"command_timeout"` +} + +type CommandFile struct { + Args Arguments `yaml:"arguments"` + Commands []Command `yaml:"commands"` +} + +func (s *Arguments) UnmarshalYAML(unmarshal func(interface{}) error) error { + defaults.Set(s) + type plain Arguments + if err := unmarshal((*plain)(s)); err != nil { + return err + } + return nil +} + +func (s *Command) UnmarshalYAML(unmarshal func(interface{}) error) error { + defaults.Set(s) + type plain Command + if err := unmarshal((*plain)(s)); err != nil { + return err + } + return nil +} diff --git a/src/pkg/commandfile/go.mod b/src/pkg/commandfile/go.mod new file mode 100644 index 0000000..7c94523 --- /dev/null +++ b/src/pkg/commandfile/go.mod @@ -0,0 +1,5 @@ +module intel.com/svr-info/pkg/commandfile + +go 1.19 + +require github.com/creasty/defaults v1.6.0 diff --git a/src/pkg/commandfile/go.sum b/src/pkg/commandfile/go.sum new file mode 100644 index 0000000..b360e2e --- /dev/null +++ b/src/pkg/commandfile/go.sum @@ -0,0 +1,2 @@ +github.com/creasty/defaults v1.6.0 h1:ltuE9cfphUtlrBeomuu8PEyISTXnxqkBIoQfXgv7BSc= +github.com/creasty/defaults v1.6.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM= diff --git a/src/pkg/core/assets.go b/src/pkg/core/assets.go new file mode 100644 index 0000000..ad1d6c8 --- /dev/null +++ b/src/pkg/core/assets.go @@ -0,0 +1,199 @@ +/* +Package core includes internal shared code. 
// Asset indexes into the Assets array; one entry per file the orchestrator
// ships or references.
const (
	Orchestrator int = iota
	Amd64Collector
	Reporter
	Amd64Deps
	ReportsYaml
	MegadataYaml
	Sshpass
	ReferenceData
	HTMLReportTemplate
	CPUsYaml
	GPUsYaml
	AcceleratorsYaml
	Insights
	Arm64Collector
	Arm64Deps
	Burn
	numPaths // keep this at the end
)

// Assets maps each asset index above to its resolved file path.
type Assets [numPaths]string

// NewAssets locates every required asset relative to the running executable
// and returns the populated path table; the first missing asset aborts with
// an error.
func NewAssets() (assets *Assets, err error) {
	assets = &Assets{}
	// fix: gpus.yaml and accelerators.yaml were previously assigned to
	// assets[CPUsYaml], leaving GPUsYaml and AcceleratorsYaml empty; the
	// table form prevents that class of copy/paste bug
	lookups := []struct {
		index int
		name  string
	}{
		{Orchestrator, "orchestrator"},
		{Reporter, "reporter"},
		{Amd64Collector, "collector"},
		{Arm64Collector, "collector_arm64"},
		{ReportsYaml, "collector_reports.yaml.tmpl"},
		{MegadataYaml, "collector_megadata.yaml.tmpl"},
		{Amd64Deps, "collector_deps_amd64.tgz"},
		{Arm64Deps, "collector_deps_arm64.tgz"},
		{Sshpass, "sshpass"},
		{ReferenceData, "reference.yaml"},
		{HTMLReportTemplate, "report.html.tmpl"},
		{CPUsYaml, "cpus.yaml"},
		{GPUsYaml, "gpus.yaml"},
		{AcceleratorsYaml, "accelerators.yaml"},
		{Insights, "insights.grl"},
		{Burn, "burn"},
	}
	for _, l := range lookups {
		assets[l.index], err = FindAsset(l.name)
		if err != nil {
			return
		}
	}
	return
}

// Verify compares the md5 checksum of each asset against the sums.md5 file.
// It returns the assets whose checksums match, those that do not, and those
// with no recorded checksum.
func (assets *Assets) Verify() (match []string, nomatch []string, nodata []string, err error) {
	sums := make(map[string]string) // base file name -> md5 hex digest
	// build map from the file containing all md5 sums
	sumsFilepath, err := FindAsset("sums.md5")
	if err != nil {
		return
	}
	sumsFile, err := os.Open(sumsFilepath)
	if err != nil {
		return
	}
	defer sumsFile.Close()
	// each line follows md5sum output: "<digest>  <path>"
	re := regexp.MustCompile(`(\w+)\s+([\w./-]+)`) // hoisted out of the scan loop
	scanner := bufio.NewScanner(sumsFile)
	for scanner.Scan() {
		if m := re.FindStringSubmatch(scanner.Text()); len(m) == 3 {
			sums[path.Base(m[2])] = m[1]
		}
	}
	// find each asset in the map and verify its md5
	for _, asset := range assets {
		if asset == "" {
			continue
		}
		var assetFile *os.File
		assetFile, err = os.Open(asset)
		if err != nil {
			return
		}
		hash := md5.New()
		_, err = io.Copy(hash, assetFile)
		// close immediately; a defer inside the loop would keep every file
		// open until the function returns
		assetFile.Close()
		if err != nil {
			return
		}
		assetSum := fmt.Sprintf("%x", hash.Sum(nil))
		// categorize result
		if sum, ok := sums[path.Base(asset)]; ok {
			if sum != assetSum {
				nomatch = append(nomatch, asset)
			} else {
				match = append(match, asset)
			}
		} else {
			nodata = append(nodata, asset)
		}
	}
	return
}

// FindAsset searches a fixed set of directories relative to the running
// executable for assetName and returns its full path, or an error when the
// asset is not found in any of them.
func FindAsset(assetName string) (assetPath string, err error) {
	exePath, _ := os.Executable() // error ignored; exeDir degrades to "." at worst
	exeDir := filepath.Dir(exePath)

	searchDirs := []string{
		/* for use during deployment */
		filepath.Join(exeDir, "..", "config"),
		filepath.Join(exeDir, "..", "tools"),
		/* for use during development */
		filepath.Join(exeDir, "..", "..", "config"),
		filepath.Join(exeDir, "..", "orchestrator"),
		filepath.Join(exeDir, "..", "collector"),
		filepath.Join(exeDir, "..", "reporter"),
		filepath.Join(exeDir, "..", "sshpass"),
		filepath.Join(exeDir, "..", "burn"),
	}
	for _, dir := range searchDirs {
		// NOTE(review): the "." branch appears unreachable — filepath.Join
		// never yields "." for the paths above; kept for safety
		if dir == "." {
			assetPath = strings.Join([]string{dir, assetName}, string(filepath.Separator))
		} else {
			assetPath = filepath.Join(dir, assetName)
		}
		_, err = os.Stat(assetPath)
		if err == nil {
			return
		}
	}
	if err != nil {
		err = fmt.Errorf("could not find required asset (%s) relative to executable (%s)", assetName, exePath)
	}
	return
}
+*/ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package core + +import ( + "fmt" + "strings" +) + +var ReportTypes = []string{"html", "json", "xlsx", "txt", "all"} + +func IsValidReportType(input string) (valid bool) { + for _, validType := range ReportTypes { + if input == validType { + return true + } + } + return false +} + +func GetReportTypes(input string) (reportTypes []string, err error) { + reportTypes = strings.Split(input, ",") + if len(reportTypes) == 1 && reportTypes[0] == "all" { + reportTypes = ReportTypes[:len(ReportTypes)-1] + return + } + for _, reportType := range reportTypes { + if !IsValidReportType(reportType) { + err = fmt.Errorf("invalid report type: %s", reportType) + return + } + } + return +} diff --git a/src/pkg/core/util.go b/src/pkg/core/util.go new file mode 100644 index 0000000..59c5232 --- /dev/null +++ b/src/pkg/core/util.go @@ -0,0 +1,52 @@ +/* +Package core includes internal shared code. +*/ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package core + +import ( + "fmt" + "io/fs" + "os" + "os/user" + "path/filepath" + "strings" +) + +// ExpandUser expands '~' to user's home directory, if found, otherwise returns original path +func ExpandUser(path string) string { + usr, _ := user.Current() + if path == "~" { + return usr.HomeDir + } else if strings.HasPrefix(path, "~"+string(os.PathSeparator)) { + return filepath.Join(usr.HomeDir, path[2:]) + } else { + return path + } +} + +// AbsPath returns absolute path after expanding '~' to user's home dir +// Useful when application is started by a process that isn't a shell, e.g. 
PKB +// Use everywhere in place of filepath.Abs() +func AbsPath(path string) (string, error) { + return filepath.Abs(ExpandUser(path)) +} + +// FileExists returns error if file does not exist or does exist but +// is not a file, i.e., is a directory +func FileExists(path string) (err error) { + var fileInfo fs.FileInfo + fileInfo, err = os.Stat(path) + if err != nil { + return + } else { + if !fileInfo.Mode().IsRegular() { + err = fmt.Errorf("%s not a file", path) + return + } + } + return +} diff --git a/src/pkg/cpu/cpu.go b/src/pkg/cpu/cpu.go new file mode 100644 index 0000000..bc32c84 --- /dev/null +++ b/src/pkg/cpu/cpu.go @@ -0,0 +1,108 @@ +/* +Package cpu provides a reference of CPU architectures and identification keys for known CPUS. +*/ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package cpu + +import ( + "fmt" + "log" + "os" + "regexp" + + "gopkg.in/yaml.v2" +) + +type CPUInfo struct { + Architecture string `yaml:"architecture"` + Family string `yaml:"family"` + Model string `yaml:"model"` + Stepping string `yaml:"stepping"` + Channels int `yaml:"channels"` +} + +type CPU struct { + configFilenames []string + cpusInfo []CPUInfo +} + +func NewCPU(configFilenames []string) (cpu *CPU, err error) { + cpu = &CPU{ + configFilenames: configFilenames, + cpusInfo: []CPUInfo{}, + } + err = cpu.init() + return +} + +func (c *CPU) init() (err error) { + for _, filename := range c.configFilenames { + yamlBytes, err := os.ReadFile(filename) + if err != nil { + log.Printf("failed to read CPU info file: %s, %v", filename, err) + continue + } + cpusInfo := []CPUInfo{} + err = yaml.UnmarshalStrict(yamlBytes, &cpusInfo) + if err != nil { + log.Printf("failed to parse CPU info file: %s, %v", filename, err) + continue + } + c.cpusInfo = append(c.cpusInfo, cpusInfo...) 
+ } + return +} + +func (c *CPU) getCPU(family, model, stepping string) (cpu CPUInfo, err error) { + for _, info := range c.cpusInfo { + // if family matches + if info.Family == family { + var reModel *regexp.Regexp + reModel, err = regexp.Compile(info.Model) + if err != nil { + return + } + // if model matches + if reModel.FindString(model) != "" { + // if there is a stepping + if info.Stepping != "" { + var reStepping *regexp.Regexp + reStepping, err = regexp.Compile(info.Stepping) + if err != nil { + return + } + // if stepping does NOT match + if reStepping.FindString(stepping) == "" { + // no match + continue + } + } + cpu = info + return + } + } + } + err = fmt.Errorf("CPU match not found for family %s, model %s, stepping %s", family, model, stepping) + return +} + +func (c *CPU) GetMicroArchitecture(family, model, stepping string) (uarch string, err error) { + cpu, err := c.getCPU(family, model, stepping) + if err != nil { + return + } + uarch = cpu.Architecture + return +} + +func (c *CPU) GetMemoryChannels(family, model, stepping string) (channels int, err error) { + cpu, err := c.getCPU(family, model, stepping) + if err != nil { + return + } + channels = cpu.Channels + return +} diff --git a/src/pkg/cpu/cpu_test.go b/src/pkg/cpu/cpu_test.go new file mode 100644 index 0000000..0f8a2fe --- /dev/null +++ b/src/pkg/cpu/cpu_test.go @@ -0,0 +1,90 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package cpu + +import ( + "fmt" + "testing" +) + +func TestFindCPU(t *testing.T) { + cpu, err := NewCPU([]string{"cpu_test.yaml", "cpu_test_2.yaml"}) + if err != nil { + t.Fatal(err) + } + // should fail + _, err = cpu.GetMicroArchitecture("0", "0", "0") + if err == nil { + t.Fatal(err) + } + // should succeed + uarch, err := cpu.GetMicroArchitecture("6", "85", "4") //SKX + if err != nil { + t.Fatal(err) + } + if uarch != "SKX" { + t.Fatal(fmt.Errorf("Found the wrong CPU")) + } + // should succeed + uarch, err = 
cpu.GetMicroArchitecture("6", "85", "7") //CLX + if err != nil { + t.Fatal(err) + } + if uarch != "CLX" { + t.Fatal(fmt.Errorf("Found the wrong CPU")) + } + uarch, err = cpu.GetMicroArchitecture("6", "85", "6") //CLX + if err != nil { + t.Fatal(err) + } + if uarch != "CLX" { + t.Fatal(fmt.Errorf("Found the wrong CPU")) + } + // should succeed + uarch, err = cpu.GetMicroArchitecture("6", "108", "0") //ICX + if err != nil { + t.Fatal(err) + } + if uarch != "ICX" { + t.Fatal(fmt.Errorf("Found the wrong CPU")) + } + uarch, err = cpu.GetMicroArchitecture("6", "71", "0") //BDW + if err != nil { + t.Fatal(err) + } + if uarch != "BDW" { + t.Fatal(fmt.Errorf("Found the wrong CPU")) + } + + // test the regex on model for HSW + channels, err := cpu.GetMemoryChannels("6", "50", "0") //HSW + if err != nil { + t.Fatal(err) + } + if channels != 2 { + t.Fatal(fmt.Errorf("Found the wrong CPU")) + } + uarch, err = cpu.GetMicroArchitecture("6", "69", "99") //HSW + if err != nil { + t.Fatal(err) + } + if uarch != "HSW" { + t.Fatal(fmt.Errorf("Found the wrong CPU")) + } + uarch, err = cpu.GetMicroArchitecture("6", "70", "") //HSW + if err != nil { + t.Fatal(err) + } + if uarch != "HSW" { + t.Fatal(fmt.Errorf("Found the wrong CPU")) + } + uarch, err = cpu.GetMicroArchitecture("0", "1", "r3p1") // + if err != nil { + t.Fatal(err) + } + if uarch != "Neoverse N1" { + t.Fatal(fmt.Errorf("Found the wrong CPU")) + } +} diff --git a/src/pkg/cpu/cpu_test.yaml b/src/pkg/cpu/cpu_test.yaml new file mode 100755 index 0000000..87eebf3 --- /dev/null +++ b/src/pkg/cpu/cpu_test.yaml @@ -0,0 +1,17 @@ +- architecture: SKX + family: 6 + model: 85 + stepping: (0|1|2|3|4) + channels: 6 + +- architecture: CLX + family: 6 + model: 85 + stepping: (5|6|7) + channels: 6 + +- architecture: ICX + family: 6 + model: (106|108) + stepping: + channels: 8 diff --git a/src/pkg/cpu/cpu_test_2.yaml b/src/pkg/cpu/cpu_test_2.yaml new file mode 100755 index 0000000..3b09487 --- /dev/null +++ b/src/pkg/cpu/cpu_test_2.yaml @@ 
-0,0 +1,21 @@ +# Haswell +- architecture: HSW + family: 6 + model: (50|69|70) + stepping: + channels: 2 + +# Broadwell +- architecture: BDW + family: 6 + model: (61|71) + stepping: + channels: 2 + + +# Graviton 2 +- architecture: Neoverse N1 + family: 0 + model: 1 + stepping: r3p1 + channels: 8 diff --git a/src/pkg/cpu/go.mod b/src/pkg/cpu/go.mod new file mode 100644 index 0000000..bb218a2 --- /dev/null +++ b/src/pkg/cpu/go.mod @@ -0,0 +1,5 @@ +module intel.com/svr-info/pkg/cpu/v2 + +go 1.19 + +require gopkg.in/yaml.v2 v2.4.0 diff --git a/src/pkg/cpu/go.sum b/src/pkg/cpu/go.sum new file mode 100644 index 0000000..dd0bc19 --- /dev/null +++ b/src/pkg/cpu/go.sum @@ -0,0 +1,4 @@ +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= diff --git a/src/pkg/msr/go.mod b/src/pkg/msr/go.mod new file mode 100644 index 0000000..5ad3cb1 --- /dev/null +++ b/src/pkg/msr/go.mod @@ -0,0 +1,3 @@ +module intel.com/svr-info/pkg/msr/v2 + +go 1.19 diff --git a/src/pkg/msr/msr.go b/src/pkg/msr/msr.go new file mode 100644 index 0000000..92e290d --- /dev/null +++ b/src/pkg/msr/msr.go @@ -0,0 +1,190 @@ +/* +Package msr implements functions to read MSRs. 
// MSR provides read access to x86 model-specific registers via the
// /dev/cpu/*/msr device files (requires the msr kernel module and,
// typically, root).
type MSR struct {
	fileNames    []string // all msr file names, one per logical CPU
	pkgFileNames []string // one file name per package (CPU/Socket)
	fileStyleNew bool     // true: /dev/cpu/N/msr, false: /dev/cpu/cpuN/msr
	lowBit       int      // low bit in requested bit range
	highBit      int      // high bit in requested bit range
}

// NewMSR locates the msr device files and returns a reader configured for
// the full 64-bit range.
func NewMSR() (msr *MSR, err error) {
	msr = &MSR{
		lowBit:  0,
		highBit: 63,
	}
	err = msr.init()
	return
}

// init finds the per-CPU msr device files and selects one file per package
// by reading MSR 0x4F (PPIN, unique per package). Package detection is
// best-effort: the register is not readable on all platforms.
func (msr *MSR) init() (err error) {
	_, err = os.Stat("/dev/cpu/cpu0/msr")
	if err == nil {
		msr.fileStyleNew = false
		msr.fileNames, err = filepath.Glob("/dev/cpu/cpu*/msr")
		if err != nil {
			return
		}
	} else {
		_, err = os.Stat("/dev/cpu/0/msr")
		if err == nil {
			msr.fileStyleNew = true
			msr.fileNames, err = filepath.Glob("/dev/cpu/*/msr")
			if err != nil {
				return
			}
		} else {
			err = fmt.Errorf("could not find the MSR files in /dev/cpu (maybe you need a sudo modprobe msr)")
			return
		}
	}
	// determine which MSR files to use for packages;
	// don't return an error if this fails — the register can't be read on
	// all platforms
	var vals []uint64
	for _, fileName := range msr.fileNames {
		val, e := msr.read(0x4F, fileName, 8)
		if e != nil {
			// fix: was a bare "return" that silently abandoned package
			// detection for all remaining CPUs; skip only the unreadable one
			continue
		}
		haveIt := false
		for _, v := range vals {
			if v == val {
				haveIt = true
				break
			}
		}
		if !haveIt {
			msr.pkgFileNames = append(msr.pkgFileNames, fileName)
			vals = append(vals, val)
		}
	}
	return
}

// getMSRFileNames returns the device file names for the specified core and
// scope. core == -1 selects all cores; packageScope is ignored when a
// specific core is requested.
func (msr *MSR) getMSRFileNames(core int, packageScope bool) (fileNames []string) {
	if core == -1 { // all cores
		if packageScope {
			fileNames = msr.pkgFileNames
		} else {
			fileNames = msr.fileNames
		}
	} else { // specific core
		if msr.fileStyleNew {
			fileNames = append(fileNames, fmt.Sprintf("/dev/cpu/%d/msr", core))
		} else {
			fileNames = append(fileNames, fmt.Sprintf("/dev/cpu/cpu%d/msr", core))
		}
	}
	return
}

// maskUint64 returns bits [lowBit..highBit] of val, shifted down so the low
// bit of the range becomes bit 0. A 64-bit range returns val unchanged.
func maskUint64(highBit int, lowBit int, val uint64) (v uint64) {
	bits := highBit - lowBit + 1
	if bits < 64 {
		val >>= uint64(lowBit)
		val &= (uint64(1) << bits) - 1
	}
	v = val
	return
}

// read returns the value of register reg from one msr device file, masked
// to the configured bit range. bytes must be 8 — binary.LittleEndian.Uint64
// requires an 8-byte buffer (all internal callers pass 8).
func (msr *MSR) read(reg uint64, fileName string, bytes int) (val uint64, err error) {
	f, err := os.Open(fileName)
	if err != nil {
		return
	}
	defer f.Close()
	buf := make([]byte, bytes)
	// the msr device is addressed by register number via the file offset
	read, err := f.ReadAt(buf, int64(reg))
	if err != nil {
		return
	}
	if read != bytes {
		err = fmt.Errorf("didn't read intended number of bytes")
		return
	}
	val = binary.LittleEndian.Uint64(buf)
	val = maskUint64(msr.highBit, msr.lowBit, val)
	return
}

// SetBitRange filters bits for subsequent calls to Read* functions.
// Note: lowBit must be strictly less than highBit, so single-bit ranges are
// not supported.
func (msr *MSR) SetBitRange(highBit int, lowBit int) (err error) {
	if lowBit >= highBit {
		err = fmt.Errorf("lowBit must be less than highBit")
		return
	}
	if lowBit < 0 || lowBit > 62 {
		err = fmt.Errorf("lowBit must be a value between 0 and 62 (inclusive)")
		return
	}
	if highBit < 1 || highBit > 63 {
		err = fmt.Errorf("highBit must be a value between 1 and 63 (inclusive)")
		return
	}
	msr.lowBit = lowBit
	msr.highBit = highBit
	return
}

// ReadAll returns the register value for all cores.
func (msr *MSR) ReadAll(reg uint64) (out []uint64, err error) {
	fileNames := msr.getMSRFileNames(-1, false)
	for _, fileName := range fileNames {
		var val uint64
		val, err = msr.read(reg, fileName, 8)
		if err != nil {
			return
		}
		out = append(out, val)
	}
	return
}

// ReadOne returns the register value for the specified core.
func (msr *MSR) ReadOne(reg uint64, core int) (out uint64, err error) {
	fileNames := msr.getMSRFileNames(core, false)
	if len(fileNames) != 1 {
		err = fmt.Errorf("did not find filenames for msr,core: %d, %d", reg, core)
		return
	}
	out, err = msr.read(reg, fileNames[0], 8)
	return
}

// ReadPackages returns the specified register value for each package
// (CPU/Socket). Fails when package detection found no usable files.
func (msr *MSR) ReadPackages(reg uint64) (out []uint64, err error) {
	fileNames := msr.getMSRFileNames(-1, true)
	if len(fileNames) == 0 {
		err = fmt.Errorf("unable to identify msr files for package")
		return
	}
	for _, fileName := range fileNames {
		var val uint64
		val, err = msr.read(reg, fileName, 8)
		if err != nil {
			return
		}
		out = append(out, val)
	}
	return
}

// TestNewMSR requires /dev/cpu msr device files (root + modprobe msr).
func TestNewMSR(t *testing.T) {
	_, err := NewMSR()
	if err != nil {
		t.Fatal(err)
	}
}

// TestSetBitRange checks the range-validation boundaries.
func TestSetBitRange(t *testing.T) {
	msr, err := NewMSR()
	if err != nil {
		t.Fatal(err)
	}
	err = msr.SetBitRange(0, 1)
	if err == nil {
		t.Fatal("highBit < lowBit - should have failed")
	}
	err = msr.SetBitRange(64, 0)
	if err == nil {
		t.Fatal("highBit > 63 - should have failed")
	}
	err = msr.SetBitRange(63, 0)
	if err != nil {
		t.Fatal(err)
	}
	err = msr.SetBitRange(1, 0)
	if err != nil {
		t.Fatal(err)
	}
	err = msr.SetBitRange(63, 62)
	if err != nil {
		t.Fatal(err)
	}
}

// TestReadOne reads register 0x1B0 with and without a bit-range filter.
// NOTE(review): the inequality assumes the register's value differs from
// its low 5 bits — could be flaky on hardware where it does not.
func TestReadOne(t *testing.T) {
	msr, err := NewMSR()
	if err != nil {
		t.Fatal(err)
	}
	fullVal, err := msr.ReadOne(0x1B0, 0)
	if err != nil {
		t.Fatal(err)
	}
	err = msr.SetBitRange(4, 0)
	if err != nil {
		t.Fatal(err)
	}
	partialVal, err := msr.ReadOne(0x1B0, 0)
	if err != nil {
		t.Fatal(err)
	}
	if fullVal == partialVal {
		t.Fatal(fmt.Errorf("values should not match"))
	}
}
NewMSR() + if err != nil { + t.Fatal(err) + } + // this one should work + _, err = msr.ReadAll(0x1B0) + if err != nil { + t.Fatal(err) + } +} + +func TestReadPackage(t *testing.T) { + msr, err := NewMSR() + if err != nil { + t.Fatal(err) + } + // this one should work + _, err = msr.ReadPackages(0x1B0) + if err != nil { + t.Fatal(err) + } +} + +func TestMaskUint64(t *testing.T) { + var inputVal uint64 = 0xffffffff + outputVal := maskUint64(63, 0, inputVal) + if outputVal != inputVal { + t.Fatal("should match") + } + outputVal = maskUint64(3, 0, inputVal) + if outputVal != 0xf { + t.Fatal("should match") + } + + inputVal = 0x7857000158488 + outputVal = maskUint64(14, 0, inputVal) + if outputVal != 0x488 { + t.Fatal("should match") + } +} diff --git a/src/pkg/progress/go.mod b/src/pkg/progress/go.mod new file mode 100644 index 0000000..f4d5059 --- /dev/null +++ b/src/pkg/progress/go.mod @@ -0,0 +1,3 @@ +module intel.com/svr-info/pkg/progress/v2 + +go 1.19 diff --git a/src/pkg/progress/multispinner.go b/src/pkg/progress/multispinner.go new file mode 100644 index 0000000..b4b8e4f --- /dev/null +++ b/src/pkg/progress/multispinner.go @@ -0,0 +1,104 @@ +/* +Package progress provides CLI progress bar options. 
+*/ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package progress + +import ( + "fmt" + "os" + "sort" + "time" +) + +var spinChars []string = []string{"⣾", "⣽", "⣻", "⢿", "⡿", "⣟", "⣯", "⣷"} + +type MultiSpinnerUpdateFunc func(string, string) error + +type spinnerState struct { + status string + spinIndex int +} + +type MultiSpinner struct { + spinners map[string]*spinnerState + ticker *time.Ticker + done chan bool + spinning bool +} + +func NewMultiSpinner() *MultiSpinner { + ms := MultiSpinner{} + ms.spinners = make(map[string]*spinnerState) + ms.done = make(chan bool) + return &ms +} + +func (ms *MultiSpinner) AddSpinner(label string) (err error) { + if _, ok := ms.spinners[label]; ok { + err = fmt.Errorf("spinner with label %s already exists", label) + return + } + ms.spinners[label] = &spinnerState{"?", 0} + return +} + +func (ms *MultiSpinner) Start() { + ms.ticker = time.NewTicker(250 * time.Millisecond) + ms.spinning = true + go ms.onTick() +} + +func (ms *MultiSpinner) Finish() { + if ms.spinning { + ms.ticker.Stop() + ms.done <- true + ms.draw(false) + ms.spinning = false + } +} + +func (ms *MultiSpinner) Status(label string, status string) (err error) { + if spinner, ok := ms.spinners[label]; ok { + spinner.status = status + } else { + err = fmt.Errorf("did not find spinner with label %s", label) + return + } + return +} + +func (ms *MultiSpinner) onTick() { + for { + select { + case <-ms.done: + return + case <-ms.ticker.C: + ms.draw(true) + } + } +} + +func (ms *MultiSpinner) draw(goUp bool) { + var spinnerLabels []string + for k := range ms.spinners { + spinnerLabels = append(spinnerLabels, k) + } + sort.Strings(spinnerLabels) + for _, label := range spinnerLabels { + spinner := ms.spinners[label] + fmt.Fprintf(os.Stderr, "%-20s %s %-40s\n", label, spinChars[spinner.spinIndex], spinner.status) + spinner.spinIndex += 1 + if spinner.spinIndex >= len(spinChars) { + spinner.spinIndex = 0 + } + } + if goUp { + for 
range ms.spinners { + fmt.Fprintf(os.Stderr, "\x1b[1A") + } + } +} diff --git a/src/pkg/progress/multispinner_test.go b/src/pkg/progress/multispinner_test.go new file mode 100644 index 0000000..1e3f809 --- /dev/null +++ b/src/pkg/progress/multispinner_test.go @@ -0,0 +1,44 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package progress + +import ( + "testing" +) + +func TestNewMultiSpinner(t *testing.T) { + spinner := NewMultiSpinner() + if spinner == nil { + t.Fatal("failed to create a spinner") + } +} + +func TestMultiSpinner(t *testing.T) { + spinner := NewMultiSpinner() + if spinner == nil { + t.Fatal("failed to create a spinner") + } + if spinner.AddSpinner("A") != nil { + t.Fatal("failed to add spinner") + } + if spinner.AddSpinner("B") != nil { + t.Fatal("failed to add spinner") + } + if spinner.AddSpinner("A") == nil { + t.Fatal("added spinner with same label") + } + spinner.Start() + + if spinner.Status("A", "FOO") != nil { + t.Fatal("failed to update spinner status") + } + if spinner.Status("B", "BAR") != nil { + t.Fatal("failed to update spinner status") + } + if spinner.Status("C", "WOOPS") == nil { + t.Fatal("updated status of non-existant spinner") + } + spinner.Finish() +} diff --git a/src/pkg/target/go.mod b/src/pkg/target/go.mod new file mode 100644 index 0000000..4c77eb9 --- /dev/null +++ b/src/pkg/target/go.mod @@ -0,0 +1,7 @@ +module intel.com/svr-info/pkg/target/v2 + +go 1.19 + +require intel.com/svr-info/pkg/core v0.0.0-00010101000000-000000000000 + +replace intel.com/svr-info/pkg/core => ../core diff --git a/src/pkg/target/go.sum b/src/pkg/target/go.sum new file mode 100644 index 0000000..e69de29 diff --git a/src/pkg/target/target.go b/src/pkg/target/target.go new file mode 100644 index 0000000..e2aae75 --- /dev/null +++ b/src/pkg/target/target.go @@ -0,0 +1,458 @@ +/* +Package target provides a way to interact with local and remote systems. 
+*/ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package target + +import ( + "context" + "errors" + "fmt" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "time" + + "intel.com/svr-info/pkg/core" +) + +type Target interface { + RunCommand(*exec.Cmd) (string, string, int, error) + RunCommandWithTimeout(*exec.Cmd, int) (string, string, int, error) + CreateTempDirectory(string) (string, error) + GetArchitecture() (string, error) + PushFile(string, string) error + PullFile(string, string) error + CreateDirectory(string, string) (string, error) + RemoveDirectory(string) error + GetName() string + CanConnect() bool + GetSudo() string + SetSudo(string) +} + +type LocalTarget struct { + host string + sudo string +} + +type RemoteTarget struct { + name string + host string + port string + user string + key string + pass string + sshpassPath string + sudo string + arch string +} + +func NewRemoteTarget(name string, host string, port string, user string, key string, pass string, sshpassPath string, sudo string) *RemoteTarget { + t := RemoteTarget{name, host, port, user, key, pass, sshpassPath, sudo, ""} + return &t +} + +func NewLocalTarget(host string, sudo string) *LocalTarget { + t := LocalTarget{host, sudo} + return &t +} + +func (t *RemoteTarget) getSSHFlags(scp bool) (flags []string) { + flags = []string{ + "-2", + "-o", + "UserKnownHostsFile=/dev/null", + "-o", + "StrictHostKeyChecking=no", + "-o", + "IdentitiesOnly=yes", + "-o", + "ConnectTimeout=10", // This one exposes a bug in Windows' SSH client. Each connection takes + "-o", // 10 seconds to establish. https://github.com/PowerShell/Win32-OpenSSH/issues/1352 + "GSSAPIAuthentication=no", // This one is not supported, but is ignored on Windows. 
+ "-o", + "ServerAliveInterval=30", + "-o", + "ServerAliveCountMax=10", // 30 * 10 = maximum 300 seconds before disconnect on no data + "-o", + "ControlPath=" + filepath.Join(os.TempDir(), "%h"), // <<<<<<<<<<<<< + "-o", + "ControlMaster=auto", + "-o", + "ControlPersist=1m", + } + if t.key != "" { + keyFlags := []string{ + "-o", + "PreferredAuthentications=publickey", + "-o", + "PasswordAuthentication=no", + "-i", + t.key, + } + flags = append(flags, keyFlags...) + } + if t.port != "" { + if scp { + flags = append(flags, "-P") + } else { + flags = append(flags, "-p") + } + flags = append(flags, t.port) + } + return +} + +func (t *RemoteTarget) getSSHCommand(command []string) []string { + var cmd []string + cmd = append(cmd, "ssh") + cmd = append(cmd, t.getSSHFlags(false)...) + if t.user != "" { + cmd = append(cmd, t.user+"@"+t.host) + } else { + cmd = append(cmd, t.host) + } + cmd = append(cmd, "--") + cmd = append(cmd, command...) + return cmd +} + +func (t *RemoteTarget) getSCPCommand(src string, dstDir string, push bool) []string { + var cmd []string + cmd = append(cmd, "scp") + cmd = append(cmd, t.getSSHFlags(true)...) 
+ if push { + cmd = append(cmd, src) + dst := t.host + ":" + dstDir + if t.user != "" { + dst = t.user + "@" + dst + } + cmd = append(cmd, dst) + } else { // pull + s := t.host + ":" + src + if t.user != "" { + s = t.user + "@" + s + } + cmd = append(cmd, s) + cmd = append(cmd, dstDir) + } + return cmd +} + +func (t *LocalTarget) GetSudo() (sudo string) { + sudo = t.sudo + return +} + +func (t *RemoteTarget) GetSudo() (sudo string) { + sudo = t.sudo + return +} + +func (t *LocalTarget) SetSudo(sudo string) { + t.sudo = sudo +} + +func (t *RemoteTarget) SetSudo(sudo string) { + t.sudo = sudo +} + +func (t *LocalTarget) RunCommandWithTimeout(cmd *exec.Cmd, timeout int) (stdout string, stderr string, exitCode int, err error) { + log.Printf("run: %s", strings.Join(cmd.Args, " ")) + return RunLocalCommandWithTimeout(cmd, timeout) +} + +func (t *LocalTarget) RunCommand(cmd *exec.Cmd) (stdout string, stderr string, exitCode int, err error) { + return t.RunCommandWithTimeout(cmd, 0) +} + +func (t *RemoteTarget) RunCommandWithTimeout(cmd *exec.Cmd, timeout int) (stdout string, stderr string, exitCode int, err error) { + sshCommand := t.getSSHCommand(cmd.Args) + var name string + var args []string + if t.key == "" && t.pass != "" { + name = t.sshpassPath + args = append(args, "-e") + args = append(args, "--") + args = append(args, sshCommand...) + } else { + name = sshCommand[0] + args = sshCommand[1:] + } + localCommand := exec.Command(name, args...) 
+ if t.key == "" && t.pass != "" { + localCommand.Env = append(localCommand.Env, "SSHPASS="+t.pass) + } + logOut := strings.Join(localCommand.Args, " ") + if t.sudo != "" { + logOut = strings.Replace(logOut, "SUDO_PASSWORD="+t.sudo, "SUDO_PASSWORD=*************", -1) + } + log.Printf("run: %s", logOut) + return RunLocalCommandWithTimeout(localCommand, timeout) +} + +func (t *RemoteTarget) RunCommand(cmd *exec.Cmd) (stdout string, stderr string, exitCode int, err error) { + return t.RunCommandWithTimeout(cmd, 0) +} + +func (t *LocalTarget) GetArchitecture() (arch string, err error) { + arch = runtime.GOARCH + return +} + +func (t *RemoteTarget) GetArchitecture() (arch string, err error) { + if t.arch == "" { + cmd := exec.Command("uname", "-m") + arch, _, _, err = t.RunCommand(cmd) + if err != nil { + return + } + arch = strings.TrimSpace(arch) + t.arch = arch + } else { + arch = t.arch + } + return +} + +// CreateTempDirectory creates a temporary directory on the local target in the directory +// specified by rootDir. If rootDir is an empty string, the temporary directory will be +// created in the system's default directory for temporary files, e.g. /tmp. +// The full path to the temporary directory is returned. +func (t *LocalTarget) CreateTempDirectory(rootDir string) (tempDir string, err error) { + temp, err := os.MkdirTemp(rootDir, fmt.Sprintf("%s.tmp.", filepath.Base(os.Args[0]))) + if err != nil { + return + } + tempDir, err = core.AbsPath(temp) + return +} + +// CreateTempDirectory creates a temporary directory on the remote target in the directory +// specified by rootDir. If rootDir is an empty string, the temporary directory will be +// created in the system's default directory for temporary files, e.g. /tmp. +// The full path to the temporary directory is returned. 
+func (t *RemoteTarget) CreateTempDirectory(rootDir string) (tempDir string, err error) { + var root string + if rootDir != "" { + root = fmt.Sprintf("--tmpdir=%s", rootDir) + } + cmd := exec.Command("mktemp", "-d", "-t", root, fmt.Sprintf("%s.tmp.XXXXXXXXXX", filepath.Base(os.Args[0])), "|", "xargs", "realpath") + tempDir, _, _, err = t.RunCommand(cmd) + tempDir = strings.TrimSpace(tempDir) + return +} + +// PushFile copies file from src to dst +// +// srcPath: full path to source file +// dstPath: destination directory or full path to destination file +func (t *LocalTarget) PushFile(srcPath string, dstPath string) (err error) { + srcFileStat, err := os.Stat(srcPath) + if err != nil { + log.Printf("failed to stat: %s", srcPath) + return + } + if !srcFileStat.Mode().IsRegular() { + err = fmt.Errorf("%s is not a regular file", srcPath) + return + } + srcFile, err := os.Open(srcPath) + if err != nil { + log.Printf("failed to open: %s", srcPath) + return + } + defer srcFile.Close() + dstFileStat, err := os.Stat(dstPath) + var dstFilename string + if err == nil && dstFileStat.IsDir() { + dstFilename = filepath.Join(dstPath, filepath.Base(srcPath)) + } else { + dstFilename = dstPath + } + dstFile, err := os.Create(dstFilename) + if err != nil { + log.Printf("failed to create: %s", dstFilename) + return + } + _, err = io.Copy(dstFile, srcFile) + dstFile.Close() + if err != nil { + log.Printf("failed to copy %s to %s", srcPath, dstFilename) + } + err = os.Chmod(dstFilename, srcFileStat.Mode()) + if err != nil { + log.Printf("failed to set file mode for %s", dstFilename) + } + return +} + +func (t *RemoteTarget) PushFile(srcPath string, dstDir string) (err error) { + scpCommand := t.getSCPCommand(srcPath, dstDir, true) + var name string + var args []string + if t.key == "" && t.pass != "" { + name = t.sshpassPath + args = append(args, "-e") + args = append(args, "--") + args = append(args, scpCommand...) 
+ } else { + name = scpCommand[0] + args = scpCommand[1:] + } + localCommand := exec.Command(name, args...) + if t.key == "" && t.pass != "" { + localCommand.Env = append(localCommand.Env, "SSHPASS="+t.pass) + } + log.Printf("run: %s", strings.Join(localCommand.Args, " ")) + _, _, _, err = RunLocalCommand(localCommand) + return +} + +func (t *LocalTarget) PullFile(srcPath string, dstDir string) (err error) { + err = t.PushFile(srcPath, dstDir) + return +} + +func (t *RemoteTarget) PullFile(srcPath string, dstDir string) (err error) { + scpCommand := t.getSCPCommand(srcPath, dstDir, false) + var name string + var args []string + if t.key == "" && t.pass != "" { + name = t.sshpassPath + args = append(args, "-e") + args = append(args, "--") + args = append(args, scpCommand...) + } else { + name = scpCommand[0] + args = scpCommand[1:] + } + localCommand := exec.Command(name, args...) + if t.key == "" && t.pass != "" { + localCommand.Env = append(localCommand.Env, "SSHPASS="+t.pass) + } + log.Printf("run: %s", strings.Join(localCommand.Args, " ")) + _, _, _, err = RunLocalCommand(localCommand) + return +} + +func (t *LocalTarget) CreateDirectory(baseDir string, targetDir string) (dir string, err error) { + dir = filepath.Join(baseDir, targetDir) + err = os.Mkdir(dir, 0764) + return +} + +func (t *RemoteTarget) CreateDirectory(baseDir string, targetDir string) (dir string, err error) { + dir = filepath.Join(baseDir, targetDir) + cmd := exec.Command("mkdir", dir) + _, _, _, err = t.RunCommand(cmd) + return +} + +func (t *LocalTarget) RemoveDirectory(targetDir string) (err error) { + err = os.RemoveAll(targetDir) + return +} + +func (t *RemoteTarget) RemoveDirectory(targetDir string) (err error) { + cmd := exec.Command("rm", "-rf", targetDir) + _, _, _, err = t.RunCommand(cmd) + return +} + +func (t *LocalTarget) GetHost() (host string) { + host = t.host + return +} + +func (t *RemoteTarget) GetHost() (host string) { + host = t.host + return +} + +func (t *LocalTarget) 
GetName() (host string) { + host = t.host //local target host and name are same field + return +} +func (t *RemoteTarget) GetName() (host string) { + host = t.name + return +} + +func (t *LocalTarget) CanConnect() bool { + return true +} + +func (t *RemoteTarget) CanConnect() bool { + cmd := exec.Command("exit", "0") + _, _, _, err := t.RunCommand(cmd) + return err == nil +} + +func (t *LocalTarget) CanElevatePrivileges() bool { + if os.Geteuid() == 0 { + return true // user is root + } + if t.sudo != "" { + cmd := exec.Command("sudo", "-kS", "ls") + stdin, _ := cmd.StdinPipe() + go func() { + defer stdin.Close() + io.WriteString(stdin, t.sudo+"\n") + }() + _, _, _, err := t.RunCommand(cmd) + if err == nil { + return true // sudo password works + } + } + cmd := exec.Command("sudo", "-kS", "ls") + _, _, _, err := t.RunCommand(cmd) + return err == nil // true - passwordless sudo works +} + +func RunLocalCommandWithInputWithTimeout(cmd *exec.Cmd, input string, timeout int) (stdout string, stderr string, exitCode int, err error) { + if timeout > 0 { + var cancel context.CancelFunc + ctx, cancel := context.WithTimeout(context.Background(), time.Duration(timeout)*time.Second) + defer cancel() + commandWithContext := exec.CommandContext(ctx, cmd.Path, cmd.Args[1:]...) 
+ commandWithContext.Env = cmd.Env + cmd = commandWithContext + } + if input != "" { + cmd.Stdin = strings.NewReader(input) + } + var outbuf, errbuf strings.Builder + cmd.Stdout = &outbuf + cmd.Stderr = &errbuf + err = cmd.Run() + stdout = outbuf.String() + stderr = errbuf.String() + if err != nil { + exitError := &exec.ExitError{} + if errors.As(err, &exitError) { + exitCode = exitError.ExitCode() + } + } + return +} + +func RunLocalCommandWithTimeout(cmd *exec.Cmd, timeout int) (stdout string, stderr string, exitCode int, err error) { + return RunLocalCommandWithInputWithTimeout(cmd, "", timeout) +} + +func RunLocalCommandWithInput(cmd *exec.Cmd, input string) (stdout string, stderr string, exitCode int, err error) { + return RunLocalCommandWithInputWithTimeout(cmd, input, 0) +} + +func RunLocalCommand(cmd *exec.Cmd) (stdout string, stderr string, exitCode int, err error) { + return RunLocalCommandWithInput(cmd, "") +} diff --git a/src/pkg/target/target_test.go b/src/pkg/target/target_test.go new file mode 100644 index 0000000..1262404 --- /dev/null +++ b/src/pkg/target/target_test.go @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package target + +import ( + "testing" +) + +func TestNew(t *testing.T) { + localTarget := NewLocalTarget("hostname", "sudo") + if localTarget == nil { + t.Fatal("failed to create a local target") + } + remoteTarget := NewRemoteTarget("label", "hostname", "22", "user", "key", "pass", "sshpass", "sudo") + if remoteTarget == nil { + t.Fatal("failed to create a remote target") + } +} diff --git a/src/rdmsr/go.mod b/src/rdmsr/go.mod new file mode 100644 index 0000000..6ed1c3d --- /dev/null +++ b/src/rdmsr/go.mod @@ -0,0 +1,7 @@ +module intel.com/svr-info/rdmsr/v2 + +go 1.19 + +replace intel.com/svr-info/pkg/msr => ../pkg/msr + +require intel.com/svr-info/pkg/msr v0.0.0-00010101000000-000000000000 diff --git a/src/rdmsr/main.go b/src/rdmsr/main.go new file mode 100644 index 
0000000..51cdd5e --- /dev/null +++ b/src/rdmsr/main.go @@ -0,0 +1,159 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package main + +import ( + "flag" + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "intel.com/svr-info/pkg/msr" +) + +type CmdLineArgs struct { + help bool + version bool + all bool + processor int + socket bool + bitrange string + msr uint64 +} + +// globals +var ( + gVersion string = "dev" // build overrides this, see makefile + gCmdLineArgs CmdLineArgs +) + +func showUsage() { + appName := filepath.Base(os.Args[0]) + fmt.Fprintf(os.Stderr, "Usage: %s msr\n", appName) + fmt.Fprintf(os.Stderr, "Example: %s -p 1 0x123\n", appName) + flag.PrintDefaults() +} + +func showVersion() { + fmt.Println(gVersion) +} + +func parseBitrangeArg() (highBit, lowBit int, err error) { + bitrangeOK := false + fields := strings.Split(gCmdLineArgs.bitrange, ":") + if len(fields) == 2 { + highBit, err = strconv.Atoi(fields[0]) + if err == nil && highBit > 0 && highBit <= 63 { + lowBit, err = strconv.Atoi(fields[1]) + if err == nil && lowBit >= 0 && lowBit < 63 { + if highBit > lowBit { + bitrangeOK = true + } + } + } + } + if !bitrangeOK { + err = fmt.Errorf("failed to parse bit range: %s", gCmdLineArgs.bitrange) + } + return +} + +func init() { + // init command line flags + flag.Usage = func() { showUsage() } // override default usage output + flag.BoolVar(&gCmdLineArgs.help, "h", false, "Print this usage message.") + flag.BoolVar(&gCmdLineArgs.version, "v", false, "Print program version.") + flag.BoolVar(&gCmdLineArgs.all, "a", false, "Read for all processors.") + flag.IntVar(&gCmdLineArgs.processor, "p", 0, "Select processor number.") + flag.BoolVar(&gCmdLineArgs.socket, "s", false, "Read for one processor on each socket (package/CPU).") + flag.StringVar(&gCmdLineArgs.bitrange, "f", "", "Output bits [h:l] only") + flag.Parse() + if gCmdLineArgs.help || gCmdLineArgs.version { + return + } + // positional arg + if 
flag.NArg() < 1 { + flag.Usage() + os.Exit(1) + } else { + msrHex := flag.Arg(0) + if len(msrHex) > 2 && msrHex[:2] == "0x" { + msrHex = msrHex[2:] + } + msr, err := strconv.ParseInt(msrHex, 16, 0) + if err != nil { + fmt.Fprintf(os.Stderr, "Could not parse provided msr address: %v\n", err) + showUsage() + os.Exit(1) + } + gCmdLineArgs.msr = uint64(msr) + } + // validate input flag arguments + if gCmdLineArgs.bitrange != "" { + _, _, err := parseBitrangeArg() + if err != nil { + fmt.Fprintln(os.Stderr, err) + showUsage() + os.Exit(1) + } + } +} + +func mainReturnWithCode() int { + if gCmdLineArgs.help { + showUsage() + return 0 + } + if gCmdLineArgs.version { + showVersion() + return 0 + } + msrReader, err := msr.NewMSR() + if err != nil { + fmt.Fprintln(os.Stderr, err) + return 1 + } + if gCmdLineArgs.bitrange != "" { + highBit, lowBit, _ := parseBitrangeArg() + err = msrReader.SetBitRange(highBit, lowBit) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return 1 + } + } + var vals []uint64 + if gCmdLineArgs.all { + vals, err = msrReader.ReadAll(gCmdLineArgs.msr) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return 1 + } + } else if gCmdLineArgs.socket { + vals, err = msrReader.ReadPackages(gCmdLineArgs.msr) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return 1 + } + } else { + val, err := msrReader.ReadOne(gCmdLineArgs.msr, gCmdLineArgs.processor) + if err != nil { + fmt.Fprintln(os.Stderr, err) + return 1 + } + vals = append(vals, val) + } + format := "%016x\n" + if gCmdLineArgs.bitrange != "" { // don't pad output if bitrange requested + format = "%x\n" + } + for _, val := range vals { + fmt.Printf(format, val) + } + return 0 +} + +func main() { os.Exit(mainReturnWithCode()) } diff --git a/src/reporter/go.mod b/src/reporter/go.mod new file mode 100644 index 0000000..24c8d6d --- /dev/null +++ b/src/reporter/go.mod @@ -0,0 +1,52 @@ +module intel.com/svr-info/reporter/v2 + +go 1.19 + +replace intel.com/svr-info/pkg/core => ../pkg/core + +replace 
intel.com/svr-info/pkg/cpu => ../pkg/cpu + +replace intel.com/svr-info/pkg/msr => ../pkg/msr + +replace intel.com/svr-info/pkg/progress => ../pkg/progress + +replace intel.com/svr-info/pkg/target => ../pkg/target + +require ( + github.com/google/go-cmp v0.5.9 + github.com/hyperjumptech/grule-rule-engine v1.13.0 + github.com/xuri/excelize/v2 v2.7.0 + gopkg.in/yaml.v2 v2.4.0 + intel.com/svr-info/pkg/core v0.0.0-00010101000000-000000000000 + intel.com/svr-info/pkg/cpu v0.0.0-00010101000000-000000000000 + intel.com/svr-info/pkg/target v0.0.0-00010101000000-000000000000 +) + +require ( + github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220527190237-ee62e23da966 // indirect + github.com/bmatcuk/doublestar v1.3.2 // indirect + github.com/emirpasic/gods v1.12.0 // indirect + github.com/google/uuid v1.1.1 // indirect + github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect + github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd // indirect + github.com/mitchellh/go-homedir v1.1.0 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect + github.com/richardlehane/mscfb v1.0.4 // indirect + github.com/richardlehane/msoleps v1.0.3 // indirect + github.com/sergi/go-diff v1.0.0 // indirect + github.com/sirupsen/logrus v1.8.1 // indirect + github.com/src-d/gcfg v1.4.0 // indirect + github.com/xanzy/ssh-agent v0.2.1 // indirect + github.com/xuri/efp v0.0.0-20220603152613-6918739fd470 // indirect + github.com/xuri/nfp v0.0.0-20220409054826-5e722a1d9e22 // indirect + go.uber.org/atomic v1.7.0 // indirect + go.uber.org/multierr v1.6.0 // indirect + go.uber.org/zap v1.21.0 // indirect + golang.org/x/crypto v0.5.0 // indirect + golang.org/x/net v0.5.0 // indirect + golang.org/x/sys v0.4.0 // indirect + golang.org/x/text v0.6.0 // indirect + gopkg.in/src-d/go-billy.v4 v4.3.2 // indirect + gopkg.in/src-d/go-git.v4 v4.13.1 // indirect + gopkg.in/warnings.v0 v0.1.2 // indirect +) diff --git a/src/reporter/go.sum 
b/src/reporter/go.sum new file mode 100644 index 0000000..305f6f1 --- /dev/null +++ b/src/reporter/go.sum @@ -0,0 +1,160 @@ +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs= +github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA= +github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220527190237-ee62e23da966 h1:mEzJ8SH4M5wDL8C4a17yX2YeD/FIXV5w8FJekByaBi0= +github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20220527190237-ee62e23da966/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= +github.com/bmatcuk/doublestar v1.3.2 h1:mzUncgFmpzNUhIITFqGdZ8nUU0O7JTJzRO8VdkeLCSo= +github.com/bmatcuk/doublestar v1.3.2/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= +github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= +github.com/flynn/go-shlex 
v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= +github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0= +github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hyperjumptech/grule-rule-engine v1.13.0 h1:YwPlzazuDSD+gQsOvWINTSJ+EP/rIqCrfSMHhqPwPqI= +github.com/hyperjumptech/grule-rule-engine v1.13.0/go.mod h1:kLuSOPGiB3U1FPvsThGaQhniDlWpWIvdIvCNI9hvCxU= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= +github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd h1:Coekwdh0v2wtGp9Gmz1Ze3eVRAWJMLokvN3QjdzCHLY= +github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mitchellh/go-homedir v1.1.0 
h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= +github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/richardlehane/mscfb v1.0.4 h1:WULscsljNPConisD5hR0+OyZjwK46Pfyr6mPu5ZawpM= +github.com/richardlehane/mscfb v1.0.4/go.mod h1:YzVpcZg9czvAuhk9T+a3avCpcFPMUWm7gK3DypaEsUk= +github.com/richardlehane/msoleps v1.0.1/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg= +github.com/richardlehane/msoleps v1.0.3 h1:aznSZzrwYRl3rLKRT3gUk9am7T/mLNSnJINvN0AQoVM= +github.com/richardlehane/msoleps v1.0.3/go.mod h1:BWev5JBpU9Ko2WAgmZEuiz4/u3ZYTKbjLycmwiWUfWg= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/src-d/gcfg v1.4.0 h1:xXbNR5AlLSA315x2UO+fTSSAXCDf+Ar38/6oyGbDKQ4= +github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod 
h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/xanzy/ssh-agent v0.2.1 h1:TCbipTQL2JiiCprBWx9frJ2eJlCYT00NmctrHxVAr70= +github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= +github.com/xuri/efp v0.0.0-20220603152613-6918739fd470 h1:6932x8ltq1w4utjmfMPVj09jdMlkY0aiA6+Skbtl3/c= +github.com/xuri/efp v0.0.0-20220603152613-6918739fd470/go.mod h1:ybY/Jr0T0GTCnYjKqmdwxyxn2BQf2RcQIIvex5QldPI= +github.com/xuri/excelize/v2 v2.7.0 h1:Hri/czwyRCW6f6zrCDWXcXKshlq4xAZNpNOpdfnFhEw= +github.com/xuri/excelize/v2 v2.7.0/go.mod h1:ebKlRoS+rGyLMyUx3ErBECXs/HNYqyj+PbkkKRK5vSI= +github.com/xuri/nfp v0.0.0-20220409054826-5e722a1d9e22 h1:OAmKAfT06//esDdpi/DZ8Qsdt4+M5+ltca05dA5bG2M= +github.com/xuri/nfp v0.0.0-20220409054826-5e722a1d9e22/go.mod h1:WwHg+CVyzlv/TX9xqBFXEZAuxOPxn2k1GNHwG41IIUQ= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= +go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= +go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod 
h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= +go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= +go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= +golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= +golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/image v0.0.0-20220902085622-e7cb96979f69 h1:Lj6HJGCSn5AjxRAH2+r35Mir4icalbqku+CLUtjnvXY= +golang.org/x/image v0.0.0-20220902085622-e7cb96979f69/go.mod h1:doUCurBvlfPMKfmIpRIywoHmhN3VyhnoFDbvIEWF4hY= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net 
v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= +golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= +golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.4.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg= +golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 
v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/src-d/go-billy.v4 v4.3.2 h1:0SQA1pRztfTFx2miS8sA97XvooFeNOmvUenF4o0EcVg= +gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= +gopkg.in/src-d/go-git-fixtures.v3 v3.5.0 h1:ivZFOIltbce2Mo8IjzUHAFoq/IylO9WHhNOAJK+LsJg= +gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= +gopkg.in/src-d/go-git.v4 v4.13.1 h1:SRtFyV8Kxc0UP7aCHcijOMQGPxHSmMOPrzulQWolkYE= +gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= +gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= +gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/src/reporter/main.go b/src/reporter/main.go new file mode 100644 index 0000000..7eea047 --- /dev/null +++ b/src/reporter/main.go @@ -0,0 +1,273 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package main + +import ( + "flag" + "fmt" + "io/fs" + "log" + "os" + "path/filepath" + "strings" + + "intel.com/svr-info/pkg/core" + "intel.com/svr-info/pkg/cpu" +) + +type CmdLineArgs struct { + help bool + version bool + format string + input string + output string + 
internalJSON bool +} + +// globals +var ( + gVersion string = "dev" // build overrides this, see makefile + gCmdLineArgs CmdLineArgs +) + +func showUsage() { + flag.PrintDefaults() +} + +func showVersion() { + fmt.Println(gVersion) +} + +func init() { + // init command line flags + flag.Usage = func() { showUsage() } // override default usage output + flag.BoolVar(&gCmdLineArgs.help, "h", false, "Print this usage message.") + flag.BoolVar(&gCmdLineArgs.version, "v", false, "Print program version.") + flag.StringVar(&gCmdLineArgs.format, "format", "html", "comma separated list of desired report format(s):"+strings.Join(core.ReportTypes[:len(core.ReportTypes)-1], ", ")+", or all") + flag.StringVar(&gCmdLineArgs.input, "input", "", "required, comma separated list of input files or directory containing input (*.raw.json) files") + flag.StringVar(&gCmdLineArgs.output, "output", ".", "output directory") + flag.BoolVar(&gCmdLineArgs.internalJSON, "internal_json", false, "Produce the internal json format introduced in the 2.0 release. This option is deprecated. 
Recommend transitioning to the new JSON report format ASAP.") + flag.Parse() + // validate input flag arguments + // -format + if gCmdLineArgs.format != "" { + reportTypes := strings.Split(gCmdLineArgs.format, ",") + for _, reportType := range reportTypes { + if !core.IsValidReportType(reportType) { + fmt.Fprintf(os.Stderr, "-report %s : invalid report type: %s\n", gCmdLineArgs.format, reportType) + os.Exit(1) + } + } + } + // -input + if gCmdLineArgs.input != "" { + inputPaths := strings.Split(gCmdLineArgs.input, ",") + for _, inputPath := range inputPaths { + path, err := core.AbsPath(inputPath) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } + fileInfo, err := os.Stat(path) + if err != nil { + fmt.Fprintf(os.Stderr, "-input %s : file (or directory) does not exist\n", path) + os.Exit(1) + } + if !fileInfo.Mode().IsRegular() && !fileInfo.Mode().IsDir() { + fmt.Fprintf(os.Stderr, "-input %s : must be a file or directory\n", path) + os.Exit(1) + } + } + } else if !gCmdLineArgs.help && !gCmdLineArgs.version { + fmt.Fprintf(os.Stderr, "-input : input file list or directory is required\n") + showUsage() + os.Exit(1) + } + // -output + if gCmdLineArgs.output != "" { + path, err := core.AbsPath(gCmdLineArgs.output) + if err != nil { + fmt.Fprintf(os.Stderr, "%v\n", err) + os.Exit(1) + } + fileInfo, err := os.Stat(path) + if err != nil { + fmt.Fprintf(os.Stderr, "-output %s : directory does not exist\n", path) + os.Exit(1) + } + if !fileInfo.IsDir() { + fmt.Fprintf(os.Stderr, "-output %s : must be a directory\n", path) + os.Exit(1) + } + } +} + +func getInputFilePaths(input string) (inputFilePaths []string, err error) { + paths := strings.Split(input, ",") + for _, filename := range paths { + var fileInfo fs.FileInfo + fileInfo, err = os.Stat(filename) + if err != nil { + err = fmt.Errorf("%w: %s", err, filename) + return + } + if fileInfo.Mode().IsRegular() { + inputFilePaths = append(inputFilePaths, filename) + } else if fileInfo.IsDir() { + var 
matches []string + matches, err = filepath.Glob(filepath.Join(filename, "*.raw.json")) + if err != nil { + return + } + inputFilePaths = append(inputFilePaths, matches...) + } + } + return +} + +func getOutputDir(input string) (outputDir string, err error) { + fileInfo, err := os.Stat(input) + if err != nil { + err = fmt.Errorf("%w: %s", err, input) + return + } + if !fileInfo.IsDir() { + err = fmt.Errorf("%s is not a directory", input) + return + } + outputDir = input + return +} + +func getSources(inputFilePaths []string) (sources []*Source) { + for _, inputFilePath := range inputFilePaths { + source := newSource(inputFilePath) + err := source.parse() + if err != nil { + log.Printf("Failed to parse %s: %v", inputFilePath, err) + continue + } + sources = append(sources, source) + } + return +} + +func getCpusInfo() (cpusInfo *cpu.CPU, err error) { + // public cpus are required + cpusYaml, err := core.FindAsset("cpus.yaml") + if err != nil { + err = fmt.Errorf("failed to find cpus.yaml: %v", err) + return + } + cpusInfo, err = cpu.NewCPU([]string{cpusYaml}) + return +} + +func getReports(sources []*Source, reportTypes []string, outputDir string) (reportFilePaths []string, err error) { + cpusInfo, err := getCpusInfo() + if err != nil { + return + } + configReport := NewConfigurationReport(sources, cpusInfo) + briefReport := NewBriefReport(sources, configReport, cpusInfo) + profileReport := NewProfileReport(sources) + analyzeReport := NewAnalyzeReport(sources) + benchmarkReport := NewBenchmarkReport(sources) + insightsReport := NewInsightsReport(sources, configReport, briefReport, profileReport, benchmarkReport, analyzeReport, cpusInfo) + var rpt ReportGenerator + for _, rt := range reportTypes { + switch rt { + case "html": + rpt = newReportGeneratorHTML(outputDir, cpusInfo, configReport, insightsReport, profileReport, benchmarkReport, analyzeReport) + case "json": + if gCmdLineArgs.internalJSON { + rpt = newReportGeneratorJSON(outputDir, configReport, 
insightsReport, profileReport, benchmarkReport, analyzeReport) + } else { + rpt = newReportGeneratorJSONSimplified(outputDir, configReport, briefReport, insightsReport, profileReport, benchmarkReport, analyzeReport) + } + case "xlsx": + rpt = newReportGeneratorXLSX(outputDir, configReport, briefReport, insightsReport, profileReport, benchmarkReport, analyzeReport) // only Excel has 'brief' report + case "txt": + rpt = newReportGeneratorTXT(sources, outputDir) // txt report is special...more of a raw data dump than a report + default: + err = fmt.Errorf("unsupported report type: %s", rt) + return + } + var reportPaths []string + reportPaths, err = rpt.generate() + if err != nil { + return + } + reportFilePaths = append(reportFilePaths, reportPaths...) + } + return +} + +func mainReturnWithCode() int { + if gCmdLineArgs.help { + showUsage() + return 0 + } + if gCmdLineArgs.version { + showVersion() + return 0 + } + outputDir, err := getOutputDir(gCmdLineArgs.output) + if err != nil { + log.Printf("Error: %v", err) + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + return 1 + } + logFilename := filepath.Base(os.Args[0]) + ".log" + logFile, err := os.OpenFile(filepath.Join(outputDir, logFilename), os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644) + if err != nil { + log.Printf("Error: %v", err) + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + return 1 + } + defer logFile.Close() + log.SetOutput(logFile) + log.SetFlags(log.LstdFlags | log.Lmicroseconds | log.Lshortfile) + log.Printf("Starting up %s, version %s, PID %d, PPID %d, arguments: %s", + filepath.Base(os.Args[0]), + gVersion, + os.Getpid(), + os.Getppid(), + strings.Join(os.Args, " "), + ) + + inputFilePaths, err := getInputFilePaths(gCmdLineArgs.input) + if err != nil { + log.Printf("Error: %v", err) + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + return 1 + } + reportTypes, err := core.GetReportTypes(gCmdLineArgs.format) + if err != nil { + log.Printf("Error: %v", err) + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + return 1 
+ } + sources := getSources(inputFilePaths) + if len(sources) == 0 { + err = fmt.Errorf("no input files found") + log.Printf("Error: %v", err) + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + return 1 + } + reportFilePaths, err := getReports(sources, reportTypes, outputDir) + if err != nil { + log.Printf("Error: %v", err) + fmt.Fprintf(os.Stderr, "Error: %v\n", err) + return 1 + } + for _, reportFilePath := range reportFilePaths { + log.Printf("Created report: %s", reportFilePath) + fmt.Println(reportFilePath) + } + return 0 +} + +func main() { os.Exit(mainReturnWithCode()) } diff --git a/src/reporter/process_stacks.go b/src/reporter/process_stacks.go new file mode 100644 index 0000000..7bbcd8b --- /dev/null +++ b/src/reporter/process_stacks.go @@ -0,0 +1,160 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +/* process_stacks implements the ProcessStacks type and related helper functions */ + +package main + +import ( + "fmt" + "math" + "regexp" + "strconv" + "strings" +) + +// ProcessStacks ... 
+// [processName][callStack]=count +type ProcessStacks map[string]Stacks + +type Stacks map[string]int + +// example folded stack: +// swapper;secondary_startup_64_no_verify;start_secondary;cpu_startup_entry;arch_cpu_idle_enter 10523019 + +func (p *ProcessStacks) parsePerfFolded(folded string) (err error) { + re := regexp.MustCompile(`^([\w,\-, ,\.]+);(.+) (\d+)$`) + for _, line := range strings.Split(folded, "\n") { + match := re.FindStringSubmatch(line) + if match == nil { + continue + } + processName := match[1] + stack := match[2] + count, err := strconv.Atoi(match[3]) + if err != nil { + continue + } + if _, ok := (*p)[processName]; !ok { + (*p)[processName] = make(Stacks) + } + (*p)[processName][stack] = count + } + return +} + +func (p *ProcessStacks) parseAsyncProfilerFolded(folded string, processName string) (err error) { + re := regexp.MustCompile(`^(.+) (\d+)$`) + for _, line := range strings.Split(folded, "\n") { + match := re.FindStringSubmatch(line) + if match == nil { + continue + } + stack := match[1] + count, err := strconv.Atoi(match[2]) + if err != nil { + continue + } + if _, ok := (*p)[processName]; !ok { + (*p)[processName] = make(Stacks) + } + (*p)[processName][stack] = count + } + return +} + +func (p *ProcessStacks) totalSamples() (count int) { + count = 0 + for _, stacks := range *p { + for _, stackCount := range stacks { + count += stackCount + } + } + return +} + +func (p *ProcessStacks) scaleCounts(ratio float64) { + for processName, stacks := range *p { + for stack, stackCount := range stacks { + (*p)[processName][stack] = int(math.Round(float64(stackCount) * ratio)) + } + } +} + +func (p *ProcessStacks) averageDepth(processName string) (average float64) { + if _, ok := (*p)[processName]; !ok { + average = 0 + return + } + total := 0 + count := 0 + for stack := range (*p)[processName] { + total += len(strings.Split(stack, ";")) + count += 1 + } + average = float64(total) / float64(count) + return +} + +func (p *ProcessStacks) 
dumpFolded() (folded string) { + for processName, stacks := range *p { + for stack, stackCount := range stacks { + folded += fmt.Sprintf("%s;%s %d\n", processName, stack, stackCount) + } + } + return +} + +// helper functions below + +// mergeJavaFolded -- merge profiles from N java processes +func mergeJavaFolded(javaFolded map[string]string) (merged string, err error) { + javaStacks := make(ProcessStacks) + for processName, stacks := range javaFolded { + err = javaStacks.parseAsyncProfilerFolded(stacks, processName) + if err != nil { + continue + } + } + merged = javaStacks.dumpFolded() + return +} + +// mergeSystemFolded -- merge the two sets of system perf stacks into one set +// For every process, get the average depth of stacks from Fp and Dwarf. +// The stacks with the deepest average (per process) will be retained in the +// merged set. +// The Dwarf stack counts will be scaled to the FP stack counts. +func mergeSystemFolded(perfFp string, perfDwarf string) (merged string, err error) { + fpStacks := make(ProcessStacks) + err = fpStacks.parsePerfFolded(perfFp) + if err != nil { + return + } + dwarfStacks := make(ProcessStacks) + err = dwarfStacks.parsePerfFolded(perfDwarf) + if err != nil { + return + } + fpSampleCount := fpStacks.totalSamples() + dwarfSampleCount := dwarfStacks.totalSamples() + fpToDwarfScalingRatio := float64(fpSampleCount) / float64(dwarfSampleCount) + dwarfStacks.scaleCounts(fpToDwarfScalingRatio) + + // for every process in fpStacks, get the average stack depth from + // fpStacks and dwarfStacks, choose the deeper stack for the merged set + mergedStacks := make(ProcessStacks) + for processName := range fpStacks { + fpDepth := fpStacks.averageDepth(processName) + dwarfDepth := dwarfStacks.averageDepth(processName) + if fpDepth >= dwarfDepth { + mergedStacks[processName] = fpStacks[processName] + } else { + mergedStacks[processName] = dwarfStacks[processName] + } + } + + merged = mergedStacks.dumpFolded() + return +} diff --git 
a/src/reporter/report.go b/src/reporter/report.go new file mode 100644 index 0000000..fd222dc --- /dev/null +++ b/src/reporter/report.go @@ -0,0 +1,245 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +/* Defines the reports (e.g., Full, Brief, etc.) */ + +package main + +import ( + "log" + + "intel.com/svr-info/pkg/cpu" +) + +// Report ... all sources & tables that define a report +type Report struct { + InternalName string // the value set here needs to remain consistent for users who parse the json report + Sources []*Source + Tables []*Table +} + +// NewConfigurationReport -- includes all verbose tables +func NewConfigurationReport(sources []*Source, cpusInfo *cpu.CPU) (report *Report) { + report = &Report{ + InternalName: "Configuration", + Sources: sources, + Tables: []*Table{}, + } + + report.Tables = append(report.Tables, + []*Table{ + newHostTable(sources, System), + newSystemTable(sources, System), + newBaseboardTable(sources, System), + newChassisTable(sources, System), + newPCIeSlotsTable(sources, System), + + newBIOSTable(sources, Software), + newOperatingSystemTable(sources, Software), + newSoftwareTable(sources, Software), + + newCPUTable(sources, cpusInfo, CPU), + newISATable(sources, CPU), + newAcceleratorTable(sources, CPU), + newFeatureTable(sources, CPU), + newUncoreTable(sources, CPU), + + newPowerTable(sources, Power), + }..., + ) + + tableDIMM := newDIMMTable(sources, Memory) + tableDIMMPopulation := newDIMMPopulationTable(sources, tableDIMM, cpusInfo, Memory) + + report.Tables = append(report.Tables, + []*Table{ + newMemoryTable(sources, tableDIMM, tableDIMMPopulation, Memory), + tableDIMMPopulation, + tableDIMM, + + newNICTable(sources, Network), + newNetworkIRQTable(sources, Network), + + newDiskTable(sources, Storage), + newFilesystemTable(sources, Storage), + + newGPUTable(sources, GPU), + + newCXLDeviceTable(sources, CXL), + + newVulnerabilityTable(sources, Security), + + newProcessTable(sources, 
Status), + newSensorTable(sources, Status), + newChassisStatusTable(sources, Status), + newSystemEventLogTable(sources, Status), + newKernelLogTable(sources, Status), + newPMUTable(sources, Status), + }..., + ) + // TODO: remove check when code is stable + for _, table := range report.Tables { + check(table, sources) + } + return +} + +func NewBriefReport(sources []*Source, fullReport *Report, cpusInfo *cpu.CPU) (report *Report) { + report = &Report{ + InternalName: "Brief", + Sources: sources, + Tables: []*Table{}, + } + tableDiskSummary := newDiskSummaryTable(fullReport.findTable("Disk"), Storage) + tableNicSummary := newNICSummaryTable(fullReport.findTable("NIC"), Network) + report.Tables = append(report.Tables, + []*Table{ + fullReport.findTable("Host"), + newSystemSummaryTable(fullReport.findTable("System"), System), + newBaseboardSummaryTable(fullReport.findTable("Baseboard"), System), + newChassisSummaryTable(fullReport.findTable("Chassis"), System), + newCPUBriefTable(fullReport.findTable("CPU"), CPU), + newAcceleratorSummaryTable(fullReport.findTable("Accelerator"), CPU), + newMemoryBriefTable(fullReport.findTable("Memory"), Memory), + tableNicSummary, + tableDiskSummary, + newBIOSSummaryTable(fullReport.findTable("BIOS"), Software), + newOperatingSystemBriefTable(fullReport.findTable("Operating System"), Software), + fullReport.findTable("Power"), + newVulnerabilitySummaryTable(fullReport.findTable("Vulnerability"), Security), + newMarketingClaimTable(fullReport, tableNicSummary, tableDiskSummary, NoCategory), + }..., + ) + // TODO: remove check when code is stable + for _, table := range report.Tables { + check(table, sources) + } + return +} + +func NewInsightsReport(sources []*Source, configReport, briefReport, profileReport, benchmarkReport *Report, analyzeReport *Report, cpusInfo *cpu.CPU) (report *Report) { + report = &Report{ + InternalName: "Recommendations", + Sources: sources, + Tables: []*Table{}, + } + report.Tables = append(report.Tables, + 
[]*Table{ + newInsightTable(sources, configReport, briefReport, profileReport, benchmarkReport, analyzeReport, cpusInfo), + }..., + ) + // TODO: remove check when code is stable + for _, table := range report.Tables { + check(table, sources) + } + return +} + +func NewProfileReport(sources []*Source) (report *Report) { + report = &Report{ + InternalName: "Profile", + Sources: sources, + Tables: []*Table{}, + } + averageCPUUtilizationTable := newAverageCPUUtilizationTable(sources, NoCategory) + CPUUtilizationTable := newCPUUtilizationTable(sources, NoCategory) + IRQRateTable := newIRQRateTable(sources, NoCategory) + driveStatsTable := newDriveStatsTable(sources, NoCategory) + netStatsTable := newNetworkStatsTable(sources, NoCategory) + memStatsTable := newMemoryStatsTable(sources, NoCategory) + summaryTable := newProfileSummaryTable(sources, NoCategory, averageCPUUtilizationTable, CPUUtilizationTable, IRQRateTable, driveStatsTable, netStatsTable, memStatsTable) + report.Tables = append(report.Tables, + []*Table{ + summaryTable, + averageCPUUtilizationTable, + CPUUtilizationTable, + IRQRateTable, + driveStatsTable, + netStatsTable, + memStatsTable, + }..., + ) + // TODO: remove check when code is stable + for _, table := range report.Tables { + check(table, sources) + } + return +} + +func NewAnalyzeReport(sources []*Source) (report *Report) { + report = &Report{ + InternalName: "Analyze", + Sources: sources, + Tables: []*Table{}, + } + report.Tables = append(report.Tables, + []*Table{ + newCodePathTable(sources, NoCategory), + }..., + ) + // TODO: remove check when code is stable + for _, table := range report.Tables { + check(table, sources) + } + return +} + +func NewBenchmarkReport(sources []*Source) (report *Report) { + report = &Report{ + InternalName: "Performance", + Sources: sources, + Tables: []*Table{}, + } + tableMemBandwidthLatency := newMemoryBandwidthLatencyTable(sources, NoCategory) + report.Tables = append(report.Tables, + []*Table{ + 
newBenchmarkSummaryTable(sources, tableMemBandwidthLatency, NoCategory), + newFrequencyTable(sources, NoCategory), + tableMemBandwidthLatency, + newMemoryNUMABandwidthTable(sources, NoCategory), + }..., + ) + // TODO: remove check when code is stable + for _, table := range report.Tables { + check(table, sources) + } + return +} + +/* +A function that creates and returns a table must return a valid table. +A valid table is defined as follows: + - Table.Name is set to a non-empty string + - Table.AllHostValues length is equal to number of Source + - HostValues.HostName is set to a non-empty string + - HostValues.Values[] lengths are equal to the number of HostValues.ValueNames or zero +*/ +func check(table *Table, sources []*Source) { + if table.Name == "" { + log.Panic("table name not set") + } + if len(table.AllHostValues) != len(sources) { + log.Panic("len of host values != len sources: " + table.Name) + } + for _, hv := range table.AllHostValues { + if hv.Name == "" { + log.Panic("host name not set: " + table.Name) + } + for _, record := range hv.Values { + if len(record) != len(hv.ValueNames) && len(record) != 0 { + log.Panic("# of values doesn't match # of value names: " + table.Name) + } + } + } +} + +func (r *Report) findTable(name string) (table *Table) { + for _, t := range r.Tables { + if t.Name == name { + table = t + return + } + } + return +} diff --git a/src/reporter/report_generator.go b/src/reporter/report_generator.go new file mode 100644 index 0000000..97bf69d --- /dev/null +++ b/src/reporter/report_generator.go @@ -0,0 +1,11 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +/* ReportGenerator is the interface required to be implemented by formatted reports, e.g. HTML, XLSX, etc. 
*/ + +package main + +type ReportGenerator interface { + generate() (reportFilePath []string, err error) +} diff --git a/src/reporter/report_generator_html.go b/src/reporter/report_generator_html.go new file mode 100644 index 0000000..e02f468 --- /dev/null +++ b/src/reporter/report_generator_html.go @@ -0,0 +1,1478 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package main + +import ( + "bytes" + "fmt" + "html" + "html/template" + "log" + "math" + "os" + "os/exec" + "path/filepath" + "sort" + "strconv" + "strings" + texttemplate "text/template" + + "github.com/google/go-cmp/cmp" + "gopkg.in/yaml.v2" + "intel.com/svr-info/pkg/core" + "intel.com/svr-info/pkg/cpu" + "intel.com/svr-info/pkg/target" +) + +const ( + configurationDataIndex int = iota + benchmarkDataIndex + profileDataIndex + analyzeDataIndex + insightDataIndex +) + +const noDataFound = "No data found." +const perlWarning = "
Check if perl is installed in /usr/bin/." + +type ReportGeneratorHTML struct { + reports []*Report + outputDir string + cpusInfo *cpu.CPU +} + +func newReportGeneratorHTML(outputDir string, cpusInfo *cpu.CPU, configurationData *Report, insightData *Report, profileData *Report, benchmarkData *Report, analyzeData *Report) (rpt *ReportGeneratorHTML) { + rpt = &ReportGeneratorHTML{ + reports: []*Report{configurationData, benchmarkData, profileData, analyzeData, insightData}, // order matches const indexes defined above + outputDir: outputDir, + cpusInfo: cpusInfo, + } + return +} + +// ReportGen - struct used within the HTML template +type ReportGen struct { + HostIndices []int + ConfigurationReport *Report + ConfigurationReportReferenceData []*HostReferenceData + BenchmarkReport *Report + BenchmarkReportReferenceData []*HostReferenceData + ProfileReport *Report + ProfileReportReferenceData []*HostReferenceData + AnalyzeReport *Report + AnalyzeReportReferenceData []*HostReferenceData + InsightsReport *Report + InsightsReportReferenceData []*HostReferenceData + Version string +} + +func newReportGen(reportsData []*Report, hostIndices []int, hostsReferenceData []*HostReferenceData, version string) (gen *ReportGen) { + gen = &ReportGen{ + HostIndices: hostIndices, + ConfigurationReport: reportsData[configurationDataIndex], + ConfigurationReportReferenceData: []*HostReferenceData{}, + BenchmarkReport: reportsData[benchmarkDataIndex], + BenchmarkReportReferenceData: hostsReferenceData, + ProfileReport: reportsData[profileDataIndex], + ProfileReportReferenceData: []*HostReferenceData{}, + AnalyzeReport: reportsData[analyzeDataIndex], + AnalyzeReportReferenceData: []*HostReferenceData{}, + InsightsReport: reportsData[insightDataIndex], + InsightsReportReferenceData: []*HostReferenceData{}, + Version: version, + } + return +} + +type HostReferenceData map[string]interface{} +type ReferenceData map[string]HostReferenceData + +func newReferenceData() (data *ReferenceData) { + 
refDataPath, err := core.FindAsset("reference.yaml") + if err != nil { + log.Printf("Failed to find reference data: %v", err) + return + } + refYaml, err := os.ReadFile(refDataPath) + if err != nil { + log.Printf("Failed to read reference data file: %v.", err) + return + } + data = &ReferenceData{} + err = yaml.Unmarshal(refYaml, data) + if err != nil { + log.Printf("Failed to parse reference data: %v.", err) + } + return +} + +func (r *ReportGeneratorHTML) getRefLabel(hostIndex int) (refLabel string) { + source := r.reports[0].Sources[hostIndex] + family := source.valFromRegexSubmatch("lscpu", `^CPU family.*:\s*([0-9]+)$`) + model := source.valFromRegexSubmatch("lscpu", `^Model.*:\s*([0-9]+)$`) + stepping := source.valFromRegexSubmatch("lscpu", `^Stepping.*:\s*(.+)$`) + uarch, err := r.cpusInfo.GetMicroArchitecture(family, model, stepping) + if err != nil { + log.Printf("Did not find a matching CPU: %v", err) + return + } + sockets := source.valFromRegexSubmatch("lscpu", `^Socket\(.*:\s*(.+?)$`) + refLabel = fmt.Sprintf("%s_%s", uarch, sockets) + return +} + +func (r *ReportGeneratorHTML) loadHostReferenceData(hostIndex int, referenceData *ReferenceData) (data *HostReferenceData) { + refLabel := r.getRefLabel(hostIndex) + if refLabel == "" { + log.Printf("No reference data found for host %d", hostIndex) + return + } + for key, hostReferenceData := range *referenceData { + if key == refLabel { + data = &hostReferenceData + break + } + } + return +} + +func (r *ReportGen) RenderMenuItems(reportData *Report) template.HTML { + var out string + category := NoCategory + for _, table := range reportData.Tables { + if table.Category != category { + out += fmt.Sprintf(`
  • %s
  • `, table.Name, TableCategoryLabels[table.Category]) + category = table.Category + } + } + return template.HTML(out) +} + +func renderHTMLTable(tableHeaders []string, tableValues [][]string, class string, valuesStyle [][]string) (out string) { + if len(tableValues) > 0 { + out += `` + if len(tableHeaders) > 0 { + out += `` + out += `` + for _, label := range tableHeaders { + out += `` + } + out += `` + out += `` + } + out += `` + for rowIdx, rowValues := range tableValues { + out += `` + for colIdx, value := range rowValues { + var style string + if len(valuesStyle) > rowIdx && len(valuesStyle[rowIdx]) > colIdx { + style = ` style="` + valuesStyle[rowIdx][colIdx] + `"` + } + out += `` + value + `` + } + out += `` + } + out += `` + out += `
    ` + label + `
    ` + } else { + out += noDataFound + } + return +} + +/* Single Value Table is rendered like this: + * + * Hostname 1 | Hostname 2 | ..... | Hostname N + * Valname 1 value value value value + * Valname 2 value value value value + * ... value value value value + * Valname N value value value value + */ +func (r *ReportGen) renderSingleValueTable(table *Table, refData []*HostReferenceData) (out string) { + var tableHeaders []string + var tableValues [][]string + var tableValueStyles [][]string + + // only include column headers if there is more than one host or a single host with reference data + hostnameHeader := len(r.HostIndices) > 1 + if !hostnameHeader { + if len(refData) > 0 { + for _, ref := range refData { + if _, ok := (*ref)[table.Name]; ok { + hostnameHeader = true + } + } + } + } + if hostnameHeader { + // header in first column is blank + tableHeaders = append(tableHeaders, "") + // include only the hosts in HostIndices + for _, hostIndex := range r.HostIndices { + tableHeaders = append(tableHeaders, table.AllHostValues[hostIndex].Name) + } + // add a column for each reference + for _, ref := range refData { + if _, ok := (*ref)[table.Name]; ok { + tableHeaders = append(tableHeaders, (*ref)["Hostref"].(map[interface{}]interface{})["Name"].(string)) + } + } + } + + // we will not be in this function unless all host values' have the same value names, + // so use the value names from the first host + for valueIndex, valueName := range table.AllHostValues[r.HostIndices[0]].ValueNames { + var rowValues []string + // first column in row is the value name + rowValues = append(rowValues, valueName) + // include only the hosts in HostIndices + for _, hostIndex := range r.HostIndices { + hv := table.AllHostValues[hostIndex] + // if have the value + if len(hv.Values) > 0 && len(hv.Values[0]) > valueIndex { + rowValues = append(rowValues, hv.Values[0][valueIndex]) + } else { // value is missing + rowValues = append(rowValues, "") + } + } + // if reference data is 
available, add it to the table + for _, ref := range refData { + if refData, ok := (*ref)[table.Name]; ok { + if _, ok := refData.(map[interface{}]interface{})[valueName]; ok { + rowValues = append(rowValues, refData.(map[interface{}]interface{})[valueName].(string)) + } else { + rowValues = append(rowValues, "") + } + } + } + tableValues = append(tableValues, rowValues) + tableValueStyles = append(tableValueStyles, []string{"font-weight:bold"}) + } + // if all host data fields are empty string, then don't render the table + haveData := false + for _, rowValues := range tableValues { + for col, val := range rowValues { + if val != "" && col != 0 { + if col <= len(r.HostIndices) { // only host data, not reference + haveData = true + } + break + } + } + if haveData { + break + } + } + if !haveData { + tableValues = [][]string{} // this will cause renderHTMLTable to indicate "No data found." + } + out += renderHTMLTable(tableHeaders, tableValues, "pure-table pure-table-striped", tableValueStyles) + return +} + +/* Multi Value Table is rendered like this: + * + * Hostname 1 + * Valname 1 | Valname 2 | ...... | Valname N + * value value value value + * value value value value + * value value value value + * value value value value + * + * Hostname 2 + * Valname 1 | Valname 2 | ...... | Valname N + * value value value value + * value value value value + * value value value value + * value value value value + */ +func (r *ReportGen) renderMultiValueTable(table *Table, refData []*HostReferenceData) (out string) { + // include only the host in HostIndices + for _, hostIndex := range r.HostIndices { + // hostname above table if more than one hostname + if len(r.HostIndices) > 1 { + out += `

    ` + table.AllHostValues[hostIndex].Name + `

    ` + } + out += renderHTMLTable( + table.AllHostValues[hostIndex].ValueNames, + table.AllHostValues[hostIndex].Values, + "pure-table pure-table-striped", + [][]string{}, + ) + } + return +} + +const datasetTemplate = ` +{ + label: '{{.Label}}', + data: [{{.Data}}], + backgroundColor: '{{.Color}}', + borderColor: '{{.Color}}', + borderWidth: 1, + showLine: true +} +` +const scatterChartTemplate = `
    + +
    + +` + +type scatterChartTemplateStruct struct { + ID string + Datasets string + XaxisText string + YaxisText string + TitleText string + DisplayTitle string + DisplayLegend string + AspectRatio string + YaxisZero string +} + +func (r *ReportGen) renderFrequencyChart(table *Table, refData []*HostReferenceData) (out string) { + // one chart per host + for _, hostIndex := range r.HostIndices { + // add hostname only if more than one host or a single host with reference data + hostnameHeader := len(r.HostIndices) > 1 + if hostnameHeader { + out += `

    ` + table.AllHostValues[hostIndex].Name + `

    ` + } + hv := table.AllHostValues[hostIndex] + // need at least one set of values + if len(hv.Values) > 0 { + var datasets []string + // spec + formattedPoints := []string{} + for _, point := range table.AllHostValues[hostIndex].Values { + if point[1] != "" { + formattedPoints = append(formattedPoints, fmt.Sprintf("{x: %s, y: %s}", point[0], point[1])) + } + } + if len(formattedPoints) > 0 { + specValues := strings.Join(formattedPoints, ",") + dst := texttemplate.Must(texttemplate.New("datasetTemplate").Parse(datasetTemplate)) + buf := new(bytes.Buffer) + err := dst.Execute(buf, struct { + Label string + Data string + Color string + }{ + Label: "spec", + Data: specValues, + Color: getColor(0), + }) + if err != nil { + return + } + datasets = append(datasets, buf.String()) + } + // measured + formattedPoints = []string{} + for _, point := range table.AllHostValues[hostIndex].Values { + if point[2] != "" { + formattedPoints = append(formattedPoints, fmt.Sprintf("{x: %s, y: %s}", point[0], point[2])) + } + } + if len(formattedPoints) > 0 { + measuredValues := strings.Join(formattedPoints, ",") + dst := texttemplate.Must(texttemplate.New("datasetTemplate").Parse(datasetTemplate)) + buf := new(bytes.Buffer) + err := dst.Execute(buf, struct { + Label string + Data string + Color string + }{ + Label: "measured", + Data: measuredValues, + Color: getColor(1), + }) + if err != nil { + return + } + datasets = append(datasets, buf.String()) + } + if len(datasets) > 0 { + sct := texttemplate.Must(texttemplate.New("scatterChartTemplate").Parse(scatterChartTemplate)) + buf := new(bytes.Buffer) + err := sct.Execute(buf, scatterChartTemplateStruct{ + ID: "scatterchart" + fmt.Sprintf("%d", hostIndex), + Datasets: strings.Join(datasets, ","), + XaxisText: "Core Count", + YaxisText: "Frequency (GHz)", + TitleText: "", + DisplayTitle: "false", + DisplayLegend: "true", + AspectRatio: "4", + YaxisZero: "false", + }) + if err != nil { + return + } + out += buf.String() + out += "\n" + 
if len(datasets) > 1 { + out += r.renderFrequencyTable(table, hostIndex) + } + } else { + out += noDataFound + } + } else { + out += noDataFound + } + } + return +} + +func (r *ReportGen) renderFrequencyTable(table *Table, hostIndex int) (out string) { + hv := table.AllHostValues[hostIndex] + var rows [][]string + headers := []string{""} + for i := 0; i < len(hv.Values); i++ { + headers = append(headers, fmt.Sprintf("%d", i+1)) + } + specRow := []string{"spec"} + measRow := []string{"measured"} + for _, vals := range hv.Values { + specRow = append(specRow, vals[1]) + measRow = append(measRow, vals[2]) + } + rows = append(rows, specRow) + rows = append(rows, measRow) + valuesStyles := [][]string{} + valuesStyles = append(valuesStyles, []string{"font-weight:bold"}) + valuesStyles = append(valuesStyles, []string{"font-weight:bold"}) + out = renderHTMLTable(headers, rows, "pure-table pure-table-striped", valuesStyles) + return +} + +func (r *ReportGen) renderAverageCPUUtilizationChart(table *Table, refData []*HostReferenceData) (out string) { + // one chart per host + for _, hostIndex := range r.HostIndices { + // add hostname only if more than one host or a single host with reference data + hostnameHeader := len(r.HostIndices) > 1 + if hostnameHeader { + out += `

    ` + table.AllHostValues[hostIndex].Name + `

    ` + } + hv := table.AllHostValues[hostIndex] + // need at least one set of values + if len(hv.Values) > 0 { + var datasets []string + for statIdx, stat := range hv.ValueNames { // 1 data set per stat, e.g., %usr, %nice, etc. + if statIdx == 0 { // skip Time value + continue + } + formattedPoints := []string{} + for pointIdx, point := range table.AllHostValues[hostIndex].Values { + formattedPoints = append(formattedPoints, fmt.Sprintf("{x: %d, y: %s}", pointIdx, point[statIdx])) + } + if len(formattedPoints) > 0 { + specValues := strings.Join(formattedPoints, ",") + dst := texttemplate.Must(texttemplate.New("datasetTemplate").Parse(datasetTemplate)) + buf := new(bytes.Buffer) + err := dst.Execute(buf, struct { + Label string + Data string + Color string + }{ + Label: stat, + Data: specValues, + Color: getColor(statIdx - 1), + }) + if err != nil { + return + } + datasets = append(datasets, buf.String()) + } + } + if len(datasets) > 0 { + sct := texttemplate.Must(texttemplate.New("scatterChartTemplate").Parse(scatterChartTemplate)) + buf := new(bytes.Buffer) + err := sct.Execute(buf, scatterChartTemplateStruct{ + ID: "sysutil" + fmt.Sprintf("%d", hostIndex), + Datasets: strings.Join(datasets, ","), + XaxisText: "Time/Samples", + YaxisText: "% Utilization", + TitleText: "", + DisplayTitle: "false", + DisplayLegend: "true", + AspectRatio: "2", + YaxisZero: "true", + }) + if err != nil { + return + } + out += buf.String() + out += "\n" + } else { + out += noDataFound + } + } else { + out += noDataFound + } + } + return +} + +func (r *ReportGen) renderCPUUtilizationChart(table *Table, refData []*HostReferenceData) (out string) { + // one chart per host + for _, hostIndex := range r.HostIndices { + // add hostname only if more than one host or a single host with reference data + hostnameHeader := len(r.HostIndices) > 1 + if hostnameHeader { + out += `

    ` + table.AllHostValues[hostIndex].Name + `

    ` + } + hv := table.AllHostValues[hostIndex] + // need at least one set of values + if len(hv.Values) > 0 { + var datasets []string + cpuBusyStats := make(map[int][]float64) + for _, point := range table.AllHostValues[hostIndex].Values { + idle, err := strconv.ParseFloat(point[len(point)-1], 64) + if err != nil { + continue + } + busy := 100.0 - idle + cpu, err := strconv.Atoi(point[1]) + if err != nil { + continue + } + if _, ok := cpuBusyStats[cpu]; !ok { + cpuBusyStats[cpu] = []float64{} + } + cpuBusyStats[cpu] = append(cpuBusyStats[cpu], busy) + } + var keys []int + for cpu := range cpuBusyStats { + keys = append(keys, cpu) + } + sort.Ints(keys) + for cpu := range keys { + stats := cpuBusyStats[cpu] + formattedPoints := []string{} + for statIdx, stat := range stats { + formattedPoints = append(formattedPoints, fmt.Sprintf("{x: %d, y: %0.2f}", statIdx, stat)) + } + if len(formattedPoints) > 0 { + specValues := strings.Join(formattedPoints, ",") + dst := texttemplate.Must(texttemplate.New("datasetTemplate").Parse(datasetTemplate)) + buf := new(bytes.Buffer) + err := dst.Execute(buf, struct { + Label string + Data string + Color string + }{ + Label: fmt.Sprintf("CPU %d", cpu), + Data: specValues, + Color: getColor(cpu), + }) + if err != nil { + return + } + datasets = append(datasets, buf.String()) + } + } + if len(datasets) > 0 { + sct := texttemplate.Must(texttemplate.New("scatterChartTemplate").Parse(scatterChartTemplate)) + buf := new(bytes.Buffer) + err := sct.Execute(buf, scatterChartTemplateStruct{ + ID: "cpuutil" + fmt.Sprintf("%d", hostIndex), + Datasets: strings.Join(datasets, ","), + XaxisText: "Time/Samples", + YaxisText: "% Utilization", + TitleText: "", + DisplayTitle: "false", + DisplayLegend: "false", + AspectRatio: "2", + YaxisZero: "true", + }) + if err != nil { + return + } + out += buf.String() + out += "\n" + } else { + out += noDataFound + } + } else { + out += noDataFound + } + } + return +} + +func (r *ReportGen) 
renderIRQRateChart(table *Table, refData []*HostReferenceData) (out string) { + // one chart per host + for _, hostIndex := range r.HostIndices { + // add hostname only if more than one host or a single host with reference data + hostnameHeader := len(r.HostIndices) > 1 + if hostnameHeader { + out += `

    ` + table.AllHostValues[hostIndex].Name + `

    ` + } + hv := table.AllHostValues[hostIndex] + // need at least one set of values + if len(hv.Values) > 0 { + var datasets []string + for statIdx, stat := range hv.ValueNames { // 1 data set per stat, e.g., %usr, %nice, etc. + if statIdx < 2 { // skip Time and CPU values + continue + } + formattedPoints := []string{} + // collapse per-CPU samples into a total per stat + timeStamp := table.AllHostValues[hostIndex].Values[0][0] // timestamp off of first sample + total := 0.0 + xVal := 0 + for _, point := range table.AllHostValues[hostIndex].Values { + statVal, err := strconv.ParseFloat(point[statIdx], 64) + if err != nil { + continue + } + total += statVal + if timeStamp != point[0] { + formattedPoints = append(formattedPoints, fmt.Sprintf("{x: %d, y: %0.2f}", xVal, total)) + timeStamp = point[0] + total = 0.0 + xVal += 1 + } + } + if len(formattedPoints) > 0 { + specValues := strings.Join(formattedPoints, ",") + dst := texttemplate.Must(texttemplate.New("datasetTemplate").Parse(datasetTemplate)) + buf := new(bytes.Buffer) + err := dst.Execute(buf, struct { + Label string + Data string + Color string + }{ + Label: stat, + Data: specValues, + Color: getColor(statIdx - 1), + }) + if err != nil { + return + } + datasets = append(datasets, buf.String()) + } + } + if len(datasets) > 0 { + sct := texttemplate.Must(texttemplate.New("scatterChartTemplate").Parse(scatterChartTemplate)) + buf := new(bytes.Buffer) + err := sct.Execute(buf, scatterChartTemplateStruct{ + ID: "irqrate" + fmt.Sprintf("%d", hostIndex), + Datasets: strings.Join(datasets, ","), + XaxisText: "Time/Samples", + YaxisText: "IRQ/s", + TitleText: "", + DisplayTitle: "false", + DisplayLegend: "true", + AspectRatio: "2", + YaxisZero: "true", + }) + if err != nil { + return + } + out += buf.String() + out += "\n" + } else { + out += noDataFound + } + } else { + out += noDataFound + } + } + return +} + +func (r *ReportGen) renderDriveStatsChart(table *Table, refData []*HostReferenceData) (out string) { + // 
one chart per host drive + for _, hostIndex := range r.HostIndices { + // add hostname only if more than one host or a single host with reference data + hostnameHeader := len(r.HostIndices) > 1 + if hostnameHeader { + out += `

    ` + table.AllHostValues[hostIndex].Name + `

    ` + } + hv := table.AllHostValues[hostIndex] + // need at least one set of values + if len(hv.Values) > 0 { + driveStats := make(map[string][][]string) + for _, point := range table.AllHostValues[hostIndex].Values { + drive := point[0] + if _, ok := driveStats[drive]; !ok { + driveStats[drive] = [][]string{} + } + driveStats[drive] = append(driveStats[drive], point[1:]) + } + var keys []string + for drive := range driveStats { + keys = append(keys, drive) + } + sort.Strings(keys) + for _, drive := range keys { + var datasets []string + dstats := driveStats[drive] + for valIdx := 0; valIdx < len(dstats[0]); valIdx++ { // 1 dataset per stat type, e.g., tps, kB_read/s + formattedPoints := []string{} + for statIdx, stat := range dstats { + formattedPoints = append(formattedPoints, fmt.Sprintf("{x: %d, y: %s}", statIdx, stat[valIdx])) + } + if len(formattedPoints) > 0 { + specValues := strings.Join(formattedPoints, ",") + dst := texttemplate.Must(texttemplate.New("datasetTemplate").Parse(datasetTemplate)) + buf := new(bytes.Buffer) + err := dst.Execute(buf, struct { + Label string + Data string + Color string + }{ + Label: hv.ValueNames[valIdx+1], + Data: specValues, + Color: getColor(valIdx), + }) + if err != nil { + return + } + datasets = append(datasets, buf.String()) + } + } + if len(datasets) > 0 { + sct := texttemplate.Must(texttemplate.New("scatterChartTemplate").Parse(scatterChartTemplate)) + buf := new(bytes.Buffer) + err := sct.Execute(buf, scatterChartTemplateStruct{ + ID: "drivestats" + fmt.Sprintf("%d%s", hostIndex, drive), + Datasets: strings.Join(datasets, ","), + XaxisText: "Time/Samples", + YaxisText: "", + TitleText: drive, + DisplayTitle: "true", + DisplayLegend: "true", + AspectRatio: "2", + YaxisZero: "true", + }) + if err != nil { + return + } + out += buf.String() + out += "\n" + } else { + out += noDataFound + } + } + } else { + out += noDataFound + } + } + return +} + +func (r *ReportGen) renderNetworkStatsChart(table *Table, refData 
[]*HostReferenceData) (out string) { + // one chart per host nic + for _, hostIndex := range r.HostIndices { + // add hostname only if more than one host or a single host with reference data + hostnameHeader := len(r.HostIndices) > 1 + if hostnameHeader { + out += `

    ` + table.AllHostValues[hostIndex].Name + `

    ` + } + hv := table.AllHostValues[hostIndex] + // need at least one set of values + if len(hv.Values) > 0 { + netStats := make(map[string][][]string) + for _, point := range table.AllHostValues[hostIndex].Values { + net := point[1] + if _, ok := netStats[net]; !ok { + netStats[net] = [][]string{} + } + netStats[net] = append(netStats[net], point[2:]) + } + var keys []string + for drive := range netStats { + keys = append(keys, drive) + } + sort.Strings(keys) + for _, drive := range keys { + var datasets []string + dstats := netStats[drive] + for valIdx := 0; valIdx < len(dstats[0]); valIdx++ { // 1 dataset per stat type, e.g., rxpck/s, txpck/s + formattedPoints := []string{} + for statIdx, stat := range dstats { + formattedPoints = append(formattedPoints, fmt.Sprintf("{x: %d, y: %s}", statIdx, stat[valIdx])) + } + if len(formattedPoints) > 0 { + specValues := strings.Join(formattedPoints, ",") + dst := texttemplate.Must(texttemplate.New("datasetTemplate").Parse(datasetTemplate)) + buf := new(bytes.Buffer) + err := dst.Execute(buf, struct { + Label string + Data string + Color string + }{ + Label: hv.ValueNames[valIdx+2], + Data: specValues, + Color: getColor(valIdx), + }) + if err != nil { + return + } + datasets = append(datasets, buf.String()) + } + } + if len(datasets) > 0 { + sct := texttemplate.Must(texttemplate.New("scatterChartTemplate").Parse(scatterChartTemplate)) + buf := new(bytes.Buffer) + err := sct.Execute(buf, scatterChartTemplateStruct{ + ID: "netstats" + fmt.Sprintf("%d%s", hostIndex, drive), + Datasets: strings.Join(datasets, ","), + XaxisText: "Time/Samples", + YaxisText: "", + TitleText: drive, + DisplayTitle: "true", + DisplayLegend: "true", + AspectRatio: "2", + YaxisZero: "true", + }) + if err != nil { + return + } + out += buf.String() + out += "\n" + } else { + out += noDataFound + } + } + } else { + out += noDataFound + } + } + return +} + +func (r *ReportGen) renderMemoryStatsChart(table *Table, refData []*HostReferenceData) (out 
string) { + // one chart per host + for _, hostIndex := range r.HostIndices { + // add hostname only if more than one host or a single host with reference data + hostnameHeader := len(r.HostIndices) > 1 + if hostnameHeader { + out += `

    ` + table.AllHostValues[hostIndex].Name + `

    ` + } + hv := table.AllHostValues[hostIndex] + // need at least one set of values + if len(hv.Values) > 0 { + var datasets []string + for statIdx, stat := range hv.ValueNames { // 1 data set per stat, e.g., %usr, %nice, etc. + if statIdx == 0 { // skip Time value + continue + } + formattedPoints := []string{} + for pointIdx, point := range table.AllHostValues[hostIndex].Values { + formattedPoints = append(formattedPoints, fmt.Sprintf("{x: %d, y: %s}", pointIdx, point[statIdx])) + } + if len(formattedPoints) > 0 { + specValues := strings.Join(formattedPoints, ",") + dst := texttemplate.Must(texttemplate.New("datasetTemplate").Parse(datasetTemplate)) + buf := new(bytes.Buffer) + err := dst.Execute(buf, struct { + Label string + Data string + Color string + }{ + Label: stat, + Data: specValues, + Color: getColor(statIdx - 1), + }) + if err != nil { + return + } + datasets = append(datasets, buf.String()) + } + } + if len(datasets) > 0 { + sct := texttemplate.Must(texttemplate.New("scatterChartTemplate").Parse(scatterChartTemplate)) + buf := new(bytes.Buffer) + err := sct.Execute(buf, scatterChartTemplateStruct{ + ID: "memstat" + fmt.Sprintf("%d", hostIndex), + Datasets: strings.Join(datasets, ","), + XaxisText: "Time/Samples", + YaxisText: "kilobytes", + TitleText: "", + DisplayTitle: "false", + DisplayLegend: "true", + AspectRatio: "2", + YaxisZero: "true", + }) + if err != nil { + return + } + out += buf.String() + out += "\n" + } else { + out += noDataFound + } + } else { + out += noDataFound + } + } + return +} + +const flameGraphTemplate = ` +
    + +` + +type flameGraphTemplateStruct struct { + ID string + Data string +} + +// getBurned -- converts folded code path frequency into hierarchical json +// format for use in d3-flame-graph +func getBurned(folded string) (burned string, err error) { + burnPath, err := core.FindAsset("burn") + if err != nil { + return + } + // run burn to convert folded to hierarchical format for d3-flame-graph + bashCmd := fmt.Sprintf("%s convert --type folded", burnPath) + cmd := exec.Command("bash", "-c", bashCmd) + burned, _, _, err = target.RunLocalCommandWithInputWithTimeout(cmd, folded, 10) + if err != nil { + return + } + return +} + +func renderFlameGraph(header string, hv *HostValues, field string, hostIndex int) (out string) { + out += fmt.Sprintf("

    %s

    \n", header) + fieldIdx, err := findValueIndex(hv, field) + if err != nil { + log.Panicf("didn't find expected field (%s) in table: %v", field, err) + } + folded := hv.Values[0][fieldIdx] + if folded == "" { + out += noDataFound + if header == "System" { + out += perlWarning + } + return + } + burned, err := getBurned(folded) + if err != nil { + log.Printf("failed to burn folded data: %v", err) + out += "Error." + } + fg := texttemplate.Must(texttemplate.New("flameGraphTemplate").Parse(flameGraphTemplate)) + buf := new(bytes.Buffer) + err = fg.Execute(buf, flameGraphTemplateStruct{ + ID: fmt.Sprintf("%d%s", hostIndex, header), + Data: burned, + }) + if err != nil { + log.Printf("failed to render flame graph template: %v", err) + out += "Error." + return + } + out += buf.String() + out += "\n" + return +} + +func (r *ReportGen) renderCodePathFrequency(table *Table) (out string) { + for _, hostIndex := range r.HostIndices { + // add hostname only if more than one host or a single host with reference data + hostnameHeader := len(r.HostIndices) > 1 + if hostnameHeader { + out += `

    ` + table.AllHostValues[hostIndex].Name + `

    ` + } + hv := table.AllHostValues[hostIndex] + if len(hv.Values) > 0 { + out += renderFlameGraph("System", &hv, "System Paths", hostIndex) + out += renderFlameGraph("Java", &hv, "Java Paths", hostIndex) + } else { + out += noDataFound + } + } + return +} + +func getColor(idx int) string { + // color-blind safe pallette from here: http://mkweb.bcgsc.ca/colorblind/palettes.mhtml#page-container + colors := []string{"#9F0162", "#009F81", "#FF5AAF", "#00FCCF", "#8400CD", "#008DF9", "#00C2F9", "#FFB2FD", "#A40122", "#E20134", "#FF6E3A", "#FFC33B"} + return colors[idx%len(colors)] +} + +func (r *ReportGen) renderBandwidthLatencyChart(table *Table, refData []*HostReferenceData) (out string) { + var datasets []string + colorIdx := 0 + for _, hostIndex := range r.HostIndices { + hv := table.AllHostValues[hostIndex] + formattedPoints := []string{} + for _, point := range table.AllHostValues[hostIndex].Values { + if point[1] != "" { + formattedPoints = append(formattedPoints, fmt.Sprintf("{x: %s, y: %s}", point[1], point[0])) + } + } + if len(formattedPoints) > 0 { + data := strings.Join(formattedPoints, ",") + dst := texttemplate.Must(texttemplate.New("datasetTemplate").Parse(datasetTemplate)) + buf := new(bytes.Buffer) + err := dst.Execute(buf, struct { + Label string + Data string + Color string + }{ + Label: hv.Name, + Data: data, + Color: getColor(colorIdx), + }) + if err != nil { + return + } + datasets = append(datasets, buf.String()) + colorIdx++ + } + } + if len(refData) > 0 && len(datasets) > 0 { + for _, ref := range refData { + if _, ok := (*ref)[table.Name]; ok { + hostname := (*ref)["Hostref"].(map[interface{}]interface{})["Name"].(string) + formattedPoints := []string{} + for _, point := range (*ref)[table.Name].([]interface{}) { + latency := point.([]interface{})[0].(float64) + bandwidth := point.([]interface{})[1].(float64) / 1000 + formattedPoints = append(formattedPoints, fmt.Sprintf("{x: %.0f, y: %.0f}", bandwidth, latency)) + } + if 
len(formattedPoints) > 0 { + data := strings.Join(formattedPoints, ",") + dst := texttemplate.Must(texttemplate.New("datasetTemplate").Parse(datasetTemplate)) + buf := new(bytes.Buffer) + err := dst.Execute(buf, struct { + Label string + Data string + Color string + }{ + Label: hostname, + Data: data, + Color: getColor(colorIdx), + }) + if err != nil { + return + } + datasets = append(datasets, buf.String()) + colorIdx++ + } + } + } + } + if len(datasets) > 0 { + sct := texttemplate.Must(texttemplate.New("scatterChartTemplate").Parse(scatterChartTemplate)) + buf := new(bytes.Buffer) + err := sct.Execute(buf, scatterChartTemplateStruct{ + ID: "memBandwdithLatency", + Datasets: strings.Join(datasets, ","), + XaxisText: "Bandwidth (GB/s)", + YaxisText: "Latency (ns)", + TitleText: "", + DisplayTitle: "false", + DisplayLegend: "true", + AspectRatio: "2", + YaxisZero: "true", + }) + if err != nil { + return + } + out += buf.String() + out += "\n" + } else { + out += noDataFound + } + return +} + +/* A NUMA Bandwidth table is rendered like this: + * + * Hostname 1 + * Node | 0 | 1 | ... | N + * 0 val val val val + * 1 val val val val + * ... val val val val + * N val val val val + * + * Hostname 2 + * ... 
+ */ +func (r *ReportGen) renderNumaBandwidthTable(table *Table, refData []*HostReferenceData) (out string) { + + for _, hostIndex := range r.HostIndices { + var tableHeaders []string + var tableValues [][]string + var tableValueStyles [][]string + // add hostname only if more than one host or a single host with reference data + hostnameHeader := len(r.HostIndices) > 1 + if !hostnameHeader { + if len(refData) > 0 { + for _, ref := range refData { + if _, ok := (*ref)[table.Name]; ok { + hostnameHeader = true + } + } + } + } + tableHeaders = append(tableHeaders, "Node") + for nodeIdx, node := range table.AllHostValues[hostIndex].Values { + tableHeaders = append(tableHeaders, fmt.Sprintf("%d", nodeIdx)) + var rowValues []string + rowValues = append(rowValues, fmt.Sprintf("%d", nodeIdx)) + bandwidths := strings.Split(node[1], ",") + rowValues = append(rowValues, bandwidths...) + tableValues = append(tableValues, rowValues) + tableValueStyles = append(tableValueStyles, []string{"font-weight:bold"}) + } + if hostnameHeader && (len(tableValues) > 0 || len(r.HostIndices) > 1) { + out += `

    ` + table.AllHostValues[hostIndex].Name + `

    ` + } + out += renderHTMLTable(tableHeaders, tableValues, "pure-table pure-table-striped", tableValueStyles) + } + // if reference data is available, create a table for each reference data set + /* Reference data format: + - - 67528.4 # 0, 0 + - 30178.1 # 0, 1 + - - 30177.9 # 1, 0 + - 66665.4 # 1, 1 + */ + // add ref data if host data tables rendered + if strings.Contains(out, "") { + for _, ref := range refData { + if _, ok := (*ref)[table.Name]; ok { + var tableHeaders []string + var tableValues [][]string + var tableValueStyles [][]string + out += `

    ` + (*ref)["Hostref"].(map[interface{}]interface{})["Name"].(string) + `

    ` + tableHeaders = append(tableHeaders, "Node") + for nodeIdx, node := range (*ref)[table.Name].([]interface{}) { + tableHeaders = append(tableHeaders, fmt.Sprintf("%d", nodeIdx)) + var rowValues []string + rowValues = append(rowValues, fmt.Sprintf("%d", nodeIdx)) + for _, bandwidth := range node.([]interface{}) { + rowValues = append(rowValues, fmt.Sprintf("%.1f", bandwidth)) + } + tableValues = append(tableValues, rowValues) + tableValueStyles = append(tableValueStyles, []string{"font-weight:bold"}) + } + out += renderHTMLTable(tableHeaders, tableValues, "pure-table pure-table-striped", tableValueStyles) + } + } + } + return +} + +func dimmDetails(dimm []string) (details string) { + if strings.Contains(dimm[SizeIdx], "No") { + details = "No Module Installed" + } else { + // Intel PMEM modules may have serial number appended to end of part number... + // strip that off so it doesn't mess with color selection later + partNumber := dimm[PartIdx] + if strings.Contains(dimm[DetailIdx], "Synchronous Non-Volatile") && + dimm[ManufacturerIdx] == "Intel" && + strings.HasSuffix(dimm[PartIdx], dimm[SerialIdx]) { + partNumber = dimm[PartIdx][:len(dimm[PartIdx])-len(dimm[SerialIdx])] + } + details = dimm[SizeIdx] + " @" + dimm[ConfiguredSpeedIdx] + details += " " + dimm[TypeIdx] + " " + dimm[DetailIdx] + details += " " + dimm[ManufacturerIdx] + " " + partNumber + } + return +} + +func (r *ReportGen) renderDIMMPopulationTable(table *Table, refData []*HostReferenceData) (out string) { + htmlColors := []string{"lightgreen", "orange", "aqua", "lime", "yellow", "beige", "magenta", "violet", "salmon", "pink"} + // a DIMM Population table for every host + for _, hostIndex := range r.HostIndices { + var slotColorIndices = make(map[string]int) + // header if more than one host + if len(r.HostIndices) > 1 { + out += `

    ` + table.AllHostValues[hostIndex].Name + `

    ` + } + // socket -> channel -> slot -> dimm details + var dimms = map[string]map[string]map[string]string{} + for _, vals := range table.AllHostValues[hostIndex].Values { + if _, ok := dimms[vals[DerivedSocketIdx]]; !ok { + dimms[vals[DerivedSocketIdx]] = map[string]map[string]string{} + } + if _, ok := dimms[vals[DerivedSocketIdx]][vals[DerivedChannelIdx]]; !ok { + dimms[vals[DerivedSocketIdx]][vals[DerivedChannelIdx]] = map[string]string{} + } + dimms[vals[DerivedSocketIdx]][vals[DerivedChannelIdx]][vals[DerivedSlotIdx]] = dimmDetails(vals) + } + var socketTableHeaders = []string{"Socket", ""} + var socketTableValues [][]string + var socketKeys []string + for k := range dimms { + socketKeys = append(socketKeys, k) + } + sort.Strings(socketKeys) + for _, socket := range socketKeys { + socketMap := dimms[socket] + socketTableValues = append(socketTableValues, []string{}) + var channelTableHeaders = []string{"Channel", "Slots"} + var channelTableValues [][]string + var channelKeys []string + for k := range socketMap { + channelKeys = append(channelKeys, k) + } + sort.Strings(channelKeys) + for _, channel := range channelKeys { + channelMap := socketMap[channel] + channelTableValues = append(channelTableValues, []string{}) + var slotTableHeaders []string + var slotTableValues [][]string + var slotTableValuesStyles [][]string + var slotKeys []string + for k := range channelMap { + slotKeys = append(slotKeys, k) + } + sort.Strings(slotKeys) + slotTableValues = append(slotTableValues, []string{}) + slotTableValuesStyles = append(slotTableValuesStyles, []string{}) + for _, slot := range slotKeys { + dimmDetails := channelMap[slot] + slotTableValues[0] = append(slotTableValues[0], dimmDetails) + var slotColor string + if dimmDetails == "No Module Installed" { + slotColor = "background-color:silver" + } else { + if _, ok := slotColorIndices[dimmDetails]; !ok { + slotColorIndices[dimmDetails] = int(math.Min(float64(len(slotColorIndices)), float64(len(htmlColors)-1))) + 
} + slotColor = "background-color:" + htmlColors[slotColorIndices[dimmDetails]] + } + slotTableValuesStyles[0] = append(slotTableValuesStyles[0], slotColor) + } + slotTable := renderHTMLTable(slotTableHeaders, slotTableValues, "pure-table pure-table-bordered", slotTableValuesStyles) + // channel number + channelTableValues[len(channelTableValues)-1] = append(channelTableValues[len(channelTableValues)-1], channel) + // slot table + channelTableValues[len(channelTableValues)-1] = append(channelTableValues[len(channelTableValues)-1], slotTable) + // style + } + channelTable := renderHTMLTable(channelTableHeaders, channelTableValues, "pure-table pure-table-bordered", [][]string{}) + // socket number + socketTableValues[len(socketTableValues)-1] = append(socketTableValues[len(socketTableValues)-1], socket) + // channel table + socketTableValues[len(socketTableValues)-1] = append(socketTableValues[len(socketTableValues)-1], channelTable) + } + out += renderHTMLTable(socketTableHeaders, socketTableValues, "pure-table pure-table-bordered", [][]string{}) + } + return +} + +// if there's one value per value name +// +// and +// +// if the value names are the same across hosts +func isSingleValueTable(table *Table) bool { + var valueNames []string + for _, hv := range table.AllHostValues { + if len(hv.Values) > 1 { + return false + } + if len(valueNames) == 0 { + valueNames = hv.ValueNames + } + if len(hv.ValueNames) > 0 && !cmp.Equal(hv.ValueNames, valueNames) { + return false + } + } + return true +} + +// HTMLEscapeTable - escape value names and values +func HTMLEscapeTable(table *Table) (safeTable Table) { + safeTable.Name = table.Name + safeTable.Category = table.Category + for _, hv := range table.AllHostValues { + var safeHv HostValues + safeHv.Name = hv.Name + for _, name := range hv.ValueNames { + safeHv.ValueNames = append(safeHv.ValueNames, html.EscapeString(name)) + } + for _, values := range hv.Values { + var safeValues []string + for _, value := range values { + 
safeValues = append(safeValues, html.EscapeString(value)) + } + safeHv.Values = append(safeHv.Values, safeValues) + } + safeTable.AllHostValues = append(safeTable.AllHostValues, safeHv) + } + return +} + +func (r *ReportGen) RenderDataTable(unsafeTable *Table, refData []*HostReferenceData) template.HTML { + t := HTMLEscapeTable(unsafeTable) + table := &t + out := fmt.Sprintf("

    %s

    \n", table.Name, table.Name) + if table.Name == "Core Frequency" { + out += r.renderFrequencyChart(table, refData) + } else if table.Name == "Memory Bandwidth and Latency" { + out += r.renderBandwidthLatencyChart(table, refData) + } else if table.Name == "Memory NUMA Bandwidth" { + out += r.renderNumaBandwidthTable(table, refData) + } else if table.Name == "DIMM Population" { + out += r.renderDIMMPopulationTable(table, refData) + } else if table.Name == "Average CPU Utilization" { + out += r.renderAverageCPUUtilizationChart(table, refData) + } else if table.Name == "CPU Utilization" { + out += r.renderCPUUtilizationChart(table, refData) + } else if table.Name == "IRQ Rate" { + out += r.renderIRQRateChart(table, refData) + } else if table.Name == "Drive Stats" { + out += r.renderDriveStatsChart(table, refData) + } else if table.Name == "Network Stats" { + out += r.renderNetworkStatsChart(table, refData) + } else if table.Name == "Memory Stats" { + out += r.renderMemoryStatsChart(table, refData) + } else if table.Name == "Code Path Frequency" { + out += r.renderCodePathFrequency(table) + } else if isSingleValueTable(table) { + out += r.renderSingleValueTable(table, refData) + } else { + out += r.renderMultiValueTable(table, refData) + } + return template.HTML(out) +} + +func (r *ReportGeneratorHTML) generate() (reportFilePaths []string, err error) { + reportTemplate, err := core.FindAsset("report.html.tmpl") + if err != nil { + return + } + t, err := template.ParseFiles(reportTemplate) + if err != nil { + return + } + referenceData := newReferenceData() + var hostnames []string + for _, values := range r.reports[0].Tables[0].AllHostValues { + hostnames = append(hostnames, values.Name) + } + // one HTML report for each host in reportData + for hostIndex, hostname := range hostnames { + // get the reference data for this host, if any + var hostsReferenceData []*HostReferenceData + hostReferenceData := r.loadHostReferenceData(hostIndex, referenceData) + if 
hostReferenceData != nil { + hostsReferenceData = append(hostsReferenceData, hostReferenceData) + } + fileName := hostname + ".html" + reportFilePath := filepath.Join(r.outputDir, fileName) + var f *os.File + f, err = os.OpenFile(reportFilePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return + } + err = t.Execute(f, newReportGen(r.reports, []int{hostIndex}, hostsReferenceData, gVersion)) + f.Close() + if err != nil { + return + } + reportFilePaths = append(reportFilePaths, reportFilePath) + } + // if more than one host, create a combined report + if len(hostnames) > 1 { + // get unique host reference data, if any + var hostsReferenceData []*HostReferenceData + for hostIndex := range hostnames { + hostReferenceData := r.loadHostReferenceData(hostIndex, referenceData) + if hostReferenceData != nil { + // make sure we don't already have this one in the list + alreadyHaveIt := false + for _, ref := range hostsReferenceData { + if (*hostReferenceData)["Hostref"].(map[interface{}]interface{})["Name"].(string) == + (*ref)["Hostref"].(map[interface{}]interface{})["Name"].(string) { + alreadyHaveIt = true + break + } + } + if !alreadyHaveIt { + hostsReferenceData = append(hostsReferenceData, hostReferenceData) + } + } + } + fileName := "all_hosts" + ".html" + reportFilePath := filepath.Join(r.outputDir, fileName) + var f *os.File + f, err = os.OpenFile(reportFilePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return + } + var hostIndices []int + for i := 0; i < len(hostnames); i++ { + hostIndices = append(hostIndices, i) + } + err = t.Execute(f, newReportGen(r.reports, hostIndices, hostsReferenceData, gVersion)) + f.Close() + if err != nil { + return + } + reportFilePaths = append(reportFilePaths, reportFilePath) + } + return +} diff --git a/src/reporter/report_generator_json.go b/src/reporter/report_generator_json.go new file mode 100644 index 0000000..70d1220 --- /dev/null +++ b/src/reporter/report_generator_json.go @@ -0,0 +1,81 
@@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package main + +import ( + "encoding/json" + "os" + "path/filepath" +) + +type ReportGeneratorJSON struct { + reports []*Report + outputDir string +} + +func newReportGeneratorJSON(outputDir string, configurationData *Report, insightReport *Report, profileReport *Report, benchmarkReport *Report, analyzeReport *Report) (rpt *ReportGeneratorJSON) { + rpt = &ReportGeneratorJSON{ + reports: []*Report{configurationData, insightReport, profileReport, benchmarkReport, analyzeReport}, + outputDir: outputDir, + } + return +} + +func (r *ReportGeneratorJSON) generate() (reportFilePaths []string, err error) { + var hostnames []string + for _, values := range r.reports[0].Tables[0].AllHostValues { + hostnames = append(hostnames, values.Name) + } + // one json report per host + for hostIndex, hostname := range hostnames { + fileName := hostname + ".json" + reportFilePath := filepath.Join(r.outputDir, fileName) + // build new report data with values only from the current source/host + var genData []Table + for _, reportData := range r.reports { + for _, table := range reportData.Tables { + var genTable Table + genTable.Name = table.Name + genTable.Category = table.Category + if len(table.AllHostValues) > hostIndex { + genTable.AllHostValues = []HostValues{table.AllHostValues[hostIndex]} + } + genData = append(genData, genTable) + } + } + var jsonData []byte + jsonData, err = json.MarshalIndent(genData, "", " ") + if err != nil { + return + } + err = os.WriteFile(reportFilePath, jsonData, 0644) + if err != nil { + return + } + reportFilePaths = append(reportFilePaths, reportFilePath) + } + // combined, all-host json report, if more than one host + if len(hostnames) > 1 { + fileName := "all_hosts.json" + reportFilePath := filepath.Join(r.outputDir, fileName) + var genData []Table + for _, reportData := range r.reports { + for _, table := range reportData.Tables { + genData = append(genData, 
*table) + } + } + var jsonData []byte + jsonData, err = json.MarshalIndent(genData, "", " ") + if err != nil { + return + } + err = os.WriteFile(reportFilePath, jsonData, 0644) + if err != nil { + return + } + reportFilePaths = append(reportFilePaths, reportFilePath) + } + return +} diff --git a/src/reporter/report_generator_json_simplified.go b/src/reporter/report_generator_json_simplified.go new file mode 100644 index 0000000..dccffd9 --- /dev/null +++ b/src/reporter/report_generator_json_simplified.go @@ -0,0 +1,94 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package main + +import ( + "encoding/json" + "os" + "path/filepath" +) + +type ReportGeneratorJSONSimplified struct { + reports []*Report + outputDir string +} + +func newReportGeneratorJSONSimplified(outputDir string, configurationReport *Report, briefReport *Report, insightReport *Report, profileReport *Report, benchmarkReport *Report, analyzeReport *Report) (rpt *ReportGeneratorJSONSimplified) { + rpt = &ReportGeneratorJSONSimplified{ + reports: []*Report{configurationReport, briefReport, insightReport, profileReport, benchmarkReport, analyzeReport}, + outputDir: outputDir, + } + return +} + +type SimpleRow map[string]string //valuename->value +type SimpleTable map[string][]SimpleRow //tablename->[]rows +type SimpleReport map[string]SimpleTable //reportname->tables +type SimpleHosts map[string]SimpleReport //hostname->reports + +func convertToSimple(hostNames []string, reportsData []*Report) (simpleHosts SimpleHosts, err error) { + simpleHosts = make(SimpleHosts) + for hostIndex, hostName := range hostNames { + simpleReport := make(SimpleReport) + for _, report := range reportsData { + simpleTable := make(SimpleTable) + for _, table := range report.Tables { + hostValues := table.AllHostValues[hostIndex] + for _, values := range hostValues.Values { + simpleRow := make(SimpleRow) + for valueIndex, value := range values { + 
simpleRow[hostValues.ValueNames[valueIndex]] = value + } + simpleTable[table.Name] = append(simpleTable[table.Name], simpleRow) + } + } + simpleReport[report.InternalName] = simpleTable + } + simpleHosts[hostName] = simpleReport + } + return +} + +func (r *ReportGeneratorJSONSimplified) generate() (reportFilePaths []string, err error) { + var hostnames []string + for _, values := range r.reports[0].Tables[0].AllHostValues { + hostnames = append(hostnames, values.Name) + } + allHosts, err := convertToSimple(hostnames, r.reports) + if err != nil { + return + } + // one json report per host + for hostName, host := range allHosts { + fileName := hostName + ".json" + reportFilePath := filepath.Join(r.outputDir, fileName) + var jsonData []byte + jsonData, err = json.MarshalIndent(host, "", " ") + if err != nil { + return + } + err = os.WriteFile(reportFilePath, jsonData, 0644) + if err != nil { + return + } + reportFilePaths = append(reportFilePaths, reportFilePath) + } + // combined, all-host json report, if more than one host + if len(hostnames) > 1 { + fileName := "all_hosts.json" + reportFilePath := filepath.Join(r.outputDir, fileName) + var jsonData []byte + jsonData, err = json.MarshalIndent(allHosts, "", " ") + if err != nil { + return + } + err = os.WriteFile(reportFilePath, jsonData, 0644) + if err != nil { + return + } + reportFilePaths = append(reportFilePaths, reportFilePath) + } + return +} diff --git a/src/reporter/report_generator_txt.go b/src/reporter/report_generator_txt.go new file mode 100644 index 0000000..eec711a --- /dev/null +++ b/src/reporter/report_generator_txt.go @@ -0,0 +1,56 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package main + +import ( + "fmt" + "log" + "os" + "path/filepath" + "sort" +) + +type ReportGeneratorTXT struct { + sources []*Source + outputDir string +} + +func newReportGeneratorTXT(sources []*Source, outputDir string) (rpt *ReportGeneratorTXT) { + rpt = &ReportGeneratorTXT{ + 
sources: sources, + outputDir: outputDir, + } + return +} + +func (r *ReportGeneratorTXT) generate() (reportFilePaths []string, err error) { + for _, source := range r.sources { + fileName := source.getHostname() + ".txt" + reportFilePath := filepath.Join(r.outputDir, fileName) + f, err := os.OpenFile(reportFilePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + log.Printf("Failed to create/open file for writing: %s", reportFilePath) + continue + } + defer f.Close() + f.WriteString(fmt.Sprintf("Host: %s\n", source.getHostname())) + var keys []string + for key := range source.ParsedData { + keys = append(keys, key) + } + sort.Strings(keys) + for _, key := range keys { + cmd := source.ParsedData[key] + f.WriteString("\n----------------------------------\n") + f.WriteString(fmt.Sprintf("label: %s\n", key)) + f.WriteString(fmt.Sprintf("command: %s\n", cmd.Command)) + f.WriteString(fmt.Sprintf("exit code: %s\n", cmd.ExitStatus)) + f.WriteString(fmt.Sprintf("stderr: %s\n", cmd.Stderr)) + f.WriteString(fmt.Sprintf("stdout: %s\n", cmd.Stdout)) + } + reportFilePaths = append(reportFilePaths, reportFilePath) + } + return +} diff --git a/src/reporter/report_generator_xlsx.go b/src/reporter/report_generator_xlsx.go new file mode 100644 index 0000000..df92e87 --- /dev/null +++ b/src/reporter/report_generator_xlsx.go @@ -0,0 +1,351 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package main + +import ( + "fmt" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/xuri/excelize/v2" +) + +type ReportGeneratorXLSX struct { + reports []*Report + sheetNames []string + outputDir string +} + +func newReportGeneratorXLSX(outputDir string, configurationReport *Report, briefReport *Report, insightReport *Report, profileReport *Report, benchmarkReport *Report, analyzeReport *Report) (rpt *ReportGeneratorXLSX) { + rpt = &ReportGeneratorXLSX{ + reports: []*Report{configurationReport, briefReport, benchmarkReport, 
profileReport, analyzeReport, insightReport}, // this is the order the tabs will appear in the spreadsheet + sheetNames: []string{"Configuration", "Brief", "Benchmark", "Profile", "Analyze", "Insights"}, + outputDir: outputDir, + } + return +} + +func cellName(col int, row int) (name string) { + columnName, err := excelize.ColumnNumberToName(col) + if err != nil { + return + } + name, err = excelize.JoinCellName(columnName, row) + if err != nil { + return + } + return +} + +func renderExcelTable(tableHeaders []string, tableValues [][]string, f *excelize.File, reportSheetName string, originRow int, originCol int, boldFirstCol bool) int { + row := originRow + col := originCol + bold, _ := f.NewStyle(&excelize.Style{ + Font: &excelize.Font{ + Bold: true, + }, + }) + alignLeft, _ := f.NewStyle(&excelize.Style{ + Alignment: &excelize.Alignment{ + Horizontal: "left", + }, + }) + boldAlignLeft, _ := f.NewStyle(&excelize.Style{ + Font: &excelize.Font{ + Bold: true, + }, + Alignment: &excelize.Alignment{ + Horizontal: "left", + }, + }) + if len(tableValues) > 0 { + if len(tableHeaders) > 0 { + for _, header := range tableHeaders { + // if possible, convert strings to floats before inserting into the sheet + floatValue, err := strconv.ParseFloat(header, 64) + if err == nil { + f.SetCellFloat(reportSheetName, cellName(col, row), floatValue, 1, 64) + f.SetCellStyle(reportSheetName, cellName(col, row), cellName(col, row), boldAlignLeft) + } else { + + f.SetCellStr(reportSheetName, cellName(col, row), header) + f.SetCellStyle(reportSheetName, cellName(col, row), cellName(col, row), bold) + } + col += 1 + } + row += 1 + } + for _, rowValues := range tableValues { + col = originCol + if len(rowValues) > 0 { + for rowIdx, value := range rowValues { + // if possible, convert strings to floats before inserting into the sheet + floatValue, err := strconv.ParseFloat(value, 64) + if err == nil { + f.SetCellFloat(reportSheetName, cellName(col, row), floatValue, 1, 64) + 
f.SetCellStyle(reportSheetName, cellName(col, row), cellName(col, row), alignLeft) + } else { + if rowIdx == 0 && boldFirstCol { + f.SetCellStyle(reportSheetName, cellName(col, row), cellName(col, row), bold) + } + f.SetCellStr(reportSheetName, cellName(col, row), value) + } + col += 1 + } + } else { + f.SetCellStr(reportSheetName, cellName(col, row), "") + } + row += 1 + } + } else { + f.SetCellStr(reportSheetName, cellName(col, row), "No data found.") + row += 1 + } + return row +} + +func (r *ReportGeneratorXLSX) renderSingleValueTable(table *Table, allHostValues []HostValues, f *excelize.File, reportSheetName string, row int, col int, noHeader bool) int { + var tableHeaders []string + var tableValues [][]string + + if len(allHostValues) > 1 && !noHeader { + tableHeaders = append(tableHeaders, "") + for _, hv := range allHostValues { + tableHeaders = append(tableHeaders, hv.Name) + } + } + // a host with no values will not have value names, so find a host with value names + var valueNames []string + for _, hv := range allHostValues { + if len(hv.ValueNames) > 0 { + valueNames = hv.ValueNames + break + } + } + for valueIndex, valueName := range valueNames { + var rowValues []string + rowValues = append(rowValues, valueName) + for _, hv := range allHostValues { + if len(hv.Values) > 0 && len(hv.Values[0]) > valueIndex { + rowValues = append(rowValues, hv.Values[0][valueIndex]) + } else { + rowValues = append(rowValues, "") + } + } + tableValues = append(tableValues, rowValues) + } + // if all data fields are empty string, then don't render the table + haveData := false + for _, rowValues := range tableValues { + for col, val := range rowValues { + if val != "" && col != 0 { + haveData = true + break + } + } + if haveData { + break + } + } + if !haveData { + tableValues = [][]string{} // this will cause renderExcelTable to indicate "No data found." 
+ } + return renderExcelTable(tableHeaders, tableValues, f, reportSheetName, row, col, true) +} + +func (r *ReportGeneratorXLSX) renderMultiValueTable(table *Table, allHostValues []HostValues, f *excelize.File, reportSheetName string, row int, col int) int { + // render one Excel table per host + for idx, hv := range allHostValues { + // if more than one host, put hostname above table + if len(allHostValues) > 1 { + f.SetCellStr(reportSheetName, cellName(2, row), hv.Name) + headerStyle, _ := f.NewStyle(&excelize.Style{ + Font: &excelize.Font{ + Bold: true, + }, + }) + f.SetCellStyle(reportSheetName, cellName(2, row), cellName(2, row), headerStyle) + row += 1 + } + row = renderExcelTable(hv.ValueNames, hv.Values, f, reportSheetName, row, col, false) + if idx < len(allHostValues)-1 { + row += 1 + } + } + return row +} + +func (r *ReportGeneratorXLSX) renderNumaBandwidthTable(table *Table, allHostValues []HostValues, f *excelize.File, reportSheetName string, row int) int { + // render one Excel table per host + for idx, hv := range allHostValues { + // if more than one host, put hostname above table + if len(allHostValues) > 1 { + f.SetCellStr(reportSheetName, cellName(2, row), hv.Name) + headerStyle, _ := f.NewStyle(&excelize.Style{ + Font: &excelize.Font{ + Bold: true, + }, + }) + f.SetCellStyle(reportSheetName, cellName(2, row), cellName(2, row), headerStyle) + row += 1 + } + var tableHeaders []string + var tableValues [][]string + tableHeaders = append(tableHeaders, "Node") + for nodeIdx, node := range hv.Values { + tableHeaders = append(tableHeaders, fmt.Sprintf("%d", nodeIdx)) + rowValues := []string{node[0]} + bandwidths := strings.Split(node[1], ",") + rowValues = append(rowValues, bandwidths...) 
+ tableValues = append(tableValues, rowValues) + } + row = renderExcelTable(tableHeaders, tableValues, f, reportSheetName, row, 2, true) + if idx < len(allHostValues)-1 { + row += 1 + } + } + return row +} + +func (r *ReportGeneratorXLSX) renderDIMMPopulationTable(table *Table, allHostValues []HostValues, f *excelize.File, reportSheetName string, row int) int { + // render one Excel table per host + for idx, hv := range allHostValues { + // if more than one host, put hostname above table + if len(allHostValues) > 1 { + f.SetCellStr(reportSheetName, cellName(2, row), hv.Name) + headerStyle, _ := f.NewStyle(&excelize.Style{ + Font: &excelize.Font{ + Bold: true, + }, + }) + f.SetCellStyle(reportSheetName, cellName(2, row), cellName(2, row), headerStyle) + row += 1 + } + var tableHeaders = []string{"Socket", "Channel", "Slot", "Details"} + var tableValues [][]string + for _, dimm := range hv.Values { + tableValues = append(tableValues, []string{dimm[DerivedSocketIdx], dimm[DerivedChannelIdx], dimm[DerivedSlotIdx], dimmDetails(dimm)}) + } + row = renderExcelTable(tableHeaders, tableValues, f, reportSheetName, row, 2, false) + if idx < len(allHostValues)-1 { + row += 1 + } + } + return row +} + +func (r *ReportGeneratorXLSX) fillSheet(f *excelize.File, reportSheetName string, reportData *Report, sourceIndex int, briefReport bool) (err error) { + combinedReport := sourceIndex < 0 + headerStyle, _ := f.NewStyle(&excelize.Style{ + Font: &excelize.Font{ + Bold: true, + }, + }) + if briefReport { // wider first column for brief report + f.SetColWidth(reportSheetName, "A", "A", 25) + } else { + f.SetColWidth(reportSheetName, "A", "A", 15) + } + f.SetColWidth(reportSheetName, "B", "L", 25) + row, col := 1, 1 + for tableIdx, table := range reportData.Tables { + if table == nil { + continue + } + var allHostValues []HostValues + if combinedReport { + allHostValues = table.AllHostValues + } else { + allHostValues = []HostValues{table.AllHostValues[sourceIndex]} + } + col = 1 + if 
!briefReport { // no table names in brief report + f.SetCellStr(reportSheetName, cellName(col, row), table.Name) + f.SetCellStyle(reportSheetName, cellName(col, row), cellName(col, row), headerStyle) + col++ + } + + if table.Name == "Memory NUMA Bandwidth" { + row = r.renderNumaBandwidthTable(table, allHostValues, f, reportSheetName, row) + } else if table.Name == "DIMM Population" { + row = r.renderDIMMPopulationTable(table, allHostValues, f, reportSheetName, row) + } else if isSingleValueTable(table) { + noHeader := briefReport && tableIdx != 0 + row = r.renderSingleValueTable(table, allHostValues, f, reportSheetName, row, col, noHeader) + } else { + row = r.renderMultiValueTable(table, allHostValues, f, reportSheetName, row, col) + } + if !briefReport { //no row between tables in brief report + row += 1 + } + } + if briefReport { + row += 1 + } + f.SetCellStr(reportSheetName, cellName(1, row), fmt.Sprintf("svr-info version: %s", gVersion)) + return +} + +// one Excel report for each host in reportData and a combined report if more than one host +// Note: an Excel report includes a full report, a brief report, a benchmark report, a profile report, an analyze report, and an insights report +func (r *ReportGeneratorXLSX) generate() (reportFilePaths []string, err error) { + var hostnames []string + for _, values := range r.reports[0].Tables[0].AllHostValues { + hostnames = append(hostnames, values.Name) + } + // generate one excel file for every host + for hostIndex, hostname := range hostnames { + fileName := hostname + ".xlsx" + reportFilePath := filepath.Join(r.outputDir, fileName) + f := excelize.NewFile() + for reportIndex, reportData := range r.reports { + if reportIndex == 0 { + f.SetSheetName("Sheet1", r.sheetNames[reportIndex]) + } else { + f.NewSheet(r.sheetNames[reportIndex]) + } + r.fillSheet(f, r.sheetNames[reportIndex], reportData, hostIndex, reportIndex == 1) + } + var outFile *os.File + outFile, err = os.OpenFile(reportFilePath,
os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return + } + _, err = f.WriteTo(outFile) + outFile.Close() + if err != nil { + return + } + reportFilePaths = append(reportFilePaths, reportFilePath) + } + // if more than one host create a combined report + if len(r.reports[0].Sources) > 1 { + fileName := "all_hosts.xlsx" + reportFilePath := filepath.Join(r.outputDir, fileName) + f := excelize.NewFile() + for reportIndex, reportData := range r.reports { + if reportIndex == 0 { + f.SetSheetName("Sheet1", r.sheetNames[reportIndex]) + } else { + f.NewSheet(r.sheetNames[reportIndex]) + } + r.fillSheet(f, r.sheetNames[reportIndex], reportData, -1, reportIndex == 1) // -1 means all sources + } + var outFile *os.File + outFile, err = os.OpenFile(reportFilePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return + } + _, err = f.WriteTo(outFile) + outFile.Close() + if err != nil { + return + } + reportFilePaths = append(reportFilePaths, reportFilePath) + } + return +} diff --git a/src/reporter/report_tables.go b/src/reporter/report_tables.go new file mode 100644 index 0000000..c7358f1 --- /dev/null +++ b/src/reporter/report_tables.go @@ -0,0 +1,2154 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +/* functions for creating tables used in reports */ + +package main + +import ( + "fmt" + "log" + "math" + "os" + "regexp" + "sort" + "strconv" + "strings" + + "github.com/hyperjumptech/grule-rule-engine/ast" + "github.com/hyperjumptech/grule-rule-engine/builder" + "github.com/hyperjumptech/grule-rule-engine/engine" + "github.com/hyperjumptech/grule-rule-engine/pkg" + "gopkg.in/yaml.v2" + "intel.com/svr-info/pkg/core" + "intel.com/svr-info/pkg/cpu" +) + +/* a note about functions that define tables... 
+ * - "Brief" and "Summary" in the function name have meaning...see examples below + * - Avoid duplicating any of the parsing logic present in the full table when creating Brief or Summary tables + * - Avoid duplicating ...............................when creating tables, in general. + * Examples: + * memoryTable() - the full table + * memoryBriefTable() - a table used in the "Brief" report that has a reduced number of fields compared to the full table + * nicTable() - the full table + * nicSummaryTable() - has info derived from the full table, but is presented in summary format + */ + +func newMarketingClaimTable(fullReport *Report, tableNicSummary *Table, tableDiskSummary *Table, category TableCategory) (table *Table) { + table = &Table{ + Name: "Marketing Claim", + Category: category, + AllHostValues: []HostValues{}, + } + // BASELINE: Test by Intel as of . 1-node, 2x Intel® Xeon® , xx cores, HT On/Off?, Turbo On/Off?, Total Memory xxx GB (xx slots/ xx GB/ xxxx MHz [run @ xxxx MHz] ), , , , , , , , score=? + template := "Test by as of %s. 1-node, %sx %s, %s cores, HT %s, Turbo %s, Total Memory %s, BIOS %s, microcode %s, %s, %s, %s, %s, , , , score=?" 
+ var date, socketCount, cpuModel, coreCount, htOnOff, turboOnOff, installedMem, biosVersion, uCodeVersion, nics, disks, operatingSystem, kernelVersion string + + for sourceIdx, source := range fullReport.Sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "System Summary", + }, + Values: [][]string{}, + } + date = source.getCommandOutput("date") + socketCount, _ = fullReport.findTable("CPU").getValue(sourceIdx, "Sockets") + cpuModel, _ = fullReport.findTable("CPU").getValue(sourceIdx, "CPU Model") + coreCount, _ = fullReport.findTable("CPU").getValue(sourceIdx, "Cores per Socket") + hyperthreading, _ := fullReport.findTable("CPU").getValue(sourceIdx, "Hyperthreading") + if hyperthreading == "Enabled" { + htOnOff = "On" + } else if hyperthreading == "Disabled" { + htOnOff = "Off" + } else { + htOnOff = "?" + } + turboEnabledDisabled, _ := fullReport.findTable("CPU").getValue(sourceIdx, "Intel Turbo Boost") + if turboEnabledDisabled == "Enabled" { + turboOnOff = "On" + } else { + turboOnOff = "Off" + } + installedMem, _ = fullReport.findTable("Memory").getValue(sourceIdx, "Installed Memory") + biosVersion, _ = fullReport.findTable("BIOS").getValue(sourceIdx, "Version") + uCodeVersion, _ = fullReport.findTable("Operating System").getValue(sourceIdx, "Microcode") + nics, _ = tableNicSummary.getValue(sourceIdx, "NIC") + disks, _ = tableDiskSummary.getValue(sourceIdx, "Disk") + operatingSystem, _ = fullReport.findTable("Operating System").getValue(sourceIdx, "OS") + kernelVersion, _ = fullReport.findTable("Operating System").getValue(sourceIdx, "Kernel") + claim := fmt.Sprintf(template, date, socketCount, cpuModel, coreCount, htOnOff, turboOnOff, installedMem, biosVersion, uCodeVersion, nics, disks, operatingSystem, kernelVersion) + hostValues.Values = append(hostValues.Values, []string{claim}) + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newMemoryNUMABandwidthTable(sources 
[]*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "Memory NUMA Bandwidth", + Category: category, + AllHostValues: []HostValues{}, + } + /* MLC Output: + Numa node + Numa node 0 1 + 0 175610.3 55579.7 + 1 55575.2 175656.7 + */ + /* table : + Node | Bandwidths + 0 | val,val1,val...,valn + 1 | val,val1,val...,valn + ... | val,val1,val...,valn + N | val,val1,val...,valn + */ + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "Node", + "Bandwidths 0-N", + }, + Values: [][]string{}, + } + nodeBandwidthsPairs := source.valsArrayFromRegexSubmatch("Memory MLC Bandwidth", `^(\d)\s+(\d.*)`) + for _, nodeBandwidthsPair := range nodeBandwidthsPairs { + bandwidths := strings.Split(strings.TrimSpace(nodeBandwidthsPair[1]), "\t") + hostValues.Values = append(hostValues.Values, []string{nodeBandwidthsPair[0], strings.Join(bandwidths, ",")}) + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newMemoryBandwidthLatencyTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "Memory Bandwidth and Latency", + Category: category, + AllHostValues: []HostValues{}, + } + /* MLC Output: + Inject Latency Bandwidth + Delay (ns) MB/sec + ========================== + 00000 261.65 225060.9 + 00002 261.63 225040.5 + 00008 261.54 225073.3 + ... 
+ */ + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "Latency (ns)", + "Bandwidth (GB/s)", + }, + Values: [][]string{}, + } + latencyBandwidthPairs := source.valsArrayFromRegexSubmatch("Memory MLC Loaded Latency Test", `^[0-9]*\s*([0-9]*\.[0-9]+)\s*([0-9]*\.[0-9]+)`) + for _, latencyBandwidth := range latencyBandwidthPairs { + latency := latencyBandwidth[0] + bandwidth, err := strconv.ParseFloat(latencyBandwidth[1], 32) + if err != nil { + log.Printf("Unable to convert bandwidth to float: %s", latencyBandwidth[1]) + continue + } + bandwidth = bandwidth / 1000 + // insert into beginning of array (reverse order) + vals := []string{latency, fmt.Sprintf("%.1f", bandwidth)} + hostValues.Values = append([][]string{vals}, hostValues.Values...) + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newNetworkIRQTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "Network IRQ Mapping", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{"Interface", "CPU:IRQs CPU:IRQs ..."}, + Values: [][]string{}, + } + nics := source.valsFromRegexSubmatch("lshw", `^pci.*? (\S+)\s+network\s+\S.*?\s+\[\w+:\w+]$`) + nics = append(nics, source.valsFromRegexSubmatch("lshw", `^usb.*? (\S+)\s+network\s+\S.*?$`)...) 
+ for _, nic := range nics { + cmdout := source.valFromOutputRegexSubmatch("nic info", fmt.Sprintf(`CPU AFFINITY %s: (.*)\n`, nic)) + // command output is formatted like this: 200:1;201:1-17,36-53;202:44 + // which is : + // we need to reverse it to : + cpuIRQMappings := make(map[int][]int) + irqCPUPairs := strings.Split(cmdout, ";") + for _, pair := range irqCPUPairs { + if pair == "" { + continue + } + tokens := strings.Split(pair, ":") + irq, err := strconv.Atoi(tokens[0]) + if err != nil { + continue + } + cpuList := tokens[1] + cpus := expandCPUList(cpuList) + for _, cpu := range cpus { + cpuIRQMappings[cpu] = append(cpuIRQMappings[cpu], irq) + } + } + var val string + var cpuIRQs []string + var cpus []int + for k := range cpuIRQMappings { + cpus = append(cpus, k) + } + sort.Ints(cpus) + for _, cpu := range cpus { + irqs := cpuIRQMappings[cpu] + cpuIRQ := fmt.Sprintf("%d:", cpu) + var irqStrings []string + for _, irq := range irqs { + irqStrings = append(irqStrings, fmt.Sprintf("%d", irq)) + } + cpuIRQ += strings.Join(irqStrings, ",") + cpuIRQs = append(cpuIRQs, cpuIRQ) + } + val = strings.Join(cpuIRQs, " ") + hostValues.Values = append(hostValues.Values, []string{nic, val}) + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newFrequencyTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "Core Frequency", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "Core Count", + "Spec Frequency (GHz)", + "Measured Frequency (GHz)", + }, + Values: [][]string{}, + } + type freq struct { + spec float64 + measured float64 + } + vals := make(map[int]freq) // map core count to spec/measured frequences + + // get measured frequencies (these are optionally collected) + matches := source.valsArrayFromRegexSubmatch("Measure Turbo Frequencies", `^(\d+)-core turbo\s+(\d+) 
MHz`) + for _, countFreq := range matches { + mhz, err := strconv.Atoi(countFreq[1]) + if err != nil { + log.Print(err) + return + } + ghz := math.Round(float64(mhz)/100.0) / 10 + count, err := strconv.Atoi(countFreq[0]) + if err != nil { + log.Print(err) + return + } + vals[count] = freq{} + x := vals[count] + x.measured = ghz + vals[count] = x + } + if len(vals) > 0 { + // get spec frequencies (these also may not be present) + countFreqs, err := source.getSpecCountFrequencies() + if err != nil { + log.Print(err) + } else { + // fill in gaps in sparse list... + // go through list in reverse order so we can fill previous slots with same frequency + for i := len(countFreqs) - 1; i >= 0; i-- { + countFreq := countFreqs[i] + count, _ := strconv.Atoi(countFreq[0]) + ghz, _ := strconv.ParseFloat(countFreq[1], 64) + for j := count; j > 0; j-- { + if _, ok := vals[j]; !ok { + vals[j] = freq{} + } + x := vals[j] + x.spec = ghz + vals[j] = x + } + } + } + } + // need the vals in order (by core count), so get and sort the keys + var valKeys []int + for k := range vals { + valKeys = append(valKeys, k) + } + sort.Ints(valKeys) + // now go through the vals in sorted order + for _, k := range valKeys { + var count, spec, measured string + count = fmt.Sprintf("%d", k) + if vals[k].spec != 0 { + spec = fmt.Sprintf("%.1f", vals[k].spec) + } + if vals[k].measured != 0 { + measured = fmt.Sprintf("%.1f", vals[k].measured) + } + hostValues.Values = append(hostValues.Values, []string{count, spec, measured}) + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newHostTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "Host", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "Name", + "Time", + }, + Values: [][]string{ + { + source.valFromRegexSubmatch("uname -a", `^Linux (\S+) 
\S+`), + source.valFromRegexSubmatch("date -u", `^(.*UTC\s*[0-9]*)$`), + }, + }, + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newOperatingSystemTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "Operating System", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "OS", + "Kernel", + "Boot Parameters", + "Microcode", + }, + Values: [][]string{ + { + source.getOperatingSystem(), + source.valFromRegexSubmatch("uname -a", `^Linux \S+ (\S+)`), + source.getCommandOutputLine("/proc/cmdline"), + source.valFromRegexSubmatch("/proc/cpuinfo", `^microcode.*:\s*(.+?)$`), + }, + }, + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newOperatingSystemBriefTable(tableOS *Table, category TableCategory) (table *Table) { + table = &Table{ + Name: "OS", + Category: category, + AllHostValues: []HostValues{}, + } + copyValues(tableOS, table, []string{ + "Microcode", + "OS", + "Kernel", + }) + for i := range table.AllHostValues { + table.AllHostValues[i].Name = tableOS.AllHostValues[i].Name + } + return +} + +func newSystemTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "System", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "Manufacturer", + "Product Name", + "Version", + "Serial #", + "UUID", + }, + Values: [][]string{ + { + source.valFromDmiDecodeRegexSubmatch("1", `^Manufacturer:\s*(.+?)$`), + source.valFromDmiDecodeRegexSubmatch("1", `^Product Name:\s*(.+?)$`), + source.valFromDmiDecodeRegexSubmatch("1", `^Version:\s*(.+?)$`), + source.valFromDmiDecodeRegexSubmatch("1", `^Serial Number:\s*(.+?)$`), + source.valFromDmiDecodeRegexSubmatch("1", 
						`^UUID:\s*(.+?)$`),
				},
			},
		}
		table.AllHostValues = append(table.AllHostValues, hostValues)
	}
	return
}

// newSystemSummaryTable condenses the System table into a single
// "Manufacturer ProductName" string per host. Panics if the source table is
// missing the expected fields (programming error, not a data error).
func newSystemSummaryTable(tableSystem *Table, category TableCategory) (table *Table) {
	table = &Table{
		Name:          "System",
		Category:      category,
		AllHostValues: []HostValues{},
	}
	for _, srcHv := range tableSystem.AllHostValues {
		mfgIndex, err := findValueIndex(&srcHv, "Manufacturer")
		if err != nil {
			log.Panicf("Did not find Manufacturer field in table.")
		}
		nameIndex, err := findValueIndex(&srcHv, "Product Name")
		if err != nil {
			log.Panicf("Did not find Product Name field in table.")
		}
		var hostValues = HostValues{
			Name: srcHv.Name,
			ValueNames: []string{
				"System",
			},
			Values: [][]string{
				{
					// join manufacturer and product name with a space
					strings.Join([]string{
						srcHv.Values[0][mfgIndex],
						srcHv.Values[0][nameIndex],
					}, " "),
				},
			},
		}
		table.AllHostValues = append(table.AllHostValues, hostValues)
	}
	return
}

// newChassisTable holds dmidecode type 3 (Chassis Information) fields.
func newChassisTable(sources []*Source, category TableCategory) (table *Table) {
	table = &Table{
		Name:          "Chassis",
		Category:      category,
		AllHostValues: []HostValues{},
	}
	for _, source := range sources {
		var hostValues = HostValues{
			Name: source.getHostname(),
			ValueNames: []string{
				"Manufacturer",
				"Type",
				"Version",
				"Serial #",
			},
			Values: [][]string{
				{
					source.valFromDmiDecodeRegexSubmatch("3", `^Manufacturer:\s*(.+?)$`),
					source.valFromDmiDecodeRegexSubmatch("3", `^Type:\s*(.+?)$`),
					source.valFromDmiDecodeRegexSubmatch("3", `^Version:\s*(.+?)$`),
					source.valFromDmiDecodeRegexSubmatch("3", `^Serial Number:\s*(.+?)$`),
				},
			},
		}
		table.AllHostValues = append(table.AllHostValues, hostValues)
	}
	return
}

// newChassisSummaryTable condenses the Chassis table into a single
// "Manufacturer Type" string per host.
func newChassisSummaryTable(tableChassis *Table, category TableCategory) (table *Table) {
	table = &Table{
		Name:          "Chassis",
		Category:      category,
		AllHostValues: []HostValues{},
	}
	for _, srcHv := range tableChassis.AllHostValues {
		mfgIndex, err := findValueIndex(&srcHv, "Manufacturer")
		if err != nil {
			log.Panicf("Did not find Manufacturer field in table.")
		}
		typeIndex, err := findValueIndex(&srcHv, "Type")
		if err != nil {
			log.Panicf("Did not find Type field in table.")
		}
		var hostValues = HostValues{
			Name: srcHv.Name,
			ValueNames: []string{
				"Chassis",
			},
			Values: [][]string{
				{
					strings.Join([]string{
						srcHv.Values[0][mfgIndex],
						srcHv.Values[0][typeIndex],
					}, " "),
				},
			},
		}
		table.AllHostValues = append(table.AllHostValues, hostValues)
	}
	return
}

// newBIOSTable holds dmidecode type 0 (BIOS Information) fields.
func newBIOSTable(sources []*Source, category TableCategory) (table *Table) {
	table = &Table{
		Name:          "BIOS",
		Category:      category,
		AllHostValues: []HostValues{},
	}
	for _, source := range sources {
		var hostValues = HostValues{
			Name: source.getHostname(),
			ValueNames: []string{
				"Vendor",
				"Version",
				"Release Date",
			},
			Values: [][]string{
				{
					source.valFromDmiDecodeRegexSubmatch("0", `^Vendor:\s*(.+?)$`),       // BIOS
					source.valFromDmiDecodeRegexSubmatch("0", `^Version:\s*(.+?)$`),      // BIOS
					source.valFromDmiDecodeRegexSubmatch("0", `^Release Date:\s*(.+?)$`), // BIOS
				},
			},
		}
		table.AllHostValues = append(table.AllHostValues, hostValues)
	}
	return
}

// newBIOSSummaryTable condenses the BIOS table to just the Version field.
func newBIOSSummaryTable(tableBIOS *Table, category TableCategory) (table *Table) {
	table = &Table{
		Name:          "BIOS",
		Category:      category,
		AllHostValues: []HostValues{},
	}
	for _, srcHv := range tableBIOS.AllHostValues {
		versionIndex, err := findValueIndex(&srcHv, "Version")
		if err != nil {
			log.Panicf("Did not find Version field.")
		}
		var hostValues = HostValues{
			Name: srcHv.Name,
			ValueNames: []string{
				"BIOS",
			},
			Values: [][]string{
				{srcHv.Values[0][versionIndex]},
			},
		}
		table.AllHostValues = append(table.AllHostValues, hostValues)
	}
	return
}

// newBaseboardTable holds dmidecode type 2 (Baseboard Information) fields.
func newBaseboardTable(sources []*Source, category TableCategory) (table *Table) {
	table = &Table{
		Name:          "Baseboard",
		Category:      category,
		AllHostValues: []HostValues{},
	}
	for _, source := range
	sources {
		var hostValues = HostValues{
			Name: source.getHostname(),
			ValueNames: []string{
				"Manufacturer",
				"Product Name",
				"Version",
				"Serial #",
			},
			Values: [][]string{
				{
					source.valFromDmiDecodeRegexSubmatch("2", `^Manufacturer:\s*(.+?)$`),  // Baseboard
					source.valFromDmiDecodeRegexSubmatch("2", `^Product Name:\s*(.+?)$`),  // Baseboard
					source.valFromDmiDecodeRegexSubmatch("2", `^Version:\s*(.+?)$`),       // Baseboard
					source.valFromDmiDecodeRegexSubmatch("2", `^Serial Number:\s*(.+?)$`), // Baseboard
				},
			},
		}
		table.AllHostValues = append(table.AllHostValues, hostValues)
	}
	return
}

// newBaseboardSummaryTable condenses the Baseboard table into a single
// "Manufacturer ProductName" string per host.
func newBaseboardSummaryTable(tableBaseboard *Table, category TableCategory) (table *Table) {
	table = &Table{
		Name:          "Baseboard",
		Category:      category,
		AllHostValues: []HostValues{},
	}
	for _, srcHv := range tableBaseboard.AllHostValues {
		mfgIndex, err := findValueIndex(&srcHv, "Manufacturer")
		if err != nil {
			log.Panicf("Did not find Manufacturer field in Baseboard table.")
		}
		nameIndex, err := findValueIndex(&srcHv, "Product Name")
		if err != nil {
			log.Panicf("Did not find Product Name field in Baseboard table.")
		}
		var hostValues = HostValues{
			Name: srcHv.Name,
			ValueNames: []string{
				"Baseboard",
			},
			Values: [][]string{
				{
					strings.Join([]string{
						srcHv.Values[0][mfgIndex],
						srcHv.Values[0][nameIndex],
					}, " "),
				},
			},
		}
		table.AllHostValues = append(table.AllHostValues, hostValues)
	}
	return
}

// newSoftwareTable reports installed toolchain/runtime versions, each parsed
// from the first matching line of the corresponding version command's output.
func newSoftwareTable(sources []*Source, category TableCategory) (table *Table) {
	table = &Table{
		Name:          "Software Version",
		Category:      category,
		AllHostValues: []HostValues{},
	}
	for _, source := range sources {
		var hostValues = HostValues{
			Name: source.getHostname(),
			ValueNames: []string{
				"GCC",
				"GLIBC",
				"Binutils",
				"Python",
				"Python3",
				"Java",
				"OpenSSL",
			},
			Values: [][]string{
				{
					source.valFromRegexSubmatch("gcc version", `^(gcc .*)$`),
					source.valFromRegexSubmatch("glibc version", `^(ldd .*)`),
					source.valFromRegexSubmatch("binutils version", `^(GNU ld .*)$`),
					source.valFromRegexSubmatch("python version", `^(Python .*)$`),
					source.valFromRegexSubmatch("python3 version", `^(Python 3.*)$`),
					source.valFromRegexSubmatch("java version", `^(openjdk .*)$`),
					source.valFromRegexSubmatch("openssl version", `^(OpenSSL .*)$`),
				},
			},
		}
		table.AllHostValues = append(table.AllHostValues, hostValues)
	}
	return
}

// newUncoreTable reports CHA count and uncore min/max frequency via Source
// helper accessors.
func newUncoreTable(sources []*Source, category TableCategory) (table *Table) {
	table = &Table{
		Name:          "Uncore",
		Category:      category,
		AllHostValues: []HostValues{},
	}
	for _, source := range sources {
		var hostValues = HostValues{
			Name: source.getHostname(),
			ValueNames: []string{
				"CHA Count",
				"Minimum Frequency",
				"Maximum Frequency",
			},
			Values: [][]string{
				{
					source.getCHACount(),
					source.getUncoreMinFrequency(),
					source.getUncoreMaxFrequency(),
				},
			},
		}
		table.AllHostValues = append(table.AllHostValues, hostValues)
	}
	return
}

// newCPUTable is the full CPU table: identity (from lscpu/cpuid),
// microarchitecture and memory-channel count looked up from cpusInfo by
// family/model/stepping, plus frequency/topology helper values.
func newCPUTable(sources []*Source, cpusInfo *cpu.CPU, category TableCategory) (table *Table) {
	table = &Table{
		Name:          "CPU",
		Category:      category,
		AllHostValues: []HostValues{},
	}
	for _, source := range sources {
		family := source.valFromRegexSubmatch("lscpu", `^CPU family.*:\s*([0-9]+)$`)
		model := source.valFromRegexSubmatch("lscpu", `^Model.*:\s*([0-9]+)$`)
		stepping := source.valFromRegexSubmatch("lscpu", `^Stepping.*:\s*(.+)$`)
		// family 6 is Intel; an unmatched Intel part still gets a label
		microarchitecture, err := cpusInfo.GetMicroArchitecture(family, model, stepping)
		if err != nil && family == "6" {
			microarchitecture = "Unknown Intel"
		}
		channelCount, err := cpusInfo.GetMemoryChannels(family, model, stepping)
		channels := fmt.Sprintf("%d", channelCount)
		if err != nil {
			channels = "Unknown"
		}
		var hostValues = HostValues{
			Name: source.getHostname(),
			ValueNames: []string{
				"CPU Model",
				"Architecture",
				"Microarchitecture",
				"Family",
				"Model",
				"Stepping",
				"Base Frequency",
				"Maximum Frequency",
				"All-core Maximum Frequency",
				"CPUs",
				"On-line CPU List",
				"Hyperthreading",
				"Cores per Socket",
				"Sockets",
				"NUMA Nodes",
				"NUMA CPU List",
				"L1d Cache",
				"L1i Cache",
				"L2 Cache",
				"L3 Cache",
				"Memory Channels",
				"Prefetchers",
				"Intel Turbo Boost",
				"Virtualization",
				"PPINs",
			},
			Values: [][]string{
				{
					source.valFromRegexSubmatch("lscpu", `^[Mm]odel name.*:\s*(.+?)$`),
					source.valFromRegexSubmatch("lscpu", `^Architecture.*:\s*(.+)$`),
					microarchitecture,
					family,
					model,
					stepping,
					source.getBaseFrequency(),
					source.getMaxFrequency(),
					source.getAllCoreMaxFrequency(),
					source.valFromRegexSubmatch("lscpu", `^CPU\(.*:\s*(.+?)$`),
					source.valFromRegexSubmatch("lscpu", `^On-line CPU.*:\s*(.+?)$`),
					source.getHyperthreading(),
					source.valFromRegexSubmatch("lscpu", `^Core\(s\) per socket.*:\s*(.+?)$`),
					source.valFromRegexSubmatch("lscpu", `^Socket\(.*:\s*(.+?)$`),
					source.valFromRegexSubmatch("lscpu", `^NUMA node\(.*:\s*(.+?)$`),
					source.getNUMACPUList(),
					source.valFromRegexSubmatch("lscpu", `^L1d cache.*:\s*(.+?)$`),
					source.valFromRegexSubmatch("lscpu", `^L1i cache.*:\s*(.+?)$`),
					source.valFromRegexSubmatch("lscpu", `^L2 cache.*:\s*(.+?)$`),
					source.valFromRegexSubmatch("lscpu", `^L3 cache.*:\s*(.+?)$`),
					channels,
					source.getPrefetchers(),
					enabledIfValAndTrue(source.valFromRegexSubmatch("cpuid -1", `^Intel Turbo Boost Technology\s*= (.+?)$`)),
					source.valFromRegexSubmatch("lscpu", `^Virtualization.*:\s*(.+?)$`),
					source.getPPINs(),
				},
			},
		}
		table.AllHostValues = append(table.AllHostValues, hostValues)
	}
	return
}

// newCPUBriefTable is the "Brief" view of the CPU table — a copy of a
// subset of its fields.
func newCPUBriefTable(tableCPU *Table, category TableCategory) (table *Table) {
	table = &Table{
		Name:          "CPU",
		Category:      category,
		AllHostValues: []HostValues{},
	}
	copyValues(tableCPU, table, []string{
		"CPU Model",
		"Microarchitecture",
		"Sockets",
		"Cores per Socket",
		"Hyperthreading",
		"CPUs",
		"Intel Turbo Boost",
		"Base Frequency",
		"All-core Maximum Frequency",
		"Maximum Frequency",
		"NUMA Nodes",
		"Prefetchers",
		"PPINs",
	})
	for i := range table.AllHostValues {
		table.AllHostValues[i].Name = tableCPU.AllHostValues[i].Name
	}
	return
}

// newISATable reports Yes/No support for selected instruction-set extensions,
// each parsed from `cpuid -1` output.
func newISATable(sources []*Source, category TableCategory) (table *Table) {
	table = &Table{
		Name:          "ISA",
		Category:      category,
		AllHostValues: []HostValues{},
	}
	for _, source := range sources {
		var hostValues = HostValues{
			Name: source.getHostname(),
			ValueNames: []string{
				"AVX - Advanced Vector Extensions",
				"AVX2 - Advanced Vector Extensions 2",
				"AVX512F - AVX-512 Foundation",
				"AVX512_VNNI - Vector Neural Network Instructions",
				"AVX512_4VNNIW - VNNI Word Variable Precision",
				"AVX512_BF16 - BFLOAT16",
				"AES - Advanced Encryption Standard New Instructions (AES-NI)",
				"VAES - Vector AES",
				"AMX-BF16 - Advanced Matrix Extensions Tile BFLOAT16",
				"AMX-TILE - Advanced Matrix Extensions Tile Architecture",
				"AMX-INT8 - Advanced Matrix Extensions Tile 8-bit Integer",
			},
			Values: [][]string{
				{
					yesIfTrue(source.valFromRegexSubmatch("cpuid -1", `AVX: advanced vector extensions\s*= (.+?)$`)),
					yesIfTrue(source.valFromRegexSubmatch("cpuid -1", `AVX2: advanced vector extensions 2\s*= (.+?)$`)),
					yesIfTrue(source.valFromRegexSubmatch("cpuid -1", `AVX512F: AVX-512 foundation instructions\s*= (.+?)$`)),
					yesIfTrue(source.valFromRegexSubmatch("cpuid -1", `AVX512_VNNI: neural network instructions\s*= (.+?)$`)),
					yesIfTrue(source.valFromRegexSubmatch("cpuid -1", `AVX512_4VNNIW: neural network instrs\s*= (.+?)$`)),
					yesIfTrue(source.valFromRegexSubmatch("cpuid -1", `AVX512_BF16: bfloat16 instructions\s*= (.+?)$`)),
					yesIfTrue(source.valFromRegexSubmatch("cpuid -1", `AES instruction\s*= (.+?)$`)),
					yesIfTrue(source.valFromRegexSubmatch("cpuid -1", `VAES instructions\s*= (.+?)$`)),
					yesIfTrue(source.valFromRegexSubmatch("cpuid -1", `AMX-BF16: tile bfloat16 support\s*= (.+?)$`)),
					yesIfTrue(source.valFromRegexSubmatch("cpuid -1", `AMX-TILE: tile architecture support\s*= (.+?)$`)),
					yesIfTrue(source.valFromRegexSubmatch("cpuid -1", `AMX-INT8: tile 8-bit integer support\s*= (.+?)$`)),
				},
			},
		}
		table.AllHostValues = append(table.AllHostValues, hostValues)
	}
	return
}

// newAcceleratorTable counts on-board accelerator devices by scanning lshw
// output for mfgid:devid pairs defined in the bundled accelerators.yaml.
// On any load/parse failure the (empty) table is returned after logging.
func newAcceleratorTable(sources []*Source, category TableCategory) (table *Table) {
	table = &Table{
		Name:          "Accelerator",
		Category:      category,
		AllHostValues: []HostValues{},
	}
	type Accelerator struct {
		MfgID       string `yaml:"mfgid"`
		DevID       string `yaml:"devid"`
		Name        string `yaml:"name"`
		FullName    string `yaml:"full_name"`
		Description string `yaml:"description"`
	}
	var accelDefs []Accelerator
	// load GPU info from YAML
	yamlPath, err := core.FindAsset("accelerators.yaml")
	if err != nil {
		log.Printf("failed to find accelerators.yaml")
		return
	}
	yamlBytes, err := os.ReadFile(yamlPath)
	if err != nil {
		log.Printf("failed to read accelerator info file: %s, %v", yamlPath, err)
		return
	}
	err = yaml.UnmarshalStrict(yamlBytes, &accelDefs)
	if err != nil {
		log.Printf("failed to parse accelerator info file: %s, %v", yamlPath, err)
		return
	}
	for _, source := range sources {
		cmdout := source.getCommandOutput("lshw")
		var hostValues = HostValues{
			Name: source.getHostname(),
			ValueNames: []string{
				"Name",
				"Count",
				"Full Name",
				"Description",
			},
			Values: [][]string{},
		}
		for _, accelDef := range accelDefs {
			// count occurrences of this accelerator's mfgid:devid in lshw output
			regex := fmt.Sprintf("%s:%s", accelDef.MfgID, accelDef.DevID)
			re, err := regexp.Compile(regex)
			if err != nil {
				log.Printf("failed to compile regex from accelerator definition: %s", regex)
				return
			}
			count := fmt.Sprintf("%d", len(re.FindAllString(cmdout, -1)))
			hostValues.Values = append(hostValues.Values, []string{accelDef.Name, count, accelDef.FullName, accelDef.Description})
		}
		table.AllHostValues = append(table.AllHostValues, hostValues)
	}
	return
}

// newAcceleratorSummaryTable condenses the Accelerator table into a single
// comma-separated "name:count, name:count, ..." string per host.
func newAcceleratorSummaryTable(tableAccelerator *Table, category TableCategory) (table *Table) {
	table = &Table{
		Name:          "Accelerator",
		Category:      category,
		AllHostValues: []HostValues{},
	}
	for _, hv := range tableAccelerator.AllHostValues {
		var summaryParts []string
		for _, rowValues := range hv.Values {
			// rowValues[0] is Name, rowValues[1] is Count
			summaryParts = append(summaryParts, fmt.Sprintf("%s:%s", rowValues[0], rowValues[1]))
		}
		var summaryHv = HostValues{
			Name:       hv.Name,
			ValueNames: []string{"Accelerators"},
			Values:     [][]string{{strings.Join(summaryParts, ", ")}},
		}
		table.AllHostValues = append(table.AllHostValues, summaryHv)
	}
	return
}

// newPowerTable reports TDP, power policy, and frequency driver/governor/
// C-state settings via Source helper accessors and collector command output.
func newPowerTable(sources []*Source, category TableCategory) (table *Table) {
	table = &Table{
		Name:          "Power",
		Category:      category,
		AllHostValues: []HostValues{},
	}
	for _, source := range sources {
		var hostValues = HostValues{
			Name: source.getHostname(),
			ValueNames: []string{
				"TDP",
				"Power & Perf Policy",
				"Frequency Governor",
				"Frequency Driver",
				"Max C-State",
			},
			Values: [][]string{
				{
					source.getTDP(),
					source.getPowerPerfPolicy(),
					source.getCommandOutputLine("cpu_freq_governor"),
					source.getCommandOutputLine("cpu_freq_driver"),
					source.getCommandOutputLine("max_cstate"),
				},
			},
		}
		table.AllHostValues = append(table.AllHostValues, hostValues)
	}
	return
}

// newGPUTable lists display devices from lshw and resolves their model names
// against the bundled gpus.yaml definitions (DevID is a regex pattern).
// On any load/parse failure the (empty) table is returned after logging.
func newGPUTable(sources []*Source, category TableCategory) (table *Table) {
	table = &Table{
		Name:          "GPU",
		Category:      category,
		AllHostValues: []HostValues{},
	}
	type GPU struct {
		Model string `yaml:"model"`
		MfgID string `yaml:"mfgid"`
		DevID string `yaml:"devid"`
	}
	var gpuDefs []GPU
	// load GPU info from YAML
	yamlPath, err := core.FindAsset("gpus.yaml")
	if err != nil {
		log.Printf("failed to find gpus.yaml")
		return
	}
	yamlBytes, err := os.ReadFile(yamlPath)
	if err != nil {
		log.Printf("failed to read GPU info file: %s, %v", yamlPath, err)
		return
	}
	err = yaml.UnmarshalStrict(yamlBytes, &gpuDefs)
	if err != nil {
		log.Printf("failed to parse GPU info file: %s, %v", yamlPath, err)
		return
	}
	for _, source := range sources {
		// get all GPUs from lshw
		var gpus [][]string
		gpusLshw := source.valsArrayFromRegexSubmatch("lshw", `^pci.*?\s+display\s+(\w+).*?\s+\[(\w+):(\w+)]$`)
		idxMfgName := 0
		idxMfgID := 1
		idxDevID := 2
		for _, gpu := range gpusLshw {
			// Find GPU in GPU defs, note the model
			var model string
			for _, gpuDef := range gpuDefs {
				if gpu[idxMfgID] == gpuDef.MfgID {
					re, err := regexp.Compile(gpuDef.DevID)
					if err != nil {
						log.Printf("failed to compile regex from GPU definition: %s", gpuDef.DevID)
						return
					}
					if re.FindString(gpu[idxDevID]) != "" {
						// found it
						model = gpuDef.Model
						break
					}
				}
			}
			if model == "" {
				// 8086 is Intel's PCI vendor ID
				if gpu[idxMfgID] == "8086" {
					model = "Unknown Intel"
				} else {
					model = "Unknown"
				}
			}
			gpus = append(gpus, []string{gpu[idxMfgName], model, gpu[idxMfgID] + ":" + gpu[idxDevID]})
		}
		var hostValues = HostValues{
			Name: source.getHostname(),
			ValueNames: []string{
				"Manufacturer",
				"Model",
				"PCI ID",
			},
			Values: gpus,
		}
		table.AllHostValues = append(table.AllHostValues, hostValues)
	}
	return
}

// newNICTable lists network interfaces from lshw (PCI and USB) and extracts
// per-interface details from the collector's "nic info" (ethtool-style)
// multi-record output, delimited by the MAC ADDRESS marker line.
func newNICTable(sources []*Source, category TableCategory) (table *Table) {
	table = &Table{
		Name:          "NIC",
		Category:      category,
		AllHostValues: []HostValues{},
	}
	idxNicName := 0
	idxNicModel := 1
	for _, source := range sources {
		nicsInfo := source.valsArrayFromRegexSubmatch("lshw", `^pci.*? (\S+)\s+network\s+(\S.*?)\s+\[\w+:\w+]$`)
		nicsInfo = append(nicsInfo, source.valsArrayFromRegexSubmatch("lshw", `^usb.*? (\S+)\s+network\s+(\S.*?)$`)...)
		var nics [][]string
		for _, nic := range nicsInfo {
			nics = append(nics, []string{
				nic[idxNicName],
				nic[idxNicModel],
				source.valFromOutputRegexSubmatch("nic info", fmt.Sprintf(`Settings for %s:(?:.|\n)*?Speed:\s*(.+)(?:.|\n)*?MAC ADDRESS`, nic[0])),
				source.valFromOutputRegexSubmatch("nic info", fmt.Sprintf(`Settings for %s:(?:.|\n)*?Link detected:\s*(.+)(?:.|\n)*?MAC ADDRESS`, nic[0])),
				source.valFromOutputRegexSubmatch("nic info", fmt.Sprintf(`Settings for %s:(?:.|\n)*?bus-info:\s*(.+)(?:.|\n)*?MAC ADDRESS`, nic[0])),
				source.valFromOutputRegexSubmatch("nic info", fmt.Sprintf(`Settings for %s:(?:.|\n)*?driver:\s*(.+)(?:.|\n)*?MAC ADDRESS`, nic[0])),
				source.valFromOutputRegexSubmatch("nic info", fmt.Sprintf(`Settings for %s:(?:.|\n)*?version:\s*(.+)(?:.|\n)*?MAC ADDRESS`, nic[0])),
				source.valFromOutputRegexSubmatch("nic info", fmt.Sprintf(`Settings for %s:(?:.|\n)*?firmware-version:\s*(.+)(?:.|\n)*?MAC ADDRESS`, nic[0])),
				source.valFromOutputRegexSubmatch("nic info", fmt.Sprintf(`MAC ADDRESS %s: (.*)\n`, nic[0])),
				source.valFromOutputRegexSubmatch("nic info", fmt.Sprintf(`NUMA NODE %s: (.*)\n`, nic[0])),
				enabledIfVal(source.getCommandOutputLine("irqbalance")),
			})
		}
		var hostValues = HostValues{
			Name: source.getHostname(),
			ValueNames: []string{
				"Name",
				"Model",
				"Speed",
				"Link",
				"Bus",
				"Driver",
				"Driver Version",
				"Firmware Version",
				"MAC Address",
				"NUMA Node",
				"IRQBalance",
			},
			Values: nics,
		}
		table.AllHostValues = append(table.AllHostValues, hostValues)
	}
	return
}

// newNICSummaryTable condenses the NIC table into "<count>x <model>" parts,
// one per distinct model, joined into a single string per host.
func newNICSummaryTable(tableNic *Table, category TableCategory) (table *Table) {
	table = &Table{
		Name:          "NIC",
		Category:      category,
		AllHostValues: []HostValues{},
	}
	for _, hv := range tableNic.AllHostValues {
		modelValIdx := 1
		// tally the number of NICs of each model
		var modelCount = make(map[string]int)
		for _, nic := range hv.Values {
			model := nic[modelValIdx]
			if _, ok := modelCount[model]; !ok {
				modelCount[model] = 0
			}
			modelCount[model] += 1
		}
var summaryParts []string + for model, count := range modelCount { + summaryParts = append(summaryParts, fmt.Sprintf("%dx %s", count, model)) + } + var summaryHv = HostValues{ + Name: hv.Name, + ValueNames: []string{"NIC"}, + Values: [][]string{{strings.Join(summaryParts, ", ")}}, + } + table.AllHostValues = append(table.AllHostValues, summaryHv) + } + return +} + +func newMemoryTable(sources []*Source, tableDIMM *Table, tableDIMMPopulation *Table, category TableCategory) (table *Table) { + table = &Table{ + Name: "Memory", + Category: category, + AllHostValues: []HostValues{}, + } + for sourceIdx, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "Installed Memory", + "MemTotal", + "MemFree", + "MemAvailable", + "Buffers", + "Cached", + "HugePages_Total", + "Hugepagesize", + "Transparent Huge Pages", + "Automatic NUMA Balancing", + "Populated Memory Channels", + }, + Values: [][]string{ + { + getDIMMsSummary(tableDIMM, sourceIdx), + source.valFromRegexSubmatch("/proc/meminfo", `^MemTotal:\s*(.+?)$`), + source.valFromRegexSubmatch("/proc/meminfo", `^MemFree:\s*(.+?)$`), + source.valFromRegexSubmatch("/proc/meminfo", `^MemAvailable:\s*(.+?)$`), + source.valFromRegexSubmatch("/proc/meminfo", `^Buffers:\s*(.+?)$`), + source.valFromRegexSubmatch("/proc/meminfo", `^Cached:\s*(.+?)$`), + source.valFromRegexSubmatch("/proc/meminfo", `^HugePages_Total:\s*(.+?)$`), + source.valFromRegexSubmatch("/proc/meminfo", `^Hugepagesize:\s*(.+?)$`), + source.valFromRegexSubmatch("transparent huge pages", `.*\[(.*)\].*`), + source.getMemoryNUMABalancing(), + getPopulatedMemoryChannels(tableDIMMPopulation, sourceIdx), + }, + }, + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newMemoryBriefTable(tableMemory *Table, category TableCategory) (table *Table) { + table = &Table{ + Name: "Memory", + Category: category, + AllHostValues: []HostValues{}, + } + copyValues(tableMemory, table, 
[]string{ + "Installed Memory", + "Hugepagesize", + "Transparent Huge Pages", + "Automatic NUMA Balancing", + }) + for i := range table.AllHostValues { + table.AllHostValues[i].Name = tableMemory.AllHostValues[i].Name + } + return +} + +const ( + BankLocatorIdx = iota + LocatorIdx + ManufacturerIdx + PartIdx + SerialIdx + SizeIdx + TypeIdx + DetailIdx + SpeedIdx + RankIdx + ConfiguredSpeedIdx + DerivedSocketIdx + DerivedChannelIdx + DerivedSlotIdx +) + +func newDIMMTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "DIMM", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "Bank Locator", + "Locator", + "Manufacturer", + "Part", + "Serial", + "Size", + "Type", + "Detail", + "Speed", + "Rank", + "Configured Speed", + }, + Values: source.valsArrayFromDmiDecodeRegexSubmatch( + "17", + `^Bank Locator:\s*(.+?)$`, + `^Locator:\s*(.+?)$`, + `^Manufacturer:\s*(.+?)$`, + `^Part Number:\s*(.+?)\s*$`, + `^Serial Number:\s*(.+?)\s*$`, + `^Size:\s*(.+?)$`, + `^Type:\s*(.+?)$`, + `^Type Detail:\s*(.+?)$`, + `^Speed:\s*(.+?)$`, + `^Rank:\s*(.+?)$`, + `^Configured.*Speed:\s*(.+?)$`, + ), + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +/* +DMI type 9, 24 bytes +System Slot Information + + Designation: RISER_SLOT_1(PCIe x32) + Type: x8 \u003cOUT OF SPEC\u003e + Current Usage: In Use + Length: Long + Characteristics: + 3.3 V is provided + Bus Address: 0000:26:01.0 + Data Bus Width: 11 + Peer Devices: 0 + + Handle 0x007F +*/ +func newPCIeSlotsTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "PCIe Slots", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "Designation", + "Type", + "Length", + "Bus Address", + 
"Current Usage", + }, + Values: source.valsArrayFromDmiDecodeRegexSubmatch( + "9", + `^Designation:\s*(.+?)$`, + `^Type:\s*(.+?)$`, + `^Length:\s*(.+?)\s*$`, + `^Bus Address:\s*(.+?)$`, + `^Current Usage:\s*(.+?)$`, + ), + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newDIMMPopulationTable(sources []*Source, dimmTable *Table, cpusInfo *cpu.CPU, category TableCategory) (table *Table) { + table = &Table{ + Name: "DIMM Population", + Category: category, + AllHostValues: []HostValues{}, + } + for sourceIdx, source := range sources { + // deep copy of dimmTable's HostValues + var hv HostValues + hv.Name = dimmTable.AllHostValues[sourceIdx].Name + hv.ValueNames = append(hv.ValueNames, dimmTable.AllHostValues[sourceIdx].ValueNames...) + hv.Values = append(hv.Values, dimmTable.AllHostValues[sourceIdx].Values...) + // extend value names + hv.ValueNames = append(hv.ValueNames, []string{"Derived Socket", "Derived Channel", "Derived Slot"}...) + // populate with empty values + for valuesIdx := range hv.Values { + hv.Values[valuesIdx] = append(hv.Values[valuesIdx], []string{"", "", ""}...) 
+ } + success := false + family := source.valFromRegexSubmatch("lscpu", `^CPU family.*:\s*([0-9]+)$`) + model := source.valFromRegexSubmatch("lscpu", `^Model.*:\s*([0-9]+)$`) + stepping := source.valFromRegexSubmatch("lscpu", `^Stepping.*:\s*(.+)$`) + channels, err := cpusInfo.GetMemoryChannels(family, model, stepping) + if err != nil { + log.Printf("Failed to find CPU info: %v", err) + } else { + vendor := source.valFromDmiDecodeRegexSubmatch("0", `^\s*Vendor:\s*(.+?)$`) + sockets, _ := strconv.Atoi(source.valFromRegexSubmatch("lscpu", `^Socket\(.*:\s*(.+?)$`)) + if vendor == "Dell" { + err := deriveDIMMInfoDell(&hv.Values, sockets, channels) + if err != nil { + log.Printf("%v", err) + } + success = err == nil + } else if vendor == "HPE" { + err := deriveDIMMInfoHPE(&hv.Values, sockets, channels) + if err != nil { + log.Printf("%v", err) + } + success = err == nil + } else if vendor == "Amazon EC2" { + err := deriveDIMMInfoEC2(&hv.Values, sockets, channels) + if err != nil { + log.Printf("%v", err) + } + success = err == nil + } + if !success { + err := deriveDIMMInfoOther(&hv.Values, sockets, channels) + if err != nil { + log.Printf("%v", err) + } + success = err == nil + } + } + if !success { + hv.ValueNames = []string{} + hv.Values = [][]string{} + } + table.AllHostValues = append(table.AllHostValues, hv) + } + return +} + +func newBenchmarkSummaryTable(sources []*Source, tableMemBandwidthLatency *Table, category TableCategory) (table *Table) { + table = &Table{ + Name: "Summary", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + singleCoreTurbo, allCoreTurbo, turboTDP := source.getTurbo() + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "CPU Speed", + "Single-core Turbo", + "All-core Turbo", + "Turbo TDP", + "Idle TDP", + "Memory Peak Bandwidth", + "Memory Minimum Latency", + "Disk Speed", + }, + Values: [][]string{ + { + source.getCPUSpeed(), // CPU speed + singleCoreTurbo, // 
single-core turbo + allCoreTurbo, // all-core turbo + turboTDP, // turbo TDP + source.getIdleTDP(), // idle TDP + source.getPeakBandwidth(tableMemBandwidthLatency), // peak memory bandwidth + source.getMinLatency(tableMemBandwidthLatency), // minimum memory latency + source.getDiskSpeed(), // disk speed + }, + }, + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newDiskTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "Disk", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "NAME", + "MODEL", + "SIZE", + "MOUNTPOINT", + "FSTYPE", + "RQ-SIZE", + "MIN-IO", + "FwRev", + }, + Values: [][]string{}, + } + for i, line := range source.getCommandOutputLines("lsblk -r -o") { + fields := strings.Split(line, " ") + if len(fields) != len(hostValues.ValueNames)-1 { + log.Printf("lsblk field count mismatch: %s", strings.Join(fields, ",")) + continue + } + if i == 0 { // headers are in the first line + for idx, field := range fields { + if field != hostValues.ValueNames[idx] { + log.Printf("lsblk field name mismatch: %s", strings.Join(fields, ",")) + break + } + } + continue + } + // clean up the model name + fields[1] = strings.ReplaceAll(fields[1], `\x20`, " ") + fields[1] = strings.TrimSpace(fields[1]) + fields = append(fields, source.getDiskFwRev(fields[0])) + hostValues.Values = append(hostValues.Values, fields) + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newDiskSummaryTable(tableDisk *Table, category TableCategory) (table *Table) { + table = &Table{ + Name: "Disk", + Category: category, + AllHostValues: []HostValues{}, + } + for _, hv := range tableDisk.AllHostValues { + modelValIdx := 1 + sizeValIdx := 2 + var modelSizeCount = make(map[string]int) + for _, disk := range hv.Values { + model := disk[modelValIdx] + if 
model != "" { + size := disk[sizeValIdx] + modelSize := strings.Join([]string{model, size}, ",") + if _, ok := modelSizeCount[modelSize]; !ok { + modelSizeCount[modelSize] = 0 + } + modelSizeCount[modelSize] += 1 + } + } + var summaryParts []string + for modelSize, count := range modelSizeCount { + tokens := strings.Split(modelSize, ",") + model := tokens[0] + size := tokens[1] + summaryParts = append(summaryParts, fmt.Sprintf("%dx %s %s", count, size, model)) + } + var summaryHv = HostValues{ + Name: hv.Name, + ValueNames: []string{"Disk"}, + Values: [][]string{{strings.Join(summaryParts, ", ")}}, + } + table.AllHostValues = append(table.AllHostValues, summaryHv) + } + return +} + +func newFilesystemTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "Filesystem", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{}, + Values: [][]string{}, + } + for i, line := range source.getCommandOutputLines("df -h") { + fields := strings.Fields(line) + if i == 0 { // headers are in the first line + hostValues.ValueNames = fields[:len(fields)-1] // drop last header field because it is "On" from "Mounted On" + continue + } + if len(fields) != len(hostValues.ValueNames) { + log.Printf("Warning: filesystem field count does not match header count: %s", strings.Join(fields, ",")) + continue + } + hostValues.Values = append(hostValues.Values, fields) + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newProcessTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "Process", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{}, + Values: [][]string{}, + } + for i, line := range source.getCommandOutputLines("ps -eo") { 
+ fields := strings.Fields(line) + if i == 0 { + hostValues.ValueNames = fields + continue + } + // combine trailing fields + if len(fields) > len(hostValues.ValueNames) { + fields[len(hostValues.ValueNames)-1] = strings.Join(fields[len(hostValues.ValueNames)-1:], " ") + fields = fields[:len(hostValues.ValueNames)] + } + if len(fields) != len(hostValues.ValueNames) { + log.Printf("Warning: process field count does not match header count: %s", strings.Join(fields, ",")) + continue + } + hostValues.Values = append(hostValues.Values, fields) + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newPMUTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "PMU", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "cpu_cycles", + "instructions", + "ref_cycles", + "topdown_slots", + "gen_programmable_1", + "gen_programmable_2", + "gen_programmable_3", + "gen_programmable_4", + "gen_programmable_5", + "gen_programmable_6", + "gen_programmable_7", + "gen_programmable_8", + }, + Values: [][]string{}, + } + lines := source.getCommandOutputLines("msrbusy") + var vals []string + if len(lines) == 2 { + vals = strings.Split(lines[1], "|") + } else { + for range hostValues.ValueNames { + vals = append(vals, "") + } + } + hostValues.Values = append(hostValues.Values, vals) + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newVulnerabilityTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "Vulnerability", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{}, + Values: [][]string{}, + } + var values []string + for _, pair := range 
source.valsArrayFromRegexSubmatch("spectre-meltdown-checker", `(CVE-\d+-\d+): (.+)`) { + hostValues.ValueNames = append(hostValues.ValueNames, pair[0]) + values = append(values, pair[1]) + } + if len(values) > 0 { + hostValues.Values = append(hostValues.Values, []string{}) + hostValues.Values[0] = values + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newVulnerabilitySummaryTable(tableVuln *Table, category TableCategory) (table *Table) { + table = &Table{ + Name: "Vulnerability", + Category: category, + AllHostValues: []HostValues{}, + } + re := regexp.MustCompile(`([A-Z]+)\s.*`) + for _, hv := range tableVuln.AllHostValues { + var vulns []string + for valIdx, valueName := range hv.ValueNames { + longValue := hv.Values[0][valIdx] + match := re.FindStringSubmatch(longValue) + if match != nil { + shortValue := match[1] + vulns = append(vulns, fmt.Sprintf("%s:%s", valueName, shortValue)) + } + } + var summaryHv = HostValues{ + Name: hv.Name, + ValueNames: []string{"Vulnerability"}, + Values: [][]string{{strings.Join(vulns, ", ")}}, + } + table.AllHostValues = append(table.AllHostValues, summaryHv) + } + return +} + +func newSensorTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "Sensor", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{"Sensor", "Reading", "Status"}, + Values: [][]string{}, + } + for _, line := range source.getCommandOutputLines("ipmitool sdr list full") { + vals := strings.Split(line, " | ") + if len(vals) != len(hostValues.ValueNames) { + log.Printf("Warning: unexpected number of sensor fields: %s", strings.Join(vals, ",")) + continue + } + for i := range vals { + vals[i] = strings.TrimSpace(vals[i]) + } + hostValues.Values = append(hostValues.Values, vals) + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return 
+} + +func newChassisStatusTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "Chassis Status", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "Last Power Event", + "Power Overload", + "Main Power Fault", + "Power Restore Policy", + "Drive Fault", + "Cooling/Fan Fault", + "System Time", + }, + Values: [][]string{ + { + source.valFromRegexSubmatch("ipmitool chassis status", `^Last Power Event\s*: (.+?)$`), + source.valFromRegexSubmatch("ipmitool chassis status", `^Power Overload\s*: (.+?)$`), + source.valFromRegexSubmatch("ipmitool chassis status", `^Main Power Fault\s*: (.+?)$`), + source.valFromRegexSubmatch("ipmitool chassis status", `^Power Restore Policy\s*: (.+?)$`), + source.valFromRegexSubmatch("ipmitool chassis status", `^Drive Fault\s*: (.+?)$`), + source.valFromRegexSubmatch("ipmitool chassis status", `^Cooling/Fan Fault\s*: (.+?)$`), + source.getCommandOutputLine("ipmitool sel time get"), + }, + }, + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newSystemEventLogTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "System Event Log", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "Date", + "Time", + "Sensor", + "Status", + "Event", + }, + Values: [][]string{}, + } + for _, line := range source.getCommandOutputLines("ipmitool sel elist") { + fields := strings.Split(line, " | ") + if len(fields) > len(hostValues.ValueNames) { + fields[len(hostValues.ValueNames)-1] = strings.Join(fields[len(hostValues.ValueNames)-1:], ", ") + fields = fields[:len(hostValues.ValueNames)] + + } + if len(fields) != len(hostValues.ValueNames) { + log.Printf("Warning: unexpected number of event 
list fields: %s", strings.Join(fields, ",")) + continue + } + hostValues.Values = append(hostValues.Values, fields) + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newKernelLogTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "Kernel Log", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "Entries", + }, + Values: [][]string{}, + } + for _, line := range source.getCommandOutputLines("dmesg") { + hostValues.Values = append(hostValues.Values, []string{line}) + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newCPUUtilizationTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "CPU Utilization", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "Time", + "CPU", + "CORE", + "SOCK", + "NODE", + "%usr", + "%nice", + "%sys", + "%iowait", + "%irq", + "%soft", + "%steal", + "%guest", + "%gnice", + "%idle", + }, + Values: [][]string{}, + } + reStat := regexp.MustCompile(`^(\d\d:\d\d:\d\d)\s+(\d+)\s+(\d+)\s+(\d+)\s+(-*\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)$`) + for _, line := range source.getProfileLines("mpstat") { + match := reStat.FindStringSubmatch(line) + if len(match) == 0 { + continue + } + hostValues.Values = append(hostValues.Values, match[1:]) + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newAverageCPUUtilizationTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "Average CPU Utilization", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := 
range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "Time", + "%usr", + "%nice", + "%sys", + "%iowait", + "%irq", + "%soft", + "%steal", + "%guest", + "%gnice", + "%idle", + }, + Values: [][]string{}, + } + reStat := regexp.MustCompile(`^(\d\d:\d\d:\d\d)\s+all\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)$`) + for _, line := range source.getProfileLines("mpstat") { + match := reStat.FindStringSubmatch(line) + if len(match) == 0 { + continue + } + hostValues.Values = append(hostValues.Values, match[1:]) + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newIRQRateTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "IRQ Rate", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "Time", + "CPU", + "HI/s", + "TIMER/s", + "NET_TX/s", + "NET_RX/s", + "BLOCK/s", + "IRQ_POLL/s", + "TASKLET/s", + "SCHED/s", + "HRTIMER/s", + "RCU/s", + }, + Values: [][]string{}, + } + reStat := regexp.MustCompile(`^(\d\d:\d\d:\d\d)\s+(\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)$`) + for _, line := range source.getProfileLines("mpstat") { + match := reStat.FindStringSubmatch(line) + if len(match) == 0 { + continue + } + hostValues.Values = append(hostValues.Values, match[1:]) + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newDriveStatsTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "Drive Stats", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + 
"Device", + "tps", + "kB_read/s", + "kB_wrtn/s", + "kB_dscd/s", + }, + Values: [][]string{}, + } + // don't capture the last three vals: "kB_read","kB_wrtn","kB_dscd" -- they aren't the same scale as the others + reStat := regexp.MustCompile(`^(\w+)\s*(\d+.\d+)\s*(\d+.\d+)\s*(\d+.\d+)\s*(\d+.\d+)\s*\d+\s*\d+\s*\d+$`) + for _, line := range source.getProfileLines("iostat") { + match := reStat.FindStringSubmatch(line) + if len(match) == 0 { + continue + } + hostValues.Values = append(hostValues.Values, match[1:]) + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newNetworkStatsTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "Network Stats", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "Time", + "IFACE", + "rxpck/s", + "txpck/s", + "rxkB/s", + "txkB/s", + }, + Values: [][]string{}, + } + // don't capture the last four vals: "rxcmp/s","txcmp/s","rxcmt/s","%ifutil" -- obscure more important vals + reStat := regexp.MustCompile(`^(\d+:\d+:\d+)\s*(\w*)\s*(\d+.\d+)\s*(\d+.\d+)\s*(\d+.\d+)\s*(\d+.\d+)\s*\d+.\d+\s*\d+.\d+\s*\d+.\d+\s*\d+.\d+$`) + for _, line := range source.getProfileLines("sar-network") { + match := reStat.FindStringSubmatch(line) + if len(match) == 0 { + continue + } + hostValues.Values = append(hostValues.Values, match[1:]) + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newMemoryStatsTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "Memory Stats", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "Time", + "free", + "avail", + "used", + "buffers", + "cached", + "commit", + "active", + "inactive", + "dirty", + }, + Values: 
[][]string{}, + } + reStat := regexp.MustCompile(`^(\d+:\d+:\d+)\s*(\d+)\s*(\d+)\s*(\d+)\s*\d+\.\d+\s*(\d+)\s*(\d+)\s*(\d+)\s*\d+\.\d+\s*(\d+)\s*(\d+)\s*(\d+)$`) + for _, line := range source.getProfileLines("sar-memory") { + match := reStat.FindStringSubmatch(line) + if len(match) == 0 { + continue + } + hostValues.Values = append(hostValues.Values, match[1:]) + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newProfileSummaryTable(sources []*Source, category TableCategory, averageCPUUtilizationTable, CPUUtilizationTable, IRQRateTable, driveStatsTable, netStatsTable, memStatsTable *Table) (table *Table) { + table = &Table{ + Name: "Summary", + Category: category, + AllHostValues: []HostValues{}, + } + for idx, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "CPU Utilization (%)", + "Kernel Utilization (%)", + "User Utilization (%)", + "Soft IRQ Utilization (%)", + "IRQ Rate (IRQ/s)", + "Drive Reads (kB/s)", + "Drive Writes (kB/s)", + "Network RX (packets/s)", + "Network TX (packets/s)", + "Network RX (kB/s)", + "Network TX (kB/s)", + "Memory Free (kB)", + "Memory Available (kB)", + "Memory Used (kB)", + }, + Values: [][]string{ + { + getCPUAveragePercentage(averageCPUUtilizationTable, idx, "%idle", true), + getCPUAveragePercentage(averageCPUUtilizationTable, idx, "%sys", false), + getCPUAveragePercentage(averageCPUUtilizationTable, idx, "%usr", false), + getCPUAveragePercentage(averageCPUUtilizationTable, idx, "%irq", false), + getMetricAverage(IRQRateTable, idx, []string{"HI/s", "TIMER/s", "NET_TX/s", "NET_RX/s", "BLOCK/s", "IRQ_POLL/s", "TASKLET/s", "SCHED/s", "HRTIMER/s", "RCU/s"}, "Time"), + getMetricAverage(driveStatsTable, idx, []string{"kB_read/s"}, "Device"), + getMetricAverage(driveStatsTable, idx, []string{"kB_wrtn/s"}, "Device"), + getMetricAverage(netStatsTable, idx, []string{"rxpck/s"}, "Time"), + getMetricAverage(netStatsTable, idx, 
[]string{"txpck/s"}, "Time"), + getMetricAverage(netStatsTable, idx, []string{"rxkB/s"}, "Time"), + getMetricAverage(netStatsTable, idx, []string{"txkB/s"}, "Time"), + getMetricAverage(memStatsTable, idx, []string{"free"}, "Time"), + getMetricAverage(memStatsTable, idx, []string{"avail"}, "Time"), + getMetricAverage(memStatsTable, idx, []string{"used"}, "Time"), + }, + }, + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newFeatureTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "Feature", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "BI_2IFU_4_F_VICTIMS_EN", + "EnableDBPForF", + "NoHmlessPref", + "FBThreadSlicing", + "DISABLE_FASTGO", + "SpecI2MEn", + "disable_llpref", + "DPT_DISABLE", + }, + } + hostValues.Values = append(hostValues.Values, source.getFeatures()) + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newCXLDeviceTable(sources []*Source, category TableCategory) (table *Table) { + table = &Table{ + Name: "CXL Device", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + var hostValues = HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "Slot", + "Class", + "Vendor", + "Device", + "Rev", + "ProgIf", + "NUMANode", + "IOMMUGroup", + }, + } + hostCxlDevices := source.getPCIDevices("CXL") + for _, device := range hostCxlDevices { + var values []string + for _, key := range hostValues.ValueNames { + if value, ok := device[key]; ok { + values = append(values, value) + } else { + values = append(values, "") + } + } + hostValues.Values = append(hostValues.Values, values) + } + table.AllHostValues = append(table.AllHostValues, hostValues) + } + return +} + +func newCodePathTable(sources []*Source, category TableCategory) (table *Table) { + table = 
&Table{ + Name: "Code Path Frequency", + Category: category, + AllHostValues: []HostValues{}, + } + for _, source := range sources { + hv := HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "System Paths", + "Java Paths", + }, + Values: [][]string{ + { + source.getSystemFolded(), + source.getJavaFolded(), + }, + }, + } + table.AllHostValues = append(table.AllHostValues, hv) + } + return +} + +func newInsightTable(sources []*Source, configReport, briefReport, profileReport, benchmarkReport *Report, analyzeReport *Report, cpusInfo *cpu.CPU) (table *Table) { + table = &Table{ + Name: "Insight", + Category: NoCategory, + AllHostValues: []HostValues{}, + } + var gruleEngine *engine.GruleEngine + var knowledgeBase *ast.KnowledgeBase + var dataContext ast.IDataContext + rulesEngineContext := &RulesEngineContext{ + insightTable: table, + reportsData: []*Report{configReport, briefReport, profileReport, benchmarkReport, analyzeReport}, + sourceIdx: 0, // will be incremented while looping through sources below + } + gruleEngine = &engine.GruleEngine{MaxCycle: 500} + rules, err := getInsightsRules() + if err != nil { + log.Printf("Failed to load insights rules: %v", err) + } else { + dataContext = ast.NewDataContext() + err = dataContext.Add("Report", rulesEngineContext) // we call it "Report" because that makes sense when writing/reading rules + if err != nil { + log.Panicf("failed to add context: %v", err) + } + knowledgeLibrary := ast.NewKnowledgeLibrary() + ruleBuilder := builder.NewRuleBuilder(knowledgeLibrary) + err = ruleBuilder.BuildRuleFromResource("Rules", "0.1", pkg.NewBytesResource(rules)) + if err != nil { + // Ref: https://github.com/hyperjumptech/grule-rule-engine/blob/master/docs/en/GRL_en.md + // Cast the error into pkg.GruleErrorReporter with typecast checking. + // Typecast checking is necessary because the err might not only parsing error. 
+ if reporter, ok := err.(*pkg.GruleErrorReporter); ok { + // Lets iterate all the error we get during parsing. + for i, er := range reporter.Errors { + log.Printf("rules parsing error #%d : %s\n", i, er.Error()) + } + } else { + log.Printf("failed to load rules into engine, %v", err) + } + } else { + knowledgeBase = knowledgeLibrary.NewKnowledgeBaseInstance("Rules", "0.1") + } + } + for sourceIdx, source := range configReport.Sources { + hv := HostValues{ + Name: source.getHostname(), + ValueNames: []string{ + "Recommendation", + "Justification", + }, + } + table.AllHostValues = append(table.AllHostValues, hv) + if knowledgeBase != nil { + rulesEngineContext.sourceIdx = sourceIdx + err = gruleEngine.Execute(dataContext, knowledgeBase) + if err != nil { + log.Printf("failed to execute rules, %v", err) + continue + } + } + } + return +} diff --git a/src/reporter/report_tables_helpers.go b/src/reporter/report_tables_helpers.go new file mode 100644 index 0000000..88f8fd0 --- /dev/null +++ b/src/reporter/report_tables_helpers.go @@ -0,0 +1,589 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +package main + +import ( + "fmt" + "log" + "os" + "regexp" + "strconv" + "strings" + + "intel.com/svr-info/pkg/core" +) + +func enabledIfVal(val string) string { + if val != "" { + return "Enabled" + } + return "Disabled" +} + +func enabledIfValAndTrue(val string) string { + if val == "true" { + return "Enabled" + } + if val == "false" { + return "Disabled" + } + return "" +} + +func yesIfTrue(val string) string { + if val == "true" { + return "Yes" + } + return "No" +} + +func getDIMMsSummary(tableDIMM *Table, sourceIdx int) (val string) { + // counts of unique dimm types + dimmTypeCount := make(map[string]int) + for _, dimm := range tableDIMM.AllHostValues[sourceIdx].Values { + dimmKey := dimm[TypeIdx] + ":" + dimm[SizeIdx] + ":" + dimm[SpeedIdx] + ":" + dimm[ConfiguredSpeedIdx] + if count, ok := dimmTypeCount[dimmKey]; ok { + 
dimmTypeCount[dimmKey] = count + 1 + } else { + dimmTypeCount[dimmKey] = 1 + } + } + var summaries []string + re := regexp.MustCompile(`(\d+)\s*(\w*)`) + for dimmKey, count := range dimmTypeCount { + fields := strings.Split(dimmKey, ":") + match := re.FindStringSubmatch(fields[1]) // size field + if match != nil { + size, err := strconv.Atoi(match[1]) + if err != nil { + log.Printf("Don't recognize DIMM size format: %s", fields[1]) + return + } + sum := count * size + unit := match[2] + dimmType := fields[0] + speed := fields[2] + configuredSpeed := fields[3] + summary := fmt.Sprintf("%d%s (%dx%d%s %s %s [%s])", sum, unit, count, size, unit, dimmType, speed, configuredSpeed) + summaries = append(summaries, summary) + } + } + val = strings.Join(summaries, "; ") + return +} + +func getPopulatedMemoryChannels(tableDIMMPopulation *Table, sourceIdx int) string { + channelsMap := make(map[string]bool) + for _, dimm := range tableDIMMPopulation.AllHostValues[sourceIdx].Values { + if !strings.Contains(dimm[SizeIdx], "No") { + channelsMap[dimm[DerivedSocketIdx]+","+dimm[DerivedChannelIdx]] = true + } + } + if len(channelsMap) > 0 { + return fmt.Sprintf("%d", len(channelsMap)) + } + return "" +} + +/* +Get DIMM socket and slot from Bank Locator or Locator field from dmidecode. +This method is inherently unreliable/incomplete as each OEM can set +these fields as they see fit. +Returns None when there's no match. 
+*/ +func getDIMMSocketSlot(dimmType DIMMType, reBankLoc *regexp.Regexp, reLoc *regexp.Regexp, bankLocator string, locator string) (socket int, slot int, err error) { + if dimmType == DIMMType0 { + match := reLoc.FindStringSubmatch(locator) + if match != nil { + socket, _ = strconv.Atoi(match[1]) + slot, _ = strconv.Atoi(match[3]) + } + return + } else if dimmType == DIMMType1 { + match := reLoc.FindStringSubmatch(locator) + if match != nil { + socket, _ = strconv.Atoi(match[1]) + slot, _ = strconv.Atoi(match[3]) + return + } + } else if dimmType == DIMMType2 { + match := reLoc.FindStringSubmatch(locator) + if match != nil { + socket, _ = strconv.Atoi(match[1]) + slot, _ = strconv.Atoi(match[3]) + return + } + } else if dimmType == DIMMType3 { + match := reBankLoc.FindStringSubmatch(bankLocator) + if match != nil { + socket, _ = strconv.Atoi(match[1]) + slot, _ = strconv.Atoi(match[3]) + return + } + } else if dimmType == DIMMType4 { + match := reBankLoc.FindStringSubmatch(bankLocator) + if match != nil { + socket, _ = strconv.Atoi(match[1]) + slot, _ = strconv.Atoi(match[4]) + return + } + } else if dimmType == DIMMType5 { + match := reBankLoc.FindStringSubmatch(bankLocator) + if match != nil { + socket, _ = strconv.Atoi(match[1]) + slot, _ = strconv.Atoi(match[3]) + return + } + } else if dimmType == DIMMType6 { + match := reLoc.FindStringSubmatch(locator) + if match != nil { + socket, _ = strconv.Atoi(match[1]) + socket -= 1 + slot, _ = strconv.Atoi(match[3]) + slot -= 1 + return + } + } else if dimmType == DIMMType7 { + match := reLoc.FindStringSubmatch(locator) + if match != nil { + socket, _ = strconv.Atoi(match[1]) + slot, _ = strconv.Atoi(match[3]) + slot -= 1 + return + } + } else if dimmType == DIMMType8 { + match := reBankLoc.FindStringSubmatch(bankLocator) + if match != nil { + match2 := reLoc.FindStringSubmatch(locator) + if match2 != nil { + socket, _ = strconv.Atoi(match[1]) + socket -= 1 + slot, _ = strconv.Atoi(match2[2]) + slot -= 1 + return + } + 
} + } else if dimmType == DIMMType9 { + match := reLoc.FindStringSubmatch(locator) + if match != nil { + socket, _ = strconv.Atoi(match[1]) + slot, _ = strconv.Atoi(match[2]) + return + } + } else if dimmType == DIMMType10 { + match := reBankLoc.FindStringSubmatch(bankLocator) + if match != nil { + socket = 0 + slot, _ = strconv.Atoi(match[2]) + return + } + } else if dimmType == DIMMType11 { + match := reLoc.FindStringSubmatch(locator) + if match != nil { + socket = 0 + slot, _ = strconv.Atoi(match[2]) + return + } + } + err = fmt.Errorf("unrecognized bank locator and/or locator in dimm info: %s %s", bankLocator, locator) + return +} + +type DIMMType int + +const ( + DIMMTypeUNKNOWN = -1 + DIMMType0 DIMMType = iota + DIMMType1 + DIMMType2 + DIMMType3 + DIMMType4 + DIMMType5 + DIMMType6 + DIMMType7 + DIMMType8 + DIMMType9 + DIMMType10 + DIMMType11 +) + +func getDIMMParseInfo(bankLocator string, locator string) (dimmType DIMMType, reBankLoc *regexp.Regexp, reLoc *regexp.Regexp) { + dimmType = DIMMTypeUNKNOWN + // Inspur ICX 2s system + // Needs to be before next regex pattern to differentiate + reLoc = regexp.MustCompile(`CPU([0-9])_C([0-9])D([0-9])`) + if reLoc.FindStringSubmatch(locator) != nil { + dimmType = DIMMType0 + return + } + reLoc = regexp.MustCompile(`CPU([0-9])_([A-Z])([0-9])`) + if reLoc.FindStringSubmatch(locator) != nil { + dimmType = DIMMType1 + return + } + reLoc = regexp.MustCompile(`CPU([0-9])_MC._DIMM_([A-Z])([0-9])`) + if reLoc.FindStringSubmatch(locator) != nil { + dimmType = DIMMType2 + return + } + reBankLoc = regexp.MustCompile(`NODE ([0-9]) CHANNEL ([0-9]) DIMM ([0-9])`) + if reBankLoc.FindStringSubmatch(bankLocator) != nil { + dimmType = DIMMType3 + return + } + reBankLoc = regexp.MustCompile(`P([0-9])_Node([0-9])_Channel([0-9])_Dimm([0-9])`) + if reBankLoc.FindStringSubmatch(bankLocator) != nil { + dimmType = DIMMType4 + return + } + reBankLoc = regexp.MustCompile(`_Node([0-9])_Channel([0-9])_Dimm([0-9])`) + if 
reBankLoc.FindStringSubmatch(bankLocator) != nil { + dimmType = DIMMType5 + return + } + /* SKX SDP + * Locator: CPU1_DIMM_A1, Bank Locator: NODE 1 + * Locator: CPU1_DIMM_A2, Bank Locator: NODE 1 + */ + reLoc = regexp.MustCompile(`CPU([1-4])_DIMM_([A-Z])([1-2])`) + if reLoc.FindStringSubmatch(locator) != nil { + reBankLoc = regexp.MustCompile(`NODE ([1-8])`) + if reBankLoc.FindStringSubmatch(bankLocator) != nil { + dimmType = DIMMType6 + return + } + } + /* ICX SDP + * Locator: CPU0_DIMM_A1, Bank Locator: NODE 0 + * Locator: CPU0_DIMM_A2, Bank Locator: NODE 0 + */ + reLoc = regexp.MustCompile(`CPU([0-7])_DIMM_([A-Z])([1-2])`) + if reLoc.FindStringSubmatch(locator) != nil { + reBankLoc = regexp.MustCompile(`NODE ([0-9]+)`) + if reBankLoc.FindStringSubmatch(bankLocator) != nil { + dimmType = DIMMType7 + return + } + } + reBankLoc = regexp.MustCompile(`NODE ([1-9]\d*)`) + if reBankLoc.FindStringSubmatch(bankLocator) != nil { + reLoc = regexp.MustCompile(`DIMM_([A-Z])([1-9]\d*)`) + if reLoc.FindStringSubmatch(locator) != nil { + dimmType = DIMMType8 + return + } + } + /* GIGABYTE MILAN + * Locator: DIMM_P0_A0, Bank Locator: BANK 0 + * Locator: DIMM_P0_A1, Bank Locator: BANK 1 + * Locator: DIMM_P0_B0, Bank Locator: BANK 0 + * ... 
+ * Locator: DIMM_P1_I0, Bank Locator: BANK 0 + */ + reLoc = regexp.MustCompile(`DIMM_P([0-1])_[A-Z]([0-1])`) + if reLoc.FindStringSubmatch(locator) != nil { + dimmType = DIMMType9 + return + } + /* my NUC + * Locator: SODIMM0, Bank Locator: CHANNEL A DIMM0 + * Locator: SODIMM1, Bank Locator: CHANNEL B DIMM0 + */ + reBankLoc = regexp.MustCompile(`CHANNEL ([A-D]) DIMM([0-9])`) + if reBankLoc.FindStringSubmatch(bankLocator) != nil { + dimmType = DIMMType10 + return + } + /* Alder Lake Client Desktop + * Locator: Controller0-ChannelA-DIMM0, Bank Locator: BANK 0 + * Locator: Controller1-ChannelA-DIMM0, Bank Locator: BANK 0 + */ + reLoc = regexp.MustCompile(`Controller([0-1]).*DIMM([0-1])`) + if reLoc.FindStringSubmatch(locator) != nil { + dimmType = DIMMType11 + return + } + return +} + +func deriveDIMMInfoOther(dimms *[][]string, numSockets int, channelsPerSocket int) (err error) { + previousSocket, channel := -1, 0 + if len(*dimms) == 0 { + err = fmt.Errorf("no DIMMs") + return + } + dimmType, reBankLoc, reLoc := getDIMMParseInfo((*dimms)[0][BankLocatorIdx], (*dimms)[0][LocatorIdx]) + if dimmType == DIMMTypeUNKNOWN { + err = fmt.Errorf("unknown DIMM identification format") + return + } + for _, dimm := range *dimms { + var socket, slot int + socket, slot, err = getDIMMSocketSlot(dimmType, reBankLoc, reLoc, dimm[BankLocatorIdx], dimm[LocatorIdx]) + if err != nil { + log.Printf("Couldn't extract socket and slot from DIMM info: %v", err) + return + } + if socket > previousSocket { + channel = 0 + } else if previousSocket == socket && slot == 0 { + channel++ + } + previousSocket = socket + dimm[DerivedSocketIdx] = fmt.Sprintf("%d", socket) + dimm[DerivedChannelIdx] = fmt.Sprintf("%d", channel) + dimm[DerivedSlotIdx] = fmt.Sprintf("%d", slot) + } + return +} + +/* as seen on 2 socket HPE systems...2 slots per channel +* Locator field has these: PROC 1 DIMM 1, PROC 1 DIMM 2, etc... 
+* DIMM/slot numbering on board follows logic shown below + */ +func deriveDIMMInfoHPE(dimms *[][]string, numSockets int, channelsPerSocket int) (err error) { + slotsPerChannel := len(*dimms) / (numSockets * channelsPerSocket) + re := regexp.MustCompile(`PROC ([1-9]\d*) DIMM ([1-9]\d*)`) + for _, dimm := range *dimms { + if !strings.Contains(dimm[BankLocatorIdx], "Not Specified") { + err = fmt.Errorf("doesn't conform to expected HPE Bank Locator format: %s", dimm[BankLocatorIdx]) + return + } + match := re.FindStringSubmatch(dimm[LocatorIdx]) + if match == nil { + err = fmt.Errorf("doesn't conform to expected HPE Locator format: %s", dimm[LocatorIdx]) + return + } + socket, _ := strconv.Atoi(match[1]) + socket -= 1 + dimm[DerivedSocketIdx] = fmt.Sprintf("%d", socket) + dimmNum, _ := strconv.Atoi(match[2]) + channel := (dimmNum - 1) / slotsPerChannel + dimm[DerivedChannelIdx] = fmt.Sprintf("%d", channel) + var slot int + if (dimmNum < channelsPerSocket && dimmNum%2 != 0) || (dimmNum > channelsPerSocket && dimmNum%2 == 0) { + slot = 0 + } else { + slot = 1 + } + dimm[DerivedSlotIdx] = fmt.Sprintf("%d", slot) + } + return +} + +/* as seen on 2 socket Dell systems... +* "Bank Locator" for all DIMMs is "Not Specified" and "Locator" is A1-A12 and B1-B12. +* A1 and A7 are channel 0, A2 and A8 are channel 1, etc. 
+ */ +func deriveDIMMInfoDell(dimms *[][]string, numSockets int, channelsPerSocket int) (err error) { + re := regexp.MustCompile(`([ABCD])([1-9]\d*)`) + for _, dimm := range *dimms { + if !strings.Contains(dimm[BankLocatorIdx], "Not Specified") { + err = fmt.Errorf("doesn't conform to expected Dell Bank Locator format") + return + } + match := re.FindStringSubmatch(dimm[LocatorIdx]) + if match == nil { + err = fmt.Errorf("doesn't conform to expected Dell Locator format") + return + } + alpha := match[1] + var numeric int + numeric, err = strconv.Atoi(match[2]) + if err != nil { + err = fmt.Errorf("doesn't conform to expected Dell Locator numeric format") + return + } + // Socket + // A = 0, B = 1, C = 2, D = 3 + dimm[DerivedSocketIdx] = fmt.Sprintf("%d", int(alpha[0])-int('A')) + // Slot + if numeric <= channelsPerSocket { + dimm[DerivedSlotIdx] = "0" + } else { + dimm[DerivedSlotIdx] = "1" + } + // Channel + if numeric <= channelsPerSocket { + dimm[DerivedChannelIdx] = fmt.Sprintf("%d", numeric-1) + } else { + dimm[DerivedChannelIdx] = fmt.Sprintf("%d", numeric-(channelsPerSocket+1)) + } + } + return +} + +/* as seen on Amazon EC2 bare-metal systems... + * BANK LOC LOCATOR + * c5.metal + * NODE 1 DIMM_A0 + * NODE 1 DIMM_A1 + * ... + * NODE 2 DIMM_G0 + * NODE 2 DIMM_G1 + * ... <<< there's no 'I' + * NODE 2 DIMM_M0 + * NODE 2 DIMM_M1 + * + * c6i.metal + * NODE 0 CPU0 Channel0 DIMM0 + * NODE 0 CPU0 Channel0 DIMM1 + * NODE 0 CPU0 Channel1 DIMM0 + * NODE 0 CPU0 Channel1 DIMM1 + * ... 
+ * NODE 7 CPU1 Channel7 DIMM0 + * NODE 7 CPU1 Channel7 DIMM1 + */ +func deriveDIMMInfoEC2(dimms *[][]string, numSockets int, channelsPerSocket int) (err error) { + c5bankLocRe := regexp.MustCompile(`NODE\s+([1-9])`) + c5locRe := regexp.MustCompile(`DIMM_(.)(.)`) + c6ibankLocRe := regexp.MustCompile(`NODE\s+(\d+)`) + c6ilocRe := regexp.MustCompile(`CPU(\d+)\s+Channel(\d+)\s+DIMM(\d+)`) + for _, dimm := range *dimms { + // try c5.metal format + bankLocMatch := c5bankLocRe.FindStringSubmatch(dimm[BankLocatorIdx]) + locMatch := c5locRe.FindStringSubmatch(dimm[LocatorIdx]) + if locMatch != nil && bankLocMatch != nil { + var socket, channel, slot int + socket, _ = strconv.Atoi(bankLocMatch[1]) + socket -= 1 + if int(locMatch[1][0]) < int('I') { // there is no 'I' + channel = (int(locMatch[1][0]) - int('A')) % channelsPerSocket + } else if int(locMatch[1][0]) > int('I') { + channel = (int(locMatch[1][0]) - int('B')) % channelsPerSocket + } else { + err = fmt.Errorf("doesn't conform to expected EC2 format") + return + } + slot, _ = strconv.Atoi(locMatch[2]) + dimm[DerivedSocketIdx] = fmt.Sprintf("%d", socket) + dimm[DerivedChannelIdx] = fmt.Sprintf("%d", channel) + dimm[DerivedSlotIdx] = fmt.Sprintf("%d", slot) + continue + } + // try c6i.metal format + bankLocMatch = c6ibankLocRe.FindStringSubmatch(dimm[BankLocatorIdx]) + locMatch = c6ilocRe.FindStringSubmatch(dimm[LocatorIdx]) + if locMatch != nil && bankLocMatch != nil { + var socket, channel, slot int + socket, _ = strconv.Atoi(locMatch[1]) + channel, _ = strconv.Atoi(locMatch[2]) + slot, _ = strconv.Atoi(locMatch[3]) + dimm[DerivedSocketIdx] = fmt.Sprintf("%d", socket) + dimm[DerivedChannelIdx] = fmt.Sprintf("%d", channel) + dimm[DerivedSlotIdx] = fmt.Sprintf("%d", slot) + continue + } + err = fmt.Errorf("doesn't conform to expected EC2 format") + return + } + return +} + +/* "1,3-5,8" -> [1,3,4,5,8] */ +func expandCPUList(cpuList string) (cpus []int) { + if cpuList != "" { + tokens := strings.Split(cpuList, ",") + 
for _, token := range tokens { + if strings.Contains(token, "-") { + subTokens := strings.Split(token, "-") + if len(subTokens) == 2 { + begin, errA := strconv.Atoi(subTokens[0]) + end, errB := strconv.Atoi(subTokens[1]) + if errA != nil || errB != nil { + log.Printf("Failed to parse CPU affinity") + return + } + for i := begin; i <= end; i++ { + cpus = append(cpus, i) + } + } + } else { + cpu, err := strconv.Atoi(token) + if err != nil { + log.Printf("CPU isn't integer!") + return + } + cpus = append(cpus, cpu) + } + } + } + return +} + +func getCPUAveragePercentage(table *Table, sourceIndex int, fieldName string, inverse bool) (average string) { + hostValues := &table.AllHostValues[sourceIndex] + sum, _, err := getSumOfFields(hostValues, []string{fieldName}, "Time") + if err != nil { + log.Printf("failed to get sum of fields for CPU metrics: %v", err) + return + } + if len(hostValues.Values) > 0 { + averageFloat := sum / float64(len(hostValues.Values)) + if inverse { + averageFloat = 100.0 - averageFloat + } + average = fmt.Sprintf("%0.2f", averageFloat) + } + return +} + +func getMetricAverage(table *Table, sourceIndex int, fieldNames []string, separatorFieldName string) (average string) { + hostValues := &table.AllHostValues[sourceIndex] + sum, seps, err := getSumOfFields(hostValues, fieldNames, separatorFieldName) + if err != nil { + log.Printf("failed to get sum of fields for IO metrics: %v", err) + return + } + if len(fieldNames) > 0 && seps > 0 { + averageFloat := sum / float64(seps/len(fieldNames)) + average = fmt.Sprintf("%0.2f", averageFloat) + } + return +} + +func getSumOfFields(hostValues *HostValues, fieldNames []string, separatorFieldName string) (sum float64, numSeparators int, err error) { + prevSeparator := "" + separatorIdx, err := findValueIndex(hostValues, separatorFieldName) + if err != nil { + return + } + for _, fieldName := range fieldNames { + var fieldIdx int + fieldIdx, err = findValueIndex(hostValues, fieldName) + if err != nil { + 
return + } + for _, entry := range hostValues.Values { + valueStr := entry[fieldIdx] + var valueFloat float64 + valueFloat, err = strconv.ParseFloat(valueStr, 64) + if err != nil { + return + } + separator := entry[separatorIdx] + if separator != prevSeparator { + numSeparators++ + prevSeparator = separator + } + sum += valueFloat + } + } + return +} + +func getInsightsRules() (rules []byte, err error) { + rulesFilePath, err := core.FindAsset("insights.grl") + if err != nil { + err = fmt.Errorf("could not find rules file, %v", err) + return + } + rules, err = os.ReadFile(rulesFilePath) + if err != nil { + err = fmt.Errorf("failed to read rules file, %v", err) + return + } + return +} diff --git a/src/reporter/rules_engine_context.go b/src/reporter/rules_engine_context.go new file mode 100644 index 0000000..3449b24 --- /dev/null +++ b/src/reporter/rules_engine_context.go @@ -0,0 +1,247 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +/* Defines the context and functions used by the rules engine */ + +package main + +import ( + "log" + "regexp" + "strconv" + "strings" +) + +// RulesEngineContext struct is used as context for rules engine, i.e. 
the rules +// can call the exported functions below and access any exported data in the +// struct (currently none) +type RulesEngineContext struct { + insightTable *Table + reportsData []*Report + sourceIdx int +} + +// GetValue returns a string value from a table +func (r *RulesEngineContext) GetValue(reportName string, tableName string, valueName string) (value string) { + var reportData *Report + for _, rd := range r.reportsData { + if rd.InternalName == reportName { + reportData = rd + break + } + } + if reportData == nil { + log.Printf("report specified in rule not found: %s", reportName) + return + } + table := reportData.findTable(tableName) + if table == nil { + log.Printf("table specified in rule not found: %s", tableName) + return + } + value, err := table.getValue(r.sourceIdx, valueName) + if err != nil { + log.Printf("failed to get value from table, %s:%s, %v", tableName, valueName, err) + } + return +} + +func (r *RulesEngineContext) GetValueFromColumn(reportName, tableName, rowValueName, rowValue, targetValueName string) (value string) { + var reportData *Report + for _, rd := range r.reportsData { + if rd.InternalName == reportName { + reportData = rd + break + } + } + if reportData == nil { + log.Printf("report specified in rule not found: %s", reportName) + return + } + table := reportData.findTable(tableName) + if table == nil { + log.Printf("table specified in rule not found: %s", tableName) + return + } + hv := &table.AllHostValues[r.sourceIdx] + rowValueIndex, err := findValueIndex(hv, rowValueName) + if err != nil { + log.Printf("%v", err) + } + targetValueIndex, err := findValueIndex(hv, targetValueName) + if err != nil { + log.Printf("%v", err) + } + for _, values := range hv.Values { + if values[rowValueIndex] == rowValue { + value = values[targetValueIndex] + break + } + } + return +} + +// GetValuesFromColumn returns all values in specified valueIndex as a string (comma separated list) +func (r *RulesEngineContext) 
GetValuesFromColumn(reportName string, tableName string, valueIndex int64) (values string) { + var reportData *Report + for _, rd := range r.reportsData { + if rd.InternalName == reportName { + reportData = rd + break + } + } + if reportData == nil { + log.Printf("report specified in rule not found: %s", reportName) + return + } + table := reportData.findTable(tableName) + if table == nil { + log.Printf("table specified in rule not found: %s", tableName) + return + } + hv := &table.AllHostValues[r.sourceIdx] + if int64(len(hv.Values)) > valueIndex { + values = strings.Join(hv.Values[0], ",") + } + return +} + +// GetValueAsInt returns an integer value from a table +func (r *RulesEngineContext) GetValueAsInt(reportName string, tableName string, valueName string) (value int) { + v := r.GetValue(reportName, tableName, valueName) + re := regexp.MustCompile(`.*?(\d*)`) + match := re.FindStringSubmatch(v) + var num string + if match != nil { + num = match[1] + } + value, err := strconv.Atoi(num) + if err != nil { + log.Printf("failed to convert string to int: %s", v) + } + return +} + +// GetValueAsFloat returns a float64 value from a table +// if value doesn't contain a float, result will be 0 +func (r *RulesEngineContext) GetValueAsFloat(reportName string, tableName string, valueName string) (value float64) { + v := r.GetValue(reportName, tableName, valueName) + if v == "" { + return + } + re := regexp.MustCompile(`.*?(\d*\.\d*).*`) + match := re.FindStringSubmatch(v) + var num string + if match != nil { + num = match[1] + } + value, err := strconv.ParseFloat(num, 64) + if err != nil { + log.Printf("failed to convert string to float: %s", v) + } + return +} + +// GetValueFromColumnAsFloat returns a float64 value from a table +// if column value doesn't contain a float, result will be 0 +func (r *RulesEngineContext) GetValueFromColumnAsFloat(reportName, tableName, rowValueName, rowValue, targetValueName string) (value float64) { + v := r.GetValueFromColumn(reportName, 
tableName, rowValueName, rowValue, targetValueName) + re := regexp.MustCompile(`.*?(\d*\.\d*).*`) + match := re.FindStringSubmatch(v) + var num string + if match != nil { + num = match[1] + } + value, err := strconv.ParseFloat(num, 64) + if err != nil { + log.Printf("failed to convert string to float: %s", v) + } + return +} + +// CompareVersions -- compares two version strings +// Note: both input versions need to be of the same format +// Supported formats: +// - single integer, ex. 10 +// - two integers, ex. 10.7 +// - three integers, ex. 10.7.33 +// - three integers and a alpha character, ex. 1.1.1m (OpenSSL version format) +// returns 0 if x == y, -1 if x < y, 1 if x > y....and -2 if error +func (r *RulesEngineContext) CompareVersions(x, y string) int { + var res []*regexp.Regexp + res = append(res, regexp.MustCompile(`([0-9]+)\.([0-9]+)\.([0-9]+)([a-z])`)) + res = append(res, regexp.MustCompile(`([0-9]+)\.([0-9]+)\.([0-9]+)`)) + res = append(res, regexp.MustCompile(`([0-9]+)\.([0-9]+)`)) + res = append(res, regexp.MustCompile(`([0-9]+)`)) + + var xMatch, yMatch []string + for _, re := range res { + xMatch = re.FindStringSubmatch(x) + yMatch = re.FindStringSubmatch(y) + if len(xMatch) != len(yMatch) { + return -2 // inconsistent format + } + if xMatch != nil { + break // found a matching format + } + } + if xMatch == nil { + return -2 // unsupported format + } + for i := 1; i <= len(xMatch)-1; i++ { + if i == 4 { // special case for openssl 1.1.1e style format + if xMatch[i] < yMatch[i] { + return -1 + } else if xMatch[i] > yMatch[i] { + return 1 + } + continue + } + xVal, _ := strconv.Atoi(xMatch[i]) + yVal, _ := strconv.Atoi(yMatch[i]) + if xVal < yVal { + return -1 + } else if xVal > yVal { + return 1 + } + } + return 0 // they are the same version +} + +// CompareMicroarchitecture -- comparison of CPU micro-architectures +// returns 0 if x == y, -1 if x < y, 1 if x > y....and -2 if error +func (r *RulesEngineContext) CompareMicroarchitecture(x, y string) 
int { + uArchs := map[string]int{ + "HSX": 1, + "BDX": 2, + "SKX": 3, + "CLX": 4, + "ICX": 5, + "SPR": 6, + "EMR": 7, + } + var xArch, yArch int + var ok bool + if xArch, ok = uArchs[x]; !ok { + return -2 + } + if yArch, ok = uArchs[y]; !ok { + return -2 + } + if xArch < yArch { + return -1 + } + if xArch > yArch { + return 1 + } + return 0 // equal +} + +// AddInsight -- appends an insight to the table +func (r *RulesEngineContext) AddInsight(justification string, recommendation string) { + r.insightTable.AllHostValues[r.sourceIdx].Values = append( + r.insightTable.AllHostValues[r.sourceIdx].Values, + []string{recommendation, justification}, + ) +} diff --git a/src/reporter/source.go b/src/reporter/source.go new file mode 100644 index 0000000..c073a75 --- /dev/null +++ b/src/reporter/source.go @@ -0,0 +1,841 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +/* Reads, parses, and provides access functions to json-formatted data file produced by the collector */ + +package main + +import ( + "encoding/json" + "fmt" + "log" + "math" + "os" + "regexp" + "strconv" + "strings" +) + +type CommandData struct { + Command string `json:"command"` + ExitStatus string `json:"exitstatus"` + Label string `json:"label"` + Stderr string `json:"stderr"` + Stdout string `json:"stdout"` + SuperUser string `json:"superuser"` +} + +type Source struct { + inputFilePath string + Hostname string + ParsedData map[string]CommandData // command label string: command data structure +} + +func newSource(inputFilePath string) (source *Source) { + source = &Source{ + inputFilePath: inputFilePath, + Hostname: "", + ParsedData: map[string]CommandData{}, + } + return +} + +func (s *Source) parse() (err error) { + inputBytes, err := os.ReadFile(s.inputFilePath) + if err != nil { + return + } + var jsonData map[string][]CommandData // hostname: array of command data (this is the format of collector output file) + err = json.Unmarshal(inputBytes, &jsonData) + if 
err != nil { + return + } + // get the hostname + var hostname string + for hostname = range jsonData { + break + } + s.Hostname = hostname + // put the data in a map for faster lookup by command label + for _, c := range jsonData[hostname] { + s.ParsedData[c.Label] = c + } + return +} + +func (s *Source) getHostname() (hostname string) { + return s.Hostname +} + +// return command output or empty string if no match +func (s *Source) getCommandOutput(cmdLabel string) (output string) { + if c, ok := s.ParsedData[cmdLabel]; ok { + output = c.Stdout + } + return +} + +// return array of lines from command output, or empty array if no match or all empty lines +func (s *Source) getCommandOutputLines(cmdLabel string) (lines []string) { + cmdout := s.getCommandOutput(cmdLabel) + dirtyLines := strings.Split(cmdout, "\n") + for _, dirtyLine := range dirtyLines { + line := strings.TrimSpace(dirtyLine) + if line != "" { + lines = append(lines, line) + } + } + return +} + +// get the first line from command output, or empty string +func (s *Source) getCommandOutputLine(cmdLabel string) (line string) { + lines := s.getCommandOutputLines(cmdLabel) + if len(lines) > 0 { + line = lines[0] + } + return +} + +func (s *Source) getCommandOutputSections(cmdLabel string) (sections map[string]string) { + reHeader := regexp.MustCompile(`^##########\s+(.+)\s+##########$`) + sections = make(map[string]string, 0) + var header string + var sectionLines []string + lines := s.getCommandOutputLines(cmdLabel) + lineCount := len(lines) + for idx, line := range lines { + match := reHeader.FindStringSubmatch(line) + if match != nil { + if header != "" { + sections[header] = strings.Join(sectionLines, "\n") + sectionLines = []string{} + } + header = match[1] + if _, ok := sections[header]; ok { + log.Panic("can't have same header twice") + } + continue + } + sectionLines = append(sectionLines, line) + if idx == lineCount-1 { + sections[header] = strings.Join(sectionLines, "\n") + } + } + return +} + 
+// getCommandOutputLabeled -- some collector commands collect output from more than one +// command. We separate that output data with a header that allows us to more easily +// parse it. This function loads that data into a map where the key is extracted +// from the header and the value is the output data itself +// note: only output from those sections whose header matches the provided labelPattern +func (s *Source) getCommandOutputLabeled(cmdLabel string, labelPattern string) (sections map[string]string) { + sections = make(map[string]string, 0) + allSections := s.getCommandOutputSections(cmdLabel) + reLabel := regexp.MustCompile(labelPattern) + for header, content := range allSections { + if reLabel.FindString(header) != "" { + sections[header] = content + } + } + return +} + +// return first match or empty string if no match +func (s *Source) valFromRegexSubmatch(cmdLabel string, regex string) (val string) { + re := regexp.MustCompile(regex) + for _, line := range s.getCommandOutputLines(cmdLabel) { + match := re.FindStringSubmatch(line) + if len(match) > 1 { + val = match[1] + return + } + } + return +} + +// return first match or empty string if no match +func (s *Source) valFromOutputRegexSubmatch(cmdLabel string, regex string) (val string) { + re := regexp.MustCompile(regex) + cmdout := s.getCommandOutput(cmdLabel) + match := re.FindStringSubmatch(cmdout) + if match != nil { + val = match[1] + return + } + return +} + +// return all matches for first capture group in regex +func (s *Source) valsFromRegexSubmatch(cmdLabel string, regex string) (vals []string) { + re := regexp.MustCompile(regex) + for _, line := range s.getCommandOutputLines(cmdLabel) { + match := re.FindStringSubmatch(line) + if len(match) > 1 { + vals = append(vals, match[1]) + } + } + return +} + +// return all matches for all capture groups in regex +func (s *Source) valsArrayFromRegexSubmatch(cmdLabel string, regex string) (vals [][]string) { + re := regexp.MustCompile(regex) + for _, 
line := range s.getCommandOutputLines(cmdLabel) { + match := re.FindStringSubmatch(line) + if len(match) > 1 { + vals = append(vals, match[1:]) + } + } + return +} + +// return all lines of dmi type specified +func (s *Source) getDmiDecodeLines(dmiType string) (lines []string) { + start := false + for _, line := range s.getCommandOutputLines("dmidecode") { + if start && strings.HasPrefix(line, "Handle ") { + start = false + } + if strings.Contains(line, "DMI type "+dmiType+",") { + start = true + } + if start { + lines = append(lines, line) + } + } + return +} + +// return single value from first regex submatch or empty string +func (s *Source) valFromDmiDecodeRegexSubmatch(dmiType string, regex string) (val string) { + re := regexp.MustCompile(regex) + for _, line := range s.getDmiDecodeLines(dmiType) { + match := re.FindStringSubmatch(line) + if len(match) > 1 { + val = match[1] + break + } + } + return +} + +// finds first match in dmiType section of DMI Decode output +// return array of values from regex submatches or zero-length array if no match +func (s *Source) valsFromDmiDecodeRegexSubmatch(dmiType string, regex string) (vals []string) { + re := regexp.MustCompile(regex) + for _, line := range s.getDmiDecodeLines(dmiType) { + match := re.FindStringSubmatch(line) + if match == nil { + continue + } + for i := 1; i < len(match); i++ { + vals = append(vals, match[i]) + } + break + } + return +} + +func (s *Source) getDmiDecodeEntries(dmiType string) (entries [][]string) { + output := s.getCommandOutput("dmidecode") + lines := strings.Split(output, "\n") + var entry []string + typeMatch := false + for _, line := range lines { + if strings.HasPrefix(line, "Handle ") { + if strings.Contains(line, "DMI type "+dmiType+",") { + // type match + typeMatch = true + entry = []string{} + } else { + // not a type match + typeMatch = false + } + } + if !typeMatch { + continue + } + if line == "" { + // end of type match entry + entries = append(entries, entry) + } else { + 
// a line in the entry + entry = append(entry, line) + } + } + return +} + +// return table of matches +func (s *Source) valsArrayFromDmiDecodeRegexSubmatch(dmiType string, regexes ...string) (vals [][]string) { + var res []*regexp.Regexp + for _, r := range regexes { + re := regexp.MustCompile(r) + res = append(res, re) + } + for _, entry := range s.getDmiDecodeEntries(dmiType) { + row := make([]string, len(res)) + for _, line := range entry { + for i, re := range res { + match := re.FindStringSubmatch(strings.TrimSpace(line)) + if len(match) > 1 { + row[i] = match[1] + } + } + } + vals = append(vals, row) + } + return +} + +// return all PCI Devices of specified class +func (s *Source) getPCIDevices(class string) (devices []map[string]string) { + device := make(map[string]string) + cmdout := s.getCommandOutput("lspci -vmm") + re := regexp.MustCompile(`^(\w+):\s+(.*)$`) + for _, line := range strings.Split(cmdout, "\n") { + if line == "" { // end of device + if devClass, ok := device["Class"]; ok { + if devClass == class { + devices = append(devices, device) + } + } + device = make(map[string]string) + continue + } + match := re.FindStringSubmatch(line) + if len(match) > 0 { + key := match[1] + value := match[2] + device[key] = value + } + } + return +} + +// return all lines of profile that matches profileRegex +func (s *Source) getProfileLines(profileRegex string) (lines []string) { + re, err := regexp.Compile(profileRegex) + if err != nil { + log.Panicf("regex %s failed to compile", profileRegex) + } + labeledCmdout := s.getCommandOutputSections("profile") + for label, cmdout := range labeledCmdout { + if re.FindString(label) != "" { + lines = strings.Split(cmdout, "\n") + return + } + } + return +} + +func (s *Source) getOperatingSystem() (os string) { + os = s.valFromRegexSubmatch("/etc/*-release", `^PRETTY_NAME=\"(.+?)\"`) + centos := s.valFromRegexSubmatch("/etc/*-release", `^(CentOS Linux release .*)`) + if centos != "" { + os = centos + } + return +} + 
+func (s *Source) getBaseFrequency() (val string) { + /* add Base Frequency + 1st option) /sys/devices/system/cpu/cpu0/cpufreq/base_frequency + 2nd option) from dmidecode "Current Speed" + 3nd option) parse it from the model name + */ + cmdout := s.getCommandOutputLine("base frequency") + if cmdout != "" { + freqf, err := strconv.ParseFloat(cmdout, 64) + if err == nil { + freqf = freqf / 1000000 + val = fmt.Sprintf("%.1fGHz", freqf) + } + } + if val == "" { + currentSpeedVals := s.valsFromDmiDecodeRegexSubmatch("4", `Current Speed:\s(\d+)\s(\w+)`) + if len(currentSpeedVals) > 0 { + num, err := strconv.ParseFloat(currentSpeedVals[0], 64) + if err == nil { + unit := currentSpeedVals[1] + if unit == "MHz" { + num = num / 1000 + unit = "GHz" + } + val = fmt.Sprintf("%.1f%s", num, unit) + } + } + } + if val == "" { + modelName := s.valFromRegexSubmatch("lscpu", `^[Mm]odel name.*:\s*(.+?)$`) + // the frequency (if included) is at the end of the model name + tokens := strings.Split(modelName, " ") + if len(tokens) > 0 { + lastToken := tokens[len(tokens)-1] + if len(lastToken) > 0 && lastToken[len(lastToken)-1] == 'z' { + val = lastToken + } + } + } + return +} + +func (s *Source) getMaxFrequency() (val string) { + /* get max frequency + * 1st option) /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq + * 2nd option) from MSR + * 3rd option) from dmidecode "Max Speed" + */ + cmdout := s.getCommandOutputLine("maximum frequency") + if cmdout != "" { + freqf, err := strconv.ParseFloat(cmdout, 64) + if err == nil { + freqf = freqf / 1000000 + val = fmt.Sprintf("%.1fGHz", freqf) + } + } + if val == "" { + countFreqs, err := s.getSpecCountFrequencies() + // the first entry is the max single-core frequency + if err == nil && len(countFreqs) > 0 && len(countFreqs[0]) > 1 { + val = countFreqs[0][1] + } + } + if val == "" { + val = s.valFromDmiDecodeRegexSubmatch("4", `Max Speed:\s(.*)`) + } + return +} + +func (s *Source) getAllCoreMaxFrequency() (val string) { + countFreqs, err 
:= s.getSpecCountFrequencies() + // the last entry is the max all-core frequency + if err == nil && len(countFreqs) > 0 && len(countFreqs[len(countFreqs)-1]) > 1 { + val = countFreqs[len(countFreqs)-1][1] + "GHz" + } + return +} + +func (s *Source) getNUMACPUList() (val string) { + nodeCPUs := s.valsFromRegexSubmatch("lscpu", `^NUMA node[0-9] CPU\(.*:\s*(.+?)$`) + val = strings.Join(nodeCPUs, " :: ") + return +} + +func (s *Source) getUncoreMaxFrequency() (val string) { + hex := s.getCommandOutputLine("uncore max frequency") + if hex != "" && hex != "0" { + parsed, err := strconv.ParseInt(hex, 16, 64) + if err == nil { + val = fmt.Sprintf("%.1fGhz", float64(parsed)/10) + } + } + return +} + +func (s *Source) getUncoreMinFrequency() (val string) { + hex := s.getCommandOutputLine("uncore min frequency") + if hex != "" && hex != "0" { + parsed, err := strconv.ParseInt(hex, 16, 64) + if err == nil { + val = fmt.Sprintf("%.1fGHz", float64(parsed)/10) + } + } + return +} + +func (s *Source) getCHACount() (val string) { + options := []string{"uncore client cha count", "uncore cha count", "uncore cha count spr"} + for _, option := range options { + hexCount := s.getCommandOutputLine(option) + if hexCount != "" && hexCount != "0" { + count, err := strconv.ParseInt(hexCount, 16, 64) + if err == nil { + val = fmt.Sprintf("%d", count) + break + } + } + } + return +} + +func (s *Source) getPrefetchers() (val string) { + prefetchers := s.valFromRegexSubmatch("rdmsr 0x1a4", `^([0-9a-fA-F]+)`) + if prefetchers != "" { + prefetcherInt, err := strconv.ParseInt(prefetchers, 16, 64) + if err == nil { + // prefetchers are enabled when associated bit is 0 + // 1: "L2 HW" + // 2: "L2 Adj." 
+ // 4: "DCU HW" + // 8: "DCU IP" + var prefList []string + for i, pref := range []string{"L2 HW", "L2 Adj.", "DCU HW", "DCU IP"} { + bitMask := int64(math.Pow(2, float64(i))) + // if bit is zero + if bitMask&prefetcherInt == 0 { + prefList = append(prefList, pref) + } + } + if len(prefList) > 0 { + val = strings.Join(prefList, ", ") + } else { + val = "None" + } + } + } + return +} + +/* +.................... bit default +"BI to IFU", 2 0 +"EnableDBPForF", 3 0 +"NoHmlessPref", 14 0 +"DisFBThreadSlicing", 15 1 +"DISABLE_FASTGO", 27 0 +"SpecI2MEn", 30 1 +"disable_llpref", 42 0 +"DPT_DISABLE", 45 0 +*/ +func (s *Source) getFeatures() (vals []string) { + features := s.valFromRegexSubmatch("rdmsr 0x6d", `^([0-9a-fA-F]+)`) + if features != "" { + featureInt, err := strconv.ParseInt(features, 16, 64) + if err == nil { + for _, bit := range []int{2, 3, 14, 15, 27, 30, 42, 45} { + bitMask := int64(math.Pow(2, float64(bit))) + vals = append(vals, fmt.Sprintf("%d", bitMask&featureInt>>bit)) + } + } + } + return +} + +func (s *Source) getPPINs() (val string) { + ppins := s.getCommandOutputLines("rdmsr 0x4f") + uniquePpins := []string{} + for _, ppin := range ppins { + found := false + for _, p := range uniquePpins { + if string(p) == ppin { + found = true + break + } + } + if !found && ppin != "" { + uniquePpins = append(uniquePpins, ppin) + } + } + val = strings.Join(uniquePpins, ",") + return +} + +func (s *Source) getHyperthreading() (val string) { + // lscpu on Alder Lake (hybrid cores) reports one thread per core even when hyper-threading is enabled, so + // use this approach to detect hyperthreading... 
+ numCPUs, err1 := strconv.Atoi(s.valFromRegexSubmatch("lscpu", `^CPU\(.*:\s*(.+?)$`)) // logical CPUs + numSockets, err2 := strconv.Atoi(s.valFromRegexSubmatch("lscpu", `^Socket\(.*:\s*(.+?)$`)) + numCores, err3 := strconv.Atoi(s.valFromRegexSubmatch("lscpu", `^Core\(.*:\s*(.+?)$`)) // physical cores + if err1 != nil || err2 != nil || err3 != nil { + return + } + if numCPUs > numCores*numSockets { + val = "Enabled" + } else { + val = "Disabled" + } + return +} + +func convertMsrToDecimals(msr string) (decVals []int64, err error) { + re := regexp.MustCompile(`[0-9a-fA-F][0-9a-fA-F]`) + hexVals := re.FindAll([]byte(msr), -1) + if hexVals == nil { + err = fmt.Errorf("no hex values found in msr") + return + } + decVals = make([]int64, len(hexVals)) + decValsIndex := len(decVals) - 1 + for _, hexVal := range hexVals { + var decVal int64 + decVal, err = strconv.ParseInt(string(hexVal), 16, 64) + if err != nil { + return + } + decVals[decValsIndex] = decVal + decValsIndex-- + } + return +} + +func (s *Source) getSpecCountFrequencies() (countFreqs [][]string, err error) { + hexCounts := s.valFromRegexSubmatch("rdmsr 0x1ae", `^([0-9a-fA-F]+)`) + hexFreqs := s.valFromRegexSubmatch("rdmsr 0x1ad", `^([0-9a-fA-F]+)`) + if hexCounts != "" && hexFreqs != "" { + var decCounts, decFreqs []int64 + decCounts, err = convertMsrToDecimals(hexCounts) + if err != nil { + return + } + decFreqs, err = convertMsrToDecimals(hexFreqs) + if err != nil { + return + } + if len(decCounts) != 8 || len(decFreqs) != 8 { + err = fmt.Errorf("unexpected number of core counts or frequencies") + return + } + for i, decCount := range decCounts { + countFreqs = append(countFreqs, []string{fmt.Sprintf("%d", decCount), fmt.Sprintf("%.1f", float64(decFreqs[i])/10.0)}) + } + } + return +} + +func (s *Source) getMemoryNUMABalancing() (val string) { + out := s.getCommandOutputLine("automatic numa balancing") + if out == "1" { + val = "Enabled" + } else if out == "0" { + val = "Disabled" + } + return +} + +func 
geoMean(vals []float64) (val float64) { + m := 0.0 + for i, x := range vals { + lx := math.Log(x) + m += (lx - m) / float64(i+1) + } + val = math.Exp(m) + return +} + +func (s *Source) getCPUSpeed() (val string) { + var vals []float64 + for _, line := range s.getCommandOutputLines("stress-ng cpu methods") { + tokens := strings.Split(line, " ") + if len(tokens) == 2 { + fv, err := strconv.ParseFloat(tokens[1], 64) + if err != nil { + continue + } + vals = append(vals, fv) + } + } + if len(vals) > 0 { + geoMean := geoMean(vals) + val = fmt.Sprintf("%.0f ops/s", geoMean) + } + return +} + +func (s *Source) getTurbo() (singleCoreTurbo, allCoreTurbo, turboTDP string) { + var turbos []string + var tdps []string + var headers []string + var idxTurbo, idxTdp int + var turbo, tdp string + re := regexp.MustCompile(`\s+`) // whitespace + for _, line := range s.getCommandOutputLines("CPU Turbo Test") { + if strings.Contains(line, "stress-ng") { + if strings.Contains(line, "completed") { + if turbo != "" && tdp != "" { + turbos = append(turbos, turbo) + tdps = append(tdps, tdp) + } + } + continue + } + if strings.Contains(line, "Package") || strings.Contains(line, "CPU") || strings.Contains(line, "Core") || strings.Contains(line, "Node") { + headers = re.Split(line, -1) // split by whitespace + for i, h := range headers { + if h == "Bzy_MHz" { + idxTurbo = i + } else if h == "PkgWatt" { + idxTdp = i + } + } + continue + } + tokens := re.Split(line, -1) + turbo = tokens[idxTurbo] + tdp = tokens[idxTdp] + } + if len(turbos) == 2 { + singleCoreTurbo = turbos[0] + " MHz" + allCoreTurbo = turbos[1] + " MHz" + } + if len(tdps) == 2 { + turboTDP = tdps[1] + " Watts" + } + return +} + +func (s *Source) getIdleTDP() (val string) { + cmdout := s.getCommandOutputLine("CPU Idle") + if cmdout != "" && cmdout != "0.00" { + val = cmdout + " Watts" + } + return +} + +func (s *Source) getPeakBandwidth(table *Table) (val string) { + for _, hv := range table.AllHostValues { + if hv.Name == 
s.getHostname() { + var peak float64 + for _, values := range hv.Values { + if len(values) == 2 { + bandwidth := values[1] + bw, err := strconv.ParseFloat(bandwidth, 64) + if err != nil { + continue + } + peak = math.Max(peak, bw) + } + } + if peak > 0 { + val = fmt.Sprintf("%.1f GB/s", peak) + } + break + } + } + return +} + +func (s *Source) getMinLatency(table *Table) (val string) { + for _, hv := range table.AllHostValues { + if hv.Name == s.getHostname() { + var min float64 = math.MaxFloat64 + for _, values := range hv.Values { + if len(values) == 2 { + latency := values[0] + l, err := strconv.ParseFloat(latency, 64) + if err != nil { + continue + } + min = math.Min(l, min) + } + } + if min < math.MaxFloat64 { + val = fmt.Sprintf("%.1f ns", min) + } + break + } + } + return +} + +func (s *Source) getDiskSpeed() (val string) { + for _, line := range s.getCommandOutputLines("fio") { + if strings.Contains(line, "read: IOPS") { + re := regexp.MustCompile(`[=,]`) + tokens := re.Split(line, 3) + val = tokens[1] + " iops" + return + } + } + return +} + +func (s *Source) getPowerPerfPolicy() (val string) { + msrHex := s.getCommandOutputLine("rdmsr 0x1b0") + msr, err := strconv.ParseInt(msrHex, 16, 0) + if err == nil { + if msr < 7 { + val = "Performance" + } else if msr > 10 { + val = "Power" + } else { + val = "Balanced" + } + } + return +} + +func (s *Source) getTDP() (val string) { + msrHex := s.getCommandOutputLine("rdmsr 0x610") + msr, err := strconv.ParseInt(msrHex, 16, 0) + if err == nil && msr != 0 { + val = fmt.Sprint(msr/8) + " watts" + } + return +} + +// get the FwRev for the given device from hdparm +func (s *Source) getDiskFwRev(device string) (fwRev string) { + reFwRev := regexp.MustCompile(`FwRev=(\w+)`) + reDev := regexp.MustCompile(fmt.Sprintf(`/dev/%s:`, device)) + devFound := false + for _, line := range s.getCommandOutputLines("hdparm") { + if !devFound { + if reDev.FindString(line) != "" { + devFound = true + continue + } + } else { + match := 
reFwRev.FindStringSubmatch(line) + if match != nil { + fwRev = match[1] + break + } + } + } + return +} + +// getJavaFolded -- retrieves folded code path frequency data for java processes +func (s *Source) getJavaFolded() (folded string) { + asyncProfilerOutput := s.getCommandOutputLabeled("analyze", `async-profiler \d+`) + javaFolded := make(map[string]string) + re := regexp.MustCompile(`^async-profiler (\d+) (.*)$`) + for header, stacks := range asyncProfilerOutput { + if stacks == "" { + log.Printf("no stacks for: %s", header) + continue + } + match := re.FindStringSubmatch(header) + if match == nil { + log.Printf("header didn't match regex: %s", header) + continue + } + pid := match[1] + processName := match[2] + _, ok := javaFolded[processName] + if processName == "" { + processName = "java (" + pid + ")" + } else if ok { + processName = processName + " (" + pid + ")" + } + javaFolded[processName] = stacks + } + folded, err := mergeJavaFolded(javaFolded) + if err != nil { + log.Printf("%v", err) + } + return +} + +// getSystemFolded -- retrieves folded code path frequency data, i.e., merged output +// from fp and dwarf perf +func (s *Source) getSystemFolded() (folded string) { + perfSections := s.getCommandOutputLabeled("analyze", `perf_`) + var dwarfFolded, fpFolded string + for header, content := range perfSections { + if header == "pwerf_dwarf" { + dwarfFolded = content + } else if header == "perf_fp" { + fpFolded = content + } + } + folded, err := mergeSystemFolded(fpFolded, dwarfFolded) + if err != nil { + log.Printf("error merging folded stacks: %v", err) + } + return +} diff --git a/src/reporter/table.go b/src/reporter/table.go new file mode 100644 index 0000000..ce03bcf --- /dev/null +++ b/src/reporter/table.go @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2023 Intel Corporation + * SPDX-License-Identifier: MIT + */ +/* reports are made up of tables, the Table data structure and some helpful functions are defined here */ + +package main + +import "fmt" + +// 
HostValues ... a single host's table values +type HostValues struct { + Name string // host's name + ValueNames []string + Values [][]string //[record][field] +} + +type TableCategory int + +const ( + System TableCategory = iota + Software + CPU + Power + Memory + Network + Storage + GPU + CXL + Security + Status + NoCategory +) + +var TableCategoryLabels = []string{"System", "Software", "CPU", "Power", "Memory", "Network", "Storage", "GPU", "CXL", "Security", "Status"} + +// Table ... all hosts +type Table struct { + Name string // table's name + Category TableCategory + AllHostValues []HostValues +} + +func (t *Table) getValue(sourceIdx int, valueName string) (value string, err error) { + valueIndex, err := findValueIndex(&t.AllHostValues[sourceIdx], valueName) + if err != nil { + return + } + if len(t.AllHostValues[sourceIdx].Values) == 0 { + err = fmt.Errorf("no values in table for this host") + return + } + value = t.AllHostValues[sourceIdx].Values[0][valueIndex] + return +} + +// findValueIndex returns the index of the specified value name or error +func findValueIndex(srcHv *HostValues, valueName string) (index int, err error) { + for i, valName := range srcHv.ValueNames { + if valName == valueName { + index = i + return + } + } + err = fmt.Errorf("value name not found: %s", valueName) + return +} + +// copy specified values from one table to another +func copyValues(src *Table, dst *Table, valueNames []string) { + for _, srcHv := range src.AllHostValues { + dstHv := HostValues{ + Name: "", + ValueNames: valueNames, + Values: [][]string{}, + } + var valueIndices []int + for _, valueName := range valueNames { + idx, err := findValueIndex(&srcHv, valueName) + if err == nil { + valueIndices = append(valueIndices, idx) + } + } + for srcRecordIndex, srcRecord := range srcHv.Values { + dstHv.Values = append(dstHv.Values, []string{}) + for valueIndex := range valueNames { + dstHv.Values[srcRecordIndex] = append(dstHv.Values[srcRecordIndex], 
srcRecord[valueIndices[valueIndex]]) + } + } + dst.AllHostValues = append(dst.AllHostValues, dstHv) + } +} diff --git a/staticcheck.conf b/staticcheck.conf new file mode 100644 index 0000000..06eb720 --- /dev/null +++ b/staticcheck.conf @@ -0,0 +1,4 @@ +checks = ["all"] +initialisms = ["ACL", "API", "ASCII", "CPU", "CSS", "DIMM", "DNS", "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID", "IP", "IRQ", "JSON", "QPS", "RAM", "RPC", "SLA", "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL", "UDP", "UI", "GID", "UID", "UUID", "URI", "URL", "UTF8", "VM", "XML", "XMPP", "XSRF", "XSS", "SIP", "RTP", "AMQP", "DB", "TS"] +dot_import_whitelist = ["github.com/mmcloughlin/avo/build", "github.com/mmcloughlin/avo/operand", "github.com/mmcloughlin/avo/reg"] +http_status_code_whitelist = ["200", "400", "404", "500"] \ No newline at end of file diff --git a/test/functional b/test/functional new file mode 100755 index 0000000..630c299 --- /dev/null +++ b/test/functional @@ -0,0 +1,103 @@ +#!/usr/bin/env bash + +app="svr-info/svr-info" +testout="./svr-info/testout" +testout_base=$( basename $testout ) + +failures=0 + +# helper function: test_output_files_exist +# arguments: +# $1 - test name +# $2 - arguments to pass to the app +# $3 - the file names expected to exist +test_output_files_exist () { + mkdir -p $testout + # shellcheck disable=SC2086 + output=$( $app -output $testout $2 2>&1 ) + missing=false + for filename in $3; do + if [ ! -f "$testout/$filename" ]; then + echo "> missing expected output file: $filename" + missing=true + fi + done + if ! 
# helper function: test_stdout
# arguments
#   $1 - test name
#   $2 - arguments to pass to the app
#   $3 - regex describing output to expect
test_stdout () {
    mkdir -p $testout
    # shellcheck disable=SC2086
    output=$( $app -output $testout $2 2>&1 )
    # report the failure details first when the regex does not match
    if [[ ! $output =~ $3 ]]; then
        echo "> expected: $3"
        echo "> got: $output"
        echo "FAIL: test_stdout - $1"
        failures=$((failures+1))
    else
        echo "PASS: test_stdout - $1"
    fi
    rm -rf $testout
}
$testout/"$HOSTNAME".json > /dev/null 2>&1 ; then + echo "PASS: json report is valid JSON" +else + echo "FAIL: json report is NOT valid JSON" + failures=$((failures+1)) +fi +# these sections names should remain consistent in the JSON report +alphaSections=("Analyze" "Brief" "Configuration" "Performance" "Profile" "Recommendations") +numKeys=$( jq 'length' $testout/"$HOSTNAME".json ) +if [[ $numKeys == "${#alphaSections[@]}" ]]; then + echo "PASS: json report has $numKeys sections" +else + echo "FAIL: json report has $numKeys sections but should have ${#alphaSections[@]} sections" + failures=$((failures+1)) +fi +for index in "${!alphaSections[@]}"; do + key=$( jq --raw-output keys["$index"] $testout/"$HOSTNAME".json ) + if [[ $key == "${alphaSections[$index]}" ]]; then + echo "PASS: found the ${alphaSections[$index]} section in the JSON report" + else + echo "FAIL: did not find the ${alphaSections[$index]} section in the JSON report" + failures=$((failures+1)) + fi +done +rm -rf $testout +#### + + +if [ $failures -gt 0 ]; then + echo "FAILED $failures TESTS" + exit 1 +else + echo "PASSED ALL TESTS" + exit 0 +fi \ No newline at end of file diff --git a/third-party-programs.txt b/third-party-programs.txt new file mode 100644 index 0000000..eea3ab6 --- /dev/null +++ b/third-party-programs.txt @@ -0,0 +1,500 @@ +Intel® System Health Inspector Third Party Programs File +This file is the "third-party-programs.txt" file specified in the associated Intel end user license agreement for the Intel software you are licensing. +Third party programs and their corresponding required notices and/or license terms are listed below. +------------------------------------------------------------- +DMidecode +Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. 
+------------------------------------------------------------- +stress-ng +Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +------------------------------------------------------------- +sysstat +License: GPLv2+ +Copyright: + * (C) 1998-2022 by Sebastien GODARD (sysstat orange.fr) + * + *************************************************************************** + * This program is free software; you can redistribute it and/or modify it * + * under the terms of the GNU General Public License as published by the * + * Free Software Foundation; either version 2 of the License, or (at your * + * option) any later version. * + * * + * This program is distributed in the hope that it will be useful, but * + * WITHOUT ANY WARRANTY; without the implied warranty of MERCHANTABILITY * + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * + * for more details. * + * * + * You should have received a copy of the GNU General Public License along * + * with this program; if not, write to the Free Software Foundation, Inc., * + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA * + *************************************************************************** +------------------------------------------------------------- +cpuid +** Copyright 2003,2004,2005,2006,2010,2011,2012,2013,2014,2015,2016,2017,2018, +** 2020 by Todd Allen. +Linux Kernel (turbostat) +Copyright (c) 2013 Intel Corporation. + * Len Brown +------------------------------------------------------------- +ethtool +* Copyright (C) 1998 David S. 
Miller (davem@dm.cobaltmicro.com) + * Portions Copyright 2001 Sun Microsystems + * Kernel 2.4 update Copyright 2001 Jeff Garzik + * Wake-on-LAN,natsemi,misc support by Tim Hockin + * Portions Copyright 2002 Intel + * Portions Copyright (C) Sun Microsystems 2008 + * do_test support by Eli Kupermann + * ETHTOOL_PHYS_ID support by Chris Leech + * e1000 support by Scott Feldman + * e100 support by Wen Tao + * ixgb support by Nicholas Nunley + * amd8111e support by Reeja John + * long arguments by Andi Kleen. + * SMSC LAN911x support by Steve Glendinning + * Rx Network Flow Control configuration support + * Various features by Ben Hutchings ; + * Copyright 2009, 2010 Solarflare Communications + * MDI-X set support by Jesse Brandeburg + * Copyright 2012 Intel Corporation + * vmxnet3 support by Shrikrishna Khare + * Various features by Ben Hutchings ; + * Copyright 2008-2010, 2013-2016 Ben Hutchings + * QSFP+/QSFP28 DOM support by Vidya Sagar Ravipati +------------------------------------------------------------- +fio +Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. +------------------------------------------------------------- +lshw +Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + +GNU GENERAL PUBLIC LICENSE +Version 2, June 1991 + +Copyright (C) 1989, 1991 Free Software Foundation, Inc. +51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + +Everyone is permitted to copy and distribute verbatim copies +of this license document, but changing it is not allowed. + +Preamble + +The licenses for most software are designed to take away your freedom to share and change it. 
By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. + +When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. + +To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. + +For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. + +We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. + +Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. 
+ +Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. + +The precise terms and conditions for copying, distribution and modification follow. + +TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + +0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. + +1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. 
+ +You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. + +2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: + +a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. + +b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. + +c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. 
But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. + +3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: + +a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, + +b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, + +c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for making modifications to it. 
For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. + +If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. + +4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. + +5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. + +6. 
Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. + +7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. + +It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. 
Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. + +This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. + +8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. + +9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. + +10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. 
Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. 
+
+NO WARRANTY
+
+11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES.
+
+END OF TERMS AND CONDITIONS
+-------------------------------------------------------------
+sshpass
+Copyright – not available/not listed
+sshpass was written by Shachar Shemesh for Lingnu Open Source Consulting Ltd.
+
+GNU General Public License v2.0 or later
+
+GNU GENERAL PUBLIC LICENSE
+Version 2, June 1991
+
+Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Everyone is permitted to copy and distribute verbatim copies
+of this license document, but changing it is not allowed. 
+ +Preamble + +The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Library General Public License instead.) You can apply it to your programs, too. + +When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. + +To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. + +For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. + +We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. + +Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. 
If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. + +Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. + +The precise terms and conditions for copying, distribution and modification follow. + +TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + +0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. + +1. 
You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. + +You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. + +2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: + +a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. + +b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. + +c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. 
If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. + +3. You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: + +a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, + +b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, + +c) Accompany it with the information you received as to the offer to distribute corresponding source code. 
(This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. + +If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. + +4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. + +5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. 
Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. + +6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. + +7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. 
+ +It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. + +This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. + +8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. + +9. The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. + +Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. + +10. 
If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. + +NO WARRANTY + +11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + +12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +END OF TERMS AND CONDITIONS + +------------------------------------------------------------- +spectre-meltdown-checker +Copyright not available/not identified +Preamble + +The GNU General Public License is a free, copyleft license for software and other kinds of works. 
+The licenses for most software and other practical works are designed to take away your freedom to share and change the works. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change all versions of a program--to make sure it remains free software for all its users. We, the Free Software Foundation, use the GNU General Public License for most of our software; it applies also to any other work released this way by its authors. You can apply it to your programs, too. + +When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for them if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs, and that you know you can do these things. +To protect your rights, we need to prevent others from denying you these rights or asking you to surrender the rights. Therefore, you have certain responsibilities if you distribute copies of the software, or if you modify it: responsibilities to respect the freedom of others. +For example, if you distribute copies of such a program, whether gratis or for a fee, you must pass on to the recipients the same freedoms that you received. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. +Developers that use the GNU GPL protect your rights with two steps: (1) assert copyright on the software, and (2) offer you this License giving you legal permission to copy, distribute and/or modify it. +For the developers' and authors' protection, the GPL clearly explains that there is no warranty for this free software. 
For both users' and authors' sake, the GPL requires that modified versions be marked as changed, so that their problems will not be attributed erroneously to authors of previous versions. +Some devices are designed to deny users access to install or run modified versions of the software inside them, although the manufacturer can do so. This is fundamentally incompatible with the aim of protecting users' freedom to change the software. The systematic pattern of such abuse occurs in the area of products for individuals to use, which is precisely where it is most unacceptable. Therefore, we have designed this version of the GPL to prohibit the practice for those products. If such problems arise substantially in other domains, we stand ready to extend this provision to those domains in future versions of the GPL, as needed to protect the freedom of users. +Finally, every program is threatened constantly by software patents. States should not allow patents to restrict development and use of software on general-purpose computers, but in those that do, we wish to avoid the special danger that patents applied to a free program could make it effectively proprietary. To prevent this, the GPL assures that patents cannot be used to render the program non-free. +The precise terms and conditions for copying, distribution and modification follow. +TERMS AND CONDITIONS +0. Definitions. +“This License” refers to version 3 of the GNU General Public License. + +“Copyright” also means copyright-like laws that apply to other kinds of works, such as semiconductor masks. +“The Program” refers to any copyrightable work licensed under this License. Each licensee is addressed as “you”. “Licensees” and “recipients” may be individuals or organizations. +To “modify” a work means to copy from or adapt all or part of the work in a fashion requiring copyright permission, other than the making of an exact copy. 
The resulting work is called a “modified version” of the earlier work or a work “based on” the earlier work. +A “covered work” means either the unmodified Program or a work based on the Program. +To “propagate” a work means to do anything with it that, without permission, would make you directly or secondarily liable for infringement under applicable copyright law, except executing it on a computer or modifying a private copy. Propagation includes copying, distribution (with or without modification), making available to the public, and in some countries other activities as well. +To “convey” a work means any kind of propagation that enables other parties to make or receive copies. Mere interaction with a user through a computer network, with no transfer of a copy, is not conveying. +An interactive user interface displays “Appropriate Legal Notices” to the extent that it includes a convenient and prominently visible feature that (1) displays an appropriate copyright notice, and (2) tells the user that there is no warranty for the work (except to the extent that warranties are provided), that licensees may convey the work under this License, and how to view a copy of this License. If the interface presents a list of user commands or options, such as a menu, a prominent item in the list meets this criterion. +1. Source Code. +The “source code” for a work means the preferred form of the work for making modifications to it. “Object code” means any non-source form of a work. +A “Standard Interface” means an interface that either is an official standard defined by a recognized standards body, or, in the case of interfaces specified for a particular programming language, one that is widely used among developers working in that language. 
+The “System Libraries” of an executable work include anything, other than the work as a whole, that (a) is included in the normal form of packaging a Major Component, but which is not part of that Major Component, and (b) serves only to enable use of the work with that Major Component, or to implement a Standard Interface for which an implementation is available to the public in source code form. A “Major Component”, in this context, means a major essential component (kernel, window system, and so on) of the specific operating system (if any) on which the executable work runs, or a compiler used to produce the work, or an object code interpreter used to run it. +The “Corresponding Source” for a work in object code form means all the source code needed to generate, install, and (for an executable work) run the object code and to modify the work, including scripts to control those activities. However, it does not include the work's System Libraries, or general-purpose tools or generally available free programs which are used unmodified in performing those activities but which are not part of the work. For example, Corresponding Source includes interface definition files associated with source files for the work, and the source code for shared libraries and dynamically linked subprograms that the work is specifically designed to require, such as by intimate data communication or control flow between those subprograms and other parts of the work. +The Corresponding Source need not include anything that users can regenerate automatically from other parts of the Corresponding Source. +The Corresponding Source for a work in source code form is that same work. +2. Basic Permissions. +All rights granted under this License are granted for the term of copyright on the Program, and are irrevocable provided the stated conditions are met. This License explicitly affirms your unlimited permission to run the unmodified Program. 
The output from running a covered work is covered by this License only if the output, given its content, constitutes a covered work. This License acknowledges your rights of fair use or other equivalent, as provided by copyright law. +You may make, run and propagate covered works that you do not convey, without conditions so long as your license otherwise remains in force. You may convey covered works to others for the sole purpose of having them make modifications exclusively for you, or provide you with facilities for running those works, provided that you comply with the terms of this License in conveying all material for which you do not control copyright. Those thus making or running the covered works for you must do so exclusively on your behalf, under your direction and control, on terms that prohibit them from making any copies of your copyrighted material outside their relationship with you. +Conveying under any other circumstances is permitted solely under the conditions stated below. Sublicensing is not allowed; section 10 makes it unnecessary. +3. Protecting Users' Legal Rights From Anti-Circumvention Law. +No covered work shall be deemed part of an effective technological measure under any applicable law fulfilling obligations under article 11 of the WIPO copyright treaty adopted on 20 December 1996, or similar laws prohibiting or restricting circumvention of such measures. +When you convey a covered work, you waive any legal power to forbid circumvention of technological measures to the extent such circumvention is effected by exercising rights under this License with respect to the covered work, and you disclaim any intention to limit operation or modification of the work as a means of enforcing, against the work's users, your or third parties' legal rights to forbid circumvention of technological measures. +4. Conveying Verbatim Copies. 
+You may convey verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice; keep intact all notices stating that this License and any non-permissive terms added in accord with section 7 apply to the code; keep intact all notices of the absence of any warranty; and give all recipients a copy of this License along with the Program. + +You may charge any price or no price for each copy that you convey, and you may offer support or warranty protection for a fee. +5. Conveying Modified Source Versions. +You may convey a work based on the Program, or the modifications to produce it from the Program, in the form of source code under the terms of section 4, provided that you also meet all of these conditions: +a) The work must carry prominent notices stating that you modified it, and giving a relevant date. +b) The work must carry prominent notices stating that it is released under this License and any conditions added under section 7. This requirement modifies the requirement in section 4 to “keep intact all notices”. +c) You must license the entire work, as a whole, under this License to anyone who comes into possession of a copy. This License will therefore apply, along with any applicable section 7 additional terms, to the whole of the work, and all its parts, regardless of how they are packaged. This License gives no permission to license the work in any other way, but it does not invalidate such permission if you have separately received it. +d) If the work has interactive user interfaces, each must display Appropriate Legal Notices; however, if the Program has interactive interfaces that do not display Appropriate Legal Notices, your work need not make them do so. 
+A compilation of a covered work with other separate and independent works, which are not by their nature extensions of the covered work, and which are not combined with it such as to form a larger program, in or on a volume of a storage or distribution medium, is called an “aggregate” if the compilation and its resulting copyright are not used to limit the access or legal rights of the compilation's users beyond what the individual works permit. Inclusion of a covered work in an aggregate does not cause this License to apply to the other parts of the aggregate. +6. Conveying Non-Source Forms. +You may convey a covered work in object code form under the terms of sections 4 and 5, provided that you also convey the machine-readable Corresponding Source under the terms of this License, in one of these ways: +a) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by the Corresponding Source fixed on a durable physical medium customarily used for software interchange. +b) Convey the object code in, or embodied in, a physical product (including a physical distribution medium), accompanied by a written offer, valid for at least three years and valid for as long as you offer spare parts or customer support for that product model, to give anyone who possesses the object code either (1) a copy of the Corresponding Source for all the software in the product that is covered by this License, on a durable physical medium customarily used for software interchange, for a price no more than your reasonable cost of physically performing this conveying of source, or (2) access to copy the Corresponding Source from a network server at no charge. +c) Convey individual copies of the object code with a copy of the written offer to provide the Corresponding Source. This alternative is allowed only occasionally and noncommercially, and only if you received the object code with such an offer, in accord with subsection 6b. 
+d) Convey the object code by offering access from a designated place (gratis or for a charge), and offer equivalent access to the Corresponding Source in the same way through the same place at no further charge. You need not require recipients to copy the Corresponding Source along with the object code. If the place to copy the object code is a network server, the Corresponding Source may be on a different server (operated by you or a third party) that supports equivalent copying facilities, provided you maintain clear directions next to the object code saying where to find the Corresponding Source. Regardless of what server hosts the Corresponding Source, you remain obligated to ensure that it is available for as long as needed to satisfy these requirements. +e) Convey the object code using peer-to-peer transmission, provided you inform other peers where the object code and Corresponding Source of the work are being offered to the general public at no charge under subsection 6d. +A separable portion of the object code, whose source code is excluded from the Corresponding Source as a System Library, need not be included in conveying the object code work. +A “User Product” is either (1) a “consumer product”, which means any tangible personal property which is normally used for personal, family, or household purposes, or (2) anything designed or sold for incorporation into a dwelling. In determining whether a product is a consumer product, doubtful cases shall be resolved in favor of coverage. For a particular product received by a particular user, “normally used” refers to a typical or common use of that class of product, regardless of the status of the particular user or of the way in which the particular user actually uses, or expects or is expected to use, the product. 
A product is a consumer product regardless of whether the product has substantial commercial, industrial or non-consumer uses, unless such uses represent the only significant mode of use of the product. +“Installation Information” for a User Product means any methods, procedures, authorization keys, or other information required to install and execute modified versions of a covered work in that User Product from a modified version of its Corresponding Source. The information must suffice to ensure that the continued functioning of the modified object code is in no case prevented or interfered with solely because modification has been made. +If you convey an object code work under this section in, or with, or specifically for use in, a User Product, and the conveying occurs as part of a transaction in which the right of possession and use of the User Product is transferred to the recipient in perpetuity or for a fixed term (regardless of how the transaction is characterized), the Corresponding Source conveyed under this section must be accompanied by the Installation Information. But this requirement does not apply if neither you nor any third party retains the ability to install modified object code on the User Product (for example, the work has been installed in ROM). +The requirement to provide Installation Information does not include a requirement to continue to provide support service, warranty, or updates for a work that has been modified or installed by the recipient, or for the User Product in which it has been modified or installed. Access to a network may be denied when the modification itself materially and adversely affects the operation of the network or violates the rules and protocols for communication across the network. 
+Corresponding Source conveyed, and Installation Information provided, in accord with this section must be in a format that is publicly documented (and with an implementation available to the public in source code form), and must require no special password or key for unpacking, reading or copying. +7. Additional Terms. +“Additional permissions” are terms that supplement the terms of this License by making exceptions from one or more of its conditions. Additional permissions that are applicable to the entire Program shall be treated as though they were included in this License, to the extent that they are valid under applicable law. If additional permissions apply only to part of the Program, that part may be used separately under those permissions, but the entire Program remains governed by this License without regard to the additional permissions. +When you convey a copy of a covered work, you may at your option remove any additional permissions from that copy, or from any part of it. (Additional permissions may be written to require their own removal in certain cases when you modify the work.) You may place additional permissions on material, added by you to a covered work, for which you have or can give appropriate copyright permission. 
+Notwithstanding any other provision of this License, for material you add to a covered work, you may (if authorized by the copyright holders of that material) supplement the terms of this License with terms: +a) Disclaiming warranty or limiting liability differently from the terms of sections 15 and 16 of this License; or +b) Requiring preservation of specified reasonable legal notices or author attributions in that material or in the Appropriate Legal Notices displayed by works containing it; or +c) Prohibiting misrepresentation of the origin of that material, or requiring that modified versions of such material be marked in reasonable ways as different from the original version; or +d) Limiting the use for publicity purposes of names of licensors or authors of the material; or +e) Declining to grant rights under trademark law for use of some trade names, trademarks, or service marks; or +f) Requiring indemnification of licensors and authors of that material by anyone who conveys the material (or modified versions of it) with contractual assumptions of liability to the recipient, for any liability that these contractual assumptions directly impose on those licensors and authors. +All other non-permissive additional terms are considered “further restrictions” within the meaning of section 10. If the Program as you received it, or any part of it, contains a notice stating that it is governed by this License along with a term that is a further restriction, you may remove that term. If a license document contains a further restriction but permits relicensing or conveying under this License, you may add to a covered work material governed by the terms of that license document, provided that the further restriction does not survive such relicensing or conveying. 
+ +If you add terms to a covered work in accord with this section, you must place, in the relevant source files, a statement of the additional terms that apply to those files, or a notice indicating where to find the applicable terms. +Additional terms, permissive or non-permissive, may be stated in the form of a separately written license, or stated as exceptions; the above requirements apply either way. +8. Termination. +You may not propagate or modify a covered work except as expressly provided under this License. Any attempt otherwise to propagate or modify it is void, and will automatically terminate your rights under this License (including any patent licenses granted under the third paragraph of section 11). +However, if you cease all violation of this License, then your license from a particular copyright holder is reinstated (a) provisionally, unless and until the copyright holder explicitly and finally terminates your license, and (b) permanently, if the copyright holder fails to notify you of the violation by some reasonable means prior to 60 days after the cessation. +Moreover, your license from a particular copyright holder is reinstated permanently if the copyright holder notifies you of the violation by some reasonable means, this is the first time you have received notice of violation of this License (for any work) from that copyright holder, and you cure the violation prior to 30 days after your receipt of the notice. +Termination of your rights under this section does not terminate the licenses of parties who have received copies or rights from you under this License. If your rights have been terminated and not permanently reinstated, you do not qualify to receive new licenses for the same material under section 10. +9. Acceptance Not Required for Having Copies. +You are not required to accept this License in order to receive or run a copy of the Program. 
Ancillary propagation of a covered work occurring solely as a consequence of using peer-to-peer transmission to receive a copy likewise does not require acceptance. However, nothing other than this License grants you permission to propagate or modify any covered work. These actions infringe copyright if you do not accept this License. Therefore, by modifying or propagating a covered work, you indicate your acceptance of this License to do so. +10. Automatic Licensing of Downstream Recipients. +Each time you convey a covered work, the recipient automatically receives a license from the original licensors, to run, modify and propagate that work, subject to this License. You are not responsible for enforcing compliance by third parties with this License. +An “entity transaction” is a transaction transferring control of an organization, or substantially all assets of one, or subdividing an organization, or merging organizations. If propagation of a covered work results from an entity transaction, each party to that transaction who receives a copy of the work also receives whatever licenses to the work the party's predecessor in interest had or could give under the previous paragraph, plus a right to possession of the Corresponding Source of the work from the predecessor in interest, if the predecessor has it or can get it with reasonable efforts. +You may not impose any further restrictions on the exercise of the rights granted or affirmed under this License. For example, you may not impose a license fee, royalty, or other charge for exercise of rights granted under this License, and you may not initiate litigation (including a cross-claim or counterclaim in a lawsuit) alleging that any patent claim is infringed by making, using, selling, offering for sale, or importing the Program or any portion of it. +11. Patents. +A “contributor” is a copyright holder who authorizes use under this License of the Program or a work on which the Program is based. 
The work thus licensed is called the contributor's “contributor version”. +A contributor's “essential patent claims” are all patent claims owned or controlled by the contributor, whether already acquired or hereafter acquired, that would be infringed by some manner, permitted by this License, of making, using, or selling its contributor version, but do not include claims that would be infringed only as a consequence of further modification of the contributor version. For purposes of this definition, “control” includes the right to grant patent sublicenses in a manner consistent with the requirements of this License. +Each contributor grants you a non-exclusive, worldwide, royalty-free patent license under the contributor's essential patent claims, to make, use, sell, offer for sale, import and otherwise run, modify and propagate the contents of its contributor version. +In the following three paragraphs, a “patent license” is any express agreement or commitment, however denominated, not to enforce a patent (such as an express permission to practice a patent or covenant not to sue for patent infringement). To “grant” such a patent license to a party means to make such an agreement or commitment not to enforce a patent against the party. +If you convey a covered work, knowingly relying on a patent license, and the Corresponding Source of the work is not available for anyone to copy, free of charge and under the terms of this License, through a publicly available network server or other readily accessible means, then you must either (1) cause the Corresponding Source to be so available, or (2) arrange to deprive yourself of the benefit of the patent license for this particular work, or (3) arrange, in a manner consistent with the requirements of this License, to extend the patent license to downstream recipients. 
“Knowingly relying” means you have actual knowledge that, but for the patent license, your conveying the covered work in a country, or your recipient's use of the covered work in a country, would infringe one or more identifiable patents in that country that you have reason to believe are valid. +If, pursuant to or in connection with a single transaction or arrangement, you convey, or propagate by procuring conveyance of, a covered work, and grant a patent license to some of the parties receiving the covered work authorizing them to use, propagate, modify or convey a specific copy of the covered work, then the patent license you grant is automatically extended to all recipients of the covered work and works based on it. + +A patent license is “discriminatory” if it does not include within the scope of its coverage, prohibits the exercise of, or is conditioned on the non-exercise of one or more of the rights that are specifically granted under this License. You may not convey a covered work if you are a party to an arrangement with a third party that is in the business of distributing software, under which you make payment to the third party based on the extent of your activity of conveying the work, and under which the third party grants, to any of the parties who would receive the covered work from you, a discriminatory patent license (a) in connection with copies of the covered work conveyed by you (or copies made from those copies), or (b) primarily for and in connection with specific products or compilations that contain the covered work, unless you entered into that arrangement, or that patent license was granted, prior to 28 March 2007. +Nothing in this License shall be construed as excluding or limiting any implied license or other defenses to infringement that may otherwise be available to you under applicable patent law. +12. No Surrender of Others' Freedom. 
+If conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. If you cannot convey a covered work so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not convey it at all. For example, if you agree to terms that obligate you to collect a royalty for further conveying from those to whom you convey the Program, the only way you could satisfy both those terms and this License would be to refrain entirely from conveying the Program. +13. Use with the GNU Affero General Public License. +Notwithstanding any other provision of this License, you have permission to link or combine any covered work with a work licensed under version 3 of the GNU Affero General Public License into a single combined work, and to convey the resulting work. The terms of this License will continue to apply to the part which is the covered work, but the special requirements of the GNU Affero General Public License, section 13, concerning interaction through a network will apply to the combination as such. +14. Revised Versions of this License. +The Free Software Foundation may publish revised and/or new versions of the GNU General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. +Each version is given a distinguishing version number. If the Program specifies that a certain numbered version of the GNU General Public License “or any later version” applies to it, you have the option of following the terms and conditions either of that numbered version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of the GNU General Public License, you may choose any version ever published by the Free Software Foundation. 
+ +If the Program specifies that a proxy can decide which future versions of the GNU General Public License can be used, that proxy's public statement of acceptance of a version permanently authorizes you to choose that version for the Program. +Later license versions may give you additional or different permissions. However, no additional obligations are imposed on any author or copyright holder as a result of your choosing to follow a later version. +15. Disclaimer of Warranty. +THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM “AS IS” WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. +16. Limitation of Liability. +IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +17. Interpretation of Sections 15 and 16. 
+If the disclaimer of warranty and limitation of liability provided above cannot be given local legal effect according to their terms, reviewing courts shall apply local law that most closely approximates an absolute waiver of all civil liability in connection with the Program, unless a warranty or assumption of liability accompanies a copy of the Program in return for a fee. +END OF TERMS AND CONDITIONS +------------------------------------------------------------- +ipmitool +Copyright (c) 2003 Sun Microsystems, Inc. All Rights Reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions +are met: + +Redistribution of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + +Redistribution in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +Neither the name of Sun Microsystems, Inc. or the names of +contributors may be used to endorse or promote products derived +from this software without specific prior written permission. + +This software is provided "AS IS," without a warranty of any kind. +ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, +INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A +PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. +SUN MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE +FOR ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING +OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. 
IN NO EVENT WILL +SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, +OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR +PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF +LIABILITY, ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, +EVEN IF SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. +------------------------------------------------------------- +Intel® Memory Latency Checker +Copyright: Not available/not listed + +Software License for Intel® Memory Latency Checker (Intel® MLC) +SOFTWARE TOOLS LICENSE AGREEMENT + +DO NOT DOWNLOAD, INSTALL, ACCESS, COPY, OR USE ANY PORTION OF THE MATERIALS (DEFINED BELOW) UNTIL YOU HAVE READ AND ACCEPTED THE TERMS AND CONDITIONS OF THIS AGREEMENT. BY INSTALLING, COPYING, ACCESSING, OR USING THE MATERIALS, YOU AGREE TO BE LEGALLY BOUND BY THE TERMS AND CONDITIONS OF THIS AGREEMENT. If You do not agree to be bound by, or the entity for whose benefit You act has not authorized You to accept, these terms and conditions, do not install, access, copy, or use the Software and destroy all copies of the Software in Your possession. + +This DEVELOPMENT TOOLS LICENSE AGREEMENT (this “Agreement”) is entered into between Intel Corporation, a Delaware corporation (“Intel”) and You. “You” refers to you or your employer or other entity for whose benefit you act, as applicable. If you are agreeing to the terms and conditions of this Agreement on behalf of a company or other legal entity, you represent and warrant that you have the legal authority to bind that legal entity to the Agreement, in which case, "You" or "Your" shall be in reference to such entity. Intel and You are referred to herein individually as a “Party” or, together, as the “Parties”. 
+ +The Parties, in consideration of the mutual covenants contained in this Agreement, and for other good and valuable consideration, the receipt and sufficiency of which they acknowledge, and intending to be legally bound, agree as follows: + +1. DEFINITIONS. The following definitions are used throughout this Agreement: + +“Affiliate” means any entity controlling, controlled by or under common control with a Party hereto, where “control” means the direct or indirect ownership of more than fifty percent (50%) of such entity’s capital or equivalent voting rights. An entity will be deemed an “Affiliate” only as long as such control exists during the term of this Agreement. + +“Contractor” means a third party consultant or subcontractor who requires access to or use of the Materials to perform work on Your behalf or at Your behest. + +“Development Tools” means the development, evaluation, production, or test tool software, and associated documentation or other collateral, identified in the “development_tools.txt” text files, if any, included in the Materials. + +“Derivatives” means derivative works as defined in 17 U.S.C § 101 et seq. + +“Intel-based Device” means a device designed, manufactured, or configured by You or Your Affiliates to include or operate Intel hardware, software, or services. + +"Materials" means the software, documentation, the software product serial number and license key codes (if applicable), Development Tools, Redistributables, and other materials or collateral, including any updates and upgrades thereto, in source code or object code form where applicable, that are provided or otherwise made available by Intel to You under this Agreement. “Materials” do not include Open Source Software or any computer programming code that is subject to an agreement, obligation or license (whether or not accompanying the Materials) intended to supersede this Agreement. 
+ +"Redistributables" means the software, documentation, or other collateral identified in the “redist.txt” text files, if any, included in the Materials. + +2. LIMITED LICENSE. + +(A) Subject to the terms and conditions of this Agreement, Intel grants You and Your Affiliates, a limited, nonexclusive, nontransferable, revocable, worldwide, fully paid-up license during the term of this Agreement, without the right to sublicense, under Intel’s copyrights (subject to any third party licensing requirements), unless expressly stated otherwise, to: + +(1) internally reproduce and install a reasonable number of copies of the Materials for Your internal use solely for the purposes of designing, developing, manufacturing and testing Intel-based Devices; + +(2) internally reproduce the source code of the Development Tools, if provided to You by Intel, and to internally create and reproduce Derivatives of the Development Tools, and to internally reproduce the binary code of the Development Tools, or any Derivatives created by You, in each case solely for the purpose of designing, developing, manufacturing and testing the Intel-based Device, solely as necessary for the integration of any Intel software and the output generated by the Development Tools, with and into Intel-based Devices; + +(3) create Derivatives of the Redistributables, or any portions thereof, provided to You by Intel in source code form solely for the purposes of designing, developing, debugging, modifying, distributing and testing software containing significantly more functionality and features than the Redistributables in the form provided to You by Intel; + +(4) distribute (or otherwise make available) on a royalty-free basis, subject to any other terms and conditions which may appear in the Redistributables text files, the Redistributables, including any Derivatives of the Redistributables pursuant to Section 2(A)(3), or any portions thereof, only as integrated or embedded in software (and not on a 
stand-alone basis) solely for use on an Intel-based Device; and + +(5) have the tasks set forth in Section 2(A)(1) and (2) above performed by a Contractor on the conditions that You enter into a written confidentiality agreement with any such Contractor, subject to Section 7 (Confidentiality), and You remain fully liable to Intel for the actions and inactions of Your Contractors. + +(B) You will be liable for Your Affiliate’s breach of these terms. In addition, You acknowledge that Your Affiliates are beneficiaries of the licenses granted by Intel under Section 2. + +(C) Intel hereby grants You the right to sub-license (without rights to further sublicense) the Development Tools, including any accompanying documentation, to Your manufacturing partners, in the code format provided to You by Intel, solely for designing, developing, manufacturing and testing the Intel-based Devices solely as necessary for the integration of any Intel software and the output generated by the Development Tools, with and into Intel-based Devices. The sublicense is subject to a written sublicensing agreement that contains confidentiality obligations and license restrictions that are no less protective of Intel than those provided in this Agreement. You will be fully responsible and liable towards Intel for Your sub-licensees’ compliance with all such confidentiality obligations and license restrictions. You may grant Your manufacturing partners the right to further distribute Redistributables solely as integrated or embedded in software for Your Intel-based Devices. + +3. LICENSE RESTRICTIONS. All right, title and interest in and to the Materials and associated documentation are and will remain the exclusive property of Intel and its suppliers. 
Unless expressly permitted under the Agreement, You will not, and will not allow any third party to (i) use, copy, distribute, sell or offer to sell the Materials or associated documentation; (ii) modify, adapt, enhance, disassemble, decompile, reverse engineer, change or create derivative works from the Materials except and only to the extent as specifically required by mandatory applicable laws or any applicable third party license terms accompanying the Materials; (iii) use or make the Materials available for the use or benefit of third parties; or (iv) use the Materials on Your products other than those that include the Intel product(s), platform(s), or software identified in the Materials; or (v) publish or provide any Materials benchmark or comparison test results. + +If You received the Materials solely for evaluation purposes, You have no distribution rights to the Materials or any portion thereof. + +Distribution of the Redistributables is also subject to the following conditions: You shall: (i) be solely responsible to Your customers and end users for any update or support obligation or other liability which may arise from the distribution, (ii) not make any statement that Your software is "certified", or that its performance is guaranteed, by Intel, (iii) not use Intel's name or trademarks to promote Your software without prior written permission, (iv) use a license agreement that contains provisions that are at least as restrictive as this Agreement and which prohibits disassembly and reverse engineering of the Materials provided in object code form, and (v) indemnify, hold harmless, and defend Intel, Intel’s Affiliates, and its licensors from and against any claims or lawsuits, including attorney's fees, that arise or result from Your Derivatives or Your distribution of Your software. + +The consideration under this Agreement is only for the licenses Intel expressly grants above. 
Any other rights including, but not limited to, additional patent rights, will require an additional license and additional consideration. Nothing in this Agreement requires or will be treated to require Intel to grant any additional license. You acknowledge that an essential basis of the bargain in this Agreement is that Intel grants You no licenses or other rights including, but not limited to, patent, copyright, trade secret, trademark, trade name, service mark or other intellectual property licenses or rights with respect to the Materials and associated documentation, by implication, estoppel or otherwise, except for the licenses expressly granted above. You acknowledge there are significant uses of the Materials in their original, unmodified and uncombined form. The consideration for the licenses in this Agreement reflects Intel’s continuing right to assert patent claims against any modifications or derivative works (including, without limitation, error corrections and bug fixes) of, or combinations with, the Materials that You, Your Affiliates or third parties make that infringe any Intel patent claim. + +4. LICENSE TO FEEDBACK. This Agreement does not obligate You to provide Intel with materials, information, comments, suggestions, Your Derivatives or other communication regarding the features, functions, performance or use of the Materials (“Feedback”). 
If any software included in the Materials is provided or otherwise made available by Intel in source code form, to the extent You provide Intel with Feedback in a tangible form, You grant to Intel and its affiliates a non-exclusive, perpetual, sublicenseable, irrevocable, worldwide, royalty-free, fully paid-up and transferable license, to and under all of Your intellectual property rights, whether perfected or not, to publicly perform, publicly display, reproduce, use, make, have made, sell, offer for sale, distribute, import, create derivative works of and otherwise exploit any comments, suggestions, descriptions, ideas, Your Derivatives or other feedback regarding the Materials provided by You or on Your behalf. + +5. OPEN SOURCE STATEMENT. The Materials may include Open Source Software (OSS) licensed pursuant to OSS license agreement(s) identified in the OSS comments in the applicable source code file(s) and/or file header(s) provided with or otherwise associated with the Materials. Neither You nor any Original Equipment Manufacturer (OEM), Original Device Manufacturer (ODM), customer, or distributor may subject any proprietary portion of the Materials to any OSS license obligations including, without limitation, combining or distributing the Materials with OSS in a manner that subjects Intel, the Materials or any portion thereof to any OSS license obligation. Nothing in this Agreement limits any rights under, or grants rights that supersede, the terms of any applicable OSS license. + +6. THIRD PARTY SOFTWARE. Certain third party software provided with or within the Materials may only be used (a) upon securing a license directly from the owner of the software or (b) in combination with hardware components purchased from such third party and (c) subject to further license limitations by the software owner. A listing of any such third party limitations is in one or more text files accompanying the Materials. 
You acknowledge Intel is not providing You with a license to such third party software and further that it is Your responsibility to obtain appropriate licenses from such third parties directly. + +7. CONFIDENTIALITY. The terms and conditions of this Agreement, exchanged confidential information, as well as the Materials are subject to the terms and conditions of the Non-Disclosure Agreement(s) or Intel Pre-Release Loan Agreement(s) (referred to herein collectively or individually as “NDA”) entered into by and in force between Intel and You, and in any case no less confidentiality protection than You apply to Your information of similar sensitivity. If You would like to have a Contractor perform work on Your behalf that requires any access to or use of Materials You must obtain a written confidentiality agreement from the Contractor which contains terms and conditions with respect to access to or use of Materials no less restrictive than those set forth in this Agreement, excluding any distribution rights and use for any other purpose, and You will remain fully liable to Intel for the actions and inactions of those Contractors. You may not use Intel's name in any publications, advertisements, or other announcements without Intel's prior written consent. + +8. NO OBLIGATION; NO AGENCY. Intel may make changes to the Software, or items referenced therein, at any time without notice. Intel is not obligated to support, update, provide training for, or develop any further version of the Software or to grant any license thereto. No agency, franchise, partnership, joint-venture, or employee-employer relationship is intended or created by this Agreement. + +9. EXCLUSION OF WARRANTIES. THE MATERIALS ARE PROVIDED "AS IS" WITHOUT ANY EXPRESS OR IMPLIED WARRANTY OF ANY KIND INCLUDING WARRANTIES OF MERCHANTABILITY, NONINFRINGEMENT, OR FITNESS FOR A PARTICULAR PURPOSE. 
Intel does not warrant or assume responsibility for the accuracy or completeness of any information, text, graphics, links or other items within the Materials. + +10. LIMITATION OF LIABILITY. IN NO EVENT WILL INTEL OR ITS AFFILIATES, LICENSORS OR SUPPLIERS (INCLUDING THEIR RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AND AGENTS) BE LIABLE FOR ANY DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, LOST PROFITS, BUSINESS INTERRUPTION, OR LOST DATA) ARISING OUT OF OR IN RELATION TO THIS AGREEMENT, INCLUDING THE USE OF OR INABILITY TO USE THE MATERIALS, EVEN IF INTEL HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. SOME JURISDICTIONS PROHIBIT EXCLUSION OR LIMITATION OF LIABILITY FOR IMPLIED WARRANTIES OR CONSEQUENTIAL OR INCIDENTAL DAMAGES, SO THE ABOVE LIMITATION MAY IN PART NOT APPLY TO YOU. YOU MAY ALSO HAVE OTHER LEGAL RIGHTS THAT VARY FROM JURISDICTION TO JURISDICTION. THE MATERIALS LICENSED HEREUNDER ARE NOT DESIGNED OR INTENDED FOR USE IN ANY MEDICAL, LIFE SAVING OR LIFE SUSTAINING SYSTEMS, TRANSPORTATION SYSTEMS, NUCLEAR SYSTEMS, OR FOR ANY OTHER MISSION CRITICAL APPLICATION IN WHICH THE FAILURE OF THE DEVELOPMENT TOOLS COULD LEAD TO PERSONAL INJURY OR DEATH. YOU WILL INDEMNIFY AND HOLD INTEL AND ITS AFFILIATES, LICENSORS AND SUPPLIERS (INCLUDING THEIR RESPECTIVE DIRECTORS, OFFICERS, EMPLOYEES, AND AGENTS) HARMLESS AGAINST ALL CLAIMS, LIABILITIES, LOSSES, COSTS, DAMAGES, AND EXPENSES (INCLUDING REASONABLE ATTORNEY FEES), ARISING OUT OF, DIRECTLY OR INDIRECTLY, THE DISTRIBUTION OF THE MATERIALS AND ANY CLAIM OF PRODUCT LIABILITY, PERSONAL INJURY OR DEATH ASSOCIATED WITH ANY UNINTENDED USE, EVEN IF SUCH CLAIM ALLEGES THAT INTEL OR AN INTEL AFFILIATE, LICENSOR OR SUPPLIER WAS NEGLIGENT REGARDING THE DESIGN OR MANUFACTURE OF THE MATERIALS. THE LIMITED REMEDIES, WARRANTY DISCLAIMER AND LIMITED LIABILITY ARE FUNDAMENTAL ELEMENTS OF THE BASIS OF THE BARGAIN BETWEEN INTEL AND YOU AND INTEL WOULD NOT BE ABLE TO PROVIDE THE MATERIALS WITHOUT SUCH LIMITATIONS. + +11. 
TERMINATION AND SURVIVAL. Intel may terminate this Agreement for any reason with thirty (30) days’ notice and immediately if You or someone acting on Your behalf or at Your behest violates any of its terms or conditions. Upon termination You will immediately destroy and ensure the destruction of the Materials (including providing certification of such destruction or return back to Intel). Upon termination of this Agreement, all licenses granted to You hereunder terminate immediately. All Sections of this Agreement, except Section 2, will survive termination. In the event of termination of this Agreement, the license grant to any Redistributables, including Your Derivatives of the Redistributables, distributed by You prior to the effective date of such termination and in accordance with the terms and conditions of this Agreement shall survive any such termination of this Agreement. +12. GOVERNING LAW AND JURISDICTION. This Agreement and any dispute arising out of or relating to it will be governed by the laws of the U.S.A. and Delaware, without regard to conflict of laws principles. The Parties exclude the application of the United Nations Convention on Contracts for the International Sale of Goods (1980). The state and federal courts sitting in Delaware, U.S.A. will have exclusive jurisdiction over any dispute arising out of or relating to this Agreement. The Parties consent to personal jurisdiction and venue in those courts. A Party that obtains a judgment against the other Party in the courts identified in this section may enforce that judgment in any court that has jurisdiction over the Parties. +13. EXPORT REGULATIONS/EXPORT CONTROL. You agree that neither You nor Your subsidiaries or Affiliates will export/re-export the Materials, directly or indirectly, to any country for which the U.S. Department of Commerce or any other agency or department of the U.S. 
Government or the foreign government from where it is shipping requires an export license, or other governmental approval, without first obtaining any such required license or approval. In the event the Materials are exported from the U.S.A. or re-exported from a foreign destination by You, Your subsidiaries, or Your Affiliates, You will ensure that the distribution and export/re-export or import of the Materials complies with all laws, regulations, orders, or other restrictions of the U.S. Export Administration Regulations and the appropriate foreign government. +14. GOVERNMENT RESTRICTED RIGHTS. The Materials are a commercial item (as defined in 48 C.F.R. 2.101) consisting of commercial computer software and commercial computer software documentation (as those terms are used in 48 C.F.R. 12.212). Consistent with 48 C.F.R. 12.212 and 48 C.F.R. 227.7202-1 through 227.7202-4, You will not provide the Materials to the U.S. Government. Contractor or Manufacturer is Intel Corporation, 2200 Mission College Blvd., Santa Clara, CA 95054. +15. TRADEMARKS. Third party trademarks, trade names, product names and logos (the “Trademarks”) contained in or used by the Materials are the trademarks or registered trademarks of their respective owners, and the use of such Trademarks shall inure to the benefit of the trademark owner. The reference to such Trademarks (if any) by Intel in any of the Materials does not constitute: (i) an affiliation by Intel and its licensors with such company, or (ii) an endorsement or approval of such company of Intel and its licensors and its products or services. +16. ASSIGNMENT. You may not delegate, assign or transfer this Agreement, the license(s) granted or any of Your rights or duties hereunder, expressly, by implication, by operation of law, or otherwise and any attempt to do so, without Intel’s express prior written consent, will be null and void. 
Intel may assign, delegate and transfer this Agreement, and its rights and obligations hereunder, in its sole discretion. +17. ENTIRE AGREEMENT; SEVERABILITY. The terms and conditions of this Agreement and any NDA with Intel constitute the entire agreement between the Parties with respect to the subject matter hereof, and merge and supersede all prior or contemporaneous agreements, understandings, negotiations and discussions. Neither Party will be bound by any terms, conditions, definitions, warranties, understandings, or representations with respect to the subject matter hereof other than as expressly provided herein. In the event any provision of this Agreement is unenforceable or invalid under any applicable law or applicable court decision, such unenforceability or invalidity will not render this Agreement unenforceable or invalid as a whole, instead such provision will be changed and interpreted so as to best accomplish the objectives of such provision within legal limits. +18. WAIVER. The failure of a Party to require performance by the other Party of any provision hereof will not affect the full right to require such performance at any time thereafter; nor will waiver by a Party of a breach of any provision hereof constitute a waiver of the provision itself. +19. PRIVACY. YOUR PRIVACY RIGHTS ARE SET FORTH IN INTEL’S PRIVACY NOTICE, WHICH FORMS A PART OF THIS AGREEMENT. PLEASE REVIEW THE PRIVACY NOTICE AT HTTP://WWW.INTEL.COM/PRIVACY TO LEARN HOW INTEL COLLECTS, USES AND SHARES INFORMATION ABOUT YOU. +------------------------------------------------------------- +Other names and brands may be claimed as the property of others. diff --git a/version.txt b/version.txt new file mode 100644 index 0000000..e3a4f19 --- /dev/null +++ b/version.txt @@ -0,0 +1 @@ +2.2.0 \ No newline at end of file