diff --git a/.github/workflows/Compiler-settings/config.yaml b/.github/workflows/Compiler-settings/config.yaml deleted file mode 100644 index 7d97b439..00000000 --- a/.github/workflows/Compiler-settings/config.yaml +++ /dev/null @@ -1,15 +0,0 @@ -ignored_issues: - - "Binary Not Signed" - -ignored_severity: - - Info - -ignored_file: - - concrt140.dll - - msvcp140_1.dll - - msvcp140_2.dll - - msvcp140_atomic_wait.dll - - msvcp140.dll - - msvcp140_codecvt_ids.dll - - vcruntime140.dll - - vcruntime140_1.dll \ No newline at end of file diff --git a/.github/workflows/Compiler-settings/tool.py b/.github/workflows/Compiler-settings/tool.py deleted file mode 100644 index d8183e6c..00000000 --- a/.github/workflows/Compiler-settings/tool.py +++ /dev/null @@ -1,254 +0,0 @@ -""" -This script provides functionality for processing issue tracking data -extracted from JSON and YAML files. It includes the ability to count -issues by severity, filter issues based on configuration, and determine -an overall pass/fail status based on the issues present. -""" -import json -import argparse -import yaml - - -# Function to load data from a JSON file -def load_json(file_name): - """ - Load data from a JSON file. - - Args: - file_name (str): The name of the file to load. - - Returns: - dict: The data loaded from the JSON file. - """ - with open(file_name, 'r') as file: - return json.load(file) - - -# Function to load data from a YAML file -def load_yaml(file_name): - """ - Load data from a YAML file. - - Args: - file_name (str): The name of the file to load. - - Returns: - dict: The data loaded from the YAML file. - """ - with open(file_name, 'r') as file: - return yaml.safe_load(file) - - -# Function to count the number of high severity issues -def high_count(issues): - """ - Count the number of high severity issues. - - Args: - issues (list of dict): A list of issues. - - Returns: - int: The count of high severity issues. - """ - return sum(1 for issue in issues if issue['severity'] == 'High') - - -# Function to count the number of medium severity issues -def medium_count(issues): - """ - Count the number of medium severity issues. - - Args: - issues (list of dict): A list of issues. - - Returns: - int: The count of medium severity issues. - """ - return sum(1 for issue in issues if issue['severity'] == 'Medium') - - -# Function to count the number of low severity issues -def low_count(issues): - """ - Count the number of low severity issues. - - Args: - issues (list of dict): A list of issues. - - Returns: - int: The count of low severity issues. - """ - return sum(1 for issue in issues if issue['severity'] == 'Low') - - -# Function to count the number of informational severity issues -def info_count(issues): - """ - Count the number of informational severity issues. - - Args: - issues (list of dict): A list of issues. - - Returns: - int: The count of informational severity issues. - """ - return sum(1 for issue in issues if issue['severity'] == 'Info') - - -# Retrieve ignored issues from the YAML configuration -def get_ignored_issues(config): - """ - Retrieve ignored issues from the YAML configuration. - - Args: - config (dict): A dictionary containing configuration, including ignored issues. - - Returns: - tuple: A tuple containing lists of ignored issues, ignored severity, and ignored file names.
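-
-    Example (illustrative, with a minimal config dict):
-        >>> get_ignored_issues({'ignored_severity': ['Info']})
-        ([], ['Info'], [])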
- """ - ignored_issues = config.get('ignored_issues', []) - ignored_severity = config.get('ignored_severity', []) - ignored_file = config.get('ignored_file', []) - return ignored_issues, ignored_severity, ignored_file - - -# Function to extract the file name from a given path -def extract_file_name(full_file_name): - """ - Extracts the third component from a file path. - - Args: - full_file_name (str): The full file path. - - Returns: - str: The extracted file name or 'Unknown' if not available. - """ - parts = full_file_name.split('|') - return parts[2] if len(parts) >= 3 else 'Unknown' - - -# Function to determine if an issue should be included based on various criteria -def issues_included(issue_key, severity, ignored_issues, ignored_severities, - ignored_file, file_name): - """ - Determines if an issue should be included based on various criteria. - - Args: - issue_key (str): The issue key. - severity (str): The severity of the issue. - ignored_issues (list): A list of issue keys to ignore. - ignored_severities (list): A list of severities to ignore. - ignored_file (list): A list of file names to ignore. - file_name (str): The file name of the issue. - - Returns: - bool: True if the issue should be included, False otherwise. - """ - return (issue_key not in ignored_issues - and severity not in ignored_severities - and file_name not in ignored_file) - - -# Main function to extract issues from JSON data -def get_issues(data, ignored_issues, ignored_severities, ignored_file): - """ - Extract issues from the parsed JSON data, excluding specific criteria. - - Args: - data (dict): The parsed data from JSON. - ignored_issues (list): A list of issue keys to ignore. - ignored_severities (list): A list of severities to ignore. - ignored_file (list): A list of file names to ignore. - - Returns: - list of dict: A list of issue dictionaries. - """ - issues = [] - for file_key, issue_details in data.items(): - if not file_key.startswith('File'): - continue - - full_file_name = issue_details.get('File Name', 'Unknown') - file_name = extract_file_name(full_file_name) - - for key, value in issue_details.items(): - if not (isinstance(value, str) and '[' in value and ']' in value): - continue - - start = value.find('[') + 1 - end = value.find(']') - severity_with_info = value[start:end].strip().split(' ')[0] - - if issues_included(key, severity_with_info, ignored_issues, - ignored_severities, ignored_file, file_name): - issues.append({ - "issue_name": key, - "severity": severity_with_info, - "file": file_name - }) - return issues - - -def pass_or_fail(issues): - """ - Determine the overall pass/fail status based on the list of issues. - - Args: - issues (list of dict): A list of issues. - - Returns: - str: "Pass" if there are no issues, otherwise "Fail". - """ - # Determine if the result is pass or fail based on remaining issues - return "Pass" if not issues else "Fail" - - -def main(): - """ - Main part of the script. - Parses command-line arguments to obtain JSON and YAML file names, - loads data from these files, processes it to filter out ignored issues, - and prints a summary of the analysis including the overall status - and details of each issue. 
- """ - # Create the parser - parser = argparse.ArgumentParser() - - # Add arguments for the JSON and YAML file names - parser.add_argument('json_file') - parser.add_argument('yaml_file') - - # Parse the command-line arguments - args = parser.parse_args() - - # Use the parsed arguments to get the file names - json_data = load_json(args.json_file) - ignore_config = load_yaml(args.yaml_file) - - # Retrieve ignored issues from YAML configuration - ignored_issues_list, ignored_severities_list, ignored_file_list = get_ignored_issues( - ignore_config) - - # Get the list of issues while excluding the ignored ones - issues_list = get_issues(json_data, ignored_issues_list, - ignored_severities_list, ignored_file_list) - - # Determine the overall pass or fail status - result = pass_or_fail(issues_list) - print("Status:", result) - - # Output the overall status and counts of each severity level - print("High severity count:", high_count(issues_list)) - print("Medium severity count:", medium_count(issues_list)) - print("Low severity count:", low_count(issues_list)) - print("Info severity count:", info_count(issues_list)) - - # Output the details of each issue not ignored - for issue in issues_list: - print(f"Issue Name: {issue['issue_name']}; " - f"Severity: {issue['severity']}; " - f"File: {issue['file']}") - - -if __name__ == "__main__": - main() diff --git a/.github/workflows/acceptance.yaml b/.github/workflows/acceptance.yaml deleted file mode 100644 index 2ddf64b7..00000000 --- a/.github/workflows/acceptance.yaml +++ /dev/null @@ -1,152 +0,0 @@ -name: Test package -on: - workflow_call: - inputs: - TEST_REF: - description: 'The branch, tag or SHA to find tests on' - required: false - default: '' - type: string - OS: - description: 'Operating system' - required: true - type: string - CONFIG: - description: 'Build configuration' - required: true - type: string - GPU_FAMILY: - description: 'GPU Family' - required: true - type: string - ARTIFACT: - description: 'Artifact to test' - required: true - type: string - NONFATAL: - description: 'Errors are nonfatal' - required: false - default: false - type: boolean - -jobs: - acceptance: - runs-on: [self-hosted, "${{ inputs.OS }}", "${{ inputs.GPU_FAMILY }}"] - continue-on-error: ${{ inputs.NONFATAL }} - steps: - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - - name: Cleanup workspace (Windows) - if: always() && runner.os == 'Windows' - run: Remove-Item -Recurse -Force .\* - - - name: Load runtime overrides - uses: actions/cache/restore@v3 - with: - path: runtimes - key: runtimes - - - name: Checkout tests - uses: actions/checkout@v3 - id: checkout-tests-from-same-branch - continue-on-error: true - with: - repository: ${{ vars.TEST_REPO }} - token: ${{ secrets.TEST_REPO_TOKEN }} - path: tests - ref: ${{ inputs.TEST_REF }} - - - name: Download package - uses: actions/download-artifact@v3 - with: - name: ${{ inputs.ARTIFACT }} - path: package - - - name: Extract package (Linux) - if: success() && runner.os == 'Linux' - run: unzip package/*.zip -d _install - - - name: Extract package (Windows) - if: success() && runner.os == 'Windows' - run: Expand-Archive -Force -Path package\*.zip -DestinationPath _install - - - name: Build Docker image for GPU testing (Linux) - if: success() && runner.os == 'Linux' - run: > - docker build "tests/environments" - -f "tests/environments/Dockerfile.ubuntu.15070.gputest" - -t vpl_gputestenv:ubuntu15070 - - - name: Test package in container (Linux) - if: success() && 
runner.os == 'Linux' - continue-on-error: ${{ inputs.NONFATAL }} - run: | - cat >test.sh <<EOL
-        result += f'<td>{"&nbsp;&nbsp;" * indent}{sym} {root.name}'
-        result += '</td>'
-        result += f'<td>{root.file_size("left")}</td>'
-        result += f'<td>{root.flags("left")}</td>'
-
-    if relation == 'match':
-        sym = '='
-    elif relation == 'mismatch':
-        sym = '≠'
-    elif relation == 'orphan-left':
-        sym = ''
-    elif relation == 'orphan-right':
-        sym = ''
-    elif relation == 'missing':
-        sym = '🦨'
-    else:
-        sym = relation
-    result += f'<td>{sym}</td>'
-
-    if root.exists('right'):
-        if root.isdir('right'):
-            sym = '📂'
-        else:
-            sym = '🗎'
-        result += f'<td>{"&nbsp;&nbsp;" * indent}{sym} {root.name}'
-        result += '</td>'
-        result += f'<td>{root.file_size("right")}</td>'
-        result += f'<td>{root.flags("right")}</td>'
-    result += '</tr>'
-    return result
-
-
-def print_tree(root, mode, indent=0):
-    """Print root item and all children for the report"""
-    result = ''
-    if indent:
-        result += print_row(root, mode, indent)
-    for child in sorted(root.children):
-        result += print_tree(root.children[child], mode, indent + 1)
-    return result
-
-
-def write_report(root, title, mode):
-    """Print report
-
-    Supported modes are:
-
-    - All: Include all nodes
-    - Diff: Include only nodes with differences
-    - Orphan: Include only orphan nodes
-    """
-    now = datetime.datetime.now()
-    result = ''
-    if mode == 'All':
-        mode_name = 'All'
-    elif mode == 'Diff':
-        mode_name = 'Differences'
-    elif mode == 'Orphan':
-        mode_name = 'Orphans'
-    else:
-        mode_name = mode
-
-    result += """<!DOCTYPE html>
-<html>
-"""
-    result += f"""<head><title>{title}</title></head>
-<body>
-<h1>{title}</h1>
-Produced: {now.strftime("%Y-%m-%d %H:%M:%S")}<br>
-&nbsp;&nbsp;&nbsp;&nbsp;<br>
-Mode:&nbsp;&nbsp;{mode_name}<br>
-Left base folder: {os.path.abspath(root.path('left'))}<br>
-Right base folder: {os.path.abspath(root.path('right'))}<br>
-<table>
-<tr>
-<th>Name</th><th>Size</th><th>Flags</th>
-<th></th>
-<th>Name</th><th>Size</th><th>Flags</th>
-</tr>
-"""
-    result += print_tree(root, mode, 0)
-    result += """</table>
-</body>
-</html>
- -""" - return result - - -def main(): - """Main entrypoint""" - parser = argparse.ArgumentParser( - description=globals()['__doc__'], - formatter_class=argparse.RawTextHelpFormatter) - parser.add_argument('left', action='store') - parser.add_argument('right', action='store') - parser.add_argument('--mode', - '-m', - default="All", - action='store', - choices=['All', 'Diff', 'Orphan']) - parser.add_argument('--output', '-o', default=None, action='store') - parser.add_argument('--title', '-t', default="BOM Diff", action='store') - - args = parser.parse_args() - root = DiffInfo(args.left, args.right) - root.cmp() - report = write_report(root, args.title, args.mode) - with open_output(args.output) as output: - output.write(report) - - -if __name__ == "__main__": - main() diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml deleted file mode 100644 index 49e75e88..00000000 --- a/.github/workflows/build.yaml +++ /dev/null @@ -1,195 +0,0 @@ -name: Build package -on: - workflow_call: - inputs: - OS: - description: 'Operating system' - required: true - type: string - CONFIG: - description: 'Build configuration' - required: true - type: string - REF: - description: 'The branch, tag or SHA to build' - required: false - default: '' - type: string - ARTIFACT: - description: 'Artifact to test' - required: true - type: string - -jobs: - build: - name: Build - runs-on: [self-hosted, "${{ inputs.OS }}"] - steps: - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - - name: Cleanup workspace (Windows) - if: always() && runner.os == 'Windows' - run: Remove-Item -Recurse -Force .\* - - - name: Checkout dispatcher source - uses: actions/checkout@v3 - with: - path: source - ref: ${{ inputs.REF }} - - - name: Install dependencies (Windows) - if: success() && runner.os == 'Windows' - shell: cmd - run: | - echo on - - call source\script\bootstrap.bat - if %errorlevel% neq 0 exit /b %errorlevel% - - - name: Build package (Windows, Release) - if: success() && runner.os == 'Windows' && inputs.CONFIG == 'release' - shell: cmd - run: | - echo on - - call source\script\build.bat - if %errorlevel% neq 0 exit /b %errorlevel% - - - name: Build package (Windows, Debug) - if: success() && runner.os == 'Windows' && inputs.CONFIG == 'debug' - shell: cmd - run: | - echo on - - cmake -B "source\_build" -S "source" -DBUILD_TESTS=ON -DENABLE_WARNING_AS_ERROR=ON - if %errorlevel% neq 0 exit /b %errorlevel% - - cmake --build "source\_build" --parallel %NUMBER_OF_PROCESSORS% --verbose --config Debug - if %errorlevel% neq 0 exit /b %errorlevel% - - cmake --build "source\_build" --parallel %NUMBER_OF_PROCESSORS% --config Debug --target package - if %errorlevel% neq 0 exit /b %errorlevel% - - - name: Build package (Windows, Disable Experimental APIs) - if: success() && runner.os == 'Windows' && inputs.CONFIG == 'experimental-off' - shell: cmd - run: | - echo on - - cmake -B "source\_build" -S "source" -DBUILD_TESTS=ON -DENABLE_WARNING_AS_ERROR=ON ^ - -DBUILD_DISPATCHER_ONEVPL_EXPERIMENTAL=OFF -DBUILD_TOOLS_ONEVPL_EXPERIMENTAL=OFF - - if %errorlevel% neq 0 exit /b %errorlevel% - - cmake --build "source\_build" --parallel %NUMBER_OF_PROCESSORS% --verbose --config Release - if %errorlevel% neq 0 exit /b %errorlevel% - - cmake --build "source\_build" --parallel %NUMBER_OF_PROCESSORS% --config Release --target package - if %errorlevel% neq 0 exit /b %errorlevel% - - - name: Build Docker image for building (Linux) - if: success() && runner.os == 'Linux' - run: > - 
docker build "source/script" - -f "source/script/Dockerfile.centos7.build" - -t vpl_build:centos - - - name: Build package (Linux, Release) - if: success() && runner.os == 'Linux' && inputs.CONFIG == 'release' - run: | - docker run --rm -v $(pwd):/tmp/work -w /tmp/work vpl_build:centos source/script/build - - - name: Build package (Linux, Debug) - if: success() && runner.os == 'Linux' && inputs.CONFIG == 'debug' - run: | - cat <<'EOL' > build.sh - #!/bin/bash - set -o errexit - cmake -B "source/_build" -S "source" \ - -DBUILD_TESTS=ON \ - -DCMAKE_BUILD_TYPE=Debug \ - -DCMAKE_INSTALL_LIBDIR=lib \ - -DENABLE_DRI3=ON \ - -DENABLE_DRM=ON \ - -DENABLE_VA=ON \ - -DENABLE_WAYLAND=ON \ - -DENABLE_X11=ON \ - -DENABLE_WARNING_AS_ERROR=ON - - cmake --build "source/_build" --parallel $(nproc) --verbose - - pushd "source/_build" - cpack . - popd - EOL - chmod +x build.sh - - docker run --rm -v $(pwd):/tmp/work -w /tmp/work vpl_build:centos ./build.sh - - - name: Build package (Linux, Disable Experimental APIs) - if: success() && runner.os == 'Linux' && inputs.CONFIG == 'experimental-off' - run: | - cat <<'EOL' > build.sh - #!/bin/bash - set -o errexit - cmake -B "source/_build" -S "source" \ - -DBUILD_TESTS=ON \ - -DCMAKE_BUILD_TYPE=Debug \ - -DCMAKE_INSTALL_LIBDIR=lib \ - -DENABLE_DRI3=ON \ - -DENABLE_DRM=ON \ - -DENABLE_VA=ON \ - -DENABLE_WAYLAND=ON \ - -DENABLE_X11=ON \ - -DENABLE_WARNING_AS_ERROR=ON \ - -DBUILD_DISPATCHER_ONEVPL_EXPERIMENTAL=OFF \ - -DBUILD_TOOLS_ONEVPL_EXPERIMENTAL=OFF - - cmake --build "source/_build" --parallel $(nproc) --verbose - - pushd "source/_build" - cpack . - popd - EOL - chmod +x build.sh - - docker run --rm -v $(pwd):/tmp/work -w /tmp/work vpl_build:centos ./build.sh - - - name: Run unit tests (Linux) - if: success() && runner.os == 'Linux' - run: >- - docker run --rm -v $(pwd):/tmp/work -w /tmp/work - vpl_build:centos - source/script/test unit - - - name: Run unit tests (Windows) - if: success() && runner.os == 'Windows' - shell: cmd - run: | - echo on - call source\script\test.bat unit - if %errorlevel% neq 0 exit /b %errorlevel% - - - name: Upload artifact - uses: actions/upload-artifact@v3 - if: success() || failure() - with: - name: ${{ inputs.ARTIFACT }} - path: source/_build/*-all.zip - - - name: Upload Unit Test Results - uses: actions/upload-artifact@v3 - if: success() || failure() - with: - name: ${{ inputs.ARTIFACT }}-unit-tests - path: source/_build/Testing/*/Test.xml - - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - - name: Cleanup workspace (Windows) - if: always() && runner.os == 'Windows' - run: Remove-Item -Recurse -Force .\* diff --git a/.github/workflows/check.yaml b/.github/workflows/check.yaml deleted file mode 100644 index 230e42de..00000000 --- a/.github/workflows/check.yaml +++ /dev/null @@ -1,369 +0,0 @@ -name: Check -run-name: Check (Triggered by ${{ github.event_name }} by @${{ github.actor }}) -on: - pull_request: - # Run on pull requests - workflow_dispatch: - # Run on user request - -concurrency: - # Cancel any exisitng jobs related to the target branch - group: ci-${{ github.ref }} - cancel-in-progress: true - -jobs: - setup: - name: Setup Testing - runs-on: [self-hosted, linux] - outputs: - refrence_ref: ${{ env.refrence_ref }} - test_ref: ${{ env.test_ref }} - env: - test_ref: '' - refrence_ref: '' - steps: - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - # Get ref of code to be used as reference - - name: 
Checkout dispatcher source - uses: actions/checkout@v3 - with: - path: source - fetch-depth: 0 - ref: '${{ github.event.pull_request.head.sha }}' - - name: Build variables - id: run - run: | - cd source - echo "refrence_ref=$(git describe --abbrev=0 --tags --match=v*)" >> $GITHUB_ENV - - # Get ref of test to be used - # If this is a pull request prefer a branch of the same name as the branch being merged into - # Otherwise try to use a branch of the same name - # Otherwise use main - - name: Checkout tests from base_ref - if: github.base_ref - id: check-tests-from-base_ref - uses: actions/checkout@v3 - continue-on-error: true - with: - repository: ${{ vars.TEST_REPO }} - token: ${{ secrets.TEST_REPO_TOKEN }} - path: tests - fetch-depth: 0 - ref: ${{ github.base_ref }} - - name: Use tests from base_ref - if: steps.check-tests-from-base_ref.outcome == 'success' - id: use-tests-from-base_ref - run: | - echo "test_ref=${{ github.base_ref }}" >> $GITHUB_ENV - - - name: Checkout tests from ref_name - if: steps.check-tests-from-base_ref.outcome != 'success' - id: check-tests-from-ref_name - uses: actions/checkout@v3 - continue-on-error: true - with: - repository: ${{ vars.TEST_REPO }} - token: ${{ secrets.TEST_REPO_TOKEN }} - path: tests - fetch-depth: 0 - ref: ${{ github.ref_name }} - - name: Use tests from ref_name - if: steps.check-tests-from-ref_name.outcome == 'success' - id: use-tests-from-ref_name - run: | - echo "test_ref=${{ github.ref_name }}" >> $GITHUB_ENV - - - name: Use tests from default - if: steps.check-tests-from-base_ref.outcome != 'success' && steps.check-tests-from-ref_name.outcome != 'success' - run: | - echo "test_ref=main" >> $GITHUB_ENV - - - name: Report - if: always() - run: | - echo "test_ref=${{ env.test_ref }}" - echo "refrence_ref=${{ env.refrence_ref }}" - - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - - name: Checkout tests - uses: actions/checkout@v3 - with: - repository: ${{ vars.TEST_REPO }} - token: ${{ secrets.TEST_REPO_TOKEN }} - path: tests - ref: ${{ env.test_ref }} - - # To allow override of runtimes we look for a "runtimes" - # folder in the test repo. - - name: load runtimes - run: | - mkdir runtimes - if [ -d tests/runtimes ] - then - cp -r tests/runtimes/. ./runtimes/ - fi - # Other methods may be used to override runtimes; whatever - # method is used, it should result in the runtimes being stored in - # the "runtimes" folder. - - name: Cache Runtimes - uses: actions/cache@v3 - with: - path: runtimes - key: runtimes - - lint: - name: Lint - runs-on: [self-hosted, linux, docker] - steps: - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - name: Checkout dispatcher source - uses: actions/checkout@v3 - with: - path: source - ref: '${{ github.event.pull_request.head.sha }}' - - name: Create Docker Image - run: > - docker build "source/script" - -f "source/script/Dockerfile.ubuntu.build" - -t vpl_build:ubuntu - - name: Lint source in container - run: | - cat <<'EOL' > lint.sh - #!/bin/bash - set -x - set -o pipefail - source/script/test lint | tee lint.log - ret=$?
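-      # with pipefail set, $? reflects a failure of the lint stage even
-      # though tee is the last command in the pipeline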
- set +o pipefail - exit $ret - EOL - chmod +x lint.sh - - docker run --rm -v $(pwd):/tmp/work -w /tmp/work \ - vpl_build:ubuntu ./lint.sh - - name: Report - if: success() || failure() - run: | - echo '```' >> $GITHUB_STEP_SUMMARY - cat lint.log >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - hadolint: - name: Hadolint - uses: ./.github/workflows/hadolint.yaml - secrets: inherit - - trivy: - name: Trivy - uses: ./.github/workflows/trivy.yaml - secrets: inherit - - build: - name: Build - strategy: - fail-fast: false - matrix: - os: [windows, linux] - config: [release, debug, experimental-off] - uses: ./.github/workflows/build.yaml - secrets: inherit - with: - OS: ${{ matrix.os }} - CONFIG: ${{ matrix.config }} - ARTIFACT: ${{ matrix.os }}-${{ matrix.config }}-build - - ref-build: - name: Build Reference - needs: [setup] - strategy: - fail-fast: false - matrix: - os: [windows, linux] - config: [release] - reference: ['${{ needs.setup.outputs.refrence_ref }}'] - uses: ./.github/workflows/build.yaml - secrets: inherit - with: - OS: ${{ matrix.os }} - CONFIG: ${{ matrix.config }} - ARTIFACT: ref-${{ matrix.os }}-${{ matrix.config }}-build - REF: ${{ matrix.reference }} - - diff-report: - name: Diff Report - needs: [setup, ref-build, build] - strategy: - fail-fast: false - matrix: - os: [windows, linux] - uses: ./.github/workflows/diff-report.yaml - secrets: inherit - with: - DEST: ${{ matrix.os }}-diff-report - LEFT: ref-${{ matrix.os }}-release-build - RIGHT: ${{ matrix.os }}-release-build - - linux-distro-support: - name: Linux Distro Test - needs: [setup, build] - strategy: - fail-fast: false - matrix: - include: - - distro: rhel - version: 8 - fatal: true - - distro: rhel - version: 9 - fatal: true - - distro: centos - version: 9 - fatal: true - - distro: ubuntu - version: 20.04 - fatal: false - - distro: ubuntu - version: 22.04 - fatal: false - uses: ./.github/workflows/linux-distro-test.yaml - secrets: inherit - with: - DISTRO_FAMILY: ${{ matrix.distro }} - DISTRO_VERSION: ${{ matrix.version }} - TEST_REF: ${{ needs.setup.outputs.test_ref }} - NONFATAL: ${{ !matrix.fatal }} - - acceptance-test: - name: Acceptance Test - needs: [setup, build] - strategy: - fail-fast: false - matrix: - os: [windows, linux] - config: [release, debug] - accelerator: [gen12.5, gen9.5] - uses: ./.github/workflows/acceptance.yaml - secrets: inherit - with: - TEST_REF: ${{ needs.setup.outputs.test_ref }} - OS: ${{ matrix.os }} - CONFIG: ${{ matrix.config }} - GPU_FAMILY: ${{ matrix.accelerator }} - ARTIFACT: ${{ matrix.os }}-${{ matrix.config }}-build - NONFATAL: ${{ matrix.CONFIG == 'debug' && true || false }} - - compiler-settings: - name: Compiler Settings Test - needs: build - strategy: - fail-fast: false - matrix: - os: [windows, linux] - config: [release] - uses: ./.github/workflows/compiler-settings.yaml - secrets: inherit - with: - OS: ${{ matrix.os }} - ARTIFACT: ${{ matrix.os }}-${{ matrix.config }}-build - NONFATAL: ${{ matrix.CONFIG == 'debug' && true || false }} - - ip-leak-scan: - name: IP Leak Scan - uses: ./.github/workflows/ip-leak-scan.yaml - secrets: inherit - - binary-malware-scan: - name: Malware Scan - needs: build - strategy: - fail-fast: false - matrix: - os: [windows, linux] - config: [release] - uses: ./.github/workflows/malware-scan.yaml - secrets: inherit - with: - ARTIFACT: ${{ matrix.os }}-${{ matrix.config }}-build - - source-malware-scan: - name: Malware Scan 
Source - uses: ./.github/workflows/malware-scan.yaml - secrets: inherit - - vulnerability-scan: - name: Vulnerability Scan - needs: build - uses: ./.github/workflows/vulnerability-scan.yaml - secrets: inherit - - coverity: - name: Coverity Scan - strategy: - fail-fast: false - matrix: - os: [windows, linux] - # WA - not really a dependency, but force this scan to run later in the workflow - # so that other tests can run (and fail) earlier in the CI checks - needs: diff-report - uses: ./.github/workflows/coverity.yaml - secrets: inherit - with: - OS: ${{ matrix.os }} - - dynamic-analysis: - name: Dynamic Analysis - needs: [setup, build] - uses: ./.github/workflows/dynamic-analysis.yaml - secrets: inherit - with: - ARTIFACT: linux-release-build - - summary: - name: Summary - needs: - - ip-leak-scan - - build - - acceptance-test - - compiler-settings - - coverity - - vulnerability-scan - - linux-distro-support - - diff-report - - dynamic-analysis - - binary-malware-scan - - source-malware-scan - uses: ./.github/workflows/release-summary.yaml - secrets: inherit - - required: - name: required - needs: - - trivy - - hadolint - - lint - - build - - linux-distro-support - - acceptance-test - - compiler-settings - - ip-leak-scan - - vulnerability-scan - - coverity - - dynamic-analysis - - binary-malware-scan - - source-malware-scan - runs-on: [self-hosted] - steps: - - name: Required Checks - run: echo Done diff --git a/.github/workflows/compiler-settings.yaml b/.github/workflows/compiler-settings.yaml deleted file mode 100644 index 78dd8ce5..00000000 --- a/.github/workflows/compiler-settings.yaml +++ /dev/null @@ -1,151 +0,0 @@ -name: Compiler Settings -on: - workflow_call: - inputs: - OS: - description: 'Operating system' - required: true - type: string - ARTIFACT: - description: 'Artifact to test' - required: true - type: string - NONFATAL: - description: 'Errors are nonfatal' - required: false - default: false - type: boolean - -jobs: - compiler-settings: - runs-on: [self-hosted, "${{ inputs.OS }}"] - steps: - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - - name: Cleanup workspace (Windows) - if: always() && runner.os == 'Windows' - run: Remove-Item -Recurse -Force .\* - - - name: Download package - uses: actions/download-artifact@v3 - with: - name: ${{ inputs.ARTIFACT }} - path: package - - - name: Extract package (Linux) - if: success() && runner.os == 'Linux' - run: unzip package/*.zip -d _install - - - name: Extract package (Windows) - if: success() && runner.os == 'Windows' - run: Expand-Archive -Force -Path package\*.zip -DestinationPath _install - - - name: Build Docker image (Linux) - if: success() && runner.os == 'Linux' - run: | - USER_ID=$(id -u ${USER}) - GROUP_ID=$(id -g ${USER}) - # Dockerfile will contain everything between <<EOL and EOL - cat <<EOL > Dockerfile - > /etc/profile.d/entrypoint.sh \ - && echo ".
/opt/rh/devtoolset-9/enable" >> /etc/profile.d/entrypoint.sh \ - # && echo "exec \"\$@\"" >> /tmp/entrypoint.sh \ - && chmod 755 /etc/profile.d/entrypoint.sh - - ENV BASH_ENV /etc/profile.d/entrypoint.sh - - # Enable dev tools when using this image - #ENTRYPOINT ["/tmp/entrypoint.sh"] - RUN groupadd -g \$GROUP_ID user || true - RUN useradd --disabled-password --gecos "" --uid \$USER_ID --gid \$GROUP_ID user || true - - FROM maven:3-jdk-11 AS coverity_install - SHELL ["/bin/bash", "-xo", "pipefail", "-c"] - ARG COV_ANALYSIS_LINUX_URL - ARG COV_REPORTS_LINUX_URL - ARG COV_LICENSE_URL - RUN mkdir /opt/coverity - RUN curl --silent --show-error -o /tmp/cov-analysis-linux64.sh -k \${COV_ANALYSIS_LINUX_URL} - RUN curl --silent --show-error -o /tmp/cov-reports-linux64.sh -k \${COV_REPORTS_LINUX_URL} - RUN curl --silent --show-error -o /opt/coverity/license.dat -k \${COV_LICENSE_URL} - run chmod 777 /tmp/*.sh - RUN /tmp/cov-reports-linux64.sh -q \ - --installation.dir=/opt/coverity/reports/ - RUN /tmp/cov-analysis-linux64.sh -q \ - --installation.dir=/opt/coverity/analysis/ \ - --license.agreement=agree --license.region=0 --license.type.choice=0 --license.cov.path=/opt/coverity/license.dat \ - --component.sdk=false --component.skip.documentation=true - - FROM buildenv - RUN yum install -y fontconfig - ENV PATH="/opt/coverity/analysis/bin:/opt/coverity/reports/bin:\${PATH}" - COPY --from=coverity_install /opt/coverity /opt/coverity - - EOL - docker build -t vplcoverity:github_runner . \ - --build-arg http_proxy=$http_proxy \ - --build-arg https_proxy=$https_proxy \ - --build-arg no_proxy=$no_proxy \ - --build-arg "COV_ANALYSIS_LINUX_URL=${{ vars.COV_ANALYSIS_LINUX_URL }}" \ - --build-arg "COV_REPORTS_LINUX_URL=${{ vars.COV_REPORTS_LINUX_URL }}" \ - --build-arg "COV_LICENSE_URL=${{ vars.COV_LICENSE_URL }}" - - - name: Write Script (Linux) - if: success() && runner.os == 'Linux' - run: | - cat > action.sh <Dockerfile <&1 | tee results_log.txt - EOL - chmod a+x ./source/.github/workflows/dynamic_analysis/test.sh - chmod a+x ./test.sh - docker run --rm -v $(pwd):/tmp/work \ - -e ONEVPL_PRIORITY_PATH=/tmp/work/runtimes \ - -w /tmp/work --privileged vplgputestenv:github_runner /tmp/work/test.sh - - - name: Upload test results - uses: actions/upload-artifact@v3 - if: success() || failure() - with: - name: ${{ inputs.ARTIFACT }}-dynamic-analysis - path: results_* - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - \ No newline at end of file diff --git a/.github/workflows/dynamic_analysis/default_suppressions.txt b/.github/workflows/dynamic_analysis/default_suppressions.txt deleted file mode 100644 index 6d3f56cd..00000000 --- a/.github/workflows/dynamic_analysis/default_suppressions.txt +++ /dev/null @@ -1,107 +0,0 @@ -{ - - Memcheck:Addr1 - ... - fun:va* -} -{ - - Memcheck:Addr1 - ... - fun:main -} -{ - - Memcheck:Addr2 - ... - fun:va* -} -{ - - Memcheck:Addr4 - ... - fun:va* -} -{ - - Memcheck:Addr8 - ... - fun:va* -} -{ - - Memcheck:Addr16 - ... - fun:va* -} - - -{ - - Memcheck:Addr1 - ... - obj:*/libmfx* -} -{ - - Memcheck:Addr2 - ... - obj:*/libmfx* -} -{ - - Memcheck:Addr4 - ... - obj:*/libmfx* -} -{ - - Memcheck:Addr8 - ... - obj:*/libmfx* -} -{ - - Memcheck:Addr16 - ... - obj:*/libmfx* -} - -{ - - Memcheck:Addr1 - ... - obj:*/iHD* -} -{ - - Memcheck:Addr2 - ... - obj:*/iHD* -} -{ - - Memcheck:Addr4 - ... - obj:*/iHD* -} -{ - - Memcheck:Addr8 - ... - obj:*/iHD* -} -{ - - Memcheck:Addr16 - ... - obj:*/iHD* -} - -{ - - Memcheck:Cond - ... 
- obj:*/libmfx* -} - diff --git a/.github/workflows/dynamic_analysis/test.sh b/.github/workflows/dynamic_analysis/test.sh deleted file mode 100644 index 43f6e5ec..00000000 --- a/.github/workflows/dynamic_analysis/test.sh +++ /dev/null @@ -1,72 +0,0 @@ -#!/bin/bash -############################################################################### -# Copyright (C) Intel Corporation -# -# SPDX-License-Identifier: MIT -############################################################################### -## start of boilerplate to switch to project root ------------------------------ -script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" -proj_dir="$( dirname "${script_dir}" )" -cd ${script_dir} - -HOST_OS='linux' -UNAME=$( command -v uname) - -case $( "${UNAME}" | tr '[:upper:]' '[:lower:]') in - msys*|cygwin*|mingw*) - # MinGW/MSys type Bash on windows - HOST_OS='windows' - ;; -esac -## start of commands ----------------------------------------------------------- -RETURNCODE=0 - -export TOOL="valgrind" - -INDEX=0 -RETURNCODE=0 -SUPPRESSIONS=${script_dir}/default_suppressions.txt - -TOOL_OPTS="--tool=memcheck --leak-check=yes --show-leak-kinds=all --num-callers=50 --undef-value-errors=no --errors-for-leak-kinds=definite,indirect,possible --error-exitcode=97 --suppressions=${SUPPRESSIONS}" -while IFS='' read -r COMMAND || [[ -n "${COMMAND}" ]]; do - if [[ ${COMMAND} =~ ^#.* ]]; then - # skip comments - continue - fi - TEST_NAME="${COMMAND}" - echo "Started ${TEST_NAME}" - - echo "" - echo "Test# $INDEX:" - echo "${COMMAND}" - ${COMMAND} - ERR_CODE=$? - if [[ "$ERR_CODE" -ne 0 ]]; then - echo "${TEST_NAME} reported error ${ERR_CODE}" - echo "Finished ${TEST_NAME}" - continue - fi - - echo "" - echo "${TOOL} ${COMMAND}" - ${TOOL} ${TOOL_OPTS} ${COMMAND} - ERR_CODE=$? - if [[ "$ERR_CODE" -eq 1 ]]; then - echo "Dynamic analysis of ${TEST_NAME} reported error ${ERR_CODE}" - echo "Fatal dynamic analysis error" - echo "Exiting..." 
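-        # valgrind exit code 1 means the tool run itself broke, not a memcheck
-        # finding (findings are flagged via --error-exitcode=97), so stop here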
- RETURNCODE=1 - break - fi - if [[ "$ERR_CODE" -eq 97 ]]; then - echo "Dynamic analysis of ${TEST_NAME} reported error ${ERR_CODE}" - echo "Result: FAIL" - RETURNCODE=1 - else - echo "Result: PASS" - fi - - ((INDEX++)) -done < ${script_dir}/tests.txt - -exit $RETURNCODE diff --git a/.github/workflows/dynamic_analysis/tests.txt b/.github/workflows/dynamic_analysis/tests.txt deleted file mode 100644 index de7a713e..00000000 --- a/.github/workflows/dynamic_analysis/tests.txt +++ /dev/null @@ -1,10 +0,0 @@ -vpl-inspect -sample_decode h265 -i $CONTENT/cars_320x240.h265 -hw -sample_decode h265 -i $CONTENT/cars_320x240.h265 -hw -vaapi -sample_decode h265 -i $CONTENT/cars_320x240.h265 -hw -vaapi -AdapterNum 0 -sample_encode h265 -i $CONTENT/cars_320x240.i420 -hw -w 320 -h 240 -sample_encode h265 -i $CONTENT/cars_320x240.i420 -hw -w 320 -h 240 -vaapi -sample_encode h265 -i $CONTENT/cars_320x240.i420 -hw -w 320 -h 240 -vaapi -AdapterNum 0 -sample_vpp -sw 320 -sh 240 -scc i420 -dw 640 -dh 480 -dcc rgb4 -i $CONTENT/cars_320x240.i420 -o out.rgb4 -lib hw -sample_vpp -sw 320 -sh 240 -scc i420 -dw 640 -dh 480 -dcc rgb4 -i $CONTENT/cars_320x240.i420 -o out.rgb4 -lib hw -vaapi -sample_vpp -sw 320 -sh 240 -scc i420 -dw 640 -dh 480 -dcc rgb4 -i $CONTENT/cars_320x240.i420 -o out.rgb4 -lib hw -vaapi -AdapterNum 0 diff --git a/.github/workflows/hadolint.yaml b/.github/workflows/hadolint.yaml deleted file mode 100644 index 98a6e10f..00000000 --- a/.github/workflows/hadolint.yaml +++ /dev/null @@ -1,78 +0,0 @@ -name: Hadolint -run-name: Hadolint (Triggered by ${{ github.event_name }} by @${{ github.actor }}) -on: - workflow_call: - -jobs: - scan: - runs-on: [self-hosted, linux, docker] - steps: - - name: Cleanup workspace - run: sudo rm -rf ..?* .[!.]* * - - - name: Checkout dispatcher source - uses: actions/checkout@v3 - with: - path: source - - - name: Pull docker image - run: docker pull hadolint/hadolint - - - name: Lint - run: | - mkdir artifact - echo "Hadolint Report" > artifact/hadolint.txt - walk_dir () { - shopt -s nullglob dotglob - RC=0 - - for pathname in "$1"/*; do - retVal=0 - if [ -d "$pathname" ]; then - walk_dir "$pathname" || retVal=$? - if [ $retVal -ne 0 ]; then - RC=$retVal - fi - else - case "$pathname" in - *Dockerfile*|*dockerfile*) - echo "Checking $pathname" - echo "" >> artifact/hadolint.txt - echo " $pathname" >> artifact/hadolint.txt - echo "----------" >> artifact/hadolint.txt - docker run --rm \ - -i --attach stderr --attach stdout \ - -v $(pwd)/source:/source \ - -w /source \ - hadolint/hadolint < $pathname >> artifact/hadolint.txt 2>&1 || retVal=$?
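-                  # hadolint exits non-zero when it finds issues; remember the
-                  # failure but keep walking so every Dockerfile gets scanned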
- if [ $retVal -ne 0 ]; then - RC=$retVal - fi - esac - fi - done - return $RC - } - walk_dir "$(pwd)/source" - - - name: Summarize - if: (failure()) - run: | - echo '```' >> $GITHUB_STEP_SUMMARY - cat artifact/hadolint.txt >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - - - name: Report - if: (success() || failure()) - run: | - cat artifact/hadolint.txt - - - name: Record Artifacts - uses: actions/upload-artifact@v3 - if: (success() || failure()) - with: - name: hadolint - path: artifact/* - - - name: Cleanup workspace - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * diff --git a/.github/workflows/ip-leak-scan.yaml b/.github/workflows/ip-leak-scan.yaml deleted file mode 100644 index 012eb1c9..00000000 --- a/.github/workflows/ip-leak-scan.yaml +++ /dev/null @@ -1,127 +0,0 @@ -name: IP Leak Scan -run-name: IP Leak Scan (Triggered by ${{ github.event_name }} by @${{ github.actor }}) -on: - workflow_dispatch: - # allow direct call to support testing updates to disposition DB - inputs: - DB_REF: - description: 'The branch, tag or SHA to get DB from' - default: ipldt - type: string - workflow_call: - inputs: - DB_REF: - description: 'The branch, tag or SHA to get DB from' - required: false - type: string - -jobs: - ip-leak-scan: - runs-on: [self-hosted, Linux, docker] - steps: - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - name: Checkout dispatcher source - uses: actions/checkout@v3 - with: - path: source - - name: Check Out Dispositions - uses: actions/checkout@v3 - with: - path: db - ref: ${{ inputs.DB_REF || 'ipldt' }} - - name: Write Docker File - run: | - USER_ID=$(id -u ${USER}) - GROUP_ID=$(id -g ${USER}) - IPLDB_TOOL_URL=${{ vars.IPLDB_TOOL_URL }} - # Dockerfile will contain everything between <<EOL and EOL - cat <<EOL > Dockerfile - FROM ubuntu:20.04 - ARG IPLDB_TOOL_URL - ARG http_proxy - ARG https_proxy - ARG no_proxy - ARG USER_ID - ARG GROUP_ID - # Suppress oddball issues with interactive apps - ARG DEBIAN_FRONTEND=noninteractive - # map in any proxy settings from the environment - USER root - ENV http_proxy ${http_proxy} - ENV https_proxy ${https_proxy} - ENV no_proxy ${no_proxy} - ENV DEBIAN_FRONTEND=noninteractive - - RUN apt-get update && \ - apt-get install -y -q --no-install-recommends \ - curl \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists/* - # Main environment configuration - RUN apt-get update && apt-get install -y --no-install-recommends curl \ - && addgroup --gid $GROUP_ID user \ - && adduser --disabled-password --gecos '' --uid $USER_ID --gid $GROUP_ID user - RUN mkdir -p /tmp - RUN mkdir -p /opt - RUN curl -s -o /tmp/ipldt3_lin_intel64.tgz -k $IPLDB_TOOL_URL - RUN tar -xzvf /tmp/ipldt3_lin_intel64.tgz -C /opt - RUN ls /opt - RUN ls /opt/ipldt3_lin_intel64 - RUN rm -rf /tmp/ipldt3_lin_intel64.tgz - USER user - EOL - docker build -t ipldt:github_runner .
\ - --build-arg "IPLDB_TOOL_URL=$IPLDB_TOOL_URL" \ - --build-arg "USER_ID=${USER_ID}" \ - --build-arg "GROUP_ID=${GROUP_ID}" \ - --build-arg "http_proxy=${http_proxy}" \ - --build-arg "https_proxy=${https_proxy}" \ - --build-arg "no_proxy=${no_proxy}" - - name: Package Source - run: | - pushd source - git archive HEAD -o ../source.zip - popd - - name: Write Script - run: | - cat <<'EOL' > action.sh - #!/bin/bash - set -x - mkdir -p _logs/ip-leak-scan - set +o errexit - set -o pipefail - /opt/ipldt3_lin_intel64/ipldt3_lin_intel64 \ - -i source.zip \ - -c 37 \ - --usedb db/ipldt_results.ip.db \ - --usedb db/ipldt_results.ipevt.db \ - --usedb db/ipldt_results.tm.db \ - --db-rel-path \ - --gendb _logs/ip-leak-scan/hits-linux.db \ - --r-overwrite --r _logs/ip-leak-scan | tee /working/_logs/ipldt.txt - ret=$? - set +o pipefail - exit $ret - EOL - chmod +x action.sh - - name: Run Test - continue-on-error: false - run: | - docker run --rm -v $PWD:/working -w/working ipldt:github_runner ./action.sh - - name: Report - if: success() || failure() - run: | - echo '```' >> $GITHUB_STEP_SUMMARY - cat _logs/ip-leak-scan/ipldt_results.txt >> $GITHUB_STEP_SUMMARY - echo '```' >> $GITHUB_STEP_SUMMARY - - name: Record Artifacts - uses: actions/upload-artifact@v3 - if: success() || failure() - with: - name: ip-leak-scan - path: _logs/ip-leak-scan - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * diff --git a/.github/workflows/linux-distro-test.yaml b/.github/workflows/linux-distro-test.yaml deleted file mode 100644 index b723d3f1..00000000 --- a/.github/workflows/linux-distro-test.yaml +++ /dev/null @@ -1,109 +0,0 @@ -name: Test on Supported Linux OSs -run-name: Test on Supported Linux OSs (Triggered by ${{ github.event_name }} by @${{ github.actor }}) -on: - workflow_call: - inputs: - TEST_REF: - description: 'The branch, tag or SHA to find tests on' - required: false - default: '' - type: string - DISTRO_FAMILY: - description: 'Distribution family' - required: true - type: string - DISTRO_VERSION: - description: 'Distribution version' - required: true - type: string - NONFATAL: - description: 'Errors are nonfatal' - required: false - default: false - type: boolean - -jobs: - test: - runs-on: [self-hosted, Linux, docker, gen12.5] - continue-on-error: ${{ inputs.NONFATAL }} - steps: - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - - name: Load runtime overrides - uses: actions/cache/restore@v3 - with: - path: runtimes - key: runtimes - - - name: Checkout tests - uses: actions/checkout@v3 - id: checkout-tests-from-same-branch - continue-on-error: true - with: - repository: ${{ vars.TEST_REPO }} - token: ${{ secrets.TEST_REPO_TOKEN }} - path: tests - ref: ${{ inputs.TEST_REF }} - - - name: Download package - uses: actions/download-artifact@v3 - with: - name: linux-release-build - path: package - - name: Extract package - run: unzip package/*.zip -d _install - - - name: Build and Run Docker image - run: | - cd tests/platform/docker - DISTRO=${{ inputs.DISTRO_FAMILY }}${{ inputs.DISTRO_VERSION }} - docker build -f Dockerfile-$DISTRO -t vpl_distro:$DISTRO . 
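-          # the image tag matches the Dockerfile suffix convention, e.g.
-          # Dockerfile-rhel8 builds vpl_distro:rhel8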
- - name: Run tests - continue-on-error: ${{ inputs.NONFATAL }} - run: | - DISTRO=${{ inputs.DISTRO_FAMILY }}${{ inputs.DISTRO_VERSION }} - - cat >test.sh <<EOL - > /dev/null; then - unzip product/*.zip -d product - fi - - # Write test file if requested - - name: Write EICAR test file - if: success() && inputs.TEST_AV - run: | - echo 'X5O!P%@AP[4\PZX54(P^)7CC)7}$EICAR-STANDARD-ANTIVIRUS-TEST-FILE!$H+H*' > product/eicar-com.com - - # CaaS based testing - - name: Create docker image - if: success() && inputs.CAAS - run: | - docker pull ${{ inputs.CAAS }} - - name: Run Test - if: success() && inputs.CAAS - run: | - docker run -v $(realpath product):/scanme --rm ${{ inputs.CAAS }} >> report.txt - - # Local image based testing - - name: Checkout av scanning tool - if: success() && !inputs.CAAS - uses: actions/checkout@v3 - with: - repository: ${{ vars.AV_TOOL_REPO }} - token: ${{ secrets.TEST_REPO_TOKEN }} - path: av-scanning - ref: master - - name: Create docker image - if: success() && !inputs.CAAS - run: | - pushd av-scanning - sed -i 's|FROM.*ubuntu:latest|FROM public.ecr.aws/lts/ubuntu:22.04|' Dockerfile - docker build -t mcafee:latest . - popd - - name: Run Test - if: success() && !inputs.CAAS - run: | - docker run -v $(realpath product):/scanme --rm mcafee:latest >> report.txt - - # Publish - - name: Upload test results - uses: actions/upload-artifact@v3 - if: success() || failure() - with: - name: ${{ inputs.ARTIFACT || 'source' }}-malware-scan - path: report.txt - - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - \ No newline at end of file diff --git a/.github/workflows/release-summary.yaml b/.github/workflows/release-summary.yaml deleted file mode 100644 index 002d65d9..00000000 --- a/.github/workflows/release-summary.yaml +++ /dev/null @@ -1,71 +0,0 @@ -name: Release Summary -run-name: Release Summary (Triggered by ${{ github.event_name }} by @${{ github.actor }}) -on: - workflow_call: - -jobs: - summary: - runs-on: [self-hosted, Linux, docker] - steps: - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - name: Create High Level Structure - run: | - mkdir -p summary - mkdir -p summary/bom/linux - mkdir -p summary/bom/windows - mkdir -p summary/legal/ip_leaks - mkdir -p summary/quality/acceptance/Linux/gen9.5 - mkdir -p summary/quality/acceptance/Linux/gen12.5 - mkdir -p summary/quality/acceptance/Windows/gen9.5 - mkdir -p summary/quality/acceptance/Windows/gen12.5 - mkdir -p summary/quality/operating_systems - mkdir -p summary/quality/unit/Linux - mkdir -p summary/quality/unit/Windows - mkdir -p summary/security/CT7-KnownVulnerabilities - mkdir -p summary/security/CT36-3rdPartyComponents - mkdir -p summary/security/CT37-MalwareScan - mkdir -p summary/security/CT39-StaticAnalysis - mkdir -p summary/security/CT40-Custom - mkdir -p summary/security/T256-CompilerSettings - - - name: Download All Artifacts - uses: actions/download-artifact@v3 - with: - path: artifacts - - - name: Copy and Rename Files - run: | - cp -r artifacts/ip-leak-scan/. summary/legal/ip_leaks/ - cp -r artifacts/linux-diff-report/. summary/bom/linux/ - cp -r artifacts/windows-diff-report/. summary/bom/windows/ - cp -r artifacts/windows-release-acceptance-gen12.5/. summary/quality/acceptance/Windows/gen12.5/ - cp -r artifacts/windows-release-acceptance-gen9.5/. summary/quality/acceptance/Windows/gen9.5/ - cp -r artifacts/linux-release-acceptance-gen12.5/.
summary/quality/acceptance/Linux/gen12.5/ - cp -r artifacts/linux-release-acceptance-gen9.5/. summary/quality/acceptance/Linux/gen9.5/ - cp -r artifacts/windows-release-build-unit-tests/. summary/quality/unit/Windows/ - cp -r artifacts/linux-release-build-unit-tests/. summary/quality/unit/Linux/ - cp -r artifacts/windows-complier_settings/. summary/security/T256-CompilerSettings/ - cp -r artifacts/linux-complier_settings/. summary/security/T256-CompilerSettings/ - cp -r artifacts/distro-*/. summary/quality/operating_systems/ - cp -r artifacts/linux-release-build-dynamic-analysis/. summary/security/CT40-Custom/ - - cp artifacts/linux-coverity-scan/json/errors_v9_full.json summary/security/CT39-StaticAnalysis/linux-coverity.json - cp artifacts/windows-coverity-scan/json/errors_v9_full.json summary/security/CT39-StaticAnalysis/windows-coverity.json - cp artifacts/vpl-vulnerability-scan/vulns.csv summary/security/CT7-KnownVulnerabilities/vulns.csv - cp artifacts/vpl-vulnerability-scan/components.csv summary/security/CT36-3rdPartyComponents/components.csv - cp artifacts/source-malware-scan/report.txt summary/security/CT37-MalwareScan/source-report.txt - cp artifacts/windows-release-build-malware-scan/report.txt summary/security/CT37-MalwareScan/windows-build-report.txt - cp artifacts/linux-release-build-malware-scan/report.txt summary/security/CT37-MalwareScan/linux-build-report.txt - - - name: Upload Summary - uses: actions/upload-artifact@v3 - if: success() || failure() - with: - name: source-code-release-summary - path: summary/* - - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * diff --git a/.github/workflows/run_ci_checks.yaml b/.github/workflows/run_ci_checks.yaml new file mode 100644 index 00000000..30c59f16 --- /dev/null +++ b/.github/workflows/run_ci_checks.yaml @@ -0,0 +1,15 @@ +--- +name: Run CI checks +on: + pull_request: + types: "**" + branches: "**" + pull_request_review: + types: "**" + branches: "**" +permissions: read-all +jobs: + TriggerWorkflows: + uses: projectceladon/celadonworkflows/.github/workflows/trigger_ci.yml@v1.0 + with: + EVENT: ${{ toJSON(github.event) }} \ No newline at end of file diff --git a/.github/workflows/specification.yaml b/.github/workflows/specification.yaml deleted file mode 100644 index 4040c4d7..00000000 --- a/.github/workflows/specification.yaml +++ /dev/null @@ -1,498 +0,0 @@ -name: Specification - -on: - pull_request: - # Run on pull requests - # Trigger only when spec files have changed - paths: - - .github/workflows/** - - api/** - - doc/spec/** - workflow_dispatch: - # Run on user request - -jobs: - linux_check_structure_pack: - name: Linux Check Structure Pack - runs-on: [self-hosted, linux] - steps: - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - name: Checkout source - uses: actions/checkout@v3 - - name: Run check - run: python3 ./api/tests/abi/check_pack.py -i ./api/vpl - - linux_missed_tests: - name: Linux Missed tests - runs-on: [self-hosted, linux] - steps: - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - name: Checkout source - uses: actions/checkout@v3 - - name: Docker build - run: > - docker build -f ./api/tests/docker/Dockerfile.clang - --build-arg http_proxy=$http_proxy - --build-arg https_proxy=$https_proxy - -t clang_img .
- - name: Docker run - run: > - docker run --rm - -e http_proxy=$http_proxy - -e https_proxy=$https_proxy - --volume=$(pwd):/data - --user $(id -u):$(id -g) - clang_img python3 api/tests/abi/check_missing_structs.py - -i api/vpl - -t api/tests/abi/mfx_static_assert_structs.cpp - - linux_compile_c: - name: Linux Compile C - runs-on: [self-hosted, linux] - steps: - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - name: Checkout source - uses: actions/checkout@v3 - - name: Run ls - run: ls - - name: Docker build - run: > - docker build -f ./api/tests/docker/Dockerfile.gcc - --build-arg http_proxy=$http_proxy - --build-arg https_proxy=$https_proxy - -t gcc_img . - - name: Docker run - run: > - docker run --rm - -e http_proxy=$http_proxy - -e https_proxy=$https_proxy - --volume=$(pwd):/data - --user $(id -u):$(id -g) - gcc_img gcc -I./api/vpl ./api/tests/compile_headers/test.c - - linux_compile_c_exp: - name: Linux Compile C Experimental - runs-on: [self-hosted, linux] - steps: - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - name: Checkout source - uses: actions/checkout@v3 - - name: Run ls - run: ls - - name: Docker build - run: > - docker build -f ./api/tests/docker/Dockerfile.gcc - --build-arg http_proxy=$http_proxy - --build-arg https_proxy=$https_proxy - -t gcc_img . - - name: Docker run - run: > - docker run --rm - -e http_proxy=$http_proxy - -e https_proxy=$https_proxy - --volume=$(pwd):/data - --user $(id -u):$(id -g) - gcc_img gcc -I./api/vpl -DONEVPL_EXPERIMENTAL ./api/tests/compile_headers/test.c -o ./api/tests/abi/abi_test - - linux_compile_cpp: - name: Linux Compile C++ - runs-on: [self-hosted, linux] - steps: - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - name: Checkout source - uses: actions/checkout@v3 - - name: Run ls - run: ls - - name: Docker build - run: > - docker build -f ./api/tests/docker/Dockerfile.gcc - --build-arg http_proxy=$http_proxy - --build-arg https_proxy=$https_proxy - -t gcc_img . - - name: Docker run - run: > - docker run --rm - -e http_proxy=$http_proxy - -e https_proxy=$https_proxy - --volume=$(pwd):/data - --user $(id -u):$(id -g) - gcc_img gcc -I./api/vpl ./api/tests/compile_headers/test.cpp - - linux_compile_cpp_exp: - name: Linux Compile C++ Experimental - runs-on: [self-hosted, linux] - steps: - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - name: Checkout source - uses: actions/checkout@v3 - - name: Run ls - run: ls - - name: Docker build - run: > - docker build -f ./api/tests/docker/Dockerfile.gcc - --build-arg http_proxy=$http_proxy - --build-arg https_proxy=$https_proxy - -t gcc_img . - - name: Docker run - run: > - docker run --rm - -e http_proxy=$http_proxy - -e https_proxy=$https_proxy - --volume=$(pwd):/data - --user $(id -u):$(id -g) - gcc_img gcc -I./api/vpl -DONEVPL_EXPERIMENTAL ./api/tests/compile_headers/test.cpp - - linux_compile_abi_64: - name: Linux Static Assert Test - runs-on: [self-hosted, linux] - steps: - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - name: Checkout source - uses: actions/checkout@v3 - - name: Docker build - run: > - docker build -f ./api/tests/docker/Dockerfile.gcc - --build-arg http_proxy=$http_proxy - --build-arg https_proxy=$https_proxy - -t gcc_img . 
- - name: Docker run - run: > - docker run --rm - -e http_proxy=$http_proxy - -e https_proxy=$https_proxy - --volume=$(pwd):/data - --user $(id -u):$(id -g) - gcc_img gcc -D _x86_64 -m64 -I./api/vpl ./api/tests/abi/mfx_static_assert_structs.cpp - - name: Execute - run: ./a.out - - linux_compile_abi_64_exp: - name: Linux Static Assert Test Experimental - runs-on: [self-hosted, linux] - steps: - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - name: Checkout source - uses: actions/checkout@v3 - - name: Docker build - run: > - docker build -f ./api/tests/docker/Dockerfile.gcc - --build-arg http_proxy=$http_proxy - --build-arg https_proxy=$https_proxy - -t gcc_img . - - name: Docker run - run: > - docker run --rm - -e http_proxy=$http_proxy - -e https_proxy=$https_proxy - --volume=$(pwd):/data - --user $(id -u):$(id -g) - gcc_img gcc -D _x86_64 -m64 -I./api/vpl -DONEVPL_EXPERIMENTAL ./api/tests/abi/mfx_static_assert_structs.cpp - - name: Execute - run: ./a.out - - linux_compile_snippets_c: - name: Linux Compile Snippets C - runs-on: [self-hosted, linux] - steps: - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - name: Checkout source - uses: actions/checkout@v3 - - name: Docker build - run: > - docker build -f ./api/tests/docker/Dockerfile.gcc - --build-arg http_proxy=$http_proxy - --build-arg https_proxy=$https_proxy - -t gcc_img . - - name: Docker run - run: > - docker run --rm - -e http_proxy=$http_proxy - -e https_proxy=$https_proxy - --volume=$(pwd):/data - --user $(id -u):$(id -g) - gcc_img gcc -Wall -Wextra -Wno-unused-function -Werror -I./api/vpl -DONEVPL_EXPERIMENTAL - ./api/tests/doc_examples/dummy_impl.c - ./doc/spec/source/snippets/appnd_b.c ./doc/spec/source/snippets/prg_decoding.c - ./doc/spec/source/snippets/prg_encoding.c ./doc/spec/source/snippets/prg_err.c - ./doc/spec/source/snippets/prg_mem.c ./doc/spec/source/snippets/prg_transcoding.c - ./doc/spec/source/snippets/prg_vpp.c ./doc/spec/source/snippets/appnd_e.c - ./api/tests/doc_examples/main.c - ./doc/spec/source/snippets/prg_decoding_vpp.c - ./doc/spec/source/snippets/prg_disp.c - -ldl -lva -lva-drm - - linux_compile_snippets_cpp: - name: Linux Compile Snippets C++ - runs-on: [self-hosted, linux] - steps: - - name: Cleanup workspace (Linux) - if: always() && runner.os == 'Linux' - run: sudo rm -rf ..?* .[!.]* * - - name: Checkout source - uses: actions/checkout@v3 - - name: Docker build - run: > - docker build -f ./api/tests/docker/Dockerfile.gcc - --build-arg http_proxy=$http_proxy - --build-arg https_proxy=$https_proxy - -t gcc_img . 
- name: Docker run - run: > - docker run --rm - -e http_proxy=$http_proxy - -e https_proxy=$https_proxy - --volume=$(pwd):/data - --user $(id -u):$(id -g) - gcc_img g++ -Wall -Wextra -Wno-unused-function -Werror -I./api/vpl -DONEVPL_EXPERIMENTAL -DMFX_DEPRECATED_OFF - ./api/tests/doc_examples/dummy_impl.c - ./doc/spec/source/snippets/prg_hw.cpp ./doc/spec/source/snippets/prg_session.cpp - ./doc/spec/source/snippets/prg_surface_sharing.cpp - ./doc/spec/source/snippets/prg_config.cpp - ./api/tests/doc_examples/main.c - - windows_compile_c: - name: Windows Compile C - runs-on: [self-hosted, windows] - steps: - - name: Cleanup workspace (Windows) - if: always() && runner.os == 'Windows' - run: Remove-Item -Recurse -Force .\* - - name: Checkout source code - uses: actions/checkout@v1 - - name: Setup VisualStudio build environment - uses: intel-innersource/frameworks.actions.thirdparty.msvc-dev-cmd@v1 - - name: Build with VisualStudio - run: > - cl.exe /EHsc /I .\api\vpl - /D ONEVPL_EXPERIMENTAL - .\api\tests\compile_headers\test.c - - windows_compile_cpp: - name: Windows Compile CPP - runs-on: [self-hosted, windows] - steps: - - name: Cleanup workspace (Windows) - if: always() && runner.os == 'Windows' - run: Remove-Item -Recurse -Force .\* - - name: Checkout source code - uses: actions/checkout@v1 - - name: Setup VisualStudio build environment - uses: intel-innersource/frameworks.actions.thirdparty.msvc-dev-cmd@v1 - - name: Build with VisualStudio - run: > - cl.exe /EHsc /I .\api\vpl - /D ONEVPL_EXPERIMENTAL - .\api\tests\compile_headers\test.cpp - - windows_compile_abi_64: - name: Windows Static Assert Test (64-bit) - runs-on: [self-hosted, windows] - steps: - - name: Cleanup workspace (Windows) - if: always() && runner.os == 'Windows' - run: Remove-Item -Recurse -Force .\* - - name: Checkout source code - uses: actions/checkout@v1 - - name: Setup VisualStudio build environment - uses: intel-innersource/frameworks.actions.thirdparty.msvc-dev-cmd@v1 - - name: Build with experimental (x64) - run: > - cl.exe /EHsc /I .\api\vpl - /D ONEVPL_EXPERIMENTAL /D _x86_64 - .\api\tests\abi\mfx_static_assert_structs.cpp - - name: Execute - run: .\mfx_static_assert_structs.exe - - name: Build without experimental (x64) - run: > - cl.exe /EHsc /I .\api\vpl - /D _x86_64 - .\api\tests\abi\mfx_static_assert_structs.cpp - - name: Execute - run: .\mfx_static_assert_structs.exe - - windows_compile_abi_32: - name: Windows Static Assert Test (32-bit) - runs-on: [self-hosted, windows] - steps: - - name: Cleanup workspace (Windows) - if: always() && runner.os == 'Windows' - run: Remove-Item -Recurse -Force .\* - - name: Checkout source code - uses: actions/checkout@v1 - - name: Setup VisualStudio build environment - uses: intel-innersource/frameworks.actions.thirdparty.msvc-dev-cmd@v1 - with: - arch: x86 - - name: Build with experimental (x86) - run: > - cl.exe /EHsc /I .\api\vpl /D _x86 - /D ONEVPL_EXPERIMENTAL - .\api\tests\abi\mfx_static_assert_structs.cpp - - name: Execute - run: .\mfx_static_assert_structs.exe - - name: Build without experimental (x86) - run: > - cl.exe /EHsc /I .\api\vpl /D _x86 - .\api\tests\abi\mfx_static_assert_structs.cpp - - name: Execute - run: .\mfx_static_assert_structs.exe - - required_checks: - name: Verify Required Checks - needs: - - linux_check_structure_pack - - linux_missed_tests - - linux_compile_c - - linux_compile_c_exp - - linux_compile_cpp - - linux_compile_cpp_exp - - linux_compile_abi_64 - - linux_compile_abi_64_exp - - linux_compile_snippets_c - - linux_compile_snippets_cpp
-
-  required_checks:
-    name: Verify Required Checks
-    needs:
-      - linux_check_structure_pack
-      - linux_missed_tests
-      - linux_compile_c
-      - linux_compile_c_exp
-      - linux_compile_cpp
-      - linux_compile_cpp_exp
-      - linux_compile_abi_64
-      - linux_compile_abi_64_exp
-      - linux_compile_snippets_c
-      - linux_compile_snippets_cpp
-      - windows_compile_abi_64
-      - windows_compile_abi_32
-      - windows_compile_c
-      - windows_compile_cpp
-    runs-on: [self-hosted]
-    steps:
-      - name: Required Checks
-        run: echo Done
-
-  html:
-    name: HTML
-    needs:
-      - required_checks
-    runs-on: [self-hosted, linux]
-    steps:
-      - name: Cleanup workspace (Linux)
-        if: always() && runner.os == 'Linux'
-        run: sudo rm -rf ..?* .[!.]* *
-      - name: Checkout source
-        uses: actions/checkout@v3
-      - name: Docker build
-        run: python3 ./doc/spec/build-spec.py dockerbuild
-      - name: Build spec - HTML
-        run: >
-          docker run --rm
-          -e http_proxy=$http_proxy
-          -e https_proxy=$https_proxy
-          --volume=$(pwd):/build
-          --workdir=/build
-          --user $(id -u):$(id -g)
-          vpl-spec
-          python3 ./doc/spec/build-spec.py -W html ./doc/spec
-      - name: Archive HTML
-        uses: actions/upload-artifact@v3
-        with:
-          name: HTML
-          path: ./doc/spec/build/html/
-          retention-days: 28
-
-  pdf:
-    name: PDF
-    needs:
-      - required_checks
-    runs-on: [self-hosted, linux]
-    steps:
-      - name: Cleanup workspace (Linux)
-        if: always() && runner.os == 'Linux'
-        run: sudo rm -rf ..?* .[!.]* *
-      - name: Checkout source
-        uses: actions/checkout@v3
-      - name: Docker build
-        run: python3 ./doc/spec/build-spec.py dockerbuild
-      - name: Build spec - PDF
-        run: >
-          docker run --rm
-          -e http_proxy=$http_proxy
-          -e https_proxy=$https_proxy
-          --volume=$(pwd):/build
-          --workdir=/build
-          --user $(id -u):$(id -g)
-          vpl-spec
-          python3 ./doc/spec/build-spec.py -W latexpdf ./doc/spec
-      - name: Archive PDF
-        uses: actions/upload-artifact@v3
-        with:
-          name: pdf
-          path: ./doc/spec/build/latex/
-          retention-days: 28
-
-  # if branch name is in the format "specification/vX.Y" then use the basename (vX.Y)
-  # as the github pages subdirectory to publish into
-  get_branch_basename:
-    name: Get branch basename
-    runs-on: [self-hosted, linux]
-    outputs:
-      branch_name: ${{ steps.get_branch_name.outputs.branch_name }}
-      is_spec_branch: ${{ steps.get_branch_name.outputs.is_spec_branch }}
-    steps:
-      - name: Parse name
-        id: get_branch_name
-        run: |
-          if [[ ${{ github.ref }} =~ ^refs/heads/specification/v[0-9]+\.[0-9]+$ ]]; then
-            echo "Specification branch matched"
-            echo "${GITHUB_REF#refs/heads/specification/*}"
-            base_name="${GITHUB_REF#refs/heads/specification/*}"
-            echo "branch_name=$base_name" >> $GITHUB_OUTPUT
-            echo "is_spec_branch=true" >> $GITHUB_OUTPUT
-          else
-            echo "Specification branch not matched"
-            echo "${GITHUB_REF#refs/heads/*}"
-            base_name="${GITHUB_REF#refs/heads/*}"
-            echo "branch_name=$base_name" >> $GITHUB_OUTPUT
-            echo "is_spec_branch=false" >> $GITHUB_OUTPUT
-          fi
-
-  pages:
-    name: Deploy HTML
-    needs:
-      - html
-      - get_branch_basename
-    if: (github.ref == 'refs/heads/main' || needs.get_branch_basename.outputs.is_spec_branch == 'true')
-    runs-on: [self-hosted, linux]
-    steps:
-      - name: Download HTML
-        uses: actions/download-artifact@v3
-        with:
-          name: HTML
-          path: ./html_files
-      - name: Move html files to public
-        run: mv ./html_files ./public
-      - name: Deploy to latest
-        if: (github.ref == 'refs/heads/main')
-        uses: intel-innersource/frameworks.actions.thirdparty.actions-gh-pages@v3
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          publish_dir: ./public
-          destination_dir: latest
-          user_name: 'github-actions[bot]'
-          user_email: 'github-actions[bot]@users.noreply.github.com'
-      - name: Deploy to version subdirectory
-        if: (needs.get_branch_basename.outputs.is_spec_branch == 'true')
-        uses: intel-innersource/frameworks.actions.thirdparty.actions-gh-pages@v3
-        with:
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-          publish_dir: ./public
-          destination_dir: ${{ needs.get_branch_basename.outputs.branch_name }}
-          user_name: 'github-actions[bot]'
-          user_email: 'github-actions[bot]@users.noreply.github.com'
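The get_branch_basename job above derives the publish subdirectory with shell parameter expansion rather than from the regex match: the "#" operator strips the shortest matching prefix, so the trailing "*" in the pattern matches nothing and only the literal prefix is removed. A minimal sketch runnable in bash (the GITHUB_REF values are illustrative):

    # specification branch: refs/heads/specification/v2.9 -> v2.9
    GITHUB_REF=refs/heads/specification/v2.9
    echo "${GITHUB_REF#refs/heads/specification/*}"
    # any other branch: refs/heads/main -> main
    GITHUB_REF=refs/heads/main
    echo "${GITHUB_REF#refs/heads/*}"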
diff --git a/.github/workflows/trivy.yaml b/.github/workflows/trivy.yaml
deleted file mode 100644
index e3e751f0..00000000
--- a/.github/workflows/trivy.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
-name: Trivy
-run-name: Trivy (Triggered by ${{ github.event_name }} by @${{ github.actor }})
-on:
-  workflow_call:
-
-jobs:
-  scan:
-    runs-on: [self-hosted, linux, docker]
-    steps:
-      - name: Cleanup workspace
-        run: sudo rm -rf ..?* .[!.]* *
-
-      - name: Checkout dispatcher source
-        uses: actions/checkout@v3
-        with:
-          path: source
-
-      - name: Pull docker image
-        run: docker pull aquasec/trivy:0.47.0
-
-      - name: Scan
-        run: |
-          mkdir artifact
-          echo "Trivy Report" > artifact/trivy.txt
-          docker run \
-            -v /var/run/docker.sock:/var/run/docker.sock \
-            -v $HOME/Library/Caches:/root/.cache/ \
-            -v $(pwd):/work \
-            -w /work \
-            --attach stderr --attach stdout \
-            aquasec/trivy:0.47.0 \
-            fs . >> artifact/trivy.txt 2>&1
-
-      - name: Summarize
-        if: (failure())
-        run: |
-          echo '```' >> $GITHUB_STEP_SUMMARY
-          cat artifact/trivy.txt >> $GITHUB_STEP_SUMMARY
-          echo '```' >> $GITHUB_STEP_SUMMARY
-
-      - name: Report
-        if: (success() || failure())
-        run: |
-          cat artifact/trivy.txt
-
-      - name: Record Artifacts
-        uses: actions/upload-artifact@v3
-        if: (success() || failure())
-        with:
-          name: Trivy
-          path: artifact/*
-
-      - name: Cleanup workspace
-        if: always() && runner.os == 'Linux'
-        run: sudo rm -rf ..?* .[!.]* *
diff --git a/.github/workflows/vulnerability-scan.yaml b/.github/workflows/vulnerability-scan.yaml
deleted file mode 100644
index 8594f9b0..00000000
--- a/.github/workflows/vulnerability-scan.yaml
+++ /dev/null
@@ -1,79 +0,0 @@
-name: Vulnerability Scan
-on:
-  workflow_call:
-
-jobs:
-  vulnerability-scan:
-    name: combined
-    runs-on: [self-hosted, linux]
-    steps:
-      - name: Cleanup workspace (Linux)
-        if: always() && runner.os == 'Linux'
-        run: sudo rm -rf ..?* .[!.]* *
-
-      - name: Checkout infrastructure
-        uses: actions/checkout@v3
-        with:
-          repository: ${{ vars.INFRA_REPO }}
-          token: ${{ secrets.INFRA_REPO_TOKEN }}
-          path: infrastructure
-          ref: main
-
-      - name: Download linux-release-build
-        uses: actions/download-artifact@v3
-        with:
-          name: linux-release-build
-          path: bdba
-
-      - name: Download windows-release-build
-        uses: actions/download-artifact@v3
-        with:
-          name: windows-release-build
-          path: bdba
-
-      - name: Create archive to scan
-        run: |
-          cp infrastructure/config/.bdba.yaml bdba/
-          pushd bdba
-          zip --symlinks -r ../vpl-release.zip .
-          popd
-
-      - name: Build Docker image for scanning
-        run: |
-          cat >Dockerfile << EOL
-          FROM ubuntu:22.04
-
-          RUN apt-get update && apt-get install -y --no-install-recommends \
-              python3-venv \
-              && \
-              rm -rf /var/lib/apt/lists/*
-          EOL
-
-          docker build . -t vpl_pythonenv:ubuntu
-
-      - name: Scan package in container
-        run: |
-          cat >scan.sh <results.json
-
-          EOL
-          chmod a+x scan.sh
-          docker run --rm -v $(pwd):/tmp/work -w /tmp/work vpl_pythonenv:ubuntu ./scan.sh
-
-      - name: Upload scan results
-        uses: actions/upload-artifact@v3
-        if: success() || failure()
-        with:
-          name: vpl-vulnerability-scan
-          path: |
-            *.csv
-            *.json
-            *.pdf
-
-      - name: Cleanup workspace (Linux)
-        if: always() && runner.os == 'Linux'
-        run: sudo rm -rf ..?* .[!.]* *
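The Scan step in trivy.yaml seeds artifact/trivy.txt and then appends the scanner output to it, so both stdout and stderr need to reach the file, and the order of the redirections decides whether that happens: 2>&1 is resolved against wherever stdout points at that moment. A minimal POSIX-shell sketch (the cmd function is illustrative):

    # emit one line on each stream
    cmd() { echo out; echo err >&2; }
    cmd >> artifact/trivy.txt 2>&1   # both lines land in the report file
    cmd 2>&1 >> artifact/trivy.txt   # "err" goes to the terminal: stderr was
                                     # duplicated before stdout moved to the file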