Skip to content

Commit

Permalink
add testing for example folder
Browse files Browse the repository at this point in the history
  • Loading branch information
baptistecolle committed Oct 9, 2024
1 parent d58bb25 commit 24c77f9
Show file tree
Hide file tree
Showing 13 changed files with 249 additions and 26 deletions.
Empty file.
Empty file.
Empty file.
Empty file.
Empty file.
Empty file.
51 changes: 51 additions & 0 deletions .github/workflows/test_example_cuda_pytorch.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
# CI workflow: run the examples-folder tests that need a CUDA GPU with the PyTorch backend.
name: Examples Folder CUDA Pytorch Tests

on:
  workflow_dispatch:
  push:
    branches:
      - main
  pull_request:
    branches:
      - main
    # Re-run on (un)labeling so label-gated jobs below react to label changes.
    types:
      - opened
      - reopened
      - synchronize
      - labeled
      - unlabeled

# Cancel in-flight runs for the same PR/branch so only the latest commit is tested.
concurrency:
  cancel-in-progress: true
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

jobs:
  # NOTE(review): job id says "cli" but this workflow tests the examples folder —
  # consider renaming (may require updating any required-status-check settings).
  run_cli_cuda_pytorch_single_gpu_tests:
    # Always run on push/dispatch; on PRs, only when a relevant label is present.
    if: ${{
      (github.event_name == 'push') ||
      (github.event_name == 'workflow_dispatch') ||
      contains( github.event.pull_request.labels.*.name, 'examples') ||
      contains( github.event.pull_request.labels.*.name, 'cuda') ||
      contains( github.event.pull_request.labels.*.name, 'pytorch') ||
      contains( github.event.pull_request.labels.*.name, 'single_gpu') ||
      contains( github.event.pull_request.labels.*.name, 'examples_cuda_pytorch_single_gpu')
      }}

    runs-on:
      group: aws-g5-4xlarge-plus

    container:
      image: ghcr.io/huggingface/optimum-benchmark:latest-cuda
      # Expose all host GPUs and share IPC namespace (needed for CUDA workloads).
      options: --ipc host --gpus all

    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Install dependencies
        run: |
          pip install -e .[testing]

      - name: Run tests
        run: |
          pytest tests/test_examples_folder.py -x -s -k "cuda and pytorch"
Empty file.
Empty file.
Empty file.
Empty file.
26 changes: 0 additions & 26 deletions examples/pytorch_bert_mps.yaml

This file was deleted.

198 changes: 198 additions & 0 deletions tests/test_examples_folder.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,198 @@
import os
from logging import getLogger
from pathlib import Path

import pytest

from optimum_benchmark.logging_utils import run_subprocess_and_log_stream_output

LOGGER = getLogger("test-example")

EXAMPLES_DIR = Path(__file__).parent.parent / "examples"
OUTPUT_DIR = Path(__file__).parent.parent / "runs"

# Every example config shipped in examples/, except the shared hydra base config.
EXAMPLE_CONFIGS = [f for f in os.listdir(EXAMPLES_DIR) if f.endswith(".yaml") and f != "_base_.yaml"]

# can be run with pytest tests/test_examples_folder.py -s -k "cpu and pytorch"
CPU_PYTORCH_EXAMPLE_CONFIGS = [
    "pytorch_bert.yaml",
]

# can be run with pytest tests/test_examples_folder.py -s -k "cpu and ipex"
CPU_IPEX_EXAMPLE_CONFIGS = [
    "ipex_bert.yaml",
    "ipex_llama.yaml",
]

# can be run with pytest tests/test_examples_folder.py -s -k "cpu and neural_compressor"
CPU_NEURAL_COMPRESSOR_EXAMPLE_CONFIGS = [
    "neural_compressor_ptq_bert.yaml",
    "numactl_bert.yaml",
]

# can be run with pytest tests/test_examples_folder.py -s -k "cpu and onnxruntime"
CPU_ONNXRUNTIME_EXAMPLE_CONFIGS = [
    "onnxruntime_static_quant_vit.yaml",
    "onnxruntime_timm.yaml",
]

# can be run with pytest tests/test_examples_folder.py -s -k "cpu and openvino"
CPU_OPENVINO_EXAMPLE_CONFIGS = [
    "openvino_diffusion.yaml",
    "openvino_static_quant_bert.yaml",
]

# can be run with pytest tests/test_examples_folder.py -s -k "cpu and tei"
CPU_TEI_EXAMPLE_CONFIGS = [
    "tei_bge.yaml",
]

# can be run with pytest tests/test_examples_folder.py -s -k "cuda and pytorch"
CUDA_PYTORCH_EXAMPLE_CONFIGS = [
    "energy_star.yaml",
    "pytorch_bert.yaml",
    "pytorch_llama.yaml",
]

# can be run with pytest tests/test_examples_folder.py -s -k "cuda and tgi"
CUDA_TGI_EXAMPLE_CONFIGS = [
    "tgi_llama.yaml",
]

# can be run with pytest tests/test_examples_folder.py -s -k "cuda and trt"
CUDA_TRT_EXAMPLE_CONFIGS = [
    "trt_llama.yaml",
]

# can be run with pytest tests/test_examples_folder.py -s -k "cuda and vllm"
CUDA_VLLM_EXAMPLE_CONFIGS = [
    "vllm_llama.yaml",
]

# can be run with pytest tests/test_examples_folder.py -s -k "mps and llama_cpp"
MPS_LLAMA_CPP_EXAMPLE_CONFIGS = [
    "llama_cpp_embedding.yaml",
    "llama_cpp_text_generation.yaml",
]

ALL_CONFIGS = (
    CUDA_PYTORCH_EXAMPLE_CONFIGS
    + CPU_PYTORCH_EXAMPLE_CONFIGS
    + CPU_IPEX_EXAMPLE_CONFIGS
    + MPS_LLAMA_CPP_EXAMPLE_CONFIGS
    + CPU_NEURAL_COMPRESSOR_EXAMPLE_CONFIGS
    + CPU_ONNXRUNTIME_EXAMPLE_CONFIGS
    + CPU_OPENVINO_EXAMPLE_CONFIGS
    + CPU_TEI_EXAMPLE_CONFIGS
    + CUDA_TGI_EXAMPLE_CONFIGS
    + CUDA_TRT_EXAMPLE_CONFIGS
    + CUDA_VLLM_EXAMPLE_CONFIGS
)

# Fail fast at import time if the curated lists above and the files on disk ever
# drift apart, so a new example config cannot silently skip CI. An explicit raise
# is used instead of `assert`, which would be stripped under `python -O`.
_missing_on_disk = set(ALL_CONFIGS) - set(EXAMPLE_CONFIGS)
_untracked_in_ci = set(EXAMPLE_CONFIGS) - set(ALL_CONFIGS)
if _missing_on_disk or _untracked_in_ci:
    raise RuntimeError(
        "Please add your new example config to the list of configs in test_examples_folder.py "
        "for it to be integrated in the CI/CD pipeline.\n"
        "Difference between ALL_CONFIGS and EXAMPLE_CONFIGS:\n"
        f"In ALL_CONFIGS but not in EXAMPLE_CONFIGS: {_missing_on_disk}\n"
        f"In EXAMPLE_CONFIGS but not in ALL_CONFIGS: {_untracked_in_ci}"
    )


def test_example_configs(config_name):
    """Run one example config end to end through the optimum-benchmark CLI.

    Launches ``optimum-benchmark --config-dir examples --config-name <stem>``
    as a subprocess, then asserts that it exited cleanly and that at least one
    file was written under ``runs/<stem>``.
    """
    # "pytorch_bert.yaml" -> "pytorch_bert"; hydra wants the name without extension.
    config_stem = config_name.split(".")[0]

    args = [
        "optimum-benchmark",
        "--config-dir",
        str(EXAMPLES_DIR),
        "--config-name",
        config_stem,
    ]

    popen = run_subprocess_and_log_stream_output(LOGGER, args)
    assert popen.returncode == 0, f"Failed to run {config_name}"

    # Check if the benchmark produced any output
    output_dir = Path(OUTPUT_DIR) / config_stem
    assert output_dir.exists(), f"No output directory found for {config_name}"

    # Check if there's at least one file in the output directory
    output_files = list(output_dir.glob("*"))
    assert len(output_files) > 0, f"No output files found for {config_name}"


# Despite its name this is a shared helper, not a test: it matches pytest's
# `test_*` collection pattern but has no parametrization, so without this
# marker pytest collects it directly and errors on the missing `config_name`
# fixture.
test_example_configs.__test__ = False


@pytest.mark.cuda
@pytest.mark.pytorch
@pytest.mark.parametrize("config_name", CUDA_PYTORCH_EXAMPLE_CONFIGS)
def test_cuda_pytorch_examples(config_name):
    """Run each CUDA PyTorch example config end to end (one test per config)."""
    test_example_configs(config_name)


@pytest.mark.cpu
@pytest.mark.pytorch
@pytest.mark.parametrize("config_name", CPU_PYTORCH_EXAMPLE_CONFIGS)
def test_cpu_pytorch_examples(config_name):
    """Run each CPU PyTorch example config end to end (one test per config)."""
    test_example_configs(config_name)


@pytest.mark.cpu
@pytest.mark.ipex
@pytest.mark.parametrize("config_name", CPU_IPEX_EXAMPLE_CONFIGS)
def test_cpu_ipex_examples(config_name):
    """Run each CPU IPEX example config end to end (one test per config)."""
    test_example_configs(config_name)


@pytest.mark.mps
@pytest.mark.llama_cpp
@pytest.mark.parametrize("config_name", MPS_LLAMA_CPP_EXAMPLE_CONFIGS)
def test_mps_llama_cpp_examples(config_name):
    """Run each MPS llama.cpp example config end to end (one test per config)."""
    test_example_configs(config_name)


@pytest.mark.cpu
@pytest.mark.neural_compressor
@pytest.mark.parametrize("config_name", CPU_NEURAL_COMPRESSOR_EXAMPLE_CONFIGS)
def test_cpu_neural_compressor_examples(config_name):
    """Run each CPU Neural Compressor example config end to end (one test per config)."""
    test_example_configs(config_name)


@pytest.mark.cpu
@pytest.mark.onnxruntime
@pytest.mark.parametrize("config_name", CPU_ONNXRUNTIME_EXAMPLE_CONFIGS)
def test_cpu_onnxruntime_examples(config_name):
    """Run each CPU ONNX Runtime example config end to end (one test per config)."""
    test_example_configs(config_name)


@pytest.mark.cpu
@pytest.mark.openvino
@pytest.mark.parametrize("config_name", CPU_OPENVINO_EXAMPLE_CONFIGS)
def test_cpu_openvino_examples(config_name):
    """Run each CPU OpenVINO example config end to end (one test per config)."""
    test_example_configs(config_name)


@pytest.mark.cpu
@pytest.mark.tei
@pytest.mark.parametrize("config_name", CPU_TEI_EXAMPLE_CONFIGS)
def test_cpu_tei_examples(config_name):
    """Run each CPU TEI (text-embeddings-inference) example config end to end."""
    test_example_configs(config_name)


@pytest.mark.cuda
@pytest.mark.tgi
@pytest.mark.parametrize("config_name", CUDA_TGI_EXAMPLE_CONFIGS)
def test_cuda_tgi_examples(config_name):
    """Run each CUDA TGI (text-generation-inference) example config end to end."""
    test_example_configs(config_name)


@pytest.mark.cuda
@pytest.mark.trt
@pytest.mark.parametrize("config_name", CUDA_TRT_EXAMPLE_CONFIGS)
def test_cuda_trt_examples(config_name):
    """Run each CUDA TensorRT-LLM example config end to end (one test per config)."""
    test_example_configs(config_name)


@pytest.mark.cuda
@pytest.mark.vllm
@pytest.mark.parametrize("config_name", CUDA_VLLM_EXAMPLE_CONFIGS)
def test_cuda_vllm_examples(config_name):
    """Run each CUDA vLLM example config end to end (one test per config)."""
    test_example_configs(config_name)


if __name__ == "__main__":
    # Propagate pytest's exit status: the previous bare `pytest.main(...)` call
    # discarded it, so direct invocation exited 0 even when tests failed.
    raise SystemExit(pytest.main([__file__]))

0 comments on commit 24c77f9

Please sign in to comment.