Add llama.cpp backend #365

name: CLI CUDA Torch-ORT Single-GPU Tests

on:
  workflow_dispatch:
  push:
    branches:
      - main
    paths:
      - .github/workflows/test_cli_cuda_torch_ort_single_gpu.yaml
      - "optimum_benchmark/**"
      - "docker/**"
      - "tests/**"
      - "setup.py"
  pull_request:
    branches:
      - main
    paths:
      - .github/workflows/test_cli_cuda_torch_ort_single_gpu.yaml
      - "optimum_benchmark/**"
      - "docker/**"
      - "tests/**"
      - "setup.py"
concurrency:
  cancel-in-progress: true
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
env:
  IMAGE: ghcr.io/huggingface/optimum-benchmark:latest-cuda-ort
jobs:
  build_image_and_run_cli_cuda_torch_ort_single_gpu_tests:
    # self-hosted runner labels: single A10 GPU CI machine
    runs-on: [single-gpu, nvidia-gpu, a10, ci]
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Run tests
        uses: addnab/docker-run-action@v3
        with:
          image: ${{ env.IMAGE }}
          options: |
            --rm
            --gpus all
            --shm-size 64G
            --env MKL_THREADING_LAYER=GNU
            --volume ${{ github.workspace }}:/workspace
            --workdir /workspace
          run: |
            pip install -e .[testing,torch-ort,peft]
            # run CLI CUDA Torch-ORT tests, excluding distributed (dp/ddp/device_map) and PEFT variants
            pytest -x -s -k "cli and cuda and torch_ort and not (dp or ddp or device_map) and not (peft)"
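
For local debugging, the docker-run-action step above maps directly onto a plain docker run invocation. A minimal sketch, assuming a CUDA host with Docker and the NVIDIA Container Toolkit installed, run from a checkout of the repository root:

  docker run --rm --gpus all --shm-size 64G \
    --env MKL_THREADING_LAYER=GNU \
    --volume "$PWD":/workspace --workdir /workspace \
    ghcr.io/huggingface/optimum-benchmark:latest-cuda-ort \
    bash -c 'pip install -e .[testing,torch-ort,peft] && pytest -x -s -k "cli and cuda and torch_ort and not (dp or ddp or device_map) and not (peft)"'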
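
Since the PR itself adds a llama.cpp backend, its tests would presumably be invoked with the same pattern, just a different extras group and pytest keyword. A sketch only; the llama-cpp extra name and the llama_cpp keyword are assumptions, not confirmed by this workflow file:

  pip install -e .[testing,llama-cpp]   # extra name assumed
  pytest -x -s -k "cli and llama_cpp"   # test keyword assumed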