# .github/workflows/test_cli_cpu_pytorch.yaml
# (introduced/updated in PR #533: "Add llama.cpp backend")
name: CLI CPU Pytorch tests

# Run on manual dispatch, and on pushes/PRs to main that touch code,
# docker setup, tests, packaging, or this workflow file itself.
on:
  workflow_dispatch:
  push:
    branches:
      - main
    paths:
      - ".github/workflows/test_cli_cpu_pytorch.yaml"
      - "optimum_benchmark/**"
      - "docker/**"
      - "tests/**"
      - "setup.py"
  pull_request:
    branches:
      - main
    paths:
      - ".github/workflows/test_cli_cpu_pytorch.yaml"
      - "optimum_benchmark/**"
      - "docker/**"
      - "tests/**"
      - "setup.py"

# Cancel in-flight runs for the same PR (or ref, for pushes) when a new
# commit arrives, so only the latest run consumes CI resources.
concurrency:
  cancel-in-progress: true
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

jobs:
  run_cli_cpu_pytorch_tests:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Set up Python 3.10
        uses: actions/setup-python@v3
        with:
          # Quoted: an unquoted 3.10 would be parsed as the float 3.1.
          python-version: "3.10"

      - name: Install requirements
        run: |
          pip install --upgrade pip
          pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
          pip install -e .[testing,diffusers,timm,peft]

      - name: Run tests
        # Select only the CLI + CPU + PyTorch test subset by keyword.
        run: pytest -s -k "cli and cpu and pytorch"