# Workflow file captured from GitHub run view — PR #669 "Add llama.cpp backend"
# CI workflow: runs the CLI test suite subset that needs no accelerator
# (excludes cpu/cuda/mps-marked tests) across a Python version matrix.
name: CLI Misc Tests

on:
  # Allow manual runs from the Actions tab.
  workflow_dispatch:
  push:
    branches:
      - main
    # Only trigger when files that affect these tests change.
    paths:
      - ".github/workflows/test_cli_misc.yaml"
      - "optimum_benchmark/**"
      - "tests/**"
      - "setup.py"
  pull_request:
    branches:
      - main
    paths:
      - ".github/workflows/test_cli_misc.yaml"
      - "optimum_benchmark/**"
      - "tests/**"
      - "setup.py"

# Cancel superseded runs of the same PR/ref to save runner time.
concurrency:
  cancel-in-progress: true
  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}

jobs:
  run_misc_cli_tests:
    strategy:
      # Let every matrix cell finish even if one Python version fails.
      fail-fast: false
      matrix:
        os: [ubuntu-latest]
        # Quoted so 3.10 is not parsed as the float 3.1.
        python: ["3.8", "3.9", "3.10"]
    runs-on: ${{ matrix.os }}
    steps:
      - name: Checkout
        uses: actions/checkout@v3

      - name: Set up Python ${{ matrix.python }}
        uses: actions/setup-python@v3
        with:
          python-version: ${{ matrix.python }}

      - name: Install packages
        run: |
          sudo apt-get update
          sudo apt-get install -y numactl

      - name: Install requirements
        run: |
          pip install --upgrade pip
          pip install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu
          pip install -e .[testing]

      - name: Run tests
        # Select CLI tests that are device-agnostic (no cpu/cuda/mps markers).
        run: pytest -s -k "cli and not (cpu or cuda or mps)"