gpu fixes #54

Merged · 5 commits · Apr 4, 2023
3 changes: 3 additions & 0 deletions .github/workflows/ci.yml
@@ -14,6 +14,9 @@ jobs:
- name: build image
run: make build

- name: test-no-docker
run: make test-no-docker

- name: test
run: make test

5 changes: 5 additions & 0 deletions Dockerfile
@@ -22,5 +22,10 @@ RUN --mount=type=cache,target=/root/.cache/pip \
pip install .[dev]

COPY pitch_detectors /app/pitch_detectors

RUN --mount=type=cache,target=/root/.cache/pip \
pip install --no-deps .

COPY tests /app/tests
COPY scripts/ /app/scripts
COPY data /app/data
6 changes: 6 additions & 0 deletions Makefile
@@ -9,6 +9,12 @@ push:
docker push $(IMAGE)
docker push tandav/pitch-detectors:latest

# python -m pitch_detectors.util ld_library_path

.PHONY: test-no-docker
test-no-docker:
/home/tandav/.virtualenvs/pitch-detectors/bin/python -m pytest -c no_docker_pytest.ini -x -v --cov pitch_detectors

.PHONY: test
test: build
docker run --rm -t --gpus all \
4 changes: 4 additions & 0 deletions no_docker_pytest.ini
@@ -0,0 +1,4 @@
[pytest]
env =
PITCH_DETECTORS_PENN_CHECKPOINT_PATH=/home/tandav/docs/bhairava/libmv/data/fcnf0++.pt
PITCH_DETECTORS_SPICE_MODEL_PATH=/home/tandav/docs/bhairava/libmv/data/spice_model
4 changes: 2 additions & 2 deletions pitch_detectors/algorithms/__init__.py
@@ -35,6 +35,6 @@
PipTrack,
)

cpu_algorithms = tuple(a.name() for a in ALGORITHMS if not a.use_gpu) # type: ignore
gpu_algorithms = tuple(a.name() for a in ALGORITHMS if a.use_gpu) # type: ignore
cpu_algorithms = tuple(a.name() for a in ALGORITHMS if not a.gpu_capable) # type: ignore
gpu_algorithms = tuple(a.name() for a in ALGORITHMS if a.gpu_capable) # type: ignore
algorithms = cpu_algorithms + gpu_algorithms
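A minimal, self-contained sketch (not part of the diff) of what the renamed `gpu_capable` attribute drives; the two stand-in classes are hypothetical:

class _FakeCpuAlg:
    gpu_capable = False

    @classmethod
    def name(cls) -> str:
        return cls.__name__

class _FakeGpuAlg(_FakeCpuAlg):
    gpu_capable = True

ALGORITHMS = (_FakeCpuAlg, _FakeGpuAlg)
cpu_algorithms = tuple(a.name() for a in ALGORITHMS if not a.gpu_capable)
gpu_algorithms = tuple(a.name() for a in ALGORITHMS if a.gpu_capable)
print(cpu_algorithms, gpu_algorithms)  # ('_FakeCpuAlg',) ('_FakeGpuAlg',)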
28 changes: 17 additions & 11 deletions pitch_detectors/algorithms/base.py
@@ -6,7 +6,7 @@


class PitchDetector:
use_gpu = False
gpu_capable = False

def __init__(self, a: np.ndarray, fs: int):
self.a = a
@@ -24,26 +24,32 @@ def name(cls) -> str:


class UsesGPU:
use_gpu = True
gpu_capable = True
memory_limit_initialized = False

def __init__(self) -> None:
if (
os.environ.get('PITCH_DETECTORS_GPU') == 'true' and
not self.gpu_available()
):
def __init__(self, gpu: bool | None = None) -> None:
self.gpu = gpu or os.environ.get('PITCH_DETECTORS_GPU') == 'true'
if self.gpu and not self.gpu_available():
raise ConnectionError('gpu must be available')
if not self.gpu:
self.disable_gpu()
if self.gpu_available():
raise ConnectionError('gpu must not be available')

def gpu_available(self) -> bool:
return False

def disable_gpu(self) -> None:
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'


class TensorflowGPU(UsesGPU):

def __init__(self) -> None:
def __init__(self, gpu: bool | None = None) -> None:

import tensorflow as tf
self.tf = tf
super().__init__()
super().__init__(gpu)
if self.gpu_available() and os.environ.get('PITCH_DETECTORS_GPU_MEMORY_LIMIT') == 'true':
self.set_memory_limit()

@@ -65,10 +71,10 @@ def gpu_available(self) -> bool:

class TorchGPU(UsesGPU):

def __init__(self) -> None:
def __init__(self, gpu: bool | None = None) -> None:
import torch
self.torch = torch
super().__init__()
super().__init__(gpu)
if self.gpu_available() and os.environ.get('PITCH_DETECTORS_GPU_MEMORY_LIMIT') == 'true':
self.set_memory_limit()

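The behavioural core of the new constructor is how the explicit argument and the environment variable combine; a small sketch (resolve_gpu is a hypothetical helper mirroring the assignment in UsesGPU.__init__, not part of base.py):

import os

def resolve_gpu(gpu: bool | None) -> bool:
    # Mirrors `gpu or os.environ.get('PITCH_DETECTORS_GPU') == 'true'`:
    # an explicit True always enables the GPU path, while both None and False
    # fall back to the environment variable.
    return gpu or os.environ.get('PITCH_DETECTORS_GPU') == 'true'

os.environ['PITCH_DETECTORS_GPU'] = 'true'
assert resolve_gpu(True) is True
assert resolve_gpu(None) is True
assert resolve_gpu(False) is True   # note: False does not override the env var

os.environ['PITCH_DETECTORS_GPU'] = 'false'
assert resolve_gpu(False) is False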
10 changes: 8 additions & 2 deletions pitch_detectors/algorithms/crepe.py
@@ -7,8 +7,14 @@
class Crepe(TensorflowGPU, PitchDetector):
"""https://github.com/marl/crepe"""

def __init__(self, a: np.ndarray, fs: int, confidence_threshold: float = 0.8):
TensorflowGPU.__init__(self)
def __init__(
self,
a: np.ndarray,
fs: int,
confidence_threshold: float = 0.8,
gpu: bool | None = None,
):
TensorflowGPU.__init__(self, gpu)
PitchDetector.__init__(self, a, fs)
import crepe

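Hypothetical usage of the new keyword (the synthetic signal is only for illustration; the signature matches the diff above):

import numpy as np
from pitch_detectors.algorithms import Crepe

fs = 16000
a = np.sin(2 * np.pi * 440 * np.arange(fs) / fs).astype(np.float32)  # 1 s of a 440 Hz tone
alg = Crepe(a, fs, confidence_threshold=0.8, gpu=False)  # force the CPU path
print(alg.f0.shape, alg.t.shape)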
9 changes: 7 additions & 2 deletions pitch_detectors/algorithms/ensemble.py
@@ -4,13 +4,15 @@
import numpy as np

from pitch_detectors.algorithms.base import PitchDetector
from pitch_detectors.algorithms.base import TensorflowGPU
from pitch_detectors.algorithms.base import TorchGPU
from pitch_detectors.schemas import F0

PDT: TypeAlias = type[PitchDetector]
AlgoDict: TypeAlias = dict[PDT, PitchDetector] | dict[PDT, F0] | dict


class Ensemble(PitchDetector):
class Ensemble(TensorflowGPU, TorchGPU, PitchDetector):
"""https://github.com/tandav/pitch-detectors/blob/master/pitch_detectors/algorithms/ensemble.py"""

def __init__(
@@ -24,8 +26,11 @@ def __init__(
algorithms_kwargs: dict[PDT, dict[str, Any]] | None = None,
algorithms_cache: dict[PDT, F0] | None = None,
# algorithm_weights: dict[PDT, float] = {},
gpu: bool | None = None,
):
super().__init__(a, fs)
TensorflowGPU.__init__(self, gpu)
TorchGPU.__init__(self, gpu)
PitchDetector.__init__(self, a, fs)

if algorithms_cache is not None:
if (algorithms is not None or algorithms_kwargs is not None):
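Because the class now mixes in both GPU helpers, the constructor initialises each base explicitly instead of relying on a single `super().__init__` chain; a stripped-down sketch of that pattern with stub classes (not the real implementations):

class TensorflowGPU:
    def __init__(self, gpu: bool | None = None) -> None:
        self.gpu = bool(gpu)

class TorchGPU:
    def __init__(self, gpu: bool | None = None) -> None:
        self.gpu = bool(gpu)

class PitchDetector:
    def __init__(self, a, fs) -> None:
        self.a, self.fs = a, fs

class Ensemble(TensorflowGPU, TorchGPU, PitchDetector):
    def __init__(self, a, fs, gpu: bool | None = None) -> None:
        TensorflowGPU.__init__(self, gpu)    # set up the TensorFlow side
        TorchGPU.__init__(self, gpu)         # set up the Torch side
        PitchDetector.__init__(self, a, fs)  # then the shared base

Ensemble([0.0], 16000, gpu=False)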
7 changes: 6 additions & 1 deletion pitch_detectors/algorithms/penn.py
@@ -18,11 +18,12 @@ def __init__(
hz_max: float = config.HZ_MAX,
periodicity_threshold: float = 0.1,
checkpoint: str | None = None,
gpu: bool | None = None,
):
import torch
from penn.core import from_audio

TorchGPU.__init__(self)
TorchGPU.__init__(self, gpu)
PitchDetector.__init__(self, a, fs)

if checkpoint is None:
@@ -34,7 +35,11 @@
fmin=hz_min,
fmax=hz_max,
checkpoint=checkpoint,
gpu=0 if self.gpu else None,
)
if self.gpu:
f0 = f0.cpu()
periodicity = periodicity.cpu()
periodicity = periodicity.numpy().ravel()
f0 = f0.numpy().ravel()
f0[periodicity < periodicity_threshold] = np.nan
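The added `.cpu()` calls matter because NumPy conversion is only defined for host tensors; a toy demonstration (the tensor is a stand-in for penn's outputs):

import torch

t = torch.zeros(4)
if torch.cuda.is_available():
    t = t.cuda()
    # calling t.numpy() here would raise: CUDA tensors must be moved to host memory first
    t = t.cpu()
print(t.numpy())  # works once the tensor lives on the CPU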
9 changes: 6 additions & 3 deletions pitch_detectors/algorithms/spice.py
@@ -16,15 +16,18 @@ def __init__(
confidence_threshold: float = 0.8,
expected_sample_rate: int = 16000,
spice_model_path: str | None = None,
gpu: bool | None = None,
):

import resampy
import tensorflow as tf
import tensorflow_hub as hub

a = resampy.resample(a, fs, expected_sample_rate)
TensorflowGPU.__init__(self)
TensorflowGPU.__init__(self, gpu)
PitchDetector.__init__(self, a, fs)

import tensorflow as tf
import tensorflow_hub as hub

if spice_model_path is None:
spice_model_path = os.environ.get('PITCH_DETECTORS_SPICE_MODEL_PATH', '/spice_model')

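Deferring the TensorFlow import until after the GPU mixin init is presumably so that CUDA_VISIBLE_DEVICES (set by `disable_gpu`) is already in place before TensorFlow touches the GPU; a minimal illustration of that ordering:

import os

os.environ['CUDA_VISIBLE_DEVICES'] = '-1'      # hide GPUs before TensorFlow initialises CUDA
import tensorflow as tf                        # noqa: E402

print(tf.config.list_physical_devices('GPU'))  # expected: []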
10 changes: 3 additions & 7 deletions pitch_detectors/algorithms/torchcrepe.py
@@ -16,26 +16,22 @@ def __init__(
hz_max: float = config.HZ_MAX,
confidence_threshold: float = 0.8,
batch_size: int = 2048,
device: str | None = None,
gpu: bool | None = None,
):
import torch
import torchcrepe

TorchGPU.__init__(self)
TorchGPU.__init__(self, gpu)
PitchDetector.__init__(self, a, fs)

if device is None:
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
torch.device(device)

f0, confidence = torchcrepe.predict(
torch.from_numpy(a[np.newaxis, ...]),
fs,
hop_length=int(fs / 100), # 10 ms
fmin=hz_min,
fmax=hz_max,
batch_size=batch_size,
device=device,
device='cuda:0' if self.gpu else 'cpu',
return_periodicity=True,
)
win_length = 3
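With the explicit `device` parameter removed, the target device now follows the resolved `gpu` flag directly; a trivial sketch of the mapping (the flag value here is an assumption):

import torch

gpu = False  # resolved by the UsesGPU base from the argument / env var
device = 'cuda:0' if gpu else 'cpu'
x = torch.zeros(3, device=device)
print(x.device)  # cpu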
2 changes: 1 addition & 1 deletion pitch_detectors/evaluation/table.py
@@ -89,7 +89,7 @@ def add_cls(kv: DictStr) -> DictStr:
cls = getattr(algorithms, kv['algorithm'])
kv['algorithm'] = f'[{cls.name()}]({cls.__doc__})'
kv['cpu'] = '✓'
kv['gpu'] = '✓' if cls.use_gpu else ''
kv['gpu'] = '✓' if cls.gpu_capable else ''
return kv

def sort_keys(kv: DictStr) -> DictStr:
30 changes: 30 additions & 0 deletions pitch_detectors/util.py
@@ -1,5 +1,6 @@
import hashlib
import math
import sys
from pathlib import Path

import numpy as np
@@ -34,3 +35,32 @@ def source_hashes() -> dict[str, str]:
h.update(p.read_bytes())
hashes[p.stem] = h.hexdigest()
return hashes


def ld_library_path() -> str:
site_packages = f'{sys.exec_prefix}/lib/python{sys.version_info.major}.{sys.version_info.minor}/site-packages'
libs = [
f'{site_packages}/nvidia/curand/lib',
f'{site_packages}/nvidia/cuda_runtime/lib',
f'{site_packages}/nvidia/cusparse/lib',
f'{site_packages}/nvidia/cudnn/lib',
f'{site_packages}/nvidia/cuda_nvrtc/lib',
f'{site_packages}/nvidia/cuda_cupti/lib',
f'{site_packages}/nvidia/nccl/lib',
f'{site_packages}/nvidia/cusolver/lib',
f'{site_packages}/nvidia/nvtx/lib',
f'{site_packages}/nvidia/cufft/lib',
f'{site_packages}/nvidia/cublas/lib',
f'{site_packages}/tensorrt',
]
return ':'.join(libs)


if __name__ == '__main__':
supported_actions = {'ld_library_path'}
if len(sys.argv) != 2: # noqa: PLR2004
raise ValueError('Pass action as argument. Supported_actions:', supported_actions)
if sys.argv[1] == 'ld_library_path':
print(ld_library_path())
else:
raise ValueError(f'Action {sys.argv[1]} not supported. Supported_actions:', supported_actions)
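One possible way to consume the helper from Python, hinted at by the Makefile comment above (the child command is an assumption; any process that needs the pip-installed CUDA libraries would do):

import os
import subprocess
import sys

ld = subprocess.check_output(
    [sys.executable, '-m', 'pitch_detectors.util', 'ld_library_path'],
    text=True,
).strip()
env = os.environ | {'LD_LIBRARY_PATH': ld}
subprocess.check_call(
    [sys.executable, '-c', 'import os; print(os.environ["LD_LIBRARY_PATH"])'],
    env=env,
)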
5 changes: 5 additions & 0 deletions pyproject.toml
@@ -19,11 +19,14 @@ dependencies = [
"resampy",
"scipy",
"tensorflow<2.12.0",
# "tf-nightly", # trying this instead tensorflow. It support tensorrt8. (regular tensorflow only supports outdated tensorrt 7 which is python3.8 only)
"tensorflow-hub",
"torch",
"torch-yin",
"torchcrepe>=0.0.18",
"penn",
"nvidia-cudnn-cu11",
"tensorrt",
]

[project.optional-dependencies]
@@ -33,6 +36,7 @@ dev = [
"pytest",
"pytest-order",
"pytest-cov",
"pytest-env",
"mir_eval",
"tqdm",
"redis",
@@ -135,6 +139,7 @@ ignore = [

[tool.ruff.per-file-ignores]
"examples/*" = ["INP001"]
"scripts/*" = ["INP001", "S101"]
"tests/*" = ["S101"]

[tool.ruff.isort]
38 changes: 38 additions & 0 deletions scripts/run_algorithm.py
@@ -0,0 +1,38 @@
import argparse
import os

import numpy as np

from pitch_detectors import algorithms
from pitch_detectors import util
from pitch_detectors.schemas import F0


def main(
audio_path: str,
algorithm: str,
) -> None:
fs, a = util.load_wav(audio_path)
if algorithm == 'ensemble':
alg = algorithms.Ensemble(a, fs, algorithms=algorithms.ALGORITHMS)
else:
alg = getattr(algorithms, os.environ['PITCH_DETECTORS_ALGORITHM'])(a, fs)

assert alg.f0.shape == alg.t.shape

if algorithm == 'ensemble':
algorithms_cache = {k: F0(alg.t, alg.f0) for k, alg in alg._algorithms.items()}
alg_from_cache = algorithms.Ensemble(a, fs, algorithms_cache=algorithms_cache)
assert np.array_equal(alg.t, alg_from_cache.t)
assert np.array_equal(alg.f0, alg_from_cache.f0, equal_nan=True)
# data = alg.dict()
# data['algorithms_cache'] = {k: _alg.dict() for k, _alg in alg._algorithms.items()}
# print(json.dumps(alg.dict(), allow_nan=True))


if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--audio-path', type=str, default=os.environ.get('PITCH_DETECTORS_AUDIO_PATH', 'data/b1a5da49d564a7341e7e1327aa3f229a.wav'))
parser.add_argument('--algorithm', type=str, default=os.environ.get('PITCH_DETECTORS_ALGORITHM'))
args = parser.parse_args()
main(**vars(args))
15 changes: 12 additions & 3 deletions tests/algorithms_test.py
@@ -1,10 +1,19 @@
import os
import subprocess
import sys

import pytest

from pitch_detectors.algorithms import ALGORITHMS


@pytest.mark.order(3)
@pytest.mark.parametrize('algorithm', ALGORITHMS)
def test_detection(algorithm, record):
alg = algorithm(record.a, record.fs)
assert alg.f0.shape == alg.t.shape
@pytest.mark.parametrize('gpu', ['false'] if os.environ.get('PITCH_DETECTORS_GPU') == 'false' else ['true', 'false'])
def test_detection(algorithm, environ, gpu, subprocess_warning):
env = environ | {
'PITCH_DETECTORS_ALGORITHM': algorithm.name(),
'PITCH_DETECTORS_GPU': gpu,
}
print(subprocess_warning)
subprocess.check_call([sys.executable, 'scripts/run_algorithm.py'], env=env)
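The test now shells out instead of running each algorithm in-process, presumably so every GPU on/off combination starts from a fresh interpreter in which the environment variables are read before any framework import; a stripped-down sketch of that pattern (the inline script stands in for scripts/run_algorithm.py):

import os
import subprocess
import sys

env = os.environ | {'PITCH_DETECTORS_GPU': 'false'}
subprocess.check_call(
    [sys.executable, '-c',
     'import os; assert os.environ["PITCH_DETECTORS_GPU"] == "false"'],
    env=env,
)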