
Commit

first round of automatic improvements
biphasic committed Feb 7, 2025
1 parent d0cb6df commit 1d52138
Showing 13 changed files with 317 additions and 775 deletions.
29 changes: 13 additions & 16 deletions docs/conf.py
@@ -1,11 +1,12 @@
import tonic

# Project information
project = "Tonic"
copyright = "2019-present, the neuromorphs of Telluride"
author = "Gregor Lenz"

master_doc = "index"

# Sphinx extensions
extensions = [
"autoapi.extension",
"myst_nb",
@@ -17,16 +18,18 @@
"sphinx_gallery.gen_gallery",
]

# Sphinx-gallery configuration
sphinx_gallery_conf = {
"examples_dirs": "gallery/", # path to your example scripts
"gallery_dirs": "auto_examples", # path to where to save gallery generated output
"examples_dirs": "gallery/",
"gallery_dirs": "auto_examples",
"backreferences_dir": None,
"matplotlib_animations": True,
"doc_module": ("tonic",),
"download_all_examples": False,
"ignore_pattern": r"utils\.py",
}

# AutoAPI configuration
autodoc_typehints = "both"
autoapi_type = "python"
autoapi_dirs = ["../tonic"]
@@ -39,23 +42,21 @@
"imported-members",
]

# Napoleon settings
# Napoleon settings for docstrings
napoleon_google_docstring = True
napoleon_numpy_docstring = True

# MyST settings
# nb_execution_mode = "off"
# MyST-NB settings
nb_execution_timeout = 300
nb_execution_show_tb = True
nb_execution_excludepatterns = ["large_datasets.ipynb"]
suppress_warnings = ["myst.header"]

# Add any paths that contain templates here, relative to this directory.
# Paths for templates and static files
templates_path = ["_templates"]
html_static_path = ["_static"]

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
# Patterns to exclude from processing
exclude_patterns = [
"auto_examples/**.ipynb",
"auto_examples/**.py",
@@ -67,14 +68,15 @@
"README.rst",
]

# -- Options for HTML output -------------------------------------------------
# HTML output options
html_theme = "sphinx_book_theme"
html_title = tonic.__version__
html_logo = "_static/tonic-logo-black-bg.png"
html_favicon = "_static/tonic_favicon.png"
html_show_sourcelink = True
html_sourcelink_suffix = ""

# HTML theme options
html_theme_options = {
"repository_url": "https://github.com/neuromorphs/tonic",
"use_repository_button": True,
@@ -84,8 +86,3 @@
"path_to_docs": "docs",
"use_fullscreen_button": True,
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
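
A note on the sphinx-gallery settings above: examples are plain Python scripts collected from docs/gallery/ and rendered into auto_examples/, and sphinx-gallery only picks up files whose module docstring begins with a reStructuredText title (utils.py is skipped via ignore_pattern). A minimal sketch of such a script; the file name plot_example.py and its contents are hypothetical and not part of this commit:

"""
Plotting a dummy event raster
=============================

sphinx-gallery renders this docstring title as the page heading and
executes the script below to capture the matplotlib figure.
"""
import matplotlib.pyplot as plt
import numpy as np

# hypothetical data: (timestamp, channel) pairs standing in for events
times = np.random.rand(200)
channels = np.random.randint(0, 16, 200)

plt.scatter(times, channels, s=2)
plt.xlabel("time [s]")
plt.ylabel("channel")
plt.show()
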
1 change: 1 addition & 0 deletions setup.py
@@ -1,3 +1,4 @@
from setuptools import setup

# Setup configuration using Python Build Reasonableness (PBR)
setup(setup_requires=["pbr"], pbr=True)
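
Because pbr=True delegates packaging to Python Build Reasonableness, the metadata a classic setup() call would carry lives in setup.cfg instead, and the version is derived from git tags. A rough sketch of what such a setup.cfg can look like; the actual file is not shown in this diff and the values below are placeholders:

[metadata]
name = tonic
summary = Event-based datasets and transformations
author = Gregor Lenz

[files]
packages =
    tonic
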
125 changes: 47 additions & 78 deletions test/test_audio_transforms.py
@@ -1,139 +1,108 @@
import numpy as np
import pytest
from tonic.audio_transforms import (AddNoise, AmplitudeScale, Bin, FixLength,
LinearButterFilterBank,
MelButterFilterBank, RobustAmplitudeScale,
SwapAxes)


def test_standardize_data_length():
from tonic.audio_transforms import FixLength
class DummyNoiseDataset:
"""Dummy dataset for generating random noise signals of varying lengths."""

def __len__(self) -> int:
return 1000

def __getitem__(self, item: int) -> tuple[np.ndarray, int]:
sig_len = np.random.randint(12000, 20000)
return np.random.random((1, sig_len)), 0


def test_standardize_data_length():
"""Test FixLength transform for both extending and truncating data."""
sdl = FixLength(100, 1)

# Data is longer
data = np.ones((1, 120))
assert sdl(data).shape == (1, 100)
data_long = np.ones((1, 120))
assert sdl(data_long).shape == (1, 100)

# Data is shorter
data = np.ones((1, 80))
assert sdl(data).shape == (1, 100)
data_short = np.ones((1, 80))
assert sdl(data_short).shape == (1, 100)


def test_bin():
import numpy as np

from tonic.audio_transforms import Bin
"""Test Bin transform for re-sampling data frequency."""
bin_transform = Bin(orig_freq=16000, new_freq=100, axis=1)

bin_transform = Bin(orig_freq=16_000, new_freq=100, axis=1)

data = np.random.random((1, 8 * 16_000))
data = np.random.random((1, 8 * 16000))
data_binned = bin_transform(data)

assert data_binned.shape == (1, 8 * 100)
assert data.sum() == pytest.approx(data_binned.sum(), 1e-7)


def test_linear_butter_filter_bank():
import numpy as np

from tonic.audio_transforms import LinearButterFilterBank

"""Test LinearButterFilterBank for filtering data."""
fb = LinearButterFilterBank(
order=2, low_freq=100, sampling_freq=16000, num_filters=16
)
data = np.random.random((1, 16_000))
data = np.random.random((1, 16000))

filter_out = fb(data)
assert filter_out.shape == (16, 16_000)
assert filter_out.shape == (16, 16000)


def test_mel_butter_filter_bank():
import numpy as np

from tonic.audio_transforms import MelButterFilterBank

"""Test MelButterFilterBank for filtering data using Mel scale."""
fb = MelButterFilterBank(order=2, low_freq=100, sampling_freq=16000, num_filters=16)
data = np.random.random((1, 16_000))
data = np.random.random((1, 16000))

filter_out = fb(data)
assert filter_out.shape == (16, 16_000)
assert filter_out.shape == (16, 16000)


def test_add_noise():
import numpy as np

class DummyNoiseDataset:
def __len__(self):
return 1000

def __getitem__(self, item):
sig_len = np.random.randint(12000, 20000, (1,)).item()
return np.random.random((1, sig_len)), 0

data = np.sin(np.arange(0, 16_000 * 0.001, 0.001))[None, ...]
print(data.shape)

from tonic.audio_transforms import AddNoise

"""Test AddNoise transform for adding noise to a signal."""
data = np.sin(np.linspace(0, 2 * np.pi, 16000))[None, ...]
noise_dataset = DummyNoiseDataset()

print(noise_dataset[0][0].shape)

add_noise = AddNoise(noise_dataset, 10, normed=True)

signal = add_noise(data)
assert signal.shape == (1, 16_000)
assert signal.shape == (1, 16000)


def test_swap_axes():
"""Tests SwapAxes transform with synthetic data."""
from tonic.audio_transforms import SwapAxes

np.random.seed(123)
sr = 16_000 # sample rate
sl = 1 # sample length
data = np.random.rand(1, sr * sl)
ax1, ax2 = 0, 1
swap_ax = SwapAxes(ax1=ax1, ax2=ax2)
"""Test SwapAxes transform for swapping axes of the data."""
data = np.random.rand(1, 16000)
swap_ax = SwapAxes(ax1=0, ax2=1)
swaped = swap_ax(data)

assert swaped.shape[0] == data.shape[1]
assert swaped.shape[1] == data.shape[0]
assert swaped.shape == (16000, 1)


def test_amplitude_scale():
"""Tests the amplitude scaling transform with synthetic data."""
from tonic.audio_transforms import AmplitudeScale

np.random.seed(123)
sr = 16_000 # sample rate
sl = 1 # sample length
data = np.random.rand(1, sr * sl)
"""Test AmplitudeScale transform for scaling amplitude of the data."""
data = np.random.rand(1, 16000)
max_amps = np.random.rand(10)

for amp in max_amps:
AmpScale = AmplitudeScale(max_amplitude=amp)
transformed = AmpScale(data)
assert data.shape[1] == transformed.shape[1]
amp_scale = AmplitudeScale(max_amplitude=amp)
transformed = amp_scale(data)
assert transformed.max() == amp


def test_robust_amplitude_scale():
"""Tests robust amplitude scaling transform with a synthetic data."""
from tonic.audio_transforms import RobustAmplitudeScale

np.random.seed(123)
sr = 16_000 # sample rate
sl = 1 # sample length
data = np.random.rand(1, sr * sl)
"""Test RobustAmplitudeScale transform for scaling amplitude considering outliers."""
data = np.random.rand(1, 16000)
max_amps = np.random.rand(10)
percent = 0.01

for amp in max_amps:
RobustAmpScale = RobustAmplitudeScale(
robust_amp_scale = RobustAmplitudeScale(
max_robust_amplitude=amp, outlier_percent=percent
)
transformed = RobustAmpScale(data)
transformed = robust_amp_scale(data)
sorted_transformed = np.sort(np.abs(transformed.ravel()))
non_outlier = sorted_transformed[
0 : int(np.floor(len(sorted_transformed)) * (1 - percent))
]
print(non_outlier)
assert data.shape[1] == transformed.shape[1]
non_outlier = sorted_transformed[: int(len(sorted_transformed) * (1 - percent))]

assert np.all(non_outlier <= amp)
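
Read together, the rewritten tests also document how these transforms are meant to be used: each one is constructed with its parameters and then called on a (channels, samples) numpy array. A short sketch chaining a few of them on synthetic data; the pipeline itself is illustrative, and the second FixLength argument is assumed to be the axis, following the call in test_standardize_data_length:

import numpy as np

from tonic.audio_transforms import AmplitudeScale, Bin, FixLength

# one second of synthetic mono audio at 16 kHz, shape (channels, samples)
signal = np.random.rand(1, 16000)

# pad or truncate to exactly 16000 samples along axis 1
signal = FixLength(16000, 1)(signal)

# rescale the signal so its maximum amplitude is 0.5
signal = AmplitudeScale(max_amplitude=0.5)(signal)

# bin the 16 kHz signal down to 100 bins per second
binned = Bin(orig_freq=16000, new_freq=100, axis=1)(signal)

print(binned.shape)  # (1, 100), mirroring the shape checked in test_bin
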
66 changes: 0 additions & 66 deletions test/test_aug_caching.py

This file was deleted.

