Merge pull request #80 from neuro-ml/develop
Develop
vovaf709 authored Oct 20, 2023
2 parents ab8f6bd + d95e719 commit 804d2bc
Showing 16 changed files with 215 additions and 59 deletions.
6 changes: 3 additions & 3 deletions .github/workflows/release.yml
@@ -12,15 +12,15 @@ jobs:
     runs-on: ubuntu-latest

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Set up Python 3.9
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
         with:
           python-version: 3.9

       - id: get_version
         name: Get the release version
-        uses: battila7/get-version-action@v2
+        uses: Simply007/get-version-action@v2

       - name: Check the version and build the package
         run: |
6 changes: 3 additions & 3 deletions .github/workflows/tests.yml
@@ -13,9 +13,9 @@ jobs:
         python-version: [ '3.6', '3.7', '3.8', '3.9', '3.10', '3.11' ]

     steps:
-      - uses: actions/checkout@v2
+      - uses: actions/checkout@v3
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python-version }}

@@ -52,7 +52,7 @@ jobs:
           sed -i -e "s|$(echo $MODULE_PARENT/ | tr "/" .)||g" reports/coverage-${{ matrix.python-version }}.xml
       - name: Upload artifacts
-        uses: actions/upload-artifact@v2
+        uses: actions/upload-artifact@v3
         with:
           name: reports-${{ matrix.python-version }}
           path: reports/*-${{ matrix.python-version }}.xml
1 change: 1 addition & 0 deletions .gitignore
@@ -5,3 +5,4 @@ docs/_build/
 .ipynb_checkpoints/
 .pytest_cache/
 *.egg-info/
+.asv/
21 changes: 21 additions & 0 deletions asv.conf.json
@@ -0,0 +1,21 @@
+{
+    "version": 1,
+    "project": "deep_pipe",
+    "project_url": "https://github.com/neuro-ml/deep_pipe",
+    "repo": ".",
+    "branches": ["master", "develop"],
+    "dvcs": "git",
+    "environment_type": "conda",
+    "show_commit_url": "https://github.com/neuro-ml/deep_pipe/commit",
+    "pythons": ["3.10"],
+    "conda_channels": ["defaults", "conda-forge"],
+    "matrix": {
+        "req": {
+            "pip+imops": ["0.8.3"]
+        }
+    },
+    "env_dir": ".asv/env",
+    "results_dir": ".asv/results",
+    "html_dir": ".asv/html",
+    "build_cache_size": 2
+}
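
With this configuration, `asv run` builds a dedicated conda environment with the pinned imops 0.8.3 and executes the suites from asv's default benchmarks/ directory against the master and develop branches; `asv publish` and `asv preview` then render the results from .asv/results into the HTML report in .asv/html.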
Empty file added benchmarks/__init__.py
60 changes: 60 additions & 0 deletions benchmarks/benchmark_patches_grid.py
@@ -0,0 +1,60 @@
+import numpy as np
+import torch
+from torch import nn
+
+from dpipe.predict import patches_grid
+from dpipe.torch.model import inference_step
+
+
+class IdentityWithParams(nn.Conv3d):
+    """Needed for determining device"""
+    def forward(self, x):
+        return x
+
+
+class TimeSuite:
+    params = (('float16', 'float32', 'float64'),)
+    param_names = ('dtype',)
+    timeout = 300
+
+    def setup(self, dtype):
+        self.inp = np.random.randn(512, 512, 512).astype(dtype)
+        self.predict = patches_grid(200, 100, axis=-1)(lambda x: x)
+
+    def time_patches_grid(self, dtype):
+        self.predict(self.inp)
+
+    def peakmem_patches_grid(self, dtype):
+        self.predict(self.inp)
+
+
+class TimeTorchSuite:
+    params = ((False, True),)
+    param_names = ('amp',)
+
+    def setup(self, amp):
+        self.inp = np.random.randn(512, 512, 512).astype('float32')
+
+        try:
+            inference_step(self.inp[None, None], architecture=IdentityWithParams(1, 1, kernel_size=3), in_dtype=torch.float32)
+            self.predict = patches_grid(200, 100, axis=-1)(
+                lambda x: inference_step(
+                    x[None, None],
+                    architecture=IdentityWithParams(1, 1, kernel_size=3),
+                    amp=amp, out_dtype=torch.float32,
+                )[0][0]
+            )
+        except TypeError:
+            self.predict = patches_grid(200, 100, axis=-1)(
+                lambda x: inference_step(
+                    x[None, None],
+                    architecture=IdentityWithParams(1, 1, kernel_size=3),
+                    amp=amp,
+                ).astype('float32', copy=False)[0][0]
+            )
+
+    def time_patches_grid(self, amp):
+        self.predict(self.inp)
+
+    def peakmem_patches_grid(self, amp):
+        self.predict(self.inp)
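
asv discovers these classes by convention: methods prefixed with time_ are wall-clock benchmarks, peakmem_ methods measure peak memory, and params/param_names define the parameter grid. The try/except in TimeTorchSuite.setup probes whether the installed dpipe already supports the new in_dtype/out_dtype arguments of inference_step (added in dpipe/torch/model.py below); since asv runs the suite against historical commits, it must also work with the old signature, falling back to a NumPy-side cast of the fp16 output. A minimal sketch of the same feature-detection pattern, with a hypothetical compute function standing in for inference_step:

    def call_compat(compute, x):
        # probe the new keyword; old versions raise TypeError on it
        try:
            return compute(x, out_dtype='float32')  # new-style call
        except TypeError:
            # old signature: cast the result back to fp32 ourselves
            return compute(x).astype('float32', copy=False)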
2 changes: 1 addition & 1 deletion dpipe/__version__.py
@@ -1 +1 @@
-__version__ = '0.2.7'
+__version__ = '0.2.8'
21 changes: 17 additions & 4 deletions dpipe/batch_iter/pipeline.py
@@ -90,10 +90,16 @@ def __init__(self, source: Iterable, *transformers: Union[Callable, Transform],
         if batches_per_epoch <= 0:
             raise ValueError(f'Expected a positive amount of batches per epoch, but got {batches_per_epoch}')

+        if not isinstance(combiner, Transform):
+            combiner = Threads(partial(combiner, **kwargs))
+        elif kwargs:
+            raise ValueError('The `combiner` is already wrapped in a `Transform`, passing `kwargs` has no effect')
+
         self.batches_per_epoch = batches_per_epoch
-        self.pipeline = wrap_pipeline(
+        self.pipeline = None
+        self._pipeline_factory = lambda: wrap_pipeline(
             source, *transformers,
-            self._make_stacker(batch_size), Threads(partial(combiner, **kwargs)),
+            self._make_stacker(batch_size), combiner,
             buffer_size=buffer_size
         )

@@ -146,26 +152,33 @@ def closing_callback(self):
         class ClosingCallback(Callback):
             def teardown(self, trainer, pl_module, stage):
                 this.close()

+            def on_exception(self, trainer, pl_module, exception):
+                this.close()
+
         this = self
         return ClosingCallback()

     def __iter__(self):
         return self()

     def __call__(self):
+        if self.pipeline is None:
+            self.pipeline = self._pipeline_factory()
         if not self.pipeline.pipeline_active:
             self.__enter__()
         return islice(self.pipeline, self.batches_per_epoch)

     def __enter__(self):
+        if self.pipeline is None:
+            self.pipeline = self._pipeline_factory()
         self.pipeline.__enter__()
         return self

     def __exit__(self, exc_type, exc_val, exc_tb):
-        return self.pipeline.__exit__(exc_type, exc_val, exc_tb)
+        if self.pipeline is not None:
+            self.pipeline, pipeline = None, self.pipeline
+            return pipeline.__exit__(exc_type, exc_val, exc_tb)

     def __del__(self):
         self.close()
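
Taken together, these changes make the pipeline lazy and reusable: it is built on first use rather than in __init__, __exit__ drops the reference before delegating so the object can later be re-entered with a freshly built pipeline, and the new on_exception hook also closes it when a Lightning run dies mid-epoch instead of only at teardown. A self-contained sketch of the lazy, re-enterable pattern (hypothetical names, not dpipe's API):

    class LazyResource:
        def __init__(self, factory):
            self._factory = factory      # cheap to store, expensive to call
            self.resource = None

        def __enter__(self):
            if self.resource is None:    # build lazily, on first use
                self.resource = self._factory()
            self.resource.__enter__()
            return self

        def __exit__(self, exc_type, exc_val, exc_tb):
            if self.resource is not None:
                # clear the attribute before delegating, so the object stays
                # reusable even if the inner __exit__ raises
                self.resource, resource = None, self.resource
                return resource.__exit__(exc_type, exc_val, exc_tb)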
16 changes: 11 additions & 5 deletions dpipe/im/grid.py
@@ -5,6 +5,7 @@
 from typing import Iterable, Type, Tuple, Callable

 import numpy as np
+from imops.numeric import pointwise_add

 from .shape_ops import crop_to_box
 from .axes import fill_by_indices, AxesLike, resolve_deprecation, axis_from_dim, broadcast_to_axis
@@ -87,15 +88,20 @@ def build(self) -> np.ndarray:


 class Average(PatchCombiner):
-    def __init__(self, shape: Tuple[int, ...], dtype: np.dtype):
+    def __init__(self, shape: Tuple[int, ...], dtype: np.dtype, **imops_kwargs: dict):
         super().__init__(shape, dtype)
         self._result = np.zeros(shape, dtype)
         self._counts = np.zeros(shape, int)
+        self._imops_kwargs = imops_kwargs

     def update(self, box: Box, patch: np.ndarray):
         slc = build_slices(*box)
-        self._result[slc] += patch
-        self._counts[slc] += 1
+
+        result_slc = self._result[slc]
+        pointwise_add(result_slc, patch.astype(result_slc.dtype, copy=False), result_slc, **self._imops_kwargs)
+
+        counts_slc = self._counts[slc]
+        pointwise_add(counts_slc, 1, counts_slc, **self._imops_kwargs)

     def build(self):
         np.true_divide(self._result, self._counts, out=self._result, where=self._counts > 0)
@@ -104,7 +110,7 @@ def build(self):

 def combine(patches: Iterable[np.ndarray], output_shape: AxesLike, stride: AxesLike,
             axis: AxesLike = None, valid: bool = False,
-            combiner: Type[PatchCombiner] = Average, get_boxes: Callable = get_boxes) -> np.ndarray:
+            combiner: Type[PatchCombiner] = Average, get_boxes: Callable = get_boxes, **combiner_kwargs) -> np.ndarray:
     """
     Build a tensor of shape ``output_shape`` from ``patches`` obtained in a convolution-like approach
     with corresponding parameters.
@@ -130,7 +136,7 @@ def combine(patches: Iterable[np.ndarray], output_shape: AxesLike, stride: AxesLike,
     if not np.issubdtype(dtype, np.floating):
         dtype = float

-    combiner = combiner(output_shape, dtype)
+    combiner = combiner(output_shape, dtype, **combiner_kwargs)
     for box, patch in zip_equal(get_boxes(output_shape, patch_size, stride, valid=valid), patches):
         combiner.update(box, patch)
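
pointwise_add writes into an explicitly passed output array, and since a basic NumPy slice is a view, updating result_slc in place updates self._result itself; unlike `self._result[slc] += patch`, the imops call can also run multithreaded when a kwarg such as num_threads is forwarded through **imops_kwargs (an assumption based on imops' usual parameters). A stand-alone sketch of the call pattern now used in Average.update:

    import numpy as np
    from imops.numeric import pointwise_add

    result = np.zeros((8, 8), dtype='float32')
    patch = np.ones((4, 4), dtype='float32')

    view = result[2:6, 2:6]           # a basic slice is a view, not a copy
    pointwise_add(view, patch, view)  # in-place: view += patch, via imops
    pointwise_add(view, 1, view)      # scalar addend, as in the _counts update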
7 changes: 5 additions & 2 deletions dpipe/layers/fpn.py
@@ -112,11 +112,14 @@ def forward(self, x):
             x = down(x)

         x = self.bridge(x)
-        results.append(x)
+
+        if not self.last_level:
+            results.append(x)

         for layer, up, left in zip_equal(reversed(self.up_path), self.upsample, reversed(levels)):
             x = layer(self.merge(left, up(x)))
-            results.append(x)
+            if not self.last_level:
+                results.append(x)

         if self.last_level:
             return x
6 changes: 3 additions & 3 deletions dpipe/predict/shape.py
@@ -81,7 +81,7 @@ def wrapper(x, *args, **kwargs):

 def patches_grid(patch_size: AxesLike, stride: AxesLike, axis: AxesLike = None,
                  padding_values: Union[AxesParams, Callable] = 0, ratio: AxesParams = 0.5,
-                 combiner: Type[PatchCombiner] = Average, get_boxes: Callable = get_boxes):
+                 combiner: Type[PatchCombiner] = Average, get_boxes: Callable = get_boxes, **imops_kwargs):
     """
     Divide an incoming array into patches of corresponding ``patch_size`` and ``stride`` and then combine
     the predicted patches by aggregating the overlapping regions using the ``combiner`` - Average by default.
@@ -106,7 +106,7 @@ def wrapper(x, *args, **kwargs):
            if valid:
                padded_shape = np.maximum(shape, local_size)
                new_shape = padded_shape + (local_stride - padded_shape + local_size) % local_stride
-                x = pad_to_shape(x, new_shape, input_axis, padding_values, ratio)
+                x = pad_to_shape(x, new_shape, input_axis, padding_values, ratio, **imops_kwargs)
            elif ((shape - local_size) < 0).any() or ((local_stride - shape + local_size) % local_stride).any():
                raise ValueError('Input cannot be patched without remainder.')
@@ -118,7 +118,7 @@ def wrapper(x, *args, **kwargs):
            )
            prediction = combine(
                patches, extract(x.shape, input_axis), local_stride, axis,
-                combiner=combiner, get_boxes=get_boxes
+                combiner=combiner, get_boxes=get_boxes,
            )
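
After this change, extra keyword arguments to patches_grid flow into the imops-backed padding step. A usage sketch, assuming num_threads is among the kwargs imops accepts; the identity predictor merely illustrates the divide-and-recombine round trip:

    import numpy as np
    from dpipe.predict import patches_grid

    # patches of 200 voxels with stride 100 along the last axis; each patch is
    # 'predicted' by the identity, then the overlaps are averaged back together
    predict = patches_grid(200, 100, axis=-1, num_threads=4)(lambda patch: patch)

    x = np.random.randn(64, 64, 512).astype('float32')
    assert np.allclose(predict(x), x)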
46 changes: 21 additions & 25 deletions dpipe/torch/model.py
@@ -10,6 +10,7 @@
 from .utils import *

 __all__ = 'optimizer_step', 'train_step', 'inference_step', 'multi_inference_step'
+_inference_ctx_manager = torch.inference_mode if hasattr(torch, 'inference_mode') else torch.no_grad
@@ -50,8 +51,8 @@ def optimizer_step(
     if not accumulate:
         if clip_grad is not None:
             scaler.unscale_(optimizer)
-            assert not isinstance(clip_grad, bool), "Use of boolean clip_grad value (e.g. False) can lead to " \
-                                                    "unexpected behaviour. "
+            assert not isinstance(clip_grad, bool), 'Use of boolean clip_grad value (e.g. False) can lead to ' \
+                                                    'unexpected behaviour. '
             clip_grad_norm_(get_parameters(optimizer), clip_grad)

         scaler.step(optimizer)
@@ -160,32 +161,31 @@ def train_step(

 def inference_step(*inputs: np.ndarray, architecture: Module, activation: Callable = identity,
-                   amp: bool = False) -> np.ndarray:
+                   amp: bool = False, in_dtype: torch.dtype = None, out_dtype: torch.dtype = None) -> np.ndarray:
     """
     Returns the prediction for the given ``inputs``.

     Notes
     -----
     Note that both input and output are **not** of type ``torch.Tensor`` - the conversion
     to and from ``torch.Tensor`` is made inside this function.
-    Inputs will be converted to fp16 if ``amp`` is True.
+    Inputs will be converted to fp16 if ``amp`` is True and ``in_dtype`` is not specified.
     """
     architecture.eval()

-    # NumPy >= 1.24 warns about underflow during cast which is really insignificant
-    if amp:
-        with np.errstate(under='ignore'):
-            inputs = tuple(np.asarray(x, dtype=np.float16) for x in inputs)
-
-    with torch.no_grad():
-        with torch.cuda.amp.autocast(amp or torch.is_autocast_enabled()):
-            return to_np(activation(architecture(*sequence_to_var(*inputs, device=architecture))))
+    amp = amp or torch.is_autocast_enabled()
+    in_dtype = in_dtype or (torch.float16 if amp else None)
+    with _inference_ctx_manager():
+        with torch.cuda.amp.autocast(amp):
+            return to_np(
+                activation(architecture(*sequence_to_var(*inputs, device=architecture, dtype=in_dtype))),
+                dtype=out_dtype,
+            )


 @collect
 def multi_inference_step(*inputs: np.ndarray, architecture: Module,
                          activations: Union[Callable, Sequence[Union[Callable, None]]] = identity,
-                         amp: bool = False) -> np.ndarray:
+                         amp: bool = False, in_dtype: torch.dtype = None, out_dtype: torch.dtype = None) -> np.ndarray:
     """
     Returns the prediction for the given ``inputs``.
@@ -195,25 +195,21 @@ def multi_inference_step(*inputs: np.ndarray, architecture: Module,
     -----
     Note that both input and output are **not** of type ``torch.Tensor`` - the conversion
     to and from ``torch.Tensor`` is made inside this function.
-    Inputs will be converted to fp16 if ``amp`` is True.
+    Inputs will be converted to fp16 if ``amp`` is True and ``in_dtype`` is not specified.
     """
     architecture.eval()

-    # NumPy >= 1.24 warns about underflow during cast which is really insignificant
-    if amp:
-        with np.errstate(under='ignore'):
-            inputs = tuple(np.asarray(x, dtype=np.float16) for x in inputs)
-
-    with torch.no_grad():
-        with torch.cuda.amp.autocast(amp or torch.is_autocast_enabled()):
-            results = architecture(*sequence_to_var(*inputs, device=architecture))
+    amp = amp or torch.is_autocast_enabled()
+    in_dtype = in_dtype or (torch.float16 if amp else None)
+    with _inference_ctx_manager():
+        with torch.cuda.amp.autocast(amp):
+            results = architecture(*sequence_to_var(*inputs, device=architecture, dtype=in_dtype))

     if callable(activations):
         activations = [activations] * len(results)

     for activation, result in zip_equal(activations, results):
         if activation is not None:
             result = activation(result)
-        yield to_np(result)
+        yield to_np(result, dtype=out_dtype)
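
With _inference_ctx_manager, inference now runs under torch.inference_mode where available, and the new in_dtype/out_dtype arguments let callers control the casts on both ends instead of the unconditional NumPy fp16 conversion that was removed. A small usage sketch; the amp line is commented out so the snippet also runs on CPU, where fp16 convolutions would fail outside CUDA autocast:

    import numpy as np
    import torch
    from torch import nn
    from dpipe.torch.model import inference_step

    net = nn.Conv3d(1, 1, kernel_size=3, padding=1)
    x = np.random.randn(1, 1, 8, 8, 8).astype('float32')

    # plain fp32 inference, no gradients recorded
    y = inference_step(x, architecture=net)

    # on CUDA: inputs default to fp16 under amp unless in_dtype overrides it,
    # and out_dtype casts the result back to fp32 on the way out
    # y = inference_step(x, architecture=net.cuda(), amp=True, out_dtype=torch.float32)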
(The remaining 4 of the 16 changed files are not shown.)
