Add method to move all tracking data in time
This makes it possible to prepend an image to a time lapse.
rutgerkok committed Oct 12, 2023
1 parent 5c1d544 commit ba27597
Showing 16 changed files with 241 additions and 40 deletions.
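
Usage sketch (not part of the commit diff; the Position import path is assumed from the repository layout): prepending, say, 8 images means every tracked object has to start 8 time points later, which is exactly what the new Experiment.move_in_time provides.

from organoid_tracker.core.experiment import Experiment
from organoid_tracker.core.position import Position  # import path assumed

experiment = Experiment()
# A link between two consecutive time points, using the same API as the tests below.
experiment.links.add_link(Position(0, 1, 2, time_point_number=0),
                          Position(3, 4, 5, time_point_number=1))

# Make room for 8 prepended images (time points 0..7): shift everything 8 time points forward.
experiment.move_in_time(8)

print(experiment.links.contains_link(Position(0, 1, 2, time_point_number=8),
                                     Position(3, 4, 5, time_point_number=9)))  # True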
9 changes: 8 additions & 1 deletion organoid_tracker/core/beacon_collection.py
@@ -174,4 +174,11 @@ def find_single_beacon(self) -> Optional[Position]:
beacons = next(iter(self._beacons.values()))
if len(beacons) == 1:
return beacons[0]
return None
return None

def move_in_time(self, time_point_delta: int):
"""Moves all data with the given time point delta."""
new_beacons_dict = dict()
for time_point, values in self._beacons.items():
new_beacons_dict[time_point + time_point_delta] = values
self._beacons = new_beacons_dict
18 changes: 17 additions & 1 deletion organoid_tracker/core/connections.py
@@ -110,7 +110,6 @@ def has_full_neighbors(self, position: Position) -> bool:
True if we think all neighbors have been annotated for that position. This is the case
if the graph of its neighbors contains a cycle.
"""
import networkx
if not self._graph.has_node(position):
return False
neighbors = self._graph.subgraph(self._graph.neighbors(position))
@@ -128,6 +127,15 @@ def to_networkx_graph(self) -> Graph:
another data storage method in the future."""
return self._graph.copy()

def _move_in_time(self, time_point_delta: int):
"""Must only be called from the Connections class, otherwise the time index is out of sync."""
new_graph = networkx.Graph()
for position_a, position_b in self._graph.edges:
new_graph.add_edge(position_a.with_time_point_number(position_a.time_point_number() + time_point_delta),
position_b.with_time_point_number(position_b.time_point_number() + time_point_delta))
self._graph.clear() # Helps garbage collector
self._graph = new_graph


class Connections:
"""Holds the connections of an experiment."""
@@ -329,3 +337,11 @@ def copy(self) -> "Connections":
for time_point, connections in self._by_time_point.items():
copy._by_time_point[time_point] = connections.copy()
return copy

def move_in_time(self, time_point_delta: int):
"""Moves all data with the given time point delta."""
new_connections_dict = dict()
for time_point_number, values in self._by_time_point.items():
values._move_in_time(time_point_delta)
new_connections_dict[time_point_number + time_point_delta] = values
self._by_time_point = new_connections_dict
54 changes: 17 additions & 37 deletions organoid_tracker/core/experiment.py
@@ -119,32 +119,6 @@ def move_position(self, position_old: Position, position_new: Position, update_s
time_point = position_new.time_point()
self.splines.update_for_changed_positions(time_point, self._positions.of_time_point(time_point))

def _scale_to_resolution(self, new_resolution: ImageResolution):
"""Scales this experiment so that it has a different resolution."""
try:
old_resolution = self._images.resolution()
except UserError:
return # No resolution was set, do nothing
else:
x_factor = new_resolution.pixel_size_x_um / old_resolution.pixel_size_x_um
z_factor = new_resolution.pixel_size_z_um / old_resolution.pixel_size_z_um
t_factor = 1 if (new_resolution.time_point_interval_m == 0 or old_resolution.time_point_interval_m == 0) \
else new_resolution.time_point_interval_m / old_resolution.time_point_interval_m
if t_factor < 0.9 or t_factor > 1.1:
# We cannot scale in time, unfortunately. Links must go from one time point to the next time point.
# So we throw an error if the scale changes too much
raise ValueError(f"Cannot change time scale; existing scale {old_resolution.time_point_interval_m},"
f" new scale {new_resolution.time_point_interval_m}")
if abs(x_factor - 1) < 0.0001 and abs(z_factor - 1) < 0.0001:
return # Nothing to scale
scale_factor = Position(x_factor, x_factor, z_factor)
print(f"Scaling to {scale_factor}")
for time_point in self.time_points():
positions = list(self.positions.of_time_point(time_point))
for position in positions:
self.move_position(position, position * scale_factor)
self.images.set_resolution(new_resolution)

def get_time_point(self, time_point_number: int) -> TimePoint:
"""Gets the time point with the given number. Throws ValueError if no such time point exists. This method is
essentially an alternative for `TimePoint(time_point_number)`, but with added bound checks."""
@@ -341,17 +315,8 @@ def global_data(self, global_data: GlobalData):
self._global_data = global_data

def merge(self, other: "Experiment"):
"""Merges the position, linking and connections data of two experiments. Images, resolution and scores are not
yet merged."""

# Scale the other experiment first
try:
resolution = self.images.resolution()
except UserError:
pass
else:
other._scale_to_resolution(resolution)

"""Merges the position, linking, connections and global data of the other experiment into this one. Images and
their resolution/timings are not merged, so do that yourself in an appropriate way."""
self.positions.add_positions(other.positions)
self.beacons.add_beacons(other.beacons)
self.links.add_links(other.links)
@@ -360,6 +325,21 @@ def merge(self, other: "Experiment"):
self.connections.add_connections(other.connections)
self.global_data.merge_data(other.global_data)

def move_in_time(self, time_point_delta: int):
"""Moves all data with the given time offset.
The only data that is not moved is the images themselves. So images.get_image_stack(TimePoint(2)) will still
report the same image array as before. However, the timings and offsets of the images are moved.
"""
self.positions.move_in_time(time_point_delta)
self.beacons.move_in_time(time_point_delta)
self.links.move_in_time(time_point_delta)
self.position_data.move_in_time(time_point_delta)
self.link_data.move_in_time(time_point_delta)
self.connections.move_in_time(time_point_delta)
self.images.move_in_time(time_point_delta)
self.splines.move_in_time(time_point_delta)

def copy_selected(self, *, images: bool = False, positions: bool = False, position_data: bool = False,
links: bool = False, link_data: bool = False, global_data: bool = False,
connections: bool = False, name: bool = False) -> "Experiment":
16 changes: 16 additions & 0 deletions organoid_tracker/core/images.py
@@ -145,6 +145,14 @@ def copy(self) -> "ImageOffsets":
copy._offset = self._offset.copy() # Positions are immutable, so no need for a deep copy here
return copy

def move_in_time(self, time_point_delta: int):
"""Moves all offsets the given amounts of time points."""
new_offsets = dict()
for old_time_point_number, old_offset in self._offset.items():
new_offsets[old_time_point_number + time_point_delta] =\
old_offset.with_time_point_number(old_time_point_number + time_point_delta)
self._offset = new_offsets


class Image:
"""Represents a single 3D image"""
@@ -408,6 +416,8 @@ def copy(self) -> "Images":
copy._image_loader = self._image_loader.copy()
copy._resolution = self._resolution # No copy, as this object is immutable
copy._offsets = self._offsets.copy()
if self._timings is not None:
copy._timings = self._timings.copy()
copy.filters = self.filters.copy()
return copy

@@ -437,3 +447,9 @@ def close_image_loader(self):
images."""
self._image_loader.close()
self._image_loader = NullImageLoader()

def move_in_time(self, time_point_delta: int):
"""Moves all timings and offset data in time. The images themselves cannot be moved in time."""
if self._timings is not None:
self._timings.move_in_time(time_point_delta)
self._offsets.move_in_time(time_point_delta)
10 changes: 10 additions & 0 deletions organoid_tracker/core/link_data.py
@@ -135,3 +135,13 @@ def find_all_data_of_link(self, position1: Position, position2: Position) -> Ite
def find_all_data_names(self):
"""Finds all data_names"""
return self._link_data.keys()

def move_in_time(self, time_point_delta: int):
"""Moves all data with the given time point delta."""
for data_key in list(self._link_data.keys()):
values_new = dict()
values_old = self._link_data[data_key]
for (position_a, position_b), value in values_old.items():
values_new[(position_a.with_time_point_number(position_a.time_point_number() + time_point_delta),
position_b.with_time_point_number(position_b.time_point_number() + time_point_delta))] = value
self._link_data[data_key] = values_new
11 changes: 11 additions & 0 deletions organoid_tracker/core/links.py
@@ -793,3 +793,14 @@ def iterate_to_future(self, position: Position) -> Iterable[Position]:

yield track.find_position_at_time_point_number(time_point_number)
time_point_number += 1

def move_in_time(self, time_point_delta: int):
"""Moves all data with the given time point delta."""
# We need to update self._tracks and rebuild self._position_to_track
self._position_to_track.clear()
for track in self._tracks:
track._min_time_point_number += time_point_delta
for i, position in enumerate(track._positions_by_time_point):
moved_position = position.with_time_point_number(position.time_point_number() + time_point_delta)
track._positions_by_time_point[i] = moved_position
self._position_to_track[moved_position] = track
16 changes: 16 additions & 0 deletions organoid_tracker/core/position_collection.py
@@ -84,6 +84,14 @@ def count_nearby_z(self, z: int) -> int:
return len(self._positions[z])
return 0

def _move_in_time(self, time_point_offset: int):
"""Must only be called from PositionCollection, otherwise the indexing is wrong."""
for z in list(self._positions.keys()):
old_position_set = self._positions[z]
new_position_set = {position.with_time_point_number(position.time_point_number() + time_point_offset)
for position in old_position_set}
self._positions[z] = new_position_set


class PositionCollection:

@@ -281,3 +289,11 @@ def count_positions(self, *, time_point: Optional[TimePoint], z: Optional[int]):
else:
# All time points, all z
return len(self)

def move_in_time(self, time_point_delta: int):
"""Moves all data with the given time point delta."""
new_positions_dict = dict()
for time_point_number, values_old in self._all_positions.items():
values_old._move_in_time(time_point_delta)
new_positions_dict[time_point_number + time_point_delta] = values_old
self._all_positions = new_positions_dict
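
A minimal sketch of the new PositionCollection.move_in_time (not part of the diff; the single-position add method and the Position import path are assumptions):

from organoid_tracker.core import TimePoint
from organoid_tracker.core.position import Position  # import path assumed
from organoid_tracker.core.position_collection import PositionCollection

positions = PositionCollection()
positions.add(Position(5, 6, 7, time_point_number=0))  # single-position add assumed to exist

positions.move_in_time(3)

print(list(positions.of_time_point(TimePoint(0))))  # [] - nothing left at the old time point
print(list(positions.of_time_point(TimePoint(3))))  # the same position, now at time point 3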
9 changes: 9 additions & 0 deletions organoid_tracker/core/position_data.py
@@ -136,3 +136,12 @@ def get_data_names_and_types(self) -> Dict[str, Type]:
return_dict[key] = object # Don't know the type

return return_dict

def move_in_time(self, time_point_delta: int):
"""Moves all data with the given time point delta."""
for data_key in list(self._position_data.keys()):
values_new = dict()
values_old = self._position_data[data_key]
for position, value in values_old.items():
values_new[position.with_time_point_number(position.time_point_number() + time_point_delta)] = value
self._position_data[data_key] = values_new
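
A similar sketch for PositionData.move_in_time (not part of the diff; set_position_data and get_position_data are assumed accessors, mirroring LinkData's set_link_data/get_link_data shown above):

from organoid_tracker.core.position import Position  # import path assumed
from organoid_tracker.core.position_data import PositionData

position_data = PositionData()
position_data.set_position_data(Position(1, 2, 3, time_point_number=0), "cell_type", "stem")  # assumed accessor

position_data.move_in_time(5)

# The value now lives at the shifted time point; the old key no longer resolves.
print(position_data.get_position_data(Position(1, 2, 3, time_point_number=5), "cell_type"))  # "stem"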
9 changes: 9 additions & 0 deletions organoid_tracker/core/resolution.py
@@ -161,3 +161,12 @@ def get_cumulative_timings_array_m(self) -> ndarray:
Normally, you don't need this method, it's mostly for serialization purposes."""
return self._timings_m.copy()

def copy(self) -> "ImageTimings":
"""Returns a copy of the current object. Changes to the copy won't write through to this object."""
return ImageTimings(self._min_time_point_number, self._timings_m.copy())

def move_in_time(self, time_point_delta: int):
"""Moves the timings the given amount of time points in time. So if delta is 2, then the reported timings of
time point 2 will now be what the reported timings of time point 0 were."""
self._min_time_point_number += time_point_delta
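
A short sketch of what ImageTimings.move_in_time does (not part of the diff; timings are given in minutes since the start, matching get_cumulative_timings_array_m above):

import numpy
from organoid_tracker.core.resolution import ImageTimings

# Timings for time points 0, 1, 2 and 3, in minutes.
timings = ImageTimings(0, numpy.array([0.0, 12.0, 24.0, 36.0]))

timings.move_in_time(2)

# The array itself is untouched; only the minimum time point number shifted from 0 to 2, so the
# timing that used to be reported for time point 0 is now reported for time point 2.
print(timings.get_cumulative_timings_array_m())  # [ 0. 12. 24. 36.]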

12 changes: 12 additions & 0 deletions organoid_tracker/core/spline.py
@@ -524,3 +524,15 @@ def get_marker_names(self) -> Iterable[Tuple[int, str]]:
"""Gets all registered axis markers as (id, name)."""
for axis_id, marker_name in self._spline_markers.items():
yield axis_id, marker_name

def move_in_time(self, time_point_delta: int):
"""Moves all splines the specified amount of time points in time."""
if self._reference_time_point is not None:
self._reference_time_point += time_point_delta

new_splines = dict()
for time_point, splines_of_time_point in self._splines.items():
new_splines[time_point + time_point_delta] = splines_of_time_point
# the Spline object doesn't store time points, so we can just move them to another time point
self._splines = new_splines
self._recalculate_min_max_time_point()
60 changes: 59 additions & 1 deletion organoid_tracker_plugins/plugin_timelapse_append.py
@@ -2,7 +2,7 @@

from numpy import ndarray

from organoid_tracker.core import TimePoint
from organoid_tracker.core import TimePoint, UserError
from organoid_tracker.core.experiment import Experiment
from organoid_tracker.core.image_loader import ImageLoader, ImageChannel
from organoid_tracker.gui.undo_redo import UndoableAction
@@ -11,13 +11,17 @@

def get_menu_items(window: Window) -> Dict[str, Any]:
return {
"Edit//Batch-Prepend image series...": lambda: _prepend_timelapse(window),
"Edit//Batch-Append image series...": lambda: _append_timelapse(window)
}


def _append_timelapse(window: Window):
experiment = window.get_experiment()
temporary_experiment = Experiment()
if experiment.images.image_loader().last_time_point_number() is None:
raise UserError("Cannot append images", "Cannot append images, as the ending time point of the"
" current image series is not defined.")

from organoid_tracker.gui import image_series_loader_dialog
image_series_loader_dialog.prompt_image_series(temporary_experiment)
@@ -26,6 +30,25 @@ def _append_timelapse(window: Window):
temporary_experiment.images.image_loader()))


def _prepend_timelapse(window: Window):
experiment = window.get_experiment()
if experiment.images.image_loader().first_time_point_number() is None:
raise UserError("Cannot prepend images", "Cannot prepend images, as the starting time point of the"
" current image series is not defined.")

temporary_experiment = Experiment()

from organoid_tracker.gui import image_series_loader_dialog
image_series_loader_dialog.prompt_image_series(temporary_experiment)
prepending_loader = temporary_experiment.images.image_loader()
if prepending_loader.last_time_point_number() is None:
raise UserError("Cannot prepend images", "Cannot prepend images, as the ending time point of the"
" selected image series is not defined.")

window.perform_data_action(_TimelapsePrependAction(old_loader=experiment.images.image_loader(),
prepending_loader=temporary_experiment.images.image_loader()))


class _TimelapseAppendAction(UndoableAction):

_old_loader: ImageLoader
@@ -46,6 +69,41 @@ def undo(self, experiment: Experiment) -> str:
return f"Removed the appended images again"


class _TimelapsePrependAction(UndoableAction):

_old_loader: ImageLoader
_prepended_loader: ImageLoader

def __init__(self, *, old_loader: ImageLoader, prepending_loader: ImageLoader):
self._old_loader = old_loader
self._prepended_loader = prepending_loader

def do(self, experiment: Experiment) -> str:
# Say: previously the experiment started at time point 1, and the prepended loader runs from time point 0 to 8
# Then the experiment now needs to start at time point 9, so an offset of 8

# Move all tracking data
new_start = self._prepended_loader.last_time_point_number() + 1
old_start = self._old_loader.first_time_point_number()
offset = new_start - old_start
experiment.move_in_time(offset)

# Inject new image loader
appending_image_loader = _AppendingImageLoader([self._prepended_loader, self._old_loader])
experiment.images.image_loader(appending_image_loader)

return f"Prepended the images. We now have {offset} new image(s) at the start."

def undo(self, experiment: Experiment) -> str:
# Move all tracking data back
new_start = self._prepended_loader.last_time_point_number() + 1
old_start = self._old_loader.first_time_point_number()
offset = new_start - old_start
experiment.move_in_time(-offset)

experiment.images.image_loader(self._old_loader)
return f"Removed the prepended images again"

class _AppendingImageLoader(ImageLoader):
"""Combines to image loaders, showing images after each other."""
_internal: List[ImageLoader]
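
To make the offset arithmetic in _TimelapsePrependAction concrete (hypothetical numbers, same scenario as the comment in do() above):

# The prepended series covers time points 0..8; the old experiment started at time point 1.
prepended_last_time_point = 8
old_first_time_point = 1

offset = (prepended_last_time_point + 1) - old_first_time_point  # 9 - 1 = 8
# do():   experiment.move_in_time(offset)   -> the old data now starts at time point 9
# undo(): experiment.move_in_time(-offset)  -> shifts everything back to the original time points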
11 changes: 11 additions & 0 deletions tests/test_connections.py
@@ -55,3 +55,14 @@ def test_copy(self):
connections_1.remove_connection(pos1, pos2)
self.assertFalse(connections_1.contains_connection(pos1, pos2))
self.assertTrue(connections_2.contains_connection(pos1, pos2))

def test_move_in_time(self):
connections = Connections()
connections.add_connection(Position(2, 3, 4, time_point_number=3), Position(1, 3, 4, time_point_number=3))

connections.move_in_time(10)

self.assertFalse(connections.contains_connection(
Position(2, 3, 4, time_point_number=3), Position(1, 3, 4, time_point_number=3)))
self.assertTrue(connections.contains_connection(
Position(2, 3, 4, time_point_number=13), Position(1, 3, 4, time_point_number=13)))
11 changes: 11 additions & 0 deletions tests/test_link_data.py
@@ -118,3 +118,14 @@ def test_find_data_of_link(self):
self.assertEqual("test1 value", found_link_data["test1"])
self.assertEqual("test2 value", found_link_data["test2"])
self.assertEqual(2, len(found_link_data))

def test_move_in_time(self):
link_data = LinkData()
link_data.set_link_data(Position(0, 1, 2, time_point_number=0), Position(3, 4, 5, time_point_number=1),
"test1", "test1 value")

link_data.move_in_time(10)
self.assertIsNone(link_data.get_link_data(
Position(0, 1, 2, time_point_number=0), Position(3, 4, 5, time_point_number=1), "test1"))
self.assertEqual("test1 value", link_data.get_link_data(
Position(0, 1, 2, time_point_number=10), Position(3, 4, 5, time_point_number=11), "test1"))
10 changes: 10 additions & 0 deletions tests/test_links.py
@@ -23,3 +23,13 @@ def test_pasts(self):

self.assertEqual({past_position}, links.find_pasts(position))
self.assertEqual(set(), links.find_pasts(past_position))

def test_move_in_time(self):
links = Links()
links.add_link(Position(0, 1, 2, time_point_number=0), Position(3, 4, 5, time_point_number=1))

links.move_in_time(10)
self.assertFalse(links.contains_link(
Position(0, 1, 2, time_point_number=0), Position(3, 4, 5, time_point_number=1)))
self.assertTrue(links.contains_link(
Position(0, 1, 2, time_point_number=10), Position(3, 4, 5, time_point_number=11)))